 kernel/sched.c      |  8 ++++----
 kernel/sched_fair.c | 19 ++++++++++---------
 kernel/sched_rt.c   |  6 +++---
 3 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 65eb484dc268..49a5fb0cdea0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -788,8 +788,8 @@ static void update_curr_load(struct rq *rq, u64 now)
 	u64 start;
 
 	start = ls->load_update_start;
-	ls->load_update_start = now;
-	ls->delta_stat += now - start;
+	ls->load_update_start = rq->clock;
+	ls->delta_stat += rq->clock - start;
 	/*
 	 * Stagger updates to ls->delta_fair. Very frequent updates
 	 * can be expensive.
@@ -1979,8 +1979,8 @@ static void update_cpu_load(struct rq *this_rq)
 	exec_delta64 = ls->delta_exec + 1;
 	ls->delta_exec = 0;
 
-	sample_interval64 = now - ls->load_update_last;
-	ls->load_update_last = now;
+	sample_interval64 = this_rq->clock - ls->load_update_last;
+	ls->load_update_last = this_rq->clock;
 
 	if ((s64)sample_interval64 < (s64)TICK_NSEC)
 		sample_interval64 = TICK_NSEC;
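Both sched.c hunks apply the same pattern: instead of trusting a timestamp handed in by the caller, the load-tracking code samples the runqueue's own ->clock field. Below is a minimal sketch of that before/after shape, with a simplified, hypothetical struct layout (the real struct rq carries this state in a larger load_stat structure, and the now-unused u64 now parameter is still present in these signatures at this point in the series):

typedef unsigned long long u64;

/* Sketch only: hypothetical, cut-down field layout. */
struct load_stat {
	u64 load_update_start;	/* when the current sample began */
	u64 delta_stat;		/* accumulated sample time, in ns */
};

struct rq {
	u64 clock;		/* per-runqueue nanosecond clock */
	struct load_stat ls;
};

/* Before: arithmetic used the caller-supplied 'now'. */
static void update_curr_load_before(struct rq *rq, u64 now)
{
	struct load_stat *ls = &rq->ls;
	u64 start = ls->load_update_start;

	ls->load_update_start = now;
	ls->delta_stat += now - start;
}

/* After: the function samples rq->clock itself, so every statistic
 * on one runqueue advances from a single, consistent timebase. */
static void update_curr_load_after(struct rq *rq)
{
	struct load_stat *ls = &rq->ls;
	u64 start = ls->load_update_start;

	ls->load_update_start = rq->clock;
	ls->delta_stat += rq->clock - start;
}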
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bd20fad3deff..bcf5fc59e8e9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -333,7 +333,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(now - curr->exec_start);
+	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
 
 	curr->delta_exec += delta_exec;
 
@@ -341,14 +341,14 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 		__update_curr(cfs_rq, curr, now);
 		curr->delta_exec = 0;
 	}
-	curr->exec_start = now;
+	curr->exec_start = rq_of(cfs_rq)->clock;
 }
 
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	se->wait_start_fair = cfs_rq->fair_clock;
-	schedstat_set(se->wait_start, now);
+	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -421,7 +421,8 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	unsigned long delta_fair = se->delta_fair_run;
 
-	schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
+	schedstat_set(se->wait_max, max(se->wait_max,
+			rq_of(cfs_rq)->clock - se->wait_start));
 
 	if (unlikely(se->load.weight != NICE_0_LOAD))
 		delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -470,7 +471,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 	/*
 	 * We are starting a new run period:
 	 */
-	se->exec_start = now;
+	se->exec_start = rq_of(cfs_rq)->clock;
 }
 
 /*
@@ -545,7 +546,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 
 #ifdef CONFIG_SCHEDSTATS
 	if (se->sleep_start) {
-		u64 delta = now - se->sleep_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -557,7 +558,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 		se->sum_sleep_runtime += delta;
 	}
 	if (se->block_start) {
-		u64 delta = now - se->block_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -599,9 +600,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			se->sleep_start = now;
+			se->sleep_start = rq_of(cfs_rq)->clock;
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			se->block_start = now;
+			se->block_start = rq_of(cfs_rq)->clock;
 	}
 	cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
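The fair-class hunks cannot dereference a struct rq directly: update_curr() and its helpers are handed a struct cfs_rq, so each site reaches the owning runqueue through rq_of() before reading ->clock. A sketch of that indirection follows, assuming the simple non-group-scheduling layout where the cfs_rq keeps a back-pointer to its runqueue; cfs_rq_clock() is a hypothetical convenience wrapper, not something this patch adds:

typedef unsigned long long u64;

struct rq;				/* forward declaration */

struct cfs_rq {
	struct rq *rq;			/* owning runqueue (back-pointer) */
	u64 fair_clock;
};

struct rq {
	u64 clock;			/* per-runqueue nanosecond clock */
};

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* Hypothetical wrapper naming the access pattern the hunks repeat
 * inline: fair-class code holding only a cfs_rq can still read the
 * shared per-runqueue clock. */
static inline u64 cfs_rq_clock(struct cfs_rq *cfs_rq)
{
	return rq_of(cfs_rq)->clock;
}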
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5b559e8c8aa6..5fbd87ad0f56 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,14 +15,14 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
 	if (!task_has_rt_policy(curr))
 		return;
 
-	delta_exec = now - curr->se.exec_start;
+	delta_exec = rq->clock - curr->se.exec_start;
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
 	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
-	curr->se.exec_start = now;
+	curr->se.exec_start = rq->clock;
 }
 
 static void
@@ -89,7 +89,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
 	queue = array->queue + idx;
 	next = list_entry(queue->next, struct task_struct, run_list);
 
-	next->se.exec_start = now;
+	next->se.exec_start = rq->clock;
 
 	return next;
 }
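Note the guard that update_curr_rt() (and the sleeper hunks in sched_fair.c) keeps around the subtraction: rq->clock - exec_start is computed in unsigned 64-bit arithmetic, so a start stamp that is ahead of the clock (for example, one written against another CPU's clock) would wrap to an enormous positive value rather than go negative. Casting the result to s64 and clamping catches that case. A standalone sketch of the idiom, using stdint types in place of the kernel's u64/s64:

#include <stdint.h>

/* clock and start are both nanosecond stamps. If start is ever
 * ahead of clock, the unsigned subtraction wraps, and the wrapped
 * value reinterpreted as signed comes out negative. */
static uint64_t safe_delta(uint64_t clock, uint64_t start)
{
	uint64_t delta = clock - start;

	if ((int64_t)delta < 0)		/* wrapped: clamp to zero */
		delta = 0;
	return delta;
}

So safe_delta(100, 105) returns 0 instead of the wrapped value 2^64 - 5, which is exactly what the clamp in the hunks above achieves.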
