diff options
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bd20fad3deff..bcf5fc59e8e9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -333,7 +333,7 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 	 * since the last time we changed load (this cannot
 	 * overflow on 32 bits):
 	 */
-	delta_exec = (unsigned long)(now - curr->exec_start);
+	delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
 
 	curr->delta_exec += delta_exec;
 
@@ -341,14 +341,14 @@ static void update_curr(struct cfs_rq *cfs_rq, u64 now)
 		__update_curr(cfs_rq, curr, now);
 		curr->delta_exec = 0;
 	}
-	curr->exec_start = now;
+	curr->exec_start = rq_of(cfs_rq)->clock;
 }
 
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	se->wait_start_fair = cfs_rq->fair_clock;
-	schedstat_set(se->wait_start, now);
+	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 }
 
 /*
@@ -421,7 +421,8 @@ __update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 {
 	unsigned long delta_fair = se->delta_fair_run;
 
-	schedstat_set(se->wait_max, max(se->wait_max, now - se->wait_start));
+	schedstat_set(se->wait_max, max(se->wait_max,
+			rq_of(cfs_rq)->clock - se->wait_start));
 
 	if (unlikely(se->load.weight != NICE_0_LOAD))
 		delta_fair = calc_weighted(delta_fair, se->load.weight,
@@ -470,7 +471,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 	/*
 	 * We are starting a new run period:
 	 */
-	se->exec_start = now;
+	se->exec_start = rq_of(cfs_rq)->clock;
 }
 
 /*
@@ -545,7 +546,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 
 #ifdef CONFIG_SCHEDSTATS
 	if (se->sleep_start) {
-		u64 delta = now - se->sleep_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -557,7 +558,7 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 now)
 		se->sum_sleep_runtime += delta;
 	}
 	if (se->block_start) {
-		u64 delta = now - se->block_start;
+		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -599,9 +600,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			se->sleep_start = now;
+			se->sleep_start = rq_of(cfs_rq)->clock;
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			se->block_start = now;
+			se->block_start = rq_of(cfs_rq)->clock;
 	}
 	cfs_rq->wait_runtime -= se->wait_runtime;
 #endif