Diffstat (limited to 'kernel/sched_fair.c')

 kernel/sched_fair.c | 48 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 31 insertions(+), 17 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5f9650e8fe75..652e8bdef9aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
         return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+                                struct sched_entity *b)
+{
+        return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         return se->vruntime - cfs_rq->min_vruntime;
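
Note: entity_before() orders two entities by casting the unsigned difference of
their vruntimes to s64 rather than comparing them directly. vruntime is a
monotonically increasing u64 that can eventually wrap, and a direct a < b
comparison gives the wrong answer across the wrap point, while the signed
difference stays correct as long as the two values are within 2^63 of each
other. A standalone sketch of the property (plain C, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t a = UINT64_MAX - 5;    /* just before the wrap */
            uint64_t b = a + 10;            /* wraps around to 4 */

            /* Direct comparison claims a is not before b: wrong order. */
            assert(!(a < b));
            /* Signed difference still ranks a before b: correct. */
            assert((int64_t)(a - b) < 0);
            return 0;
    }
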
@@ -430,12 +436,13 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
         for_each_sched_entity(se) {
                 struct load_weight *load;
+                struct load_weight lw;
 
                 cfs_rq = cfs_rq_of(se);
                 load = &cfs_rq->load;
 
                 if (unlikely(!se->on_rq)) {
-                        struct load_weight lw = cfs_rq->load;
+                        lw = cfs_rq->load;
 
                         update_load_add(&lw, se->load.weight);
                         load = &lw;
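
Note: this hunk fixes an out-of-scope access. Previously lw was declared
inside the if block, and load = &lw let the pointer escape that block, so the
later dereference of load in sched_slice() read a dead object: undefined
behavior, even when it happens to work. Hoisting the declaration keeps lw
alive at the use site. A minimal sketch of the bug class and the fix, with
nothing kernel-specific assumed:

    #include <stdio.h>

    static int use_after_scope_fixed(int cond)
    {
            int hoisted = 1;        /* lives as long as its users, like lw */
            int *p = &hoisted;

            if (cond) {
                    /* Buggy original shape:
                     *     { int tmp = 42; p = &tmp; }
                     * tmp dies at the closing brace, so a later *p is
                     * undefined behavior even if it appears to work. */
                    hoisted = 42;
                    p = &hoisted;
            }
            return *p;              /* well-defined: hoisted is in scope */
    }

    int main(void)
    {
            printf("%d\n", use_after_scope_fixed(1));
            return 0;
    }
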
@@ -604,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+        struct task_struct *tsk = NULL;
+
+        if (entity_is_task(se))
+                tsk = task_of(se);
+
         if (se->sleep_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
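
Note: tsk now starts out NULL and is only filled in when the entity really is
a task. With CONFIG_FAIR_GROUP_SCHED, enqueue_sleeper() can run for group
entities, and calling task_of() on one of those fabricates a bogus
task_struct pointer via container_of(). Roughly, the helpers involved look
like this (shapes from this era's sched_fair.c, slightly abridged):

    #ifdef CONFIG_FAIR_GROUP_SCHED
    /* A group entity owns a runqueue of its own; a task entity does not. */
    #define entity_is_task(se)      (!(se)->my_q)
    #else
    #define entity_is_task(se)      1
    #endif

    static inline struct task_struct *task_of(struct sched_entity *se)
    {
            /* Only meaningful when se is embedded in a task_struct. */
            return container_of(se, struct task_struct, se);
    }
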
@@ -617,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->sleep_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                account_scheduler_latency(tsk, delta >> 10, 1);
+                if (tsk)
+                        account_scheduler_latency(tsk, delta >> 10, 1);
         }
         if (se->block_start) {
                 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-                struct task_struct *tsk = task_of(se);
 
                 if ((s64)delta < 0)
                         delta = 0;
@@ -632,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 se->block_start = 0;
                 se->sum_sleep_runtime += delta;
 
-                /*
-                 * Blocking time is in units of nanosecs, so shift by 20 to
-                 * get a milliseconds-range estimation of the amount of
-                 * time that the task spent sleeping:
-                 */
-                if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-                                     delta >> 20);
+                if (tsk) {
+                        /*
+                         * Blocking time is in units of nanosecs, so shift by
+                         * 20 to get a milliseconds-range estimation of the
+                         * amount of time that the task spent sleeping:
+                         */
+                        if (unlikely(prof_on == SLEEP_PROFILING)) {
+                                profile_hits(SLEEP_PROFILING,
+                                                (void *)get_wchan(tsk),
+                                                delta >> 20);
+                        }
+                        account_scheduler_latency(tsk, delta >> 10, 0);
                 }
-                account_scheduler_latency(tsk, delta >> 10, 0);
         }
 #endif
 }
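
Note: with tsk possibly NULL, the task-only side effects (latency accounting
and the wchan lookup for sleep profiling) are gated, while per-entity
statistics such as sum_sleep_runtime still accrue unconditionally. The
resulting shape, condensed into a hypothetical helper (record_block() and its
signature are illustrative, not from the patch; the calls it makes are):

    /* Hypothetical condensation of the block_start branch above. */
    static void record_block(struct cfs_rq *cfs_rq, struct sched_entity *se,
                             u64 delta)
    {
            struct task_struct *tsk =
                    entity_is_task(se) ? task_of(se) : NULL;

            se->sum_sleep_runtime += delta; /* valid for any entity */

            if (!tsk)
                    return;                 /* group entity: no task work */

            if (unlikely(prof_on == SLEEP_PROFILING))
                    profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                 delta >> 20);
            account_scheduler_latency(tsk, delta >> 10, 0);
    }
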
@@ -686,7 +699,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
          * all of which have the same weight.
          */
         if (sched_feat(NORMALIZED_SLEEPER) &&
-                        task_of(se)->policy != SCHED_IDLE)
+                        (!entity_is_task(se) ||
+                         task_of(se)->policy != SCHED_IDLE))
                 thresh = calc_delta_fair(thresh, se);
 
         vruntime -= thresh;
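
Note: the NORMALIZED_SLEEPER condition previously called task_of()
unconditionally, which is invalid for group entities. The rewritten predicate
short-circuits so that group entities always take the normalized path and
only SCHED_IDLE tasks are excluded. Pulled out as a hypothetical helper for
readability (not in the patch):

    /* Hypothetical helper expressing the fixed condition. */
    static int normalize_sleeper_thresh(struct sched_entity *se)
    {
            if (!entity_is_task(se))
                    return 1;       /* group entity: no policy to inspect */
            return task_of(se)->policy != SCHED_IDLE;
    }
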
@@ -1015,7 +1029,7 @@ static void yield_task_fair(struct rq *rq)
         /*
          * Already in the rightmost position?
          */
-        if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+        if (unlikely(!rightmost || entity_before(rightmost, se)))
                 return;
 
         /*
@@ -1711,7 +1725,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
         /* 'curr' will be NULL if the child belongs to a different group */
         if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-                        curr && curr->vruntime < se->vruntime) {
+                        curr && entity_before(curr, se)) {
                 /*
                  * Upon rescheduling, sched_class::put_prev_task() will place
                  * 'current' within the tree based on its new key value.
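
Note: the last two hunks convert the remaining open-coded vruntime
comparisons to entity_before(), making them wrap-safe and self-documenting.
The substitution is mechanical; shown here on the task_new_fair() condition:

    /* Before: wrong answer once vruntime wraps past 2^64 */
    if (curr && curr->vruntime < se->vruntime) {
            /* ... child-runs-first handling ... */
    }

    /* After: wrap-safe, and reads as intent */
    if (curr && entity_before(curr, se)) {
            /* ... child-runs-first handling ... */
    }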