author     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>  2007-10-15 11:00:12 -0400
committer  Ingo Molnar <mingo@elte.hu>                    2007-10-15 11:00:12 -0400
commit     fad095a7b963d9e914e0cdb73e27355c47709441 (patch)
tree       afb9056ce0416a48d3f62b1f1e85f3d5627de913 /kernel/sched_fair.c
parent     fb615581c78efee25e4d04f1145e8fa8ec705dc3 (diff)
sched: group scheduler, fix bloat
A recent fix to check_preempt_wakeup(), making it check for preemption at
higher levels of the group hierarchy, caused size bloat for
!CONFIG_FAIR_GROUP_SCHED builds.

Fix the problem by replacing the open-coded, #ifdef-laden hierarchy walk
with is_same_group()/parent_entity() helpers whose !CONFIG_FAIR_GROUP_SCHED
stubs compile away.
  text    data   bss   dec     hex   filename
  42277   10598  320   53195   cfcb  kernel/sched.o-before_this_patch
  42216   10598  320   53134   cf8e  kernel/sched.o-after_this_patch
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
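The mechanism of the fix, visible in the diff below: the open-coded #ifdef
walk in check_preempt_wakeup() is replaced by is_same_group()/parent_entity()
helpers, and the !CONFIG_FAIR_GROUP_SCHED variants of those helpers are
constant stubs, so the compiler deletes the walk loop entirely at the call
site. The following standalone C sketch illustrates the pattern under assumed
names (FAIR_GROUP_SCHED_DEMO, should_preempt(), and an is_same_group()
simplified to compare parent pointers rather than cfs_rq pointers are
inventions for this example, not the kernel's code):

#include <stddef.h>

struct sched_entity {
	unsigned long vruntime;
	struct sched_entity *parent;	/* group entity one level up */
};

#ifdef FAIR_GROUP_SCHED_DEMO
/* Grouping enabled: compare positions in the hierarchy and walk upward. */
static inline int is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return se->parent == pse->parent;	/* simplified; the kernel compares cfs_rq */
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}
#else
/* Grouping disabled: constant stubs; the walk loop below becomes dead code. */
static inline int is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}
#endif

/* Same shape as the reworked check_preempt_wakeup(): no #ifdef at the
 * call site; with the stub helpers the while loop compiles to nothing. */
static int should_preempt(struct sched_entity *se, struct sched_entity *pse,
			  long granularity)
{
	while (!is_same_group(se, pse)) {
		se = parent_entity(se);
		pse = parent_entity(pse);
	}
	return (long)(se->vruntime - pse->vruntime) > granularity;
}

Compiled without -DFAIR_GROUP_SCHED_DEMO, the loop condition is trivially
false and should_preempt() reduces to the single vruntime comparison, which
is where the 61 bytes of .text savings in the size output above come from.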
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 43
1 file changed, 25 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c44a295eee0e..57e7f3672fd7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,15 +652,21 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
-	if (curr->se.cfs_rq == p->se.cfs_rq)
+	if (se->cfs_rq == pse->cfs_rq)
 		return 1;
 
 	return 0;
 }
 
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return se->parent;
+}
+
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -693,11 +699,17 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
 	return 1;
 }
 
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 /*
@@ -787,8 +799,9 @@ static void yield_task_fair(struct rq *rq)
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 {
 	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr), *pcfs_rq;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
+	s64 delta;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -797,21 +810,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		pcfs_rq = cfs_rq_of(pse);
-
-		if (cfs_rq == pcfs_rq) {
-			s64 delta = se->vruntime - pse->vruntime;
-
-			if (delta > (s64)sysctl_sched_wakeup_granularity)
-				resched_task(curr);
-			break;
-		}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-		pse = pse->parent;
-#endif
+	while (!is_same_group(se, pse)) {
+		se = parent_entity(se);
+		pse = parent_entity(pse);
 	}
+
+	delta = se->vruntime - pse->vruntime;
+
+	if (delta > (s64)sysctl_sched_wakeup_granularity)
+		resched_task(curr);
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
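For a concrete picture of the reworked walk, here is a hedged toy program
(struct entity, same_group(), and group_id are stand-ins invented for this
example, not kernel structures): two tasks queued in different child groups
are walked up to the root level, where their vruntimes become comparable,
mirroring the new while loop in check_preempt_wakeup():

#include <stdio.h>
#include <stddef.h>

struct entity {
	long vruntime;
	int group_id;		/* stand-in for the cfs_rq the entity is queued on */
	struct entity *parent;	/* group entity one level up, NULL at the top */
};

static int same_group(struct entity *a, struct entity *b)
{
	return a->group_id == b->group_id;
}

int main(void)
{
	/* Two group entities (A and B) queued on the root run-queue (group 0). */
	struct entity group_a = { .vruntime = 100, .group_id = 0, .parent = NULL };
	struct entity group_b = { .vruntime = 250, .group_id = 0, .parent = NULL };

	/* The running task inside group A and a waking task inside group B. */
	struct entity curr  = { .vruntime = 40, .group_id = 1, .parent = &group_a };
	struct entity woken = { .vruntime = 10, .group_id = 2, .parent = &group_b };

	struct entity *se = &curr, *pse = &woken;

	/* Walk both sides up until they are queued at the same level. */
	while (!same_group(se, pse)) {
		se = se->parent;
		pse = pse->parent;
	}

	/* Only at the common level are the vruntimes comparable. */
	long delta = se->vruntime - pse->vruntime;
	printf("comparable at group %d, delta = %ld\n", se->group_id, delta);
	return 0;
}

This prints "comparable at group 0, delta = -150": the waking task's group
has the larger vruntime at the common level, so with any non-negative wakeup
granularity the running task would not be preempted.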