Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 43
1 file changed, 25 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c44a295eee0e..57e7f3672fd7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,15 +652,21 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
         list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
-        if (curr->se.cfs_rq == p->se.cfs_rq)
+        if (se->cfs_rq == pse->cfs_rq)
                 return 1;

         return 0;
 }

+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+        return se->parent;
+}
+
 #else	/* CONFIG_FAIR_GROUP_SCHED */

 #define for_each_sched_entity(se) \
@@ -693,11 +699,17 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
         for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
         return 1;
 }

+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+        return NULL;
+}
+
 #endif	/* CONFIG_FAIR_GROUP_SCHED */

 /*
@@ -787,8 +799,9 @@ static void yield_task_fair(struct rq *rq)
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 {
         struct task_struct *curr = rq->curr;
-        struct cfs_rq *cfs_rq = task_cfs_rq(curr), *pcfs_rq;
+        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
         struct sched_entity *se = &curr->se, *pse = &p->se;
+        s64 delta;

         if (unlikely(rt_prio(p->prio))) {
                 update_rq_clock(rq);
@@ -797,21 +810,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                 return;
         }

-        for_each_sched_entity(se) {
-                cfs_rq = cfs_rq_of(se);
-                pcfs_rq = cfs_rq_of(pse);
+        while (!is_same_group(se, pse)) {
+                se = parent_entity(se);
+                pse = parent_entity(pse);
+        }

-                if (cfs_rq == pcfs_rq) {
-                        s64 delta = se->vruntime - pse->vruntime;
+        delta = se->vruntime - pse->vruntime;

-                        if (delta > (s64)sysctl_sched_wakeup_granularity)
-                                resched_task(curr);
-                        break;
-                }
-#ifdef CONFIG_FAIR_GROUP_SCHED
-                pse = pse->parent;
-#endif
-        }
+        if (delta > (s64)sysctl_sched_wakeup_granularity)
+                resched_task(curr);
 }

 static struct task_struct *pick_next_task_fair(struct rq *rq)
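
Taken on its own, the rewritten check_preempt_wakeup() hunk can be hard to follow without the rest of the file. Below is a minimal user-space sketch of the same idea: walk both the running and the waking scheduling entity up through parent_entity() until is_same_group() holds, then compare their vruntime difference against the wakeup granularity. The mock cfs_rq and sched_entity definitions, the wakeup_preempts() wrapper, the main() driver and the 10 ms granularity value are all invented for illustration; only the while-loop walk and the delta check mirror the patch.

/*
 * Minimal user-space sketch (not kernel code). Mock types and the main()
 * driver are invented; the walk mirrors the patched check_preempt_wakeup()
 * logic under CONFIG_FAIR_GROUP_SCHED.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;

struct cfs_rq {
        int dummy;                      /* stand-in only; the real struct is far richer */
};

struct sched_entity {
        struct cfs_rq *cfs_rq;          /* runqueue this entity is queued on */
        struct sched_entity *parent;    /* enclosing group entity, NULL at the top level */
        s64 vruntime;                   /* virtual runtime, in nanoseconds */
};

static const s64 sysctl_sched_wakeup_granularity = 10000000;    /* 10 ms, illustrative value */

static int is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return se->cfs_rq == pse->cfs_rq;
}

static struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* Should the waking entity 'pse' preempt the currently running entity 'se'? */
static int wakeup_preempts(struct sched_entity *se, struct sched_entity *pse)
{
        s64 delta;

        /*
         * Walk both entities up the group hierarchy until they sit on the
         * same cfs_rq, so their vruntimes are directly comparable (this
         * mirrors the patch and assumes equal nesting depth).
         */
        while (!is_same_group(se, pse)) {
                se = parent_entity(se);
                pse = parent_entity(pse);
        }

        delta = se->vruntime - pse->vruntime;

        return delta > sysctl_sched_wakeup_granularity;
}

int main(void)
{
        struct cfs_rq root_rq = { 0 }, rq_a = { 0 }, rq_b = { 0 };

        /* Two group entities queued on the root runqueue... */
        struct sched_entity group_a = { .cfs_rq = &root_rq, .parent = NULL, .vruntime = 50000000 };
        struct sched_entity group_b = { .cfs_rq = &root_rq, .parent = NULL, .vruntime = 20000000 };

        /* ...and one task entity inside each group. */
        struct sched_entity curr   = { .cfs_rq = &rq_a, .parent = &group_a, .vruntime = 0 };
        struct sched_entity waking = { .cfs_rq = &rq_b, .parent = &group_b, .vruntime = 0 };

        /*
         * curr and waking live on different per-group runqueues, so the walk
         * lifts both to their group entities; group_a leads group_b by 30 ms
         * of vruntime, which exceeds the 10 ms granularity, so we preempt.
         */
        printf("waking task preempts current: %d\n", wakeup_preempts(&curr, &waking));
        return 0;
}

The point of the patch is that the group walk and the vruntime comparison are now decoupled: when CONFIG_FAIR_GROUP_SCHED is off, is_same_group() and parent_entity() collapse to trivial stubs (always 1, always NULL), so the same loop body serves both configurations without the old #ifdef inside the loop.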