Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	26	++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a543a5b202a4..b320753aa6c9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -146,8 +146,20 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (!cfs_rq->on_list) {
-		list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
-			&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		/*
+		 * Ensure we either appear before our parent (if already
+		 * enqueued) or force our parent to appear after us when it is
+		 * enqueued. The fact that we always enqueue bottom-up
+		 * reduces this to two cases.
+		 */
+		if (cfs_rq->tg->parent &&
+		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
+			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		} else {
+			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+		}
 
 		cfs_rq->on_list = 1;
 	}
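
The new list_add_leaf_cfs_rq() picks between a head insertion and a tail insertion so that, on each CPU's leaf_cfs_rq_list, a child cfs_rq is traversed before its parent. A minimal userspace sketch of the two cases the in-diff comment describes follows; the list helpers mirror the semantics of list_add_rcu()/list_add_tail_rcu(), and the node names are made up for the demonstration, not kernel code:

#include <stdio.h>

struct node { struct node *prev, *next; const char *name; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

/* insert at the front, like list_add_rcu() */
static void list_add(struct node *new, struct node *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* insert at the back, like list_add_tail_rcu() */
static void list_add_tail(struct node *new, struct node *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void print_list(const char *what, struct node *head)
{
	printf("%s:", what);
	for (struct node *n = head->next; n != head; n = n->next)
		printf(" %s", n->name);
	printf("\n");
}

int main(void)
{
	struct node head;
	struct node parent = { .name = "parent" };
	struct node child  = { .name = "child" };

	/* Case 1: the parent is already enqueued, so the child goes to
	 * the front and is traversed before it. */
	list_init(&head);
	list_add_tail(&parent, &head);
	list_add(&child, &head);
	print_list("parent already on list", &head);	/* child parent */

	/* Case 2: neither is enqueued. The bottom-up walk enqueues the
	 * child first, at the tail; the parent lands behind it when its
	 * own turn comes, so the child is still traversed first. */
	list_init(&head);
	list_add_tail(&child, &head);
	list_add_tail(&parent, &head);
	print_list("parent enqueued after child", &head);	/* child parent */

	return 0;
}

The remaining hunks rename the per-cpu helper and simplify the update loop to rely on that ordering: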
@@ -2016,7 +2028,7 @@ out:
 /*
  * update tg->load_weight by folding this cpu's load_avg
  */
-static int tg_shares_up(struct task_group *tg, int cpu)
+static int update_shares_cpu(struct task_group *tg, int cpu)
 {
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
@@ -2056,14 +2068,8 @@ static void update_shares(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 	rcu_read_lock();
-	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		struct task_group *tg = cfs_rq->tg;
-
-		do {
-			tg_shares_up(tg, cpu);
-			tg = tg->parent;
-		} while (tg);
-	}
+	for_each_leaf_cfs_rq(rq, cfs_rq)
+		update_shares_cpu(cfs_rq->tg, cpu);
 	rcu_read_unlock();
 }
 
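With children guaranteed to precede their parents on the list, update_shares() no longer needs the inner do/while walk up tg->parent for every cfs_rq: a single pass over the list visits each group once, and by the time a parent is reached its children's contributions have already been folded. A rough userspace sketch of that idea, using a made-up grp type rather than the kernel's task_group/load_avg machinery:

#include <stdio.h>

/* Hypothetical stand-in for a scheduling group; 'load' is the group's
 * own contribution and 'folded' accumulates the subtree total. */
struct grp {
	const char *name;
	struct grp *parent;
	long load;
	long folded;
};

int main(void)
{
	struct grp root = { "root", NULL,  0, 0 };
	struct grp mid  = { "mid",  &root, 10, 0 };
	struct grp leaf = { "leaf", &mid,   5, 0 };

	/* Child-before-parent order, as the leaf list now guarantees. */
	struct grp *walk[] = { &leaf, &mid, &root };

	for (int i = 0; i < 3; i++) {
		struct grp *g = walk[i];

		g->folded += g->load;		/* fold own load */
		if (g->parent)			/* pass the subtree total up one level */
			g->parent->folded += g->folded;
		printf("%s: %ld\n", g->name, g->folded);
	}
	/* Prints leaf: 5, mid: 15, root: 15 -- one pass, with no
	 * per-group walk to the root, because the ordering did the work. */
	return 0;
}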