Diffstat (limited to 'kernel/sched_fair.c')

 kernel/sched_fair.c | 72 ++++++++++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 42 insertions(+), 30 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c768588e180b..bc8ee9993814 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -135,14 +135,6 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 	return grp->my_q;
 }
 
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-	return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (!cfs_rq->on_list) {
@@ -271,11 +263,6 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 	return NULL;
 }
 
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
-	return &cpu_rq(this_cpu)->cfs;
-}
-
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 }
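
Both variants of cpu_cfs_rq() (with and without group scheduling) are deleted here; being static inlines, their removal implies that, apparently, no caller in this file remains after the load_balance_fair() rewrite at the end of this diff.
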
@@ -334,11 +321,6 @@ static inline int entity_before(struct sched_entity *a,
 	return (s64)(a->vruntime - b->vruntime) < 0;
 }
 
-static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	return se->vruntime - cfs_rq->min_vruntime;
-}
-
 static void update_min_vruntime(struct cfs_rq *cfs_rq)
 {
 	u64 vruntime = cfs_rq->min_vruntime;
@@ -372,7 +354,6 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 	struct rb_node *parent = NULL;
 	struct sched_entity *entry;
-	s64 key = entity_key(cfs_rq, se);
 	int leftmost = 1;
 
 	/*
@@ -385,7 +366,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		 * We dont care about collisions. Nodes with
 		 * the same key stay together.
 		 */
-		if (key < entity_key(cfs_rq, entry)) {
+		if (entity_before(se, entry)) {
 			link = &parent->rb_left;
 		} else {
 			link = &parent->rb_right;
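
Dropping entity_key() in favor of entity_before() is sound because subtracting the same cfs_rq->min_vruntime from both sides of a comparison cannot change its outcome, and the signed subtraction in entity_before() stays correct even when the unsigned vruntime counters wrap. A minimal standalone sketch of that comparison idiom (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a runs before b" check, mirroring entity_before(). */
static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_max = UINT64_MAX - 5;

	/* Still correct once the counter wraps past UINT64_MAX: */
	printf("%d\n", before(near_max, near_max + 10)); /* prints 1 */
	printf("%d\n", before(near_max + 10, near_max)); /* prints 0 */
	return 0;
}
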
@@ -1336,7 +1317,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+		cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
@@ -1370,13 +1351,16 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			 */
 			if (task_sleep && parent_entity(se))
 				set_next_buddy(parent_entity(se));
+
+			/* avoid re-evaluating load for this entity */
+			se = parent_entity(se);
 			break;
 		}
 		flags |= DEQUEUE_SLEEP;
 	}
 
 	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+		cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
@@ -1481,7 +1465,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
-	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1517,7 +1500,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
-	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
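
wake_affine() sheds its own rcu_read_lock()/rcu_read_unlock() pair here; its only caller, select_task_rq_fair(), already holds the RCU read lock across this path, so the nested critical section was redundant.
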
@@ -1921,8 +1903,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	update_curr(cfs_rq);
 	find_matching_se(&se, &pse);
+	update_curr(cfs_rq_of(se));
 	BUG_ON(!pse);
 	if (wakeup_preempt_entity(se, pse) == 1) {
 		/*
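
The reorder matters: find_matching_se() first walks se and pse up the task-group hierarchy until both sit on the same cfs_rq, and it is that queue's clock which must be brought up to date before wakeup_preempt_entity() compares vruntimes, hence update_curr(cfs_rq_of(se)) after the match rather than update_curr(cfs_rq) before it. A standalone sketch of the depth-matching idea (toy types, not the kernel's implementation):

#include <assert.h>
#include <stddef.h>

/* Toy stand-in for a sched_entity in a nested group hierarchy. */
struct ent {
	struct ent *parent;
	int depth;
};

/* Walk both entities up until they share a parent, i.e. are peers
 * on one (implicit) queue -- the shape of find_matching_se(). */
static void match(struct ent **a, struct ent **b)
{
	while ((*a)->depth > (*b)->depth)
		*a = (*a)->parent;
	while ((*b)->depth > (*a)->depth)
		*b = (*b)->parent;
	while ((*a)->parent != (*b)->parent) {
		*a = (*a)->parent;
		*b = (*b)->parent;
	}
}

int main(void)
{
	struct ent root  = { NULL,  0 };
	struct ent group = { &root, 1 };	/* a nested task group */
	struct ent a = { &group, 2 }, b = { &root, 1 };
	struct ent *pa = &a, *pb = &b;

	match(&pa, &pb);
	/* a's group entity and b are now comparable peers. */
	assert(pa == &group && pb == &b);
	return 0;
}
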
@@ -2231,11 +2213,43 @@ static void update_shares(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 	rcu_read_lock();
+	/*
+	 * Iterates the task_group tree in a bottom up fashion, see
+	 * list_add_leaf_cfs_rq() for details.
+	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq)
 		update_shares_cpu(cfs_rq->tg, cpu);
 	rcu_read_unlock();
 }
 
+/*
+ * Compute the cpu's hierarchical load factor for each task group.
+ * This needs to be done in a top-down fashion because the load of a child
+ * group is a fraction of its parents load.
+ */
+static int tg_load_down(struct task_group *tg, void *data)
+{
+	unsigned long load;
+	long cpu = (long)data;
+
+	if (!tg->parent) {
+		load = cpu_rq(cpu)->load.weight;
+	} else {
+		load = tg->parent->cfs_rq[cpu]->h_load;
+		load *= tg->se[cpu]->load.weight;
+		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+	}
+
+	tg->cfs_rq[cpu]->h_load = load;
+
+	return 0;
+}
+
+static void update_h_load(long cpu)
+{
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+}
+
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  unsigned long max_load_move,
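
The h_load computed by tg_load_down() is just a chain of weight ratios: the root level contributes the runqueue's full load.weight, and each child level scales the parent's h_load by that group's share of the parent queue (the "+ 1" guards against dividing by zero on an empty queue). A minimal standalone model with illustrative numbers (not kernel code):

#include <stdio.h>

/* One top-down propagation step, as in tg_load_down(). */
static unsigned long child_h_load(unsigned long parent_h_load,
				  unsigned long se_weight,
				  unsigned long parent_queue_weight)
{
	return parent_h_load * se_weight / (parent_queue_weight + 1);
}

int main(void)
{
	/* Root runqueue carrying a total weight of 3072. */
	unsigned long root_h_load = 3072;

	/* A group entity of weight 1024 on that queue inherits roughly
	 * a third of the hierarchical load. */
	printf("%lu\n", child_h_load(root_h_load, 1024, 3072)); /* 1023 */
	return 0;
}
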
@@ -2243,14 +2257,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  int *all_pinned)
 {
 	long rem_load_move = max_load_move;
-	int busiest_cpu = cpu_of(busiest);
-	struct task_group *tg;
+	struct cfs_rq *busiest_cfs_rq;
 
 	rcu_read_lock();
-	update_h_load(busiest_cpu);
+	update_h_load(cpu_of(busiest));
 
-	list_for_each_entry_rcu(tg, &task_groups, list) {
-		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
 		u64 rem_load, moved_load;
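
Switching from the global task_groups list to for_each_leaf_cfs_rq(busiest, ...) means load_balance_fair() now walks only the cfs_rqs that actually have (or recently had) entities queued on the busiest CPU, via the leaf list maintained by list_add_leaf_cfs_rq() above, instead of every task group in the system; groups with nothing queued there have no load to pull anyway.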