author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-11-15 11:14:39 -0500
committer Ingo Molnar <mingo@elte.hu>                2011-11-17 06:20:22 -0500
commit    391e43da797a96aeb65410281891f6d0b0e9611c (patch)
tree      0ce6784525a5a8f75b377170cf1a7d60abccea29 /kernel/sched_fair.c
parent    029632fbb7b7c9d85063cc9eb470de6c54873df3 (diff)
sched: Move all scheduler bits into kernel/sched/
There are too many sched*.[ch] files in kernel/, give them their own
directory.
(No code changed, other than Makefile glue added.)
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--    kernel/sched_fair.c    5601
1 file changed, 0 insertions, 5601 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
deleted file mode 100644
index cd3b64219d9f..000000000000
--- a/kernel/sched_fair.c
+++ /dev/null
@@ -1,5601 +0,0 @@
1 | /* | ||
2 | * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH) | ||
3 | * | ||
4 | * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
5 | * | ||
6 | * Interactivity improvements by Mike Galbraith | ||
7 | * (C) 2007 Mike Galbraith <efault@gmx.de> | ||
8 | * | ||
9 | * Various enhancements by Dmitry Adamushko. | ||
10 | * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com> | ||
11 | * | ||
12 | * Group scheduling enhancements by Srivatsa Vaddagiri | ||
13 | * Copyright IBM Corporation, 2007 | ||
14 | * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> | ||
15 | * | ||
16 | * Scaled math optimizations by Thomas Gleixner | ||
17 | * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> | ||
18 | * | ||
19 | * Adaptive scheduling granularity, math enhancements by Peter Zijlstra | ||
20 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
21 | */ | ||
22 | |||
23 | #include <linux/latencytop.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/cpumask.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/profile.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | |||
30 | #include <trace/events/sched.h> | ||
31 | |||
32 | #include "sched.h" | ||
33 | |||
34 | /* | ||
35 | * Targeted preemption latency for CPU-bound tasks: | ||
36 | * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) | ||
37 | * | ||
38 | * NOTE: this latency value is not the same as the concept of | ||
39 | * 'timeslice length' - timeslices in CFS are of variable length | ||
40 | * and have no persistent notion like in traditional, time-slice | ||
41 | * based scheduling concepts. | ||
42 | * | ||
43 | * (to see the precise effective timeslice length of your workload, | ||
44 | * run vmstat and monitor the context-switches (cs) field) | ||
45 | */ | ||
46 | unsigned int sysctl_sched_latency = 6000000ULL; | ||
47 | unsigned int normalized_sysctl_sched_latency = 6000000ULL; | ||
48 | |||
49 | /* | ||
50 | * The initial- and re-scaling of tunables is configurable | ||
51 | * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))) | ||
52 | * | ||
53 | * Options are: | ||
54 | * SCHED_TUNABLESCALING_NONE - unscaled, always *1 | ||
55 | * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus) | ||
56 | * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus | ||
57 | */ | ||
58 | enum sched_tunable_scaling sysctl_sched_tunable_scaling | ||
59 | = SCHED_TUNABLESCALING_LOG; | ||
60 | |||
61 | /* | ||
62 | * Minimal preemption granularity for CPU-bound tasks: | ||
63 | * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) | ||
64 | */ | ||
65 | unsigned int sysctl_sched_min_granularity = 750000ULL; | ||
66 | unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; | ||
67 | |||
68 | /* | ||
69 | * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity | ||
70 | */ | ||
71 | static unsigned int sched_nr_latency = 8; | ||
72 | |||
73 | /* | ||
74 | * After fork, child runs first. If set to 0 (default) then | ||
75 | * parent will (try to) run first. | ||
76 | */ | ||
77 | unsigned int sysctl_sched_child_runs_first __read_mostly; | ||
78 | |||
79 | /* | ||
80 | * SCHED_OTHER wake-up granularity. | ||
81 | * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) | ||
82 | * | ||
83 | * This option delays the preemption effects of decoupled workloads | ||
84 | * and reduces their over-scheduling. Synchronous workloads will still | ||
85 | * have immediate wakeup/sleep latencies. | ||
86 | */ | ||
87 | unsigned int sysctl_sched_wakeup_granularity = 1000000UL; | ||
88 | unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; | ||
89 | |||
90 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | ||
91 | |||
92 | /* | ||
93 | * The exponential sliding window over which load is averaged for shares | ||
94 | * distribution. | ||
95 | * (default: 10msec) | ||
96 | */ | ||
97 | unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL; | ||
98 | |||
99 | #ifdef CONFIG_CFS_BANDWIDTH | ||
100 | /* | ||
101 | * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool | ||
102 | * each time a cfs_rq requests quota. | ||
103 | * | ||
104 | * Note: in the case that the slice exceeds the runtime remaining (either due | ||
105 | * to consumption or the quota being specified to be smaller than the slice) | ||
106 | * we will always only issue the remaining available time. | ||
107 | * | ||
108 | * default: 5 msec, units: microseconds | ||
109 | */ | ||
110 | unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * Increase the granularity value when there are more CPUs, | ||
115 | * because with more CPUs the 'effective latency' as visible | ||
116 | * to users decreases. But the relationship is not linear, | ||
117 | * so pick a second-best guess by going with the log2 of the | ||
118 | * number of CPUs. | ||
119 | * | ||
120 | * This idea comes from the SD scheduler of Con Kolivas: | ||
121 | */ | ||
122 | static int get_update_sysctl_factor(void) | ||
123 | { | ||
124 | unsigned int cpus = min_t(int, num_online_cpus(), 8); | ||
125 | unsigned int factor; | ||
126 | |||
127 | switch (sysctl_sched_tunable_scaling) { | ||
128 | case SCHED_TUNABLESCALING_NONE: | ||
129 | factor = 1; | ||
130 | break; | ||
131 | case SCHED_TUNABLESCALING_LINEAR: | ||
132 | factor = cpus; | ||
133 | break; | ||
134 | case SCHED_TUNABLESCALING_LOG: | ||
135 | default: | ||
136 | factor = 1 + ilog2(cpus); | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | return factor; | ||
141 | } | ||
142 | |||
143 | static void update_sysctl(void) | ||
144 | { | ||
145 | unsigned int factor = get_update_sysctl_factor(); | ||
146 | |||
147 | #define SET_SYSCTL(name) \ | ||
148 | (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
149 | SET_SYSCTL(sched_min_granularity); | ||
150 | SET_SYSCTL(sched_latency); | ||
151 | SET_SYSCTL(sched_wakeup_granularity); | ||
152 | #undef SET_SYSCTL | ||
153 | } | ||
154 | |||
155 | void sched_init_granularity(void) | ||
156 | { | ||
157 | update_sysctl(); | ||
158 | } | ||
159 | |||
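For reference, a minimal user-space sketch (not part of this file) of the scaling arithmetic above, assuming an 8-CPU machine and the SCHED_TUNABLESCALING_LOG default: the factor is 1 + ilog2(min(ncpus, 8)) = 4, so the normalized 6 ms latency becomes an effective 24 ms.

#include <stdio.h>

/* Rough stand-in for the kernel's ilog2(), for this example only. */
static unsigned int ilog2_u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int cpus = 8;				/* assumed CPU count */
	unsigned int factor = 1 + ilog2_u(cpus < 8 ? cpus : 8);

	printf("sched_latency            = %u ns\n", factor * 6000000U);
	printf("sched_min_granularity    = %u ns\n", factor * 750000U);
	printf("sched_wakeup_granularity = %u ns\n", factor * 1000000U);
	return 0;
}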
160 | #if BITS_PER_LONG == 32 | ||
161 | # define WMULT_CONST (~0UL) | ||
162 | #else | ||
163 | # define WMULT_CONST (1UL << 32) | ||
164 | #endif | ||
165 | |||
166 | #define WMULT_SHIFT 32 | ||
167 | |||
168 | /* | ||
169 | * Shift right and round: | ||
170 | */ | ||
171 | #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) | ||
172 | |||
173 | /* | ||
174 | * delta *= weight / lw | ||
175 | */ | ||
176 | static unsigned long | ||
177 | calc_delta_mine(unsigned long delta_exec, unsigned long weight, | ||
178 | struct load_weight *lw) | ||
179 | { | ||
180 | u64 tmp; | ||
181 | |||
182 | /* | ||
183 | * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched | ||
184 | * entities since MIN_SHARES = 2. Treat weight as 1 if less than | ||
185 | * 2^SCHED_LOAD_RESOLUTION. | ||
186 | */ | ||
187 | if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) | ||
188 | tmp = (u64)delta_exec * scale_load_down(weight); | ||
189 | else | ||
190 | tmp = (u64)delta_exec; | ||
191 | |||
192 | if (!lw->inv_weight) { | ||
193 | unsigned long w = scale_load_down(lw->weight); | ||
194 | |||
195 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | ||
196 | lw->inv_weight = 1; | ||
197 | else if (unlikely(!w)) | ||
198 | lw->inv_weight = WMULT_CONST; | ||
199 | else | ||
200 | lw->inv_weight = WMULT_CONST / w; | ||
201 | } | ||
202 | |||
203 | /* | ||
204 | * Check whether we'd overflow the 64-bit multiplication: | ||
205 | */ | ||
206 | if (unlikely(tmp > WMULT_CONST)) | ||
207 | tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight, | ||
208 | WMULT_SHIFT/2); | ||
209 | else | ||
210 | tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT); | ||
211 | |||
212 | return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); | ||
213 | } | ||
214 | |||
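As a worked example of the "delta *= weight / lw" fixed-point trick above, a small user-space sketch (not kernel code; it skips calc_delta_mine()'s 64-bit overflow guard): a nice-0 entity of weight 1024 on a queue whose total load is 3072 sees a 6,000,000 ns delta scaled down to roughly 2,000,000 ns.

#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT	32
/* Shift right and round, as in the SRR() macro above. */
#define SRR(x, y)	(((x) + (1ULL << ((y) - 1))) >> (y))

static uint64_t scale_delta(uint64_t delta, uint64_t weight, uint64_t lw_weight)
{
	uint64_t inv_weight = (1ULL << WMULT_SHIFT) / lw_weight;	/* ~2^32 / lw */

	return SRR(delta * weight * inv_weight, WMULT_SHIFT);
}

int main(void)
{
	/* 6 ms of wall time, weight 1024, runqueue load 3072 -> ~2 ms. */
	printf("%llu ns\n",
	       (unsigned long long)scale_delta(6000000ULL, 1024ULL, 3072ULL));
	return 0;
}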
215 | |||
216 | const struct sched_class fair_sched_class; | ||
217 | |||
218 | /************************************************************** | ||
219 | * CFS operations on generic schedulable entities: | ||
220 | */ | ||
221 | |||
222 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
223 | |||
224 | /* cpu runqueue to which this cfs_rq is attached */ | ||
225 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) | ||
226 | { | ||
227 | return cfs_rq->rq; | ||
228 | } | ||
229 | |||
230 | /* An entity is a task if it doesn't "own" a runqueue */ | ||
231 | #define entity_is_task(se) (!se->my_q) | ||
232 | |||
233 | static inline struct task_struct *task_of(struct sched_entity *se) | ||
234 | { | ||
235 | #ifdef CONFIG_SCHED_DEBUG | ||
236 | WARN_ON_ONCE(!entity_is_task(se)); | ||
237 | #endif | ||
238 | return container_of(se, struct task_struct, se); | ||
239 | } | ||
240 | |||
241 | /* Walk up scheduling entities hierarchy */ | ||
242 | #define for_each_sched_entity(se) \ | ||
243 | for (; se; se = se->parent) | ||
244 | |||
245 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) | ||
246 | { | ||
247 | return p->se.cfs_rq; | ||
248 | } | ||
249 | |||
250 | /* runqueue on which this entity is (to be) queued */ | ||
251 | static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) | ||
252 | { | ||
253 | return se->cfs_rq; | ||
254 | } | ||
255 | |||
256 | /* runqueue "owned" by this group */ | ||
257 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) | ||
258 | { | ||
259 | return grp->my_q; | ||
260 | } | ||
261 | |||
262 | static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) | ||
263 | { | ||
264 | if (!cfs_rq->on_list) { | ||
265 | /* | ||
266 | * Ensure we either appear before our parent (if already | ||
267 | * enqueued) or force our parent to appear after us when it is | ||
268 | * enqueued. The fact that we always enqueue bottom-up | ||
269 | * reduces this to two cases. | ||
270 | */ | ||
271 | if (cfs_rq->tg->parent && | ||
272 | cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) { | ||
273 | list_add_rcu(&cfs_rq->leaf_cfs_rq_list, | ||
274 | &rq_of(cfs_rq)->leaf_cfs_rq_list); | ||
275 | } else { | ||
276 | list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, | ||
277 | &rq_of(cfs_rq)->leaf_cfs_rq_list); | ||
278 | } | ||
279 | |||
280 | cfs_rq->on_list = 1; | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | ||
285 | { | ||
286 | if (cfs_rq->on_list) { | ||
287 | list_del_rcu(&cfs_rq->leaf_cfs_rq_list); | ||
288 | cfs_rq->on_list = 0; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | /* Iterate through all leaf cfs_rq's on a runqueue */ | ||
293 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | ||
294 | list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) | ||
295 | |||
296 | /* Do the two (enqueued) entities belong to the same group ? */ | ||
297 | static inline int | ||
298 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | ||
299 | { | ||
300 | if (se->cfs_rq == pse->cfs_rq) | ||
301 | return 1; | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | ||
307 | { | ||
308 | return se->parent; | ||
309 | } | ||
310 | |||
311 | /* return depth at which a sched entity is present in the hierarchy */ | ||
312 | static inline int depth_se(struct sched_entity *se) | ||
313 | { | ||
314 | int depth = 0; | ||
315 | |||
316 | for_each_sched_entity(se) | ||
317 | depth++; | ||
318 | |||
319 | return depth; | ||
320 | } | ||
321 | |||
322 | static void | ||
323 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | ||
324 | { | ||
325 | int se_depth, pse_depth; | ||
326 | |||
327 | /* | ||
328 | * A preemption test can be made between sibling entities that are in the | ||
329 | * same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of | ||
330 | * both tasks until we find their ancestors that are siblings under a | ||
331 | * common parent. | ||
332 | */ | ||
333 | |||
334 | /* First walk up until both entities are at same depth */ | ||
335 | se_depth = depth_se(*se); | ||
336 | pse_depth = depth_se(*pse); | ||
337 | |||
338 | while (se_depth > pse_depth) { | ||
339 | se_depth--; | ||
340 | *se = parent_entity(*se); | ||
341 | } | ||
342 | |||
343 | while (pse_depth > se_depth) { | ||
344 | pse_depth--; | ||
345 | *pse = parent_entity(*pse); | ||
346 | } | ||
347 | |||
348 | while (!is_same_group(*se, *pse)) { | ||
349 | *se = parent_entity(*se); | ||
350 | *pse = parent_entity(*pse); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | #else /* !CONFIG_FAIR_GROUP_SCHED */ | ||
355 | |||
356 | static inline struct task_struct *task_of(struct sched_entity *se) | ||
357 | { | ||
358 | return container_of(se, struct task_struct, se); | ||
359 | } | ||
360 | |||
361 | static inline struct rq *rq_of(struct cfs_rq *cfs_rq) | ||
362 | { | ||
363 | return container_of(cfs_rq, struct rq, cfs); | ||
364 | } | ||
365 | |||
366 | #define entity_is_task(se) 1 | ||
367 | |||
368 | #define for_each_sched_entity(se) \ | ||
369 | for (; se; se = NULL) | ||
370 | |||
371 | static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) | ||
372 | { | ||
373 | return &task_rq(p)->cfs; | ||
374 | } | ||
375 | |||
376 | static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) | ||
377 | { | ||
378 | struct task_struct *p = task_of(se); | ||
379 | struct rq *rq = task_rq(p); | ||
380 | |||
381 | return &rq->cfs; | ||
382 | } | ||
383 | |||
384 | /* runqueue "owned" by this group */ | ||
385 | static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) | ||
386 | { | ||
387 | return NULL; | ||
388 | } | ||
389 | |||
390 | static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) | ||
391 | { | ||
392 | } | ||
393 | |||
394 | static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) | ||
395 | { | ||
396 | } | ||
397 | |||
398 | #define for_each_leaf_cfs_rq(rq, cfs_rq) \ | ||
399 | for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) | ||
400 | |||
401 | static inline int | ||
402 | is_same_group(struct sched_entity *se, struct sched_entity *pse) | ||
403 | { | ||
404 | return 1; | ||
405 | } | ||
406 | |||
407 | static inline struct sched_entity *parent_entity(struct sched_entity *se) | ||
408 | { | ||
409 | return NULL; | ||
410 | } | ||
411 | |||
412 | static inline void | ||
413 | find_matching_se(struct sched_entity **se, struct sched_entity **pse) | ||
414 | { | ||
415 | } | ||
416 | |||
417 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
418 | |||
419 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | ||
420 | unsigned long delta_exec); | ||
421 | |||
422 | /************************************************************** | ||
423 | * Scheduling class tree data structure manipulation methods: | ||
424 | */ | ||
425 | |||
426 | static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime) | ||
427 | { | ||
428 | s64 delta = (s64)(vruntime - min_vruntime); | ||
429 | if (delta > 0) | ||
430 | min_vruntime = vruntime; | ||
431 | |||
432 | return min_vruntime; | ||
433 | } | ||
434 | |||
435 | static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) | ||
436 | { | ||
437 | s64 delta = (s64)(vruntime - min_vruntime); | ||
438 | if (delta < 0) | ||
439 | min_vruntime = vruntime; | ||
440 | |||
441 | return min_vruntime; | ||
442 | } | ||
443 | |||
444 | static inline int entity_before(struct sched_entity *a, | ||
445 | struct sched_entity *b) | ||
446 | { | ||
447 | return (s64)(a->vruntime - b->vruntime) < 0; | ||
448 | } | ||
449 | |||
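A short user-space sketch (not kernel code) of why the helpers above compare vruntimes through a signed difference instead of a plain '<': near the u64 wrap point a direct comparison orders the values the wrong way round, while the signed difference stays correct as long as the two values are within 2^63 of each other.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = UINT64_MAX - 100;	/* vruntime just before the wrap */
	uint64_t b = a + 200;		/* logically later, numerically tiny */

	printf("direct compare: %d\n", a < b);			/* 0 - wrong */
	printf("signed compare: %d\n", (int64_t)(a - b) < 0);	/* 1 - right */
	return 0;
}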
450 | static void update_min_vruntime(struct cfs_rq *cfs_rq) | ||
451 | { | ||
452 | u64 vruntime = cfs_rq->min_vruntime; | ||
453 | |||
454 | if (cfs_rq->curr) | ||
455 | vruntime = cfs_rq->curr->vruntime; | ||
456 | |||
457 | if (cfs_rq->rb_leftmost) { | ||
458 | struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, | ||
459 | struct sched_entity, | ||
460 | run_node); | ||
461 | |||
462 | if (!cfs_rq->curr) | ||
463 | vruntime = se->vruntime; | ||
464 | else | ||
465 | vruntime = min_vruntime(vruntime, se->vruntime); | ||
466 | } | ||
467 | |||
468 | cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); | ||
469 | #ifndef CONFIG_64BIT | ||
470 | smp_wmb(); | ||
471 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | ||
472 | #endif | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * Enqueue an entity into the rb-tree: | ||
477 | */ | ||
478 | static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
479 | { | ||
480 | struct rb_node **link = &cfs_rq->tasks_timeline.rb_node; | ||
481 | struct rb_node *parent = NULL; | ||
482 | struct sched_entity *entry; | ||
483 | int leftmost = 1; | ||
484 | |||
485 | /* | ||
486 | * Find the right place in the rbtree: | ||
487 | */ | ||
488 | while (*link) { | ||
489 | parent = *link; | ||
490 | entry = rb_entry(parent, struct sched_entity, run_node); | ||
491 | /* | ||
492 | * We don't care about collisions. Nodes with | ||
493 | * the same key stay together. | ||
494 | */ | ||
495 | if (entity_before(se, entry)) { | ||
496 | link = &parent->rb_left; | ||
497 | } else { | ||
498 | link = &parent->rb_right; | ||
499 | leftmost = 0; | ||
500 | } | ||
501 | } | ||
502 | |||
503 | /* | ||
504 | * Maintain a cache of leftmost tree entries (it is frequently | ||
505 | * used): | ||
506 | */ | ||
507 | if (leftmost) | ||
508 | cfs_rq->rb_leftmost = &se->run_node; | ||
509 | |||
510 | rb_link_node(&se->run_node, parent, link); | ||
511 | rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline); | ||
512 | } | ||
513 | |||
514 | static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
515 | { | ||
516 | if (cfs_rq->rb_leftmost == &se->run_node) { | ||
517 | struct rb_node *next_node; | ||
518 | |||
519 | next_node = rb_next(&se->run_node); | ||
520 | cfs_rq->rb_leftmost = next_node; | ||
521 | } | ||
522 | |||
523 | rb_erase(&se->run_node, &cfs_rq->tasks_timeline); | ||
524 | } | ||
525 | |||
526 | struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) | ||
527 | { | ||
528 | struct rb_node *left = cfs_rq->rb_leftmost; | ||
529 | |||
530 | if (!left) | ||
531 | return NULL; | ||
532 | |||
533 | return rb_entry(left, struct sched_entity, run_node); | ||
534 | } | ||
535 | |||
536 | static struct sched_entity *__pick_next_entity(struct sched_entity *se) | ||
537 | { | ||
538 | struct rb_node *next = rb_next(&se->run_node); | ||
539 | |||
540 | if (!next) | ||
541 | return NULL; | ||
542 | |||
543 | return rb_entry(next, struct sched_entity, run_node); | ||
544 | } | ||
545 | |||
546 | #ifdef CONFIG_SCHED_DEBUG | ||
547 | struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) | ||
548 | { | ||
549 | struct rb_node *last = rb_last(&cfs_rq->tasks_timeline); | ||
550 | |||
551 | if (!last) | ||
552 | return NULL; | ||
553 | |||
554 | return rb_entry(last, struct sched_entity, run_node); | ||
555 | } | ||
556 | |||
557 | /************************************************************** | ||
558 | * Scheduling class statistics methods: | ||
559 | */ | ||
560 | |||
561 | int sched_proc_update_handler(struct ctl_table *table, int write, | ||
562 | void __user *buffer, size_t *lenp, | ||
563 | loff_t *ppos) | ||
564 | { | ||
565 | int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
566 | int factor = get_update_sysctl_factor(); | ||
567 | |||
568 | if (ret || !write) | ||
569 | return ret; | ||
570 | |||
571 | sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, | ||
572 | sysctl_sched_min_granularity); | ||
573 | |||
574 | #define WRT_SYSCTL(name) \ | ||
575 | (normalized_sysctl_##name = sysctl_##name / (factor)) | ||
576 | WRT_SYSCTL(sched_min_granularity); | ||
577 | WRT_SYSCTL(sched_latency); | ||
578 | WRT_SYSCTL(sched_wakeup_granularity); | ||
579 | #undef WRT_SYSCTL | ||
580 | |||
581 | return 0; | ||
582 | } | ||
583 | #endif | ||
584 | |||
585 | /* | ||
586 | * delta /= w | ||
587 | */ | ||
588 | static inline unsigned long | ||
589 | calc_delta_fair(unsigned long delta, struct sched_entity *se) | ||
590 | { | ||
591 | if (unlikely(se->load.weight != NICE_0_LOAD)) | ||
592 | delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load); | ||
593 | |||
594 | return delta; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * The idea is to set a period in which each task runs once. | ||
599 | * | ||
600 | * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch | ||
601 | * this period because otherwise the slices get too small. | ||
602 | * | ||
603 | * p = (nr <= nl) ? l : l*nr/nl | ||
604 | */ | ||
605 | static u64 __sched_period(unsigned long nr_running) | ||
606 | { | ||
607 | u64 period = sysctl_sched_latency; | ||
608 | unsigned long nr_latency = sched_nr_latency; | ||
609 | |||
610 | if (unlikely(nr_running > nr_latency)) { | ||
611 | period = sysctl_sched_min_granularity; | ||
612 | period *= nr_running; | ||
613 | } | ||
614 | |||
615 | return period; | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * We calculate the wall-time slice from the period by taking a part | ||
620 | * proportional to the weight. | ||
621 | * | ||
622 | * s = p*P[w/rw] | ||
623 | */ | ||
624 | static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
625 | { | ||
626 | u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); | ||
627 | |||
628 | for_each_sched_entity(se) { | ||
629 | struct load_weight *load; | ||
630 | struct load_weight lw; | ||
631 | |||
632 | cfs_rq = cfs_rq_of(se); | ||
633 | load = &cfs_rq->load; | ||
634 | |||
635 | if (unlikely(!se->on_rq)) { | ||
636 | lw = cfs_rq->load; | ||
637 | |||
638 | update_load_add(&lw, se->load.weight); | ||
639 | load = &lw; | ||
640 | } | ||
641 | slice = calc_delta_mine(slice, se->load.weight, load); | ||
642 | } | ||
643 | return slice; | ||
644 | } | ||
645 | |||
646 | /* | ||
647 | * We calculate the vruntime slice of a to-be-inserted task | ||
648 | * | ||
649 | * vs = s/w | ||
650 | */ | ||
651 | static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
652 | { | ||
653 | return calc_delta_fair(sched_slice(cfs_rq, se), se); | ||
654 | } | ||
655 | |||
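A small user-space sketch (not kernel code) of the p = (nr <= nl) ? l : l*nr/nl and s = p*w/rw rules above, using the unscaled defaults (6 ms latency, 0.75 ms minimum granularity, nr_latency = 8) and assuming equal nice-0 weights of 1024.

#include <stdio.h>
#include <stdint.h>

static uint64_t period_ns(unsigned long nr_running)
{
	return nr_running > 8 ? nr_running * 750000ULL : 6000000ULL;
}

int main(void)
{
	/* Four equal nice-0 tasks: each gets 6 ms * 1024/4096 = 1.5 ms. */
	printf("slice, 4 tasks   = %llu ns\n",
	       (unsigned long long)(period_ns(4) * 1024 / (4 * 1024)));

	/* Sixteen tasks stretch the period to 16 * 0.75 ms = 12 ms. */
	printf("period, 16 tasks = %llu ns\n",
	       (unsigned long long)period_ns(16));
	return 0;
}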
656 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update); | ||
657 | static void update_cfs_shares(struct cfs_rq *cfs_rq); | ||
658 | |||
659 | /* | ||
660 | * Update the current task's runtime statistics. Skip current tasks that | ||
661 | * are not in our scheduling class. | ||
662 | */ | ||
663 | static inline void | ||
664 | __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, | ||
665 | unsigned long delta_exec) | ||
666 | { | ||
667 | unsigned long delta_exec_weighted; | ||
668 | |||
669 | schedstat_set(curr->statistics.exec_max, | ||
670 | max((u64)delta_exec, curr->statistics.exec_max)); | ||
671 | |||
672 | curr->sum_exec_runtime += delta_exec; | ||
673 | schedstat_add(cfs_rq, exec_clock, delta_exec); | ||
674 | delta_exec_weighted = calc_delta_fair(delta_exec, curr); | ||
675 | |||
676 | curr->vruntime += delta_exec_weighted; | ||
677 | update_min_vruntime(cfs_rq); | ||
678 | |||
679 | #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED | ||
680 | cfs_rq->load_unacc_exec_time += delta_exec; | ||
681 | #endif | ||
682 | } | ||
683 | |||
684 | static void update_curr(struct cfs_rq *cfs_rq) | ||
685 | { | ||
686 | struct sched_entity *curr = cfs_rq->curr; | ||
687 | u64 now = rq_of(cfs_rq)->clock_task; | ||
688 | unsigned long delta_exec; | ||
689 | |||
690 | if (unlikely(!curr)) | ||
691 | return; | ||
692 | |||
693 | /* | ||
694 | * Get the amount of time the current task was running | ||
695 | * since the last time we changed load (this cannot | ||
696 | * overflow on 32 bits): | ||
697 | */ | ||
698 | delta_exec = (unsigned long)(now - curr->exec_start); | ||
699 | if (!delta_exec) | ||
700 | return; | ||
701 | |||
702 | __update_curr(cfs_rq, curr, delta_exec); | ||
703 | curr->exec_start = now; | ||
704 | |||
705 | if (entity_is_task(curr)) { | ||
706 | struct task_struct *curtask = task_of(curr); | ||
707 | |||
708 | trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); | ||
709 | cpuacct_charge(curtask, delta_exec); | ||
710 | account_group_exec_runtime(curtask, delta_exec); | ||
711 | } | ||
712 | |||
713 | account_cfs_rq_runtime(cfs_rq, delta_exec); | ||
714 | } | ||
715 | |||
716 | static inline void | ||
717 | update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
718 | { | ||
719 | schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock); | ||
720 | } | ||
721 | |||
722 | /* | ||
723 | * Task is being enqueued - update stats: | ||
724 | */ | ||
725 | static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
726 | { | ||
727 | /* | ||
728 | * Are we enqueueing a waiting task? (for current tasks | ||
729 | * a dequeue/enqueue event is a NOP) | ||
730 | */ | ||
731 | if (se != cfs_rq->curr) | ||
732 | update_stats_wait_start(cfs_rq, se); | ||
733 | } | ||
734 | |||
735 | static void | ||
736 | update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
737 | { | ||
738 | schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max, | ||
739 | rq_of(cfs_rq)->clock - se->statistics.wait_start)); | ||
740 | schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1); | ||
741 | schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum + | ||
742 | rq_of(cfs_rq)->clock - se->statistics.wait_start); | ||
743 | #ifdef CONFIG_SCHEDSTATS | ||
744 | if (entity_is_task(se)) { | ||
745 | trace_sched_stat_wait(task_of(se), | ||
746 | rq_of(cfs_rq)->clock - se->statistics.wait_start); | ||
747 | } | ||
748 | #endif | ||
749 | schedstat_set(se->statistics.wait_start, 0); | ||
750 | } | ||
751 | |||
752 | static inline void | ||
753 | update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
754 | { | ||
755 | /* | ||
756 | * Mark the end of the wait period if dequeueing a | ||
757 | * waiting task: | ||
758 | */ | ||
759 | if (se != cfs_rq->curr) | ||
760 | update_stats_wait_end(cfs_rq, se); | ||
761 | } | ||
762 | |||
763 | /* | ||
764 | * We are picking a new current task - update its stats: | ||
765 | */ | ||
766 | static inline void | ||
767 | update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
768 | { | ||
769 | /* | ||
770 | * We are starting a new run period: | ||
771 | */ | ||
772 | se->exec_start = rq_of(cfs_rq)->clock_task; | ||
773 | } | ||
774 | |||
775 | /************************************************** | ||
776 | * Scheduling class queueing methods: | ||
777 | */ | ||
778 | |||
779 | #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED | ||
780 | static void | ||
781 | add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight) | ||
782 | { | ||
783 | cfs_rq->task_weight += weight; | ||
784 | } | ||
785 | #else | ||
786 | static inline void | ||
787 | add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight) | ||
788 | { | ||
789 | } | ||
790 | #endif | ||
791 | |||
792 | static void | ||
793 | account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
794 | { | ||
795 | update_load_add(&cfs_rq->load, se->load.weight); | ||
796 | if (!parent_entity(se)) | ||
797 | update_load_add(&rq_of(cfs_rq)->load, se->load.weight); | ||
798 | if (entity_is_task(se)) { | ||
799 | add_cfs_task_weight(cfs_rq, se->load.weight); | ||
800 | list_add(&se->group_node, &cfs_rq->tasks); | ||
801 | } | ||
802 | cfs_rq->nr_running++; | ||
803 | } | ||
804 | |||
805 | static void | ||
806 | account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
807 | { | ||
808 | update_load_sub(&cfs_rq->load, se->load.weight); | ||
809 | if (!parent_entity(se)) | ||
810 | update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); | ||
811 | if (entity_is_task(se)) { | ||
812 | add_cfs_task_weight(cfs_rq, -se->load.weight); | ||
813 | list_del_init(&se->group_node); | ||
814 | } | ||
815 | cfs_rq->nr_running--; | ||
816 | } | ||
817 | |||
818 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
819 | /* we need this in update_cfs_load and load-balance functions below */ | ||
820 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); | ||
821 | # ifdef CONFIG_SMP | ||
822 | static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, | ||
823 | int global_update) | ||
824 | { | ||
825 | struct task_group *tg = cfs_rq->tg; | ||
826 | long load_avg; | ||
827 | |||
828 | load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1); | ||
829 | load_avg -= cfs_rq->load_contribution; | ||
830 | |||
831 | if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) { | ||
832 | atomic_add(load_avg, &tg->load_weight); | ||
833 | cfs_rq->load_contribution += load_avg; | ||
834 | } | ||
835 | } | ||
836 | |||
837 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | ||
838 | { | ||
839 | u64 period = sysctl_sched_shares_window; | ||
840 | u64 now, delta; | ||
841 | unsigned long load = cfs_rq->load.weight; | ||
842 | |||
843 | if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq)) | ||
844 | return; | ||
845 | |||
846 | now = rq_of(cfs_rq)->clock_task; | ||
847 | delta = now - cfs_rq->load_stamp; | ||
848 | |||
849 | /* truncate load history at 4 idle periods */ | ||
850 | if (cfs_rq->load_stamp > cfs_rq->load_last && | ||
851 | now - cfs_rq->load_last > 4 * period) { | ||
852 | cfs_rq->load_period = 0; | ||
853 | cfs_rq->load_avg = 0; | ||
854 | delta = period - 1; | ||
855 | } | ||
856 | |||
857 | cfs_rq->load_stamp = now; | ||
858 | cfs_rq->load_unacc_exec_time = 0; | ||
859 | cfs_rq->load_period += delta; | ||
860 | if (load) { | ||
861 | cfs_rq->load_last = now; | ||
862 | cfs_rq->load_avg += delta * load; | ||
863 | } | ||
864 | |||
865 | /* consider updating load contribution on each fold or truncate */ | ||
866 | if (global_update || cfs_rq->load_period > period | ||
867 | || !cfs_rq->load_period) | ||
868 | update_cfs_rq_load_contribution(cfs_rq, global_update); | ||
869 | |||
870 | while (cfs_rq->load_period > period) { | ||
871 | /* | ||
872 | * Inline assembly required to prevent the compiler | ||
873 | * optimising this loop into a divmod call. | ||
874 | * See __iter_div_u64_rem() for another example of this. | ||
875 | */ | ||
876 | asm("" : "+rm" (cfs_rq->load_period)); | ||
877 | cfs_rq->load_period /= 2; | ||
878 | cfs_rq->load_avg /= 2; | ||
879 | } | ||
880 | |||
881 | if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg) | ||
882 | list_del_leaf_cfs_rq(cfs_rq); | ||
883 | } | ||
884 | |||
885 | static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) | ||
886 | { | ||
887 | long tg_weight; | ||
888 | |||
889 | /* | ||
890 | * Use this CPU's actual weight instead of the last load_contribution | ||
891 | * to gain a more accurate current total weight. See | ||
892 | * update_cfs_rq_load_contribution(). | ||
893 | */ | ||
894 | tg_weight = atomic_read(&tg->load_weight); | ||
895 | tg_weight -= cfs_rq->load_contribution; | ||
896 | tg_weight += cfs_rq->load.weight; | ||
897 | |||
898 | return tg_weight; | ||
899 | } | ||
900 | |||
901 | static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) | ||
902 | { | ||
903 | long tg_weight, load, shares; | ||
904 | |||
905 | tg_weight = calc_tg_weight(tg, cfs_rq); | ||
906 | load = cfs_rq->load.weight; | ||
907 | |||
908 | shares = (tg->shares * load); | ||
909 | if (tg_weight) | ||
910 | shares /= tg_weight; | ||
911 | |||
912 | if (shares < MIN_SHARES) | ||
913 | shares = MIN_SHARES; | ||
914 | if (shares > tg->shares) | ||
915 | shares = tg->shares; | ||
916 | |||
917 | return shares; | ||
918 | } | ||
919 | |||
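A simplified user-space sketch (not kernel code, ignoring load-weight scaling) of the calc_cfs_shares() proportion above, assuming a group with tg->shares = 1024 whose weight is split 3072 on this CPU and 1024 on another: this CPU's group entity ends up with 1024 * 3072 / 4096 = 768.

#include <stdio.h>

#define MIN_SHARES	2

static long calc_shares(long tg_shares, long local_load, long tg_weight)
{
	long shares = tg_weight ? tg_shares * local_load / tg_weight : tg_shares;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	printf("%ld\n", calc_shares(1024, 3072, 3072 + 1024));	/* 768 */
	return 0;
}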
920 | static void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
921 | { | ||
922 | if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) { | ||
923 | update_cfs_load(cfs_rq, 0); | ||
924 | update_cfs_shares(cfs_rq); | ||
925 | } | ||
926 | } | ||
927 | # else /* CONFIG_SMP */ | ||
928 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | ||
929 | { | ||
930 | } | ||
931 | |||
932 | static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) | ||
933 | { | ||
934 | return tg->shares; | ||
935 | } | ||
936 | |||
937 | static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
938 | { | ||
939 | } | ||
940 | # endif /* CONFIG_SMP */ | ||
941 | static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, | ||
942 | unsigned long weight) | ||
943 | { | ||
944 | if (se->on_rq) { | ||
945 | /* commit outstanding execution time */ | ||
946 | if (cfs_rq->curr == se) | ||
947 | update_curr(cfs_rq); | ||
948 | account_entity_dequeue(cfs_rq, se); | ||
949 | } | ||
950 | |||
951 | update_load_set(&se->load, weight); | ||
952 | |||
953 | if (se->on_rq) | ||
954 | account_entity_enqueue(cfs_rq, se); | ||
955 | } | ||
956 | |||
957 | static void update_cfs_shares(struct cfs_rq *cfs_rq) | ||
958 | { | ||
959 | struct task_group *tg; | ||
960 | struct sched_entity *se; | ||
961 | long shares; | ||
962 | |||
963 | tg = cfs_rq->tg; | ||
964 | se = tg->se[cpu_of(rq_of(cfs_rq))]; | ||
965 | if (!se || throttled_hierarchy(cfs_rq)) | ||
966 | return; | ||
967 | #ifndef CONFIG_SMP | ||
968 | if (likely(se->load.weight == tg->shares)) | ||
969 | return; | ||
970 | #endif | ||
971 | shares = calc_cfs_shares(cfs_rq, tg); | ||
972 | |||
973 | reweight_entity(cfs_rq_of(se), se, shares); | ||
974 | } | ||
975 | #else /* CONFIG_FAIR_GROUP_SCHED */ | ||
976 | static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) | ||
977 | { | ||
978 | } | ||
979 | |||
980 | static inline void update_cfs_shares(struct cfs_rq *cfs_rq) | ||
981 | { | ||
982 | } | ||
983 | |||
984 | static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq) | ||
985 | { | ||
986 | } | ||
987 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
988 | |||
989 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
990 | { | ||
991 | #ifdef CONFIG_SCHEDSTATS | ||
992 | struct task_struct *tsk = NULL; | ||
993 | |||
994 | if (entity_is_task(se)) | ||
995 | tsk = task_of(se); | ||
996 | |||
997 | if (se->statistics.sleep_start) { | ||
998 | u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start; | ||
999 | |||
1000 | if ((s64)delta < 0) | ||
1001 | delta = 0; | ||
1002 | |||
1003 | if (unlikely(delta > se->statistics.sleep_max)) | ||
1004 | se->statistics.sleep_max = delta; | ||
1005 | |||
1006 | se->statistics.sleep_start = 0; | ||
1007 | se->statistics.sum_sleep_runtime += delta; | ||
1008 | |||
1009 | if (tsk) { | ||
1010 | account_scheduler_latency(tsk, delta >> 10, 1); | ||
1011 | trace_sched_stat_sleep(tsk, delta); | ||
1012 | } | ||
1013 | } | ||
1014 | if (se->statistics.block_start) { | ||
1015 | u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start; | ||
1016 | |||
1017 | if ((s64)delta < 0) | ||
1018 | delta = 0; | ||
1019 | |||
1020 | if (unlikely(delta > se->statistics.block_max)) | ||
1021 | se->statistics.block_max = delta; | ||
1022 | |||
1023 | se->statistics.block_start = 0; | ||
1024 | se->statistics.sum_sleep_runtime += delta; | ||
1025 | |||
1026 | if (tsk) { | ||
1027 | if (tsk->in_iowait) { | ||
1028 | se->statistics.iowait_sum += delta; | ||
1029 | se->statistics.iowait_count++; | ||
1030 | trace_sched_stat_iowait(tsk, delta); | ||
1031 | } | ||
1032 | |||
1033 | /* | ||
1034 | * Blocking time is in units of nanosecs, so shift by | ||
1035 | * 20 to get a milliseconds-range estimation of the | ||
1036 | * amount of time that the task spent sleeping: | ||
1037 | */ | ||
1038 | if (unlikely(prof_on == SLEEP_PROFILING)) { | ||
1039 | profile_hits(SLEEP_PROFILING, | ||
1040 | (void *)get_wchan(tsk), | ||
1041 | delta >> 20); | ||
1042 | } | ||
1043 | account_scheduler_latency(tsk, delta >> 10, 0); | ||
1044 | } | ||
1045 | } | ||
1046 | #endif | ||
1047 | } | ||
1048 | |||
1049 | static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
1050 | { | ||
1051 | #ifdef CONFIG_SCHED_DEBUG | ||
1052 | s64 d = se->vruntime - cfs_rq->min_vruntime; | ||
1053 | |||
1054 | if (d < 0) | ||
1055 | d = -d; | ||
1056 | |||
1057 | if (d > 3*sysctl_sched_latency) | ||
1058 | schedstat_inc(cfs_rq, nr_spread_over); | ||
1059 | #endif | ||
1060 | } | ||
1061 | |||
1062 | static void | ||
1063 | place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) | ||
1064 | { | ||
1065 | u64 vruntime = cfs_rq->min_vruntime; | ||
1066 | |||
1067 | /* | ||
1068 | * The 'current' period is already promised to the current tasks, | ||
1069 | * however the extra weight of the new task will slow them down a | ||
1070 | * little, place the new task so that it fits in the slot that | ||
1071 | * stays open at the end. | ||
1072 | */ | ||
1073 | if (initial && sched_feat(START_DEBIT)) | ||
1074 | vruntime += sched_vslice(cfs_rq, se); | ||
1075 | |||
1076 | /* sleeps up to a single latency don't count. */ | ||
1077 | if (!initial) { | ||
1078 | unsigned long thresh = sysctl_sched_latency; | ||
1079 | |||
1080 | /* | ||
1081 | * Halve their sleep time's effect, to allow | ||
1082 | * for a gentler effect of sleepers: | ||
1083 | */ | ||
1084 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) | ||
1085 | thresh >>= 1; | ||
1086 | |||
1087 | vruntime -= thresh; | ||
1088 | } | ||
1089 | |||
1090 | /* ensure we never gain time by being placed backwards. */ | ||
1091 | vruntime = max_vruntime(se->vruntime, vruntime); | ||
1092 | |||
1093 | se->vruntime = vruntime; | ||
1094 | } | ||
1095 | |||
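A user-space sketch (not kernel code) of how place_entity() positions a waking sleeper with GENTLE_FAIR_SLEEPERS, assuming the unscaled 6 ms latency and a hypothetical queue min_vruntime of 100,000,000 ns: a long sleeper is parked half a latency to the left of min_vruntime, while a task that barely slept keeps its own larger vruntime thanks to the max_vruntime() clamp.

#include <stdio.h>
#include <stdint.h>

static uint64_t max_vruntime(uint64_t min_vruntime, uint64_t vruntime)
{
	return (int64_t)(vruntime - min_vruntime) > 0 ? vruntime : min_vruntime;
}

int main(void)
{
	uint64_t queue_min = 100000000ULL;	/* hypothetical cfs_rq->min_vruntime */
	uint64_t thresh = 6000000ULL >> 1;	/* half the 6 ms latency */
	uint64_t placement = queue_min - thresh;

	/* Task that slept for ages: placed at min_vruntime - 3 ms. */
	printf("long sleeper : %llu\n",
	       (unsigned long long)max_vruntime(10000000ULL, placement));

	/* Task that barely slept: keeps its own, larger vruntime. */
	printf("short sleeper: %llu\n",
	       (unsigned long long)max_vruntime(104000000ULL, placement));
	return 0;
}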
1096 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq); | ||
1097 | |||
1098 | static void | ||
1099 | enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | ||
1100 | { | ||
1101 | /* | ||
1102 | * Update the normalized vruntime before updating min_vruntime | ||
1103 | * through calling update_curr(). | ||
1104 | */ | ||
1105 | if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING)) | ||
1106 | se->vruntime += cfs_rq->min_vruntime; | ||
1107 | |||
1108 | /* | ||
1109 | * Update run-time statistics of the 'current'. | ||
1110 | */ | ||
1111 | update_curr(cfs_rq); | ||
1112 | update_cfs_load(cfs_rq, 0); | ||
1113 | account_entity_enqueue(cfs_rq, se); | ||
1114 | update_cfs_shares(cfs_rq); | ||
1115 | |||
1116 | if (flags & ENQUEUE_WAKEUP) { | ||
1117 | place_entity(cfs_rq, se, 0); | ||
1118 | enqueue_sleeper(cfs_rq, se); | ||
1119 | } | ||
1120 | |||
1121 | update_stats_enqueue(cfs_rq, se); | ||
1122 | check_spread(cfs_rq, se); | ||
1123 | if (se != cfs_rq->curr) | ||
1124 | __enqueue_entity(cfs_rq, se); | ||
1125 | se->on_rq = 1; | ||
1126 | |||
1127 | if (cfs_rq->nr_running == 1) { | ||
1128 | list_add_leaf_cfs_rq(cfs_rq); | ||
1129 | check_enqueue_throttle(cfs_rq); | ||
1130 | } | ||
1131 | } | ||
1132 | |||
1133 | static void __clear_buddies_last(struct sched_entity *se) | ||
1134 | { | ||
1135 | for_each_sched_entity(se) { | ||
1136 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
1137 | if (cfs_rq->last == se) | ||
1138 | cfs_rq->last = NULL; | ||
1139 | else | ||
1140 | break; | ||
1141 | } | ||
1142 | } | ||
1143 | |||
1144 | static void __clear_buddies_next(struct sched_entity *se) | ||
1145 | { | ||
1146 | for_each_sched_entity(se) { | ||
1147 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
1148 | if (cfs_rq->next == se) | ||
1149 | cfs_rq->next = NULL; | ||
1150 | else | ||
1151 | break; | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | static void __clear_buddies_skip(struct sched_entity *se) | ||
1156 | { | ||
1157 | for_each_sched_entity(se) { | ||
1158 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
1159 | if (cfs_rq->skip == se) | ||
1160 | cfs_rq->skip = NULL; | ||
1161 | else | ||
1162 | break; | ||
1163 | } | ||
1164 | } | ||
1165 | |||
1166 | static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
1167 | { | ||
1168 | if (cfs_rq->last == se) | ||
1169 | __clear_buddies_last(se); | ||
1170 | |||
1171 | if (cfs_rq->next == se) | ||
1172 | __clear_buddies_next(se); | ||
1173 | |||
1174 | if (cfs_rq->skip == se) | ||
1175 | __clear_buddies_skip(se); | ||
1176 | } | ||
1177 | |||
1178 | static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); | ||
1179 | |||
1180 | static void | ||
1181 | dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) | ||
1182 | { | ||
1183 | /* | ||
1184 | * Update run-time statistics of the 'current'. | ||
1185 | */ | ||
1186 | update_curr(cfs_rq); | ||
1187 | |||
1188 | update_stats_dequeue(cfs_rq, se); | ||
1189 | if (flags & DEQUEUE_SLEEP) { | ||
1190 | #ifdef CONFIG_SCHEDSTATS | ||
1191 | if (entity_is_task(se)) { | ||
1192 | struct task_struct *tsk = task_of(se); | ||
1193 | |||
1194 | if (tsk->state & TASK_INTERRUPTIBLE) | ||
1195 | se->statistics.sleep_start = rq_of(cfs_rq)->clock; | ||
1196 | if (tsk->state & TASK_UNINTERRUPTIBLE) | ||
1197 | se->statistics.block_start = rq_of(cfs_rq)->clock; | ||
1198 | } | ||
1199 | #endif | ||
1200 | } | ||
1201 | |||
1202 | clear_buddies(cfs_rq, se); | ||
1203 | |||
1204 | if (se != cfs_rq->curr) | ||
1205 | __dequeue_entity(cfs_rq, se); | ||
1206 | se->on_rq = 0; | ||
1207 | update_cfs_load(cfs_rq, 0); | ||
1208 | account_entity_dequeue(cfs_rq, se); | ||
1209 | |||
1210 | /* | ||
1211 | * Normalize the entity after updating the min_vruntime because the | ||
1212 | * update can refer to the ->curr item and we need to reflect this | ||
1213 | * movement in our normalized position. | ||
1214 | */ | ||
1215 | if (!(flags & DEQUEUE_SLEEP)) | ||
1216 | se->vruntime -= cfs_rq->min_vruntime; | ||
1217 | |||
1218 | /* return excess runtime on last dequeue */ | ||
1219 | return_cfs_rq_runtime(cfs_rq); | ||
1220 | |||
1221 | update_min_vruntime(cfs_rq); | ||
1222 | update_cfs_shares(cfs_rq); | ||
1223 | } | ||
1224 | |||
1225 | /* | ||
1226 | * Preempt the current task with a newly woken task if needed: | ||
1227 | */ | ||
1228 | static void | ||
1229 | check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | ||
1230 | { | ||
1231 | unsigned long ideal_runtime, delta_exec; | ||
1232 | struct sched_entity *se; | ||
1233 | s64 delta; | ||
1234 | |||
1235 | ideal_runtime = sched_slice(cfs_rq, curr); | ||
1236 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | ||
1237 | if (delta_exec > ideal_runtime) { | ||
1238 | resched_task(rq_of(cfs_rq)->curr); | ||
1239 | /* | ||
1240 | * The current task ran long enough, ensure it doesn't get | ||
1241 | * re-elected due to buddy favours. | ||
1242 | */ | ||
1243 | clear_buddies(cfs_rq, curr); | ||
1244 | return; | ||
1245 | } | ||
1246 | |||
1247 | /* | ||
1248 | * Ensure that a task that missed wakeup preemption by a | ||
1249 | * narrow margin doesn't have to wait for a full slice. | ||
1250 | * This also mitigates buddy induced latencies under load. | ||
1251 | */ | ||
1252 | if (delta_exec < sysctl_sched_min_granularity) | ||
1253 | return; | ||
1254 | |||
1255 | se = __pick_first_entity(cfs_rq); | ||
1256 | delta = curr->vruntime - se->vruntime; | ||
1257 | |||
1258 | if (delta < 0) | ||
1259 | return; | ||
1260 | |||
1261 | if (delta > ideal_runtime) | ||
1262 | resched_task(rq_of(cfs_rq)->curr); | ||
1263 | } | ||
1264 | |||
1265 | static void | ||
1266 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) | ||
1267 | { | ||
1268 | /* 'current' is not kept within the tree. */ | ||
1269 | if (se->on_rq) { | ||
1270 | /* | ||
1271 | * Any task has to be enqueued before it gets to execute on | ||
1272 | * a CPU. So account for the time it spent waiting on the | ||
1273 | * runqueue. | ||
1274 | */ | ||
1275 | update_stats_wait_end(cfs_rq, se); | ||
1276 | __dequeue_entity(cfs_rq, se); | ||
1277 | } | ||
1278 | |||
1279 | update_stats_curr_start(cfs_rq, se); | ||
1280 | cfs_rq->curr = se; | ||
1281 | #ifdef CONFIG_SCHEDSTATS | ||
1282 | /* | ||
1283 | * Track our maximum slice length, if the CPU's load is at | ||
1284 | * least twice that of our own weight (i.e. don't track it | ||
1285 | * when there are only lesser-weight tasks around): | ||
1286 | */ | ||
1287 | if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { | ||
1288 | se->statistics.slice_max = max(se->statistics.slice_max, | ||
1289 | se->sum_exec_runtime - se->prev_sum_exec_runtime); | ||
1290 | } | ||
1291 | #endif | ||
1292 | se->prev_sum_exec_runtime = se->sum_exec_runtime; | ||
1293 | } | ||
1294 | |||
1295 | static int | ||
1296 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | ||
1297 | |||
1298 | /* | ||
1299 | * Pick the next process, keeping these things in mind, in this order: | ||
1300 | * 1) keep things fair between processes/task groups | ||
1301 | * 2) pick the "next" process, since someone really wants that to run | ||
1302 | * 3) pick the "last" process, for cache locality | ||
1303 | * 4) do not run the "skip" process, if something else is available | ||
1304 | */ | ||
1305 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | ||
1306 | { | ||
1307 | struct sched_entity *se = __pick_first_entity(cfs_rq); | ||
1308 | struct sched_entity *left = se; | ||
1309 | |||
1310 | /* | ||
1311 | * Avoid running the skip buddy, if running something else can | ||
1312 | * be done without getting too unfair. | ||
1313 | */ | ||
1314 | if (cfs_rq->skip == se) { | ||
1315 | struct sched_entity *second = __pick_next_entity(se); | ||
1316 | if (second && wakeup_preempt_entity(second, left) < 1) | ||
1317 | se = second; | ||
1318 | } | ||
1319 | |||
1320 | /* | ||
1321 | * Prefer last buddy, try to return the CPU to a preempted task. | ||
1322 | */ | ||
1323 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | ||
1324 | se = cfs_rq->last; | ||
1325 | |||
1326 | /* | ||
1327 | * Someone really wants this to run. If it's not unfair, run it. | ||
1328 | */ | ||
1329 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) | ||
1330 | se = cfs_rq->next; | ||
1331 | |||
1332 | clear_buddies(cfs_rq, se); | ||
1333 | |||
1334 | return se; | ||
1335 | } | ||
1336 | |||
1337 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq); | ||
1338 | |||
1339 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | ||
1340 | { | ||
1341 | /* | ||
1342 | * If still on the runqueue then deactivate_task() | ||
1343 | * was not called and update_curr() has to be done: | ||
1344 | */ | ||
1345 | if (prev->on_rq) | ||
1346 | update_curr(cfs_rq); | ||
1347 | |||
1348 | /* throttle cfs_rqs exceeding runtime */ | ||
1349 | check_cfs_rq_runtime(cfs_rq); | ||
1350 | |||
1351 | check_spread(cfs_rq, prev); | ||
1352 | if (prev->on_rq) { | ||
1353 | update_stats_wait_start(cfs_rq, prev); | ||
1354 | /* Put 'current' back into the tree. */ | ||
1355 | __enqueue_entity(cfs_rq, prev); | ||
1356 | } | ||
1357 | cfs_rq->curr = NULL; | ||
1358 | } | ||
1359 | |||
1360 | static void | ||
1361 | entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | ||
1362 | { | ||
1363 | /* | ||
1364 | * Update run-time statistics of the 'current'. | ||
1365 | */ | ||
1366 | update_curr(cfs_rq); | ||
1367 | |||
1368 | /* | ||
1369 | * Update share accounting for long-running entities. | ||
1370 | */ | ||
1371 | update_entity_shares_tick(cfs_rq); | ||
1372 | |||
1373 | #ifdef CONFIG_SCHED_HRTICK | ||
1374 | /* | ||
1375 | * queued ticks are scheduled to match the slice, so don't bother | ||
1376 | * validating it and just reschedule. | ||
1377 | */ | ||
1378 | if (queued) { | ||
1379 | resched_task(rq_of(cfs_rq)->curr); | ||
1380 | return; | ||
1381 | } | ||
1382 | /* | ||
1383 | * don't let the period tick interfere with the hrtick preemption | ||
1384 | */ | ||
1385 | if (!sched_feat(DOUBLE_TICK) && | ||
1386 | hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) | ||
1387 | return; | ||
1388 | #endif | ||
1389 | |||
1390 | if (cfs_rq->nr_running > 1) | ||
1391 | check_preempt_tick(cfs_rq, curr); | ||
1392 | } | ||
1393 | |||
1394 | |||
1395 | /************************************************** | ||
1396 | * CFS bandwidth control machinery | ||
1397 | */ | ||
1398 | |||
1399 | #ifdef CONFIG_CFS_BANDWIDTH | ||
1400 | |||
1401 | #ifdef HAVE_JUMP_LABEL | ||
1402 | static struct jump_label_key __cfs_bandwidth_used; | ||
1403 | |||
1404 | static inline bool cfs_bandwidth_used(void) | ||
1405 | { | ||
1406 | return static_branch(&__cfs_bandwidth_used); | ||
1407 | } | ||
1408 | |||
1409 | void account_cfs_bandwidth_used(int enabled, int was_enabled) | ||
1410 | { | ||
1411 | /* only need to count groups transitioning between enabled/!enabled */ | ||
1412 | if (enabled && !was_enabled) | ||
1413 | jump_label_inc(&__cfs_bandwidth_used); | ||
1414 | else if (!enabled && was_enabled) | ||
1415 | jump_label_dec(&__cfs_bandwidth_used); | ||
1416 | } | ||
1417 | #else /* HAVE_JUMP_LABEL */ | ||
1418 | static bool cfs_bandwidth_used(void) | ||
1419 | { | ||
1420 | return true; | ||
1421 | } | ||
1422 | |||
1423 | void account_cfs_bandwidth_used(int enabled, int was_enabled) {} | ||
1424 | #endif /* HAVE_JUMP_LABEL */ | ||
1425 | |||
1426 | /* | ||
1427 | * default period for cfs group bandwidth. | ||
1428 | * default: 0.1s, units: nanoseconds | ||
1429 | */ | ||
1430 | static inline u64 default_cfs_period(void) | ||
1431 | { | ||
1432 | return 100000000ULL; | ||
1433 | } | ||
1434 | |||
1435 | static inline u64 sched_cfs_bandwidth_slice(void) | ||
1436 | { | ||
1437 | return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; | ||
1438 | } | ||
1439 | |||
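A tiny user-space sketch (not kernel code) relating the defaults above: the slice sysctl is in microseconds while period and quota are in nanoseconds, so with the default 5 ms slice, the default 100 ms period and a hypothetical 20 ms quota, a cfs_rq can pull at most four slices per period, i.e. 20% of one CPU.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000ULL

int main(void)
{
	uint64_t slice  = 5000ULL * NSEC_PER_USEC;	/* default 5 ms slice       */
	uint64_t period = 100000000ULL;			/* default 100 ms period    */
	uint64_t quota  = 20000000ULL;			/* hypothetical 20 ms quota */

	printf("slices per period: %llu\n", (unsigned long long)(quota / slice));
	printf("cpu share        : %llu%%\n", (unsigned long long)(quota * 100 / period));
	return 0;
}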
1440 | /* | ||
1441 | * Replenish runtime according to assigned quota and update expiration time. | ||
1442 | * We use sched_clock_cpu directly instead of rq->clock to avoid adding | ||
1443 | * additional synchronization around rq->lock. | ||
1444 | * | ||
1445 | * requires cfs_b->lock | ||
1446 | */ | ||
1447 | void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) | ||
1448 | { | ||
1449 | u64 now; | ||
1450 | |||
1451 | if (cfs_b->quota == RUNTIME_INF) | ||
1452 | return; | ||
1453 | |||
1454 | now = sched_clock_cpu(smp_processor_id()); | ||
1455 | cfs_b->runtime = cfs_b->quota; | ||
1456 | cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); | ||
1457 | } | ||
1458 | |||
1459 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | ||
1460 | { | ||
1461 | return &tg->cfs_bandwidth; | ||
1462 | } | ||
1463 | |||
1464 | /* returns 0 on failure to allocate runtime */ | ||
1465 | static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
1466 | { | ||
1467 | struct task_group *tg = cfs_rq->tg; | ||
1468 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); | ||
1469 | u64 amount = 0, min_amount, expires; | ||
1470 | |||
1471 | /* note: this is a positive sum as runtime_remaining <= 0 */ | ||
1472 | min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; | ||
1473 | |||
1474 | raw_spin_lock(&cfs_b->lock); | ||
1475 | if (cfs_b->quota == RUNTIME_INF) | ||
1476 | amount = min_amount; | ||
1477 | else { | ||
1478 | /* | ||
1479 | * If the bandwidth pool has become inactive, then at least one | ||
1480 | * period must have elapsed since the last consumption. | ||
1481 | * Refresh the global state and ensure bandwidth timer becomes | ||
1482 | * active. | ||
1483 | */ | ||
1484 | if (!cfs_b->timer_active) { | ||
1485 | __refill_cfs_bandwidth_runtime(cfs_b); | ||
1486 | __start_cfs_bandwidth(cfs_b); | ||
1487 | } | ||
1488 | |||
1489 | if (cfs_b->runtime > 0) { | ||
1490 | amount = min(cfs_b->runtime, min_amount); | ||
1491 | cfs_b->runtime -= amount; | ||
1492 | cfs_b->idle = 0; | ||
1493 | } | ||
1494 | } | ||
1495 | expires = cfs_b->runtime_expires; | ||
1496 | raw_spin_unlock(&cfs_b->lock); | ||
1497 | |||
1498 | cfs_rq->runtime_remaining += amount; | ||
1499 | /* | ||
1500 | * we may have advanced our local expiration to account for allowed | ||
1501 | * spread between our sched_clock and the one on which runtime was | ||
1502 | * issued. | ||
1503 | */ | ||
1504 | if ((s64)(expires - cfs_rq->runtime_expires) > 0) | ||
1505 | cfs_rq->runtime_expires = expires; | ||
1506 | |||
1507 | return cfs_rq->runtime_remaining > 0; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * Note: This depends on the synchronization provided by sched_clock and the | ||
1512 | * fact that rq->clock snapshots this value. | ||
1513 | */ | ||
1514 | static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
1515 | { | ||
1516 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | ||
1517 | struct rq *rq = rq_of(cfs_rq); | ||
1518 | |||
1519 | /* if the deadline is ahead of our clock, nothing to do */ | ||
1520 | if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0)) | ||
1521 | return; | ||
1522 | |||
1523 | if (cfs_rq->runtime_remaining < 0) | ||
1524 | return; | ||
1525 | |||
1526 | /* | ||
1527 | * If the local deadline has passed we have to consider the | ||
1528 | * possibility that our sched_clock is 'fast' and the global deadline | ||
1529 | * has not truly expired. | ||
1530 | * | ||
1531 | * Fortunately we can determine whether this is the case by checking | ||
1532 | * whether the global deadline has advanced. | ||
1533 | */ | ||
1534 | |||
1535 | if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) { | ||
1536 | /* extend local deadline, drift is bounded above by 2 ticks */ | ||
1537 | cfs_rq->runtime_expires += TICK_NSEC; | ||
1538 | } else { | ||
1539 | /* global deadline is ahead, expiration has passed */ | ||
1540 | cfs_rq->runtime_remaining = 0; | ||
1541 | } | ||
1542 | } | ||
1543 | |||
1544 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | ||
1545 | unsigned long delta_exec) | ||
1546 | { | ||
1547 | /* dock delta_exec before expiring quota (as it could span periods) */ | ||
1548 | cfs_rq->runtime_remaining -= delta_exec; | ||
1549 | expire_cfs_rq_runtime(cfs_rq); | ||
1550 | |||
1551 | if (likely(cfs_rq->runtime_remaining > 0)) | ||
1552 | return; | ||
1553 | |||
1554 | /* | ||
1555 | * if we're unable to extend our runtime we resched so that the active | ||
1556 | * hierarchy can be throttled | ||
1557 | */ | ||
1558 | if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) | ||
1559 | resched_task(rq_of(cfs_rq)->curr); | ||
1560 | } | ||
1561 | |||
1562 | static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | ||
1563 | unsigned long delta_exec) | ||
1564 | { | ||
1565 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) | ||
1566 | return; | ||
1567 | |||
1568 | __account_cfs_rq_runtime(cfs_rq, delta_exec); | ||
1569 | } | ||
1570 | |||
1571 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) | ||
1572 | { | ||
1573 | return cfs_bandwidth_used() && cfs_rq->throttled; | ||
1574 | } | ||
1575 | |||
1576 | /* check whether cfs_rq, or any parent, is throttled */ | ||
1577 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) | ||
1578 | { | ||
1579 | return cfs_bandwidth_used() && cfs_rq->throttle_count; | ||
1580 | } | ||
1581 | |||
1582 | /* | ||
1583 | * Ensure that neither of the group entities corresponding to src_cpu or | ||
1584 | * dest_cpu are members of a throttled hierarchy when performing group | ||
1585 | * load-balance operations. | ||
1586 | */ | ||
1587 | static inline int throttled_lb_pair(struct task_group *tg, | ||
1588 | int src_cpu, int dest_cpu) | ||
1589 | { | ||
1590 | struct cfs_rq *src_cfs_rq, *dest_cfs_rq; | ||
1591 | |||
1592 | src_cfs_rq = tg->cfs_rq[src_cpu]; | ||
1593 | dest_cfs_rq = tg->cfs_rq[dest_cpu]; | ||
1594 | |||
1595 | return throttled_hierarchy(src_cfs_rq) || | ||
1596 | throttled_hierarchy(dest_cfs_rq); | ||
1597 | } | ||
1598 | |||
1599 | /* updated child weight may affect parent so we have to do this bottom up */ | ||
1600 | static int tg_unthrottle_up(struct task_group *tg, void *data) | ||
1601 | { | ||
1602 | struct rq *rq = data; | ||
1603 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; | ||
1604 | |||
1605 | cfs_rq->throttle_count--; | ||
1606 | #ifdef CONFIG_SMP | ||
1607 | if (!cfs_rq->throttle_count) { | ||
1608 | u64 delta = rq->clock_task - cfs_rq->load_stamp; | ||
1609 | |||
1610 | /* leaving throttled state, advance shares averaging windows */ | ||
1611 | cfs_rq->load_stamp += delta; | ||
1612 | cfs_rq->load_last += delta; | ||
1613 | |||
1614 | /* update entity weight now that we are on_rq again */ | ||
1615 | update_cfs_shares(cfs_rq); | ||
1616 | } | ||
1617 | #endif | ||
1618 | |||
1619 | return 0; | ||
1620 | } | ||
1621 | |||
1622 | static int tg_throttle_down(struct task_group *tg, void *data) | ||
1623 | { | ||
1624 | struct rq *rq = data; | ||
1625 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; | ||
1626 | |||
1627 | /* group is entering throttled state, record last load */ | ||
1628 | if (!cfs_rq->throttle_count) | ||
1629 | update_cfs_load(cfs_rq, 0); | ||
1630 | cfs_rq->throttle_count++; | ||
1631 | |||
1632 | return 0; | ||
1633 | } | ||
1634 | |||
1635 | static void throttle_cfs_rq(struct cfs_rq *cfs_rq) | ||
1636 | { | ||
1637 | struct rq *rq = rq_of(cfs_rq); | ||
1638 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | ||
1639 | struct sched_entity *se; | ||
1640 | long task_delta, dequeue = 1; | ||
1641 | |||
1642 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; | ||
1643 | |||
1644 | /* account load preceding throttle */ | ||
1645 | rcu_read_lock(); | ||
1646 | walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); | ||
1647 | rcu_read_unlock(); | ||
1648 | |||
1649 | task_delta = cfs_rq->h_nr_running; | ||
1650 | for_each_sched_entity(se) { | ||
1651 | struct cfs_rq *qcfs_rq = cfs_rq_of(se); | ||
1652 | /* throttled entity or throttle-on-deactivate */ | ||
1653 | if (!se->on_rq) | ||
1654 | break; | ||
1655 | |||
1656 | if (dequeue) | ||
1657 | dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); | ||
1658 | qcfs_rq->h_nr_running -= task_delta; | ||
1659 | |||
1660 | if (qcfs_rq->load.weight) | ||
1661 | dequeue = 0; | ||
1662 | } | ||
1663 | |||
1664 | if (!se) | ||
1665 | rq->nr_running -= task_delta; | ||
1666 | |||
1667 | cfs_rq->throttled = 1; | ||
1668 | cfs_rq->throttled_timestamp = rq->clock; | ||
1669 | raw_spin_lock(&cfs_b->lock); | ||
1670 | list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); | ||
1671 | raw_spin_unlock(&cfs_b->lock); | ||
1672 | } | ||
1673 | |||
1674 | void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) | ||
1675 | { | ||
1676 | struct rq *rq = rq_of(cfs_rq); | ||
1677 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | ||
1678 | struct sched_entity *se; | ||
1679 | int enqueue = 1; | ||
1680 | long task_delta; | ||
1681 | |||
1682 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; | ||
1683 | |||
1684 | cfs_rq->throttled = 0; | ||
1685 | raw_spin_lock(&cfs_b->lock); | ||
1686 | cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp; | ||
1687 | list_del_rcu(&cfs_rq->throttled_list); | ||
1688 | raw_spin_unlock(&cfs_b->lock); | ||
1689 | cfs_rq->throttled_timestamp = 0; | ||
1690 | |||
1691 | update_rq_clock(rq); | ||
1692 | /* update hierarchical throttle state */ | ||
1693 | walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); | ||
1694 | |||
1695 | if (!cfs_rq->load.weight) | ||
1696 | return; | ||
1697 | |||
1698 | task_delta = cfs_rq->h_nr_running; | ||
1699 | for_each_sched_entity(se) { | ||
1700 | if (se->on_rq) | ||
1701 | enqueue = 0; | ||
1702 | |||
1703 | cfs_rq = cfs_rq_of(se); | ||
1704 | if (enqueue) | ||
1705 | enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); | ||
1706 | cfs_rq->h_nr_running += task_delta; | ||
1707 | |||
1708 | if (cfs_rq_throttled(cfs_rq)) | ||
1709 | break; | ||
1710 | } | ||
1711 | |||
1712 | if (!se) | ||
1713 | rq->nr_running += task_delta; | ||
1714 | |||
1715 | /* determine whether we need to wake up potentially idle cpu */ | ||
1716 | if (rq->curr == rq->idle && rq->cfs.nr_running) | ||
1717 | resched_task(rq->curr); | ||
1718 | } | ||
1719 | |||
1720 | static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, | ||
1721 | u64 remaining, u64 expires) | ||
1722 | { | ||
1723 | struct cfs_rq *cfs_rq; | ||
1724 | u64 runtime = remaining; | ||
1725 | |||
1726 | rcu_read_lock(); | ||
1727 | list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, | ||
1728 | throttled_list) { | ||
1729 | struct rq *rq = rq_of(cfs_rq); | ||
1730 | |||
1731 | raw_spin_lock(&rq->lock); | ||
1732 | if (!cfs_rq_throttled(cfs_rq)) | ||
1733 | goto next; | ||
1734 | |||
1735 | runtime = -cfs_rq->runtime_remaining + 1; | ||
1736 | if (runtime > remaining) | ||
1737 | runtime = remaining; | ||
1738 | remaining -= runtime; | ||
1739 | |||
1740 | cfs_rq->runtime_remaining += runtime; | ||
1741 | cfs_rq->runtime_expires = expires; | ||
1742 | |||
1743 | /* we check whether we're throttled above */ | ||
1744 | if (cfs_rq->runtime_remaining > 0) | ||
1745 | unthrottle_cfs_rq(cfs_rq); | ||
1746 | |||
1747 | next: | ||
1748 | raw_spin_unlock(&rq->lock); | ||
1749 | |||
1750 | if (!remaining) | ||
1751 | break; | ||
1752 | } | ||
1753 | rcu_read_unlock(); | ||
1754 | |||
1755 | return remaining; | ||
1756 | } | ||
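As a rough illustration of the policy above, the following stand-alone sketch (made-up values, not kernel code) tops each throttled queue up to +1 ns of runtime from the pool until the pool is exhausted; the queue left in deficit is exactly why the period timer must stay active while anything remains throttled:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t remaining = 5;                          /* runtime pool, in ns */
	int64_t runtime_remaining[3] = { -3, -1, -4 };  /* per-queue deficits */

	for (int i = 0; i < 3 && remaining; i++) {
		/* just enough to leave the queue at +1 ns, clamped to the pool */
		int64_t runtime = -runtime_remaining[i] + 1;

		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;
		runtime_remaining[i] += runtime;

		printf("queue %d: runtime_remaining=%lld pool=%lld\n",
		       i, (long long)runtime_remaining[i], (long long)remaining);
	}
	return 0;
}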
1757 | |||
1758 | /* | ||
1759 | * Responsible for refilling a task_group's bandwidth and unthrottling its | ||
1760 | * cfs_rqs as appropriate. If there has been no activity within the last | ||
1761 | * period the timer is deactivated until scheduling resumes; cfs_b->idle is | ||
1762 | * used to track this state. | ||
1763 | */ | ||
1764 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) | ||
1765 | { | ||
1766 | u64 runtime, runtime_expires; | ||
1767 | int idle = 1, throttled; | ||
1768 | |||
1769 | raw_spin_lock(&cfs_b->lock); | ||
1770 | /* no need to continue the timer with no bandwidth constraint */ | ||
1771 | if (cfs_b->quota == RUNTIME_INF) | ||
1772 | goto out_unlock; | ||
1773 | |||
1774 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); | ||
1775 | /* idle depends on !throttled (for the case of a large deficit) */ | ||
1776 | idle = cfs_b->idle && !throttled; | ||
1777 | cfs_b->nr_periods += overrun; | ||
1778 | |||
1779 | /* if we're going inactive then everything else can be deferred */ | ||
1780 | if (idle) | ||
1781 | goto out_unlock; | ||
1782 | |||
1783 | __refill_cfs_bandwidth_runtime(cfs_b); | ||
1784 | |||
1785 | if (!throttled) { | ||
1786 | /* mark as potentially idle for the upcoming period */ | ||
1787 | cfs_b->idle = 1; | ||
1788 | goto out_unlock; | ||
1789 | } | ||
1790 | |||
1791 | /* account preceding periods in which throttling occurred */ | ||
1792 | cfs_b->nr_throttled += overrun; | ||
1793 | |||
1794 | /* | ||
1795 | * There are throttled entities so we must first use the new bandwidth | ||
1796 | * to unthrottle them before making it generally available. This | ||
1797 | * ensures that all existing debts will be paid before a new cfs_rq is | ||
1798 | * allowed to run. | ||
1799 | */ | ||
1800 | runtime = cfs_b->runtime; | ||
1801 | runtime_expires = cfs_b->runtime_expires; | ||
1802 | cfs_b->runtime = 0; | ||
1803 | |||
1804 | /* | ||
1805 | * This check is repeated as we are holding onto the new bandwidth | ||
1806 | * while we unthrottle. This can potentially race with an unthrottled | ||
1807 | * group trying to acquire new bandwidth from the global pool. | ||
1808 | */ | ||
1809 | while (throttled && runtime > 0) { | ||
1810 | raw_spin_unlock(&cfs_b->lock); | ||
1811 | /* we can't nest cfs_b->lock while distributing bandwidth */ | ||
1812 | runtime = distribute_cfs_runtime(cfs_b, runtime, | ||
1813 | runtime_expires); | ||
1814 | raw_spin_lock(&cfs_b->lock); | ||
1815 | |||
1816 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); | ||
1817 | } | ||
1818 | |||
1819 | /* return (any) remaining runtime */ | ||
1820 | cfs_b->runtime = runtime; | ||
1821 | /* | ||
1822 | * While we are ensured activity in the period following an | ||
1823 | * unthrottle, this also covers the case in which the new bandwidth is | ||
1824 | * insufficient to cover the existing bandwidth deficit. (Forcing the | ||
1825 | * timer to remain active while there are any throttled entities.) | ||
1826 | */ | ||
1827 | cfs_b->idle = 0; | ||
1828 | out_unlock: | ||
1829 | if (idle) | ||
1830 | cfs_b->timer_active = 0; | ||
1831 | raw_spin_unlock(&cfs_b->lock); | ||
1832 | |||
1833 | return idle; | ||
1834 | } | ||
1835 | |||
1836 | /* a cfs_rq won't donate quota below this amount */ | ||
1837 | static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; | ||
1838 | /* minimum remaining period time to redistribute slack quota */ | ||
1839 | static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; | ||
1840 | /* how long we wait to gather additional slack before distributing */ | ||
1841 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; | ||
1842 | |||
1843 | /* are we near the end of the current quota period? */ | ||
1844 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) | ||
1845 | { | ||
1846 | struct hrtimer *refresh_timer = &cfs_b->period_timer; | ||
1847 | u64 remaining; | ||
1848 | |||
1849 | /* if the call-back is running, a quota refresh is already occurring */ | ||
1850 | if (hrtimer_callback_running(refresh_timer)) | ||
1851 | return 1; | ||
1852 | |||
1853 | /* is a quota refresh about to occur? */ | ||
1854 | remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); | ||
1855 | if (remaining < min_expire) | ||
1856 | return 1; | ||
1857 | |||
1858 | return 0; | ||
1859 | } | ||
1860 | |||
1861 | static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) | ||
1862 | { | ||
1863 | u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; | ||
1864 | |||
1865 | /* if there's a quota refresh soon don't bother with slack */ | ||
1866 | if (runtime_refresh_within(cfs_b, min_left)) | ||
1867 | return; | ||
1868 | |||
1869 | start_bandwidth_timer(&cfs_b->slack_timer, | ||
1870 | ns_to_ktime(cfs_bandwidth_slack_period)); | ||
1871 | } | ||
1872 | |||
1873 | /* we know any runtime found here is valid as update_curr() precedes return */ | ||
1874 | static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
1875 | { | ||
1876 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | ||
1877 | s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; | ||
1878 | |||
1879 | if (slack_runtime <= 0) | ||
1880 | return; | ||
1881 | |||
1882 | raw_spin_lock(&cfs_b->lock); | ||
1883 | if (cfs_b->quota != RUNTIME_INF && | ||
1884 | cfs_rq->runtime_expires == cfs_b->runtime_expires) { | ||
1885 | cfs_b->runtime += slack_runtime; | ||
1886 | |||
1887 | /* we are under rq->lock, defer unthrottling using a timer */ | ||
1888 | if (cfs_b->runtime > sched_cfs_bandwidth_slice() && | ||
1889 | !list_empty(&cfs_b->throttled_cfs_rq)) | ||
1890 | start_cfs_slack_bandwidth(cfs_b); | ||
1891 | } | ||
1892 | raw_spin_unlock(&cfs_b->lock); | ||
1893 | |||
1894 | /* even if it's not valid for return we don't want to try again */ | ||
1895 | cfs_rq->runtime_remaining -= slack_runtime; | ||
1896 | } | ||
1897 | |||
1898 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
1899 | { | ||
1900 | if (!cfs_bandwidth_used()) | ||
1901 | return; | ||
1902 | |||
1903 | if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) | ||
1904 | return; | ||
1905 | |||
1906 | __return_cfs_rq_runtime(cfs_rq); | ||
1907 | } | ||
1908 | |||
1909 | /* | ||
1910 | * This is done with a timer (instead of inline with bandwidth return) since | ||
1911 | * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. | ||
1912 | */ | ||
1913 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) | ||
1914 | { | ||
1915 | u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); | ||
1916 | u64 expires; | ||
1917 | |||
1918 | /* confirm we're still not at a refresh boundary */ | ||
1919 | if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) | ||
1920 | return; | ||
1921 | |||
1922 | raw_spin_lock(&cfs_b->lock); | ||
1923 | if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { | ||
1924 | runtime = cfs_b->runtime; | ||
1925 | cfs_b->runtime = 0; | ||
1926 | } | ||
1927 | expires = cfs_b->runtime_expires; | ||
1928 | raw_spin_unlock(&cfs_b->lock); | ||
1929 | |||
1930 | if (!runtime) | ||
1931 | return; | ||
1932 | |||
1933 | runtime = distribute_cfs_runtime(cfs_b, runtime, expires); | ||
1934 | |||
1935 | raw_spin_lock(&cfs_b->lock); | ||
1936 | if (expires == cfs_b->runtime_expires) | ||
1937 | cfs_b->runtime = runtime; | ||
1938 | raw_spin_unlock(&cfs_b->lock); | ||
1939 | } | ||
1940 | |||
1941 | /* | ||
1942 | * When a group wakes up we want to make sure that its quota is not already | ||
1943 | * expired/exceeded, otherwise it may be allowed to steal additional ticks of | ||
1944 | * runtime as update_curr() throttling cannot trigger until it's on-rq. | ||
1945 | */ | ||
1946 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) | ||
1947 | { | ||
1948 | if (!cfs_bandwidth_used()) | ||
1949 | return; | ||
1950 | |||
1951 | /* an active group must be handled by the update_curr()->put() path */ | ||
1952 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) | ||
1953 | return; | ||
1954 | |||
1955 | /* ensure the group is not already throttled */ | ||
1956 | if (cfs_rq_throttled(cfs_rq)) | ||
1957 | return; | ||
1958 | |||
1959 | /* update runtime allocation */ | ||
1960 | account_cfs_rq_runtime(cfs_rq, 0); | ||
1961 | if (cfs_rq->runtime_remaining <= 0) | ||
1962 | throttle_cfs_rq(cfs_rq); | ||
1963 | } | ||
1964 | |||
1965 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ | ||
1966 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
1967 | { | ||
1968 | if (!cfs_bandwidth_used()) | ||
1969 | return; | ||
1970 | |||
1971 | if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) | ||
1972 | return; | ||
1973 | |||
1974 | /* | ||
1975 | * it's possible for a throttled entity to be forced into a running | ||
1976 | * state (e.g. set_curr_task), in this case we're finished. | ||
1977 | */ | ||
1978 | if (cfs_rq_throttled(cfs_rq)) | ||
1979 | return; | ||
1980 | |||
1981 | throttle_cfs_rq(cfs_rq); | ||
1982 | } | ||
1983 | |||
1984 | static inline u64 default_cfs_period(void); | ||
1985 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); | ||
1986 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); | ||
1987 | |||
1988 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) | ||
1989 | { | ||
1990 | struct cfs_bandwidth *cfs_b = | ||
1991 | container_of(timer, struct cfs_bandwidth, slack_timer); | ||
1992 | do_sched_cfs_slack_timer(cfs_b); | ||
1993 | |||
1994 | return HRTIMER_NORESTART; | ||
1995 | } | ||
1996 | |||
1997 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) | ||
1998 | { | ||
1999 | struct cfs_bandwidth *cfs_b = | ||
2000 | container_of(timer, struct cfs_bandwidth, period_timer); | ||
2001 | ktime_t now; | ||
2002 | int overrun; | ||
2003 | int idle = 0; | ||
2004 | |||
2005 | for (;;) { | ||
2006 | now = hrtimer_cb_get_time(timer); | ||
2007 | overrun = hrtimer_forward(timer, now, cfs_b->period); | ||
2008 | |||
2009 | if (!overrun) | ||
2010 | break; | ||
2011 | |||
2012 | idle = do_sched_cfs_period_timer(cfs_b, overrun); | ||
2013 | } | ||
2014 | |||
2015 | return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; | ||
2016 | } | ||
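The loop above relies on hrtimer_forward() to push the expiry forward by whole periods and report how many periods elapsed (the overrun handed to do_sched_cfs_period_timer()); when no period has elapsed the loop exits. A simplified user-space analogue of that catch-up step, assuming plain integer nanosecond timestamps (the real helper handles large gaps more carefully):

#include <stdint.h>
#include <stdio.h>

/* advance *expires by whole periods until it lies in the future,
 * returning how many periods were skipped (the "overrun") */
static int forward(uint64_t *expires, uint64_t now, uint64_t period)
{
	int overrun = 0;

	while (*expires <= now) {
		*expires += period;
		overrun++;
	}
	return overrun;
}

int main(void)
{
	uint64_t expires = 100, period = 100;

	printf("overrun=%d\n", forward(&expires, 350, period));  /* 3 periods elapsed */
	printf("expires=%llu\n", (unsigned long long)expires);   /* rearmed at 400 */
	printf("overrun=%d\n", forward(&expires, 350, period));  /* 0: would end the loop above */
	return 0;
}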
2017 | |||
2018 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | ||
2019 | { | ||
2020 | raw_spin_lock_init(&cfs_b->lock); | ||
2021 | cfs_b->runtime = 0; | ||
2022 | cfs_b->quota = RUNTIME_INF; | ||
2023 | cfs_b->period = ns_to_ktime(default_cfs_period()); | ||
2024 | |||
2025 | INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); | ||
2026 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
2027 | cfs_b->period_timer.function = sched_cfs_period_timer; | ||
2028 | hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
2029 | cfs_b->slack_timer.function = sched_cfs_slack_timer; | ||
2030 | } | ||
2031 | |||
2032 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) | ||
2033 | { | ||
2034 | cfs_rq->runtime_enabled = 0; | ||
2035 | INIT_LIST_HEAD(&cfs_rq->throttled_list); | ||
2036 | } | ||
2037 | |||
2038 | /* requires cfs_b->lock, may release to reprogram timer */ | ||
2039 | void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | ||
2040 | { | ||
2041 | /* | ||
2042 | * The timer may be active because we're trying to set a new bandwidth | ||
2043 | * period or because we're racing with the tear-down path | ||
2044 | * (timer_active==0 becomes visible before the hrtimer call-back | ||
2045 | * terminates). In either case we ensure that it's re-programmed. | ||
2046 | */ | ||
2047 | while (unlikely(hrtimer_active(&cfs_b->period_timer))) { | ||
2048 | raw_spin_unlock(&cfs_b->lock); | ||
2049 | /* ensure cfs_b->lock is available while we wait */ | ||
2050 | hrtimer_cancel(&cfs_b->period_timer); | ||
2051 | |||
2052 | raw_spin_lock(&cfs_b->lock); | ||
2053 | /* if someone else restarted the timer then we're done */ | ||
2054 | if (cfs_b->timer_active) | ||
2055 | return; | ||
2056 | } | ||
2057 | |||
2058 | cfs_b->timer_active = 1; | ||
2059 | start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); | ||
2060 | } | ||
2061 | |||
2062 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | ||
2063 | { | ||
2064 | hrtimer_cancel(&cfs_b->period_timer); | ||
2065 | hrtimer_cancel(&cfs_b->slack_timer); | ||
2066 | } | ||
2067 | |||
2068 | void unthrottle_offline_cfs_rqs(struct rq *rq) | ||
2069 | { | ||
2070 | struct cfs_rq *cfs_rq; | ||
2071 | |||
2072 | for_each_leaf_cfs_rq(rq, cfs_rq) { | ||
2073 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); | ||
2074 | |||
2075 | if (!cfs_rq->runtime_enabled) | ||
2076 | continue; | ||
2077 | |||
2078 | /* | ||
2079 | * clock_task is not advancing so we just need to make sure | ||
2080 | * there's some valid quota amount | ||
2081 | */ | ||
2082 | cfs_rq->runtime_remaining = cfs_b->quota; | ||
2083 | if (cfs_rq_throttled(cfs_rq)) | ||
2084 | unthrottle_cfs_rq(cfs_rq); | ||
2085 | } | ||
2086 | } | ||
2087 | |||
2088 | #else /* CONFIG_CFS_BANDWIDTH */ | ||
2089 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, | ||
2090 | unsigned long delta_exec) {} | ||
2091 | static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | ||
2092 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} | ||
2093 | static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | ||
2094 | |||
2095 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) | ||
2096 | { | ||
2097 | return 0; | ||
2098 | } | ||
2099 | |||
2100 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) | ||
2101 | { | ||
2102 | return 0; | ||
2103 | } | ||
2104 | |||
2105 | static inline int throttled_lb_pair(struct task_group *tg, | ||
2106 | int src_cpu, int dest_cpu) | ||
2107 | { | ||
2108 | return 0; | ||
2109 | } | ||
2110 | |||
2111 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} | ||
2112 | |||
2113 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
2114 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} | ||
2115 | #endif | ||
2116 | |||
2117 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | ||
2118 | { | ||
2119 | return NULL; | ||
2120 | } | ||
2121 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} | ||
2122 | void unthrottle_offline_cfs_rqs(struct rq *rq) {} | ||
2123 | |||
2124 | #endif /* CONFIG_CFS_BANDWIDTH */ | ||
2125 | |||
2126 | /************************************************** | ||
2127 | * CFS operations on tasks: | ||
2128 | */ | ||
2129 | |||
2130 | #ifdef CONFIG_SCHED_HRTICK | ||
2131 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | ||
2132 | { | ||
2133 | struct sched_entity *se = &p->se; | ||
2134 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
2135 | |||
2136 | WARN_ON(task_rq(p) != rq); | ||
2137 | |||
2138 | if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) { | ||
2139 | u64 slice = sched_slice(cfs_rq, se); | ||
2140 | u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; | ||
2141 | s64 delta = slice - ran; | ||
2142 | |||
2143 | if (delta < 0) { | ||
2144 | if (rq->curr == p) | ||
2145 | resched_task(p); | ||
2146 | return; | ||
2147 | } | ||
2148 | |||
2149 | /* | ||
2150 | * Don't schedule slices shorter than 10000ns, that just | ||
2151 | * doesn't make sense. Rely on vruntime for fairness. | ||
2152 | */ | ||
2153 | if (rq->curr != p) | ||
2154 | delta = max_t(s64, 10000LL, delta); | ||
2155 | |||
2156 | hrtick_start(rq, delta); | ||
2157 | } | ||
2158 | } | ||
2159 | |||
2160 | /* | ||
2161 | * called from enqueue/dequeue and updates the hrtick when the | ||
2162 | * current task is from our class and nr_running is low enough | ||
2163 | * to matter. | ||
2164 | */ | ||
2165 | static void hrtick_update(struct rq *rq) | ||
2166 | { | ||
2167 | struct task_struct *curr = rq->curr; | ||
2168 | |||
2169 | if (curr->sched_class != &fair_sched_class) | ||
2170 | return; | ||
2171 | |||
2172 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) | ||
2173 | hrtick_start_fair(rq, curr); | ||
2174 | } | ||
2175 | #else /* !CONFIG_SCHED_HRTICK */ | ||
2176 | static inline void | ||
2177 | hrtick_start_fair(struct rq *rq, struct task_struct *p) | ||
2178 | { | ||
2179 | } | ||
2180 | |||
2181 | static inline void hrtick_update(struct rq *rq) | ||
2182 | { | ||
2183 | } | ||
2184 | #endif | ||
2185 | |||
2186 | /* | ||
2187 | * The enqueue_task method is called before nr_running is | ||
2188 | * increased. Here we update the fair scheduling stats and | ||
2189 | * then put the task into the rbtree: | ||
2190 | */ | ||
2191 | static void | ||
2192 | enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) | ||
2193 | { | ||
2194 | struct cfs_rq *cfs_rq; | ||
2195 | struct sched_entity *se = &p->se; | ||
2196 | |||
2197 | for_each_sched_entity(se) { | ||
2198 | if (se->on_rq) | ||
2199 | break; | ||
2200 | cfs_rq = cfs_rq_of(se); | ||
2201 | enqueue_entity(cfs_rq, se, flags); | ||
2202 | |||
2203 | /* | ||
2204 | * end evaluation on encountering a throttled cfs_rq | ||
2205 | * | ||
2206 | * note: in the case of encountering a throttled cfs_rq we will | ||
2207 | * post the final h_nr_running increment below. | ||
2208 | */ | ||
2209 | if (cfs_rq_throttled(cfs_rq)) | ||
2210 | break; | ||
2211 | cfs_rq->h_nr_running++; | ||
2212 | |||
2213 | flags = ENQUEUE_WAKEUP; | ||
2214 | } | ||
2215 | |||
2216 | for_each_sched_entity(se) { | ||
2217 | cfs_rq = cfs_rq_of(se); | ||
2218 | cfs_rq->h_nr_running++; | ||
2219 | |||
2220 | if (cfs_rq_throttled(cfs_rq)) | ||
2221 | break; | ||
2222 | |||
2223 | update_cfs_load(cfs_rq, 0); | ||
2224 | update_cfs_shares(cfs_rq); | ||
2225 | } | ||
2226 | |||
2227 | if (!se) | ||
2228 | inc_nr_running(rq); | ||
2229 | hrtick_update(rq); | ||
2230 | } | ||
2231 | |||
2232 | static void set_next_buddy(struct sched_entity *se); | ||
2233 | |||
2234 | /* | ||
2235 | * The dequeue_task method is called before nr_running is | ||
2236 | * decreased. We remove the task from the rbtree and | ||
2237 | * update the fair scheduling stats: | ||
2238 | */ | ||
2239 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | ||
2240 | { | ||
2241 | struct cfs_rq *cfs_rq; | ||
2242 | struct sched_entity *se = &p->se; | ||
2243 | int task_sleep = flags & DEQUEUE_SLEEP; | ||
2244 | |||
2245 | for_each_sched_entity(se) { | ||
2246 | cfs_rq = cfs_rq_of(se); | ||
2247 | dequeue_entity(cfs_rq, se, flags); | ||
2248 | |||
2249 | /* | ||
2250 | * end evaluation on encountering a throttled cfs_rq | ||
2251 | * | ||
2252 | * note: in the case of encountering a throttled cfs_rq we will | ||
2253 | * post the final h_nr_running decrement below. | ||
2254 | */ | ||
2255 | if (cfs_rq_throttled(cfs_rq)) | ||
2256 | break; | ||
2257 | cfs_rq->h_nr_running--; | ||
2258 | |||
2259 | /* Don't dequeue parent if it has other entities besides us */ | ||
2260 | if (cfs_rq->load.weight) { | ||
2261 | /* | ||
2262 | * Bias pick_next to pick a task from this cfs_rq, as | ||
2263 | * p is sleeping when it is within its sched_slice. | ||
2264 | */ | ||
2265 | if (task_sleep && parent_entity(se)) | ||
2266 | set_next_buddy(parent_entity(se)); | ||
2267 | |||
2268 | /* avoid re-evaluating load for this entity */ | ||
2269 | se = parent_entity(se); | ||
2270 | break; | ||
2271 | } | ||
2272 | flags |= DEQUEUE_SLEEP; | ||
2273 | } | ||
2274 | |||
2275 | for_each_sched_entity(se) { | ||
2276 | cfs_rq = cfs_rq_of(se); | ||
2277 | cfs_rq->h_nr_running--; | ||
2278 | |||
2279 | if (cfs_rq_throttled(cfs_rq)) | ||
2280 | break; | ||
2281 | |||
2282 | update_cfs_load(cfs_rq, 0); | ||
2283 | update_cfs_shares(cfs_rq); | ||
2284 | } | ||
2285 | |||
2286 | if (!se) | ||
2287 | dec_nr_running(rq); | ||
2288 | hrtick_update(rq); | ||
2289 | } | ||
2290 | |||
2291 | #ifdef CONFIG_SMP | ||
2292 | /* Used instead of source_load when we know the type == 0 */ | ||
2293 | static unsigned long weighted_cpuload(const int cpu) | ||
2294 | { | ||
2295 | return cpu_rq(cpu)->load.weight; | ||
2296 | } | ||
2297 | |||
2298 | /* | ||
2299 | * Return a low guess at the load of a migration-source cpu weighted | ||
2300 | * according to the scheduling class and "nice" value. | ||
2301 | * | ||
2302 | * We want to under-estimate the load of migration sources, to | ||
2303 | * balance conservatively. | ||
2304 | */ | ||
2305 | static unsigned long source_load(int cpu, int type) | ||
2306 | { | ||
2307 | struct rq *rq = cpu_rq(cpu); | ||
2308 | unsigned long total = weighted_cpuload(cpu); | ||
2309 | |||
2310 | if (type == 0 || !sched_feat(LB_BIAS)) | ||
2311 | return total; | ||
2312 | |||
2313 | return min(rq->cpu_load[type-1], total); | ||
2314 | } | ||
2315 | |||
2316 | /* | ||
2317 | * Return a high guess at the load of a migration-target cpu weighted | ||
2318 | * according to the scheduling class and "nice" value. | ||
2319 | */ | ||
2320 | static unsigned long target_load(int cpu, int type) | ||
2321 | { | ||
2322 | struct rq *rq = cpu_rq(cpu); | ||
2323 | unsigned long total = weighted_cpuload(cpu); | ||
2324 | |||
2325 | if (type == 0 || !sched_feat(LB_BIAS)) | ||
2326 | return total; | ||
2327 | |||
2328 | return max(rq->cpu_load[type-1], total); | ||
2329 | } | ||
2330 | |||
2331 | static unsigned long power_of(int cpu) | ||
2332 | { | ||
2333 | return cpu_rq(cpu)->cpu_power; | ||
2334 | } | ||
2335 | |||
2336 | static unsigned long cpu_avg_load_per_task(int cpu) | ||
2337 | { | ||
2338 | struct rq *rq = cpu_rq(cpu); | ||
2339 | unsigned long nr_running = ACCESS_ONCE(rq->nr_running); | ||
2340 | |||
2341 | if (nr_running) | ||
2342 | return rq->load.weight / nr_running; | ||
2343 | |||
2344 | return 0; | ||
2345 | } | ||
2346 | |||
2347 | |||
2348 | static void task_waking_fair(struct task_struct *p) | ||
2349 | { | ||
2350 | struct sched_entity *se = &p->se; | ||
2351 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
2352 | u64 min_vruntime; | ||
2353 | |||
2354 | #ifndef CONFIG_64BIT | ||
2355 | u64 min_vruntime_copy; | ||
2356 | |||
2357 | do { | ||
2358 | min_vruntime_copy = cfs_rq->min_vruntime_copy; | ||
2359 | smp_rmb(); | ||
2360 | min_vruntime = cfs_rq->min_vruntime; | ||
2361 | } while (min_vruntime != min_vruntime_copy); | ||
2362 | #else | ||
2363 | min_vruntime = cfs_rq->min_vruntime; | ||
2364 | #endif | ||
2365 | |||
2366 | se->vruntime -= min_vruntime; | ||
2367 | } | ||
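The copy/retry loop above lets a 32-bit reader obtain a consistent 64-bit min_vruntime without taking the rq lock: the updater stores the value, issues a write barrier, then stores the copy, so a reader that sees both halves equal saw a complete update. A hypothetical user-space sketch of the same ordering, with C11 fences standing in for smp_wmb()/smp_rmb():

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t min_vruntime;
static _Atomic uint64_t min_vruntime_copy;

static void publish(uint64_t v)               /* writer side (update path) */
{
	atomic_store_explicit(&min_vruntime, v, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);           /* ~ smp_wmb() */
	atomic_store_explicit(&min_vruntime_copy, v, memory_order_relaxed);
}

static uint64_t read_consistent(void)         /* reader side (task_waking) */
{
	uint64_t v, copy;

	do {
		copy = atomic_load_explicit(&min_vruntime_copy, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);    /* ~ smp_rmb() */
		v = atomic_load_explicit(&min_vruntime, memory_order_relaxed);
	} while (v != copy);

	return v;
}

int main(void)
{
	publish(123456789ULL);
	return read_consistent() == 123456789ULL ? 0 : 1;
}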
2368 | |||
2369 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
2370 | /* | ||
2371 | * effective_load() calculates the load change as seen from the root_task_group | ||
2372 | * | ||
2373 | * Adding load to a group doesn't make a group heavier, but can cause movement | ||
2374 | * of group shares between cpus. Assuming the shares were perfectly aligned one | ||
2375 | * can calculate the shift in shares. | ||
2376 | * | ||
2377 | * Calculate the effective load difference if @wl is added (subtracted) to @tg | ||
2378 | * on this @cpu and results in a total addition (subtraction) of @wg to the | ||
2379 | * total group weight. | ||
2380 | * | ||
2381 | * Given a runqueue weight distribution (rw_i) we can compute a shares | ||
2382 | * distribution (s_i) using: | ||
2383 | * | ||
2384 | * s_i = rw_i / \Sum rw_j (1) | ||
2385 | * | ||
2386 | * Suppose we have 4 CPUs and our @tg is a direct child of the root group and | ||
2387 | * has 7 equal weight tasks, distributed as below (rw_i), with the resulting | ||
2388 | * shares distribution (s_i): | ||
2389 | * | ||
2390 | * rw_i = { 2, 4, 1, 0 } | ||
2391 | * s_i = { 2/7, 4/7, 1/7, 0 } | ||
2392 | * | ||
2393 | * As per wake_affine() we're interested in the load of two CPUs (the CPU the | ||
2394 | * task used to run on and the CPU the waker is running on), we need to | ||
2395 | * compute the effect of waking a task on either CPU and, in case of a sync | ||
2396 | * wakeup, compute the effect of the current task going to sleep. | ||
2397 | * | ||
2398 | * So for a change of @wl to the local @cpu with an overall group weight change | ||
2399 | * of @wg we can compute the new shares distribution (s'_i) using: | ||
2400 | * | ||
2401 | * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2) | ||
2402 | * | ||
2403 | * Suppose we're interested in CPUs 0 and 1, and want to compute the load | ||
2404 | * differences in waking a task to CPU 0. The additional task changes the | ||
2405 | * weight and shares distributions like: | ||
2406 | * | ||
2407 | * rw'_i = { 3, 4, 1, 0 } | ||
2408 | * s'_i = { 3/8, 4/8, 1/8, 0 } | ||
2409 | * | ||
2410 | * We can then compute the difference in effective weight by using: | ||
2411 | * | ||
2412 | * dw_i = S * (s'_i - s_i) (3) | ||
2413 | * | ||
2414 | * Where 'S' is the group weight as seen by its parent. | ||
2415 | * | ||
2416 | * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7) | ||
2417 | * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 - | ||
2418 | * 4/7) times the weight of the group. | ||
2419 | */ | ||
2420 | static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | ||
2421 | { | ||
2422 | struct sched_entity *se = tg->se[cpu]; | ||
2423 | |||
2424 | if (!tg->parent) /* the trivial, non-cgroup case */ | ||
2425 | return wl; | ||
2426 | |||
2427 | for_each_sched_entity(se) { | ||
2428 | long w, W; | ||
2429 | |||
2430 | tg = se->my_q->tg; | ||
2431 | |||
2432 | /* | ||
2433 | * W = @wg + \Sum rw_j | ||
2434 | */ | ||
2435 | W = wg + calc_tg_weight(tg, se->my_q); | ||
2436 | |||
2437 | /* | ||
2438 | * w = rw_i + @wl | ||
2439 | */ | ||
2440 | w = se->my_q->load.weight + wl; | ||
2441 | |||
2442 | /* | ||
2443 | * wl = S * s'_i; see (2) | ||
2444 | */ | ||
2445 | if (W > 0 && w < W) | ||
2446 | wl = (w * tg->shares) / W; | ||
2447 | else | ||
2448 | wl = tg->shares; | ||
2449 | |||
2450 | /* | ||
2451 | * Per the above, wl is the new se->load.weight value; since | ||
2452 | * those are clipped to [MIN_SHARES, ...) do so now. See | ||
2453 | * calc_cfs_shares(). | ||
2454 | */ | ||
2455 | if (wl < MIN_SHARES) | ||
2456 | wl = MIN_SHARES; | ||
2457 | |||
2458 | /* | ||
2459 | * wl = dw_i = S * (s'_i - s_i); see (3) | ||
2460 | */ | ||
2461 | wl -= se->load.weight; | ||
2462 | |||
2463 | /* | ||
2464 | * Recursively apply this logic to all parent groups to compute | ||
2465 | * the final effective load change on the root group. Since | ||
2466 | * only the @tg group gets extra weight, all parent groups can | ||
2467 | * only redistribute existing shares. @wl is the shift in shares | ||
2468 | * resulting from this level per the above. | ||
2469 | */ | ||
2470 | wg = 0; | ||
2471 | } | ||
2472 | |||
2473 | return wl; | ||
2474 | } | ||
2475 | #else | ||
2476 | |||
2477 | static inline unsigned long effective_load(struct task_group *tg, int cpu, | ||
2478 | unsigned long wl, unsigned long wg) | ||
2479 | { | ||
2480 | return wl; | ||
2481 | } | ||
2482 | |||
2483 | #endif | ||
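To make the derivation above concrete, here is a stand-alone check of its 4-CPU example: runqueue weights {2, 4, 1, 0}, one task of weight 1 woken onto CPU 0, with each CPU's effective-load change expressed as a fraction of the group weight S (values are purely illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };
	double sum = rw[0] + rw[1] + rw[2] + rw[3];   /* \Sum rw_j = 7 */
	double wl = 1;                                /* weight added to CPU 0 */

	double s0  = rw[0] / sum;                     /* 2/7, per (1) */
	double s1  = rw[1] / sum;                     /* 4/7 */
	double s0p = (rw[0] + wl) / (sum + wl);       /* 3/8, per (2) */
	double s1p = rw[1] / (sum + wl);              /* 4/8 */

	printf("dw_0 = S * %+f (expected +5/56 = %+f)\n", s0p - s0,  5.0 / 56);
	printf("dw_1 = S * %+f (expected -4/56 = %+f)\n", s1p - s1, -4.0 / 56);
	return 0;
}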
2484 | |||
2485 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) | ||
2486 | { | ||
2487 | s64 this_load, load; | ||
2488 | int idx, this_cpu, prev_cpu; | ||
2489 | unsigned long tl_per_task; | ||
2490 | struct task_group *tg; | ||
2491 | unsigned long weight; | ||
2492 | int balanced; | ||
2493 | |||
2494 | idx = sd->wake_idx; | ||
2495 | this_cpu = smp_processor_id(); | ||
2496 | prev_cpu = task_cpu(p); | ||
2497 | load = source_load(prev_cpu, idx); | ||
2498 | this_load = target_load(this_cpu, idx); | ||
2499 | |||
2500 | /* | ||
2501 | * If sync wakeup then subtract the (maximum possible) | ||
2502 | * effect of the currently running task from the load | ||
2503 | * of the current CPU: | ||
2504 | */ | ||
2505 | if (sync) { | ||
2506 | tg = task_group(current); | ||
2507 | weight = current->se.load.weight; | ||
2508 | |||
2509 | this_load += effective_load(tg, this_cpu, -weight, -weight); | ||
2510 | load += effective_load(tg, prev_cpu, 0, -weight); | ||
2511 | } | ||
2512 | |||
2513 | tg = task_group(p); | ||
2514 | weight = p->se.load.weight; | ||
2515 | |||
2516 | /* | ||
2517 | * In low-load situations, where prev_cpu is idle and this_cpu is idle | ||
2518 | * due to the sync case above having dropped this_load to 0, we'll | ||
2519 | * always have an imbalance, but there's really nothing you can do | ||
2520 | * about that, so that's good too. | ||
2521 | * | ||
2522 | * Otherwise check if either cpus are near enough in load to allow this | ||
2523 | * task to be woken on this_cpu. | ||
2524 | */ | ||
2525 | if (this_load > 0) { | ||
2526 | s64 this_eff_load, prev_eff_load; | ||
2527 | |||
2528 | this_eff_load = 100; | ||
2529 | this_eff_load *= power_of(prev_cpu); | ||
2530 | this_eff_load *= this_load + | ||
2531 | effective_load(tg, this_cpu, weight, weight); | ||
2532 | |||
2533 | prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; | ||
2534 | prev_eff_load *= power_of(this_cpu); | ||
2535 | prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); | ||
2536 | |||
2537 | balanced = this_eff_load <= prev_eff_load; | ||
2538 | } else | ||
2539 | balanced = true; | ||
2540 | |||
2541 | /* | ||
2542 | * If the currently running task will sleep within | ||
2543 | * a reasonable amount of time then attract this newly | ||
2544 | * woken task: | ||
2545 | */ | ||
2546 | if (sync && balanced) | ||
2547 | return 1; | ||
2548 | |||
2549 | schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts); | ||
2550 | tl_per_task = cpu_avg_load_per_task(this_cpu); | ||
2551 | |||
2552 | if (balanced || | ||
2553 | (this_load <= load && | ||
2554 | this_load + target_load(prev_cpu, idx) <= tl_per_task)) { | ||
2555 | /* | ||
2556 | * This domain has SD_WAKE_AFFINE and | ||
2557 | * p is cache cold in this domain, and | ||
2558 | * there is no bad imbalance. | ||
2559 | */ | ||
2560 | schedstat_inc(sd, ttwu_move_affine); | ||
2561 | schedstat_inc(p, se.statistics.nr_wakeups_affine); | ||
2562 | |||
2563 | return 1; | ||
2564 | } | ||
2565 | return 0; | ||
2566 | } | ||
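For the balance test above, a small worked example (all numbers invented): with imbalance_pct = 125 the previous CPU is granted a 112% handicap, and each side's load is scaled by the other side's cpu_power, so with equal power and equal load the wakeup counts as balanced:

#include <stdio.h>

int main(void)
{
	long imbalance_pct = 125;
	long this_power = 1024, prev_power = 1024;   /* cpu_power of each side */
	long this_load = 2048, prev_load = 2048;     /* incl. effective_load() terms */

	long this_eff_load = 100 * prev_power * this_load;
	long prev_eff_load = (100 + (imbalance_pct - 100) / 2) * this_power * prev_load;

	printf("balanced = %d\n", this_eff_load <= prev_eff_load);  /* 1: within the margin */
	return 0;
}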
2567 | |||
2568 | /* | ||
2569 | * find_idlest_group finds and returns the least busy CPU group within the | ||
2570 | * domain. | ||
2571 | */ | ||
2572 | static struct sched_group * | ||
2573 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, | ||
2574 | int this_cpu, int load_idx) | ||
2575 | { | ||
2576 | struct sched_group *idlest = NULL, *group = sd->groups; | ||
2577 | unsigned long min_load = ULONG_MAX, this_load = 0; | ||
2578 | int imbalance = 100 + (sd->imbalance_pct-100)/2; | ||
2579 | |||
2580 | do { | ||
2581 | unsigned long load, avg_load; | ||
2582 | int local_group; | ||
2583 | int i; | ||
2584 | |||
2585 | /* Skip over this group if it has no CPUs allowed */ | ||
2586 | if (!cpumask_intersects(sched_group_cpus(group), | ||
2587 | tsk_cpus_allowed(p))) | ||
2588 | continue; | ||
2589 | |||
2590 | local_group = cpumask_test_cpu(this_cpu, | ||
2591 | sched_group_cpus(group)); | ||
2592 | |||
2593 | /* Tally up the load of all CPUs in the group */ | ||
2594 | avg_load = 0; | ||
2595 | |||
2596 | for_each_cpu(i, sched_group_cpus(group)) { | ||
2597 | /* Bias balancing toward cpus of our domain */ | ||
2598 | if (local_group) | ||
2599 | load = source_load(i, load_idx); | ||
2600 | else | ||
2601 | load = target_load(i, load_idx); | ||
2602 | |||
2603 | avg_load += load; | ||
2604 | } | ||
2605 | |||
2606 | /* Adjust by relative CPU power of the group */ | ||
2607 | avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power; | ||
2608 | |||
2609 | if (local_group) { | ||
2610 | this_load = avg_load; | ||
2611 | } else if (avg_load < min_load) { | ||
2612 | min_load = avg_load; | ||
2613 | idlest = group; | ||
2614 | } | ||
2615 | } while (group = group->next, group != sd->groups); | ||
2616 | |||
2617 | if (!idlest || 100*this_load < imbalance*min_load) | ||
2618 | return NULL; | ||
2619 | return idlest; | ||
2620 | } | ||
2621 | |||
2622 | /* | ||
2623 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | ||
2624 | */ | ||
2625 | static int | ||
2626 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) | ||
2627 | { | ||
2628 | unsigned long load, min_load = ULONG_MAX; | ||
2629 | int idlest = -1; | ||
2630 | int i; | ||
2631 | |||
2632 | /* Traverse only the allowed CPUs */ | ||
2633 | for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { | ||
2634 | load = weighted_cpuload(i); | ||
2635 | |||
2636 | if (load < min_load || (load == min_load && i == this_cpu)) { | ||
2637 | min_load = load; | ||
2638 | idlest = i; | ||
2639 | } | ||
2640 | } | ||
2641 | |||
2642 | return idlest; | ||
2643 | } | ||
2644 | |||
2645 | /* | ||
2646 | * Try and locate an idle CPU in the sched_domain. | ||
2647 | */ | ||
2648 | static int select_idle_sibling(struct task_struct *p, int target) | ||
2649 | { | ||
2650 | int cpu = smp_processor_id(); | ||
2651 | int prev_cpu = task_cpu(p); | ||
2652 | struct sched_domain *sd; | ||
2653 | struct sched_group *sg; | ||
2654 | int i, smt = 0; | ||
2655 | |||
2656 | /* | ||
2657 | * If the task is going to be woken-up on this cpu and if it is | ||
2658 | * already idle, then it is the right target. | ||
2659 | */ | ||
2660 | if (target == cpu && idle_cpu(cpu)) | ||
2661 | return cpu; | ||
2662 | |||
2663 | /* | ||
2664 | * If the task is going to be woken-up on the cpu where it previously | ||
2665 | * ran and if it is currently idle, then it is the right target. | ||
2666 | */ | ||
2667 | if (target == prev_cpu && idle_cpu(prev_cpu)) | ||
2668 | return prev_cpu; | ||
2669 | |||
2670 | /* | ||
2671 | * Otherwise, iterate the domains and find an eligible idle cpu. | ||
2672 | */ | ||
2673 | rcu_read_lock(); | ||
2674 | again: | ||
2675 | for_each_domain(target, sd) { | ||
2676 | if (!smt && (sd->flags & SD_SHARE_CPUPOWER)) | ||
2677 | continue; | ||
2678 | |||
2679 | if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) { | ||
2680 | if (!smt) { | ||
2681 | smt = 1; | ||
2682 | goto again; | ||
2683 | } | ||
2684 | break; | ||
2685 | } | ||
2686 | |||
2687 | sg = sd->groups; | ||
2688 | do { | ||
2689 | if (!cpumask_intersects(sched_group_cpus(sg), | ||
2690 | tsk_cpus_allowed(p))) | ||
2691 | goto next; | ||
2692 | |||
2693 | for_each_cpu(i, sched_group_cpus(sg)) { | ||
2694 | if (!idle_cpu(i)) | ||
2695 | goto next; | ||
2696 | } | ||
2697 | |||
2698 | target = cpumask_first_and(sched_group_cpus(sg), | ||
2699 | tsk_cpus_allowed(p)); | ||
2700 | goto done; | ||
2701 | next: | ||
2702 | sg = sg->next; | ||
2703 | } while (sg != sd->groups); | ||
2704 | } | ||
2705 | done: | ||
2706 | rcu_read_unlock(); | ||
2707 | |||
2708 | return target; | ||
2709 | } | ||
2710 | |||
2711 | /* | ||
2712 | * sched_balance_self: balance the current task (running on cpu) in domains | ||
2713 | * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and | ||
2714 | * SD_BALANCE_EXEC. | ||
2715 | * | ||
2716 | * Balance, i.e. select the least loaded group. | ||
2717 | * | ||
2718 | * Returns the target CPU number, or the same CPU if no balancing is needed. | ||
2719 | * | ||
2720 | * preempt must be disabled. | ||
2721 | */ | ||
2722 | static int | ||
2723 | select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | ||
2724 | { | ||
2725 | struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; | ||
2726 | int cpu = smp_processor_id(); | ||
2727 | int prev_cpu = task_cpu(p); | ||
2728 | int new_cpu = cpu; | ||
2729 | int want_affine = 0; | ||
2730 | int want_sd = 1; | ||
2731 | int sync = wake_flags & WF_SYNC; | ||
2732 | |||
2733 | if (sd_flag & SD_BALANCE_WAKE) { | ||
2734 | if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) | ||
2735 | want_affine = 1; | ||
2736 | new_cpu = prev_cpu; | ||
2737 | } | ||
2738 | |||
2739 | rcu_read_lock(); | ||
2740 | for_each_domain(cpu, tmp) { | ||
2741 | if (!(tmp->flags & SD_LOAD_BALANCE)) | ||
2742 | continue; | ||
2743 | |||
2744 | /* | ||
2745 | * If power savings logic is enabled for a domain, see if we | ||
2746 | * are not overloaded; if so, don't balance wider. | ||
2747 | */ | ||
2748 | if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) { | ||
2749 | unsigned long power = 0; | ||
2750 | unsigned long nr_running = 0; | ||
2751 | unsigned long capacity; | ||
2752 | int i; | ||
2753 | |||
2754 | for_each_cpu(i, sched_domain_span(tmp)) { | ||
2755 | power += power_of(i); | ||
2756 | nr_running += cpu_rq(i)->cfs.nr_running; | ||
2757 | } | ||
2758 | |||
2759 | capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); | ||
2760 | |||
2761 | if (tmp->flags & SD_POWERSAVINGS_BALANCE) | ||
2762 | nr_running /= 2; | ||
2763 | |||
2764 | if (nr_running < capacity) | ||
2765 | want_sd = 0; | ||
2766 | } | ||
2767 | |||
2768 | /* | ||
2769 | * If both cpu and prev_cpu are part of this domain, | ||
2770 | * cpu is a valid SD_WAKE_AFFINE target. | ||
2771 | */ | ||
2772 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | ||
2773 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | ||
2774 | affine_sd = tmp; | ||
2775 | want_affine = 0; | ||
2776 | } | ||
2777 | |||
2778 | if (!want_sd && !want_affine) | ||
2779 | break; | ||
2780 | |||
2781 | if (!(tmp->flags & sd_flag)) | ||
2782 | continue; | ||
2783 | |||
2784 | if (want_sd) | ||
2785 | sd = tmp; | ||
2786 | } | ||
2787 | |||
2788 | if (affine_sd) { | ||
2789 | if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) | ||
2790 | prev_cpu = cpu; | ||
2791 | |||
2792 | new_cpu = select_idle_sibling(p, prev_cpu); | ||
2793 | goto unlock; | ||
2794 | } | ||
2795 | |||
2796 | while (sd) { | ||
2797 | int load_idx = sd->forkexec_idx; | ||
2798 | struct sched_group *group; | ||
2799 | int weight; | ||
2800 | |||
2801 | if (!(sd->flags & sd_flag)) { | ||
2802 | sd = sd->child; | ||
2803 | continue; | ||
2804 | } | ||
2805 | |||
2806 | if (sd_flag & SD_BALANCE_WAKE) | ||
2807 | load_idx = sd->wake_idx; | ||
2808 | |||
2809 | group = find_idlest_group(sd, p, cpu, load_idx); | ||
2810 | if (!group) { | ||
2811 | sd = sd->child; | ||
2812 | continue; | ||
2813 | } | ||
2814 | |||
2815 | new_cpu = find_idlest_cpu(group, p, cpu); | ||
2816 | if (new_cpu == -1 || new_cpu == cpu) { | ||
2817 | /* Now try balancing at a lower domain level of cpu */ | ||
2818 | sd = sd->child; | ||
2819 | continue; | ||
2820 | } | ||
2821 | |||
2822 | /* Now try balancing at a lower domain level of new_cpu */ | ||
2823 | cpu = new_cpu; | ||
2824 | weight = sd->span_weight; | ||
2825 | sd = NULL; | ||
2826 | for_each_domain(cpu, tmp) { | ||
2827 | if (weight <= tmp->span_weight) | ||
2828 | break; | ||
2829 | if (tmp->flags & sd_flag) | ||
2830 | sd = tmp; | ||
2831 | } | ||
2832 | /* while loop will break here if sd == NULL */ | ||
2833 | } | ||
2834 | unlock: | ||
2835 | rcu_read_unlock(); | ||
2836 | |||
2837 | return new_cpu; | ||
2838 | } | ||
2839 | #endif /* CONFIG_SMP */ | ||
2840 | |||
2841 | static unsigned long | ||
2842 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | ||
2843 | { | ||
2844 | unsigned long gran = sysctl_sched_wakeup_granularity; | ||
2845 | |||
2846 | /* | ||
2847 | * Since it's curr that is running now, convert the gran from real-time | ||
2848 | * to virtual-time in its units. | ||
2849 | * | ||
2850 | * By using 'se' instead of 'curr' we penalize light tasks, so | ||
2851 | * they get preempted easier. That is, if 'se' < 'curr' then | ||
2852 | * the resulting gran will be larger, therefore penalizing the | ||
2853 | * lighter (se); if otoh 'se' > 'curr' then the resulting gran will | ||
2854 | * be smaller, again penalizing the lighter task (here curr). | ||
2855 | * | ||
2856 | * This is especially important for buddies when the leftmost | ||
2857 | * task is higher priority than the buddy. | ||
2858 | */ | ||
2859 | return calc_delta_fair(gran, se); | ||
2860 | } | ||
2861 | |||
2862 | /* | ||
2863 | * Should 'se' preempt 'curr'. | ||
2864 | * | ||
2865 | * |s1 | ||
2866 | * |s2 | ||
2867 | * |s3 | ||
2868 | * g | ||
2869 | * |<--->|c | ||
2870 | * | ||
2871 | * w(c, s1) = -1 | ||
2872 | * w(c, s2) = 0 | ||
2873 | * w(c, s3) = 1 | ||
2874 | * | ||
2875 | */ | ||
2876 | static int | ||
2877 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | ||
2878 | { | ||
2879 | s64 gran, vdiff = curr->vruntime - se->vruntime; | ||
2880 | |||
2881 | if (vdiff <= 0) | ||
2882 | return -1; | ||
2883 | |||
2884 | gran = wakeup_gran(curr, se); | ||
2885 | if (vdiff > gran) | ||
2886 | return 1; | ||
2887 | |||
2888 | return 0; | ||
2889 | } | ||
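The diagram above encodes a three-way decision: preempt only when curr's vruntime leads the waking entity by more than the (scaled) wakeup granularity. A tiny stand-alone version with illustrative values:

#include <stdio.h>

/* -1: don't preempt, 0: within the wakeup granularity, 1: preempt */
static int wakeup_preempt(long long curr_vruntime, long long se_vruntime,
			  long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;
	if (vdiff > gran)
		return 1;
	return 0;
}

int main(void)
{
	long long gran = 1000000;   /* 1 ms expressed in se's virtual time */

	printf("%d\n", wakeup_preempt(500, 1000, gran));     /* s1: curr not ahead -> -1 */
	printf("%d\n", wakeup_preempt(900, 500, gran));      /* s2: inside gran    ->  0 */
	printf("%d\n", wakeup_preempt(2000000, 500, gran));  /* s3: beyond gran    ->  1 */
	return 0;
}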
2890 | |||
2891 | static void set_last_buddy(struct sched_entity *se) | ||
2892 | { | ||
2893 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | ||
2894 | return; | ||
2895 | |||
2896 | for_each_sched_entity(se) | ||
2897 | cfs_rq_of(se)->last = se; | ||
2898 | } | ||
2899 | |||
2900 | static void set_next_buddy(struct sched_entity *se) | ||
2901 | { | ||
2902 | if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) | ||
2903 | return; | ||
2904 | |||
2905 | for_each_sched_entity(se) | ||
2906 | cfs_rq_of(se)->next = se; | ||
2907 | } | ||
2908 | |||
2909 | static void set_skip_buddy(struct sched_entity *se) | ||
2910 | { | ||
2911 | for_each_sched_entity(se) | ||
2912 | cfs_rq_of(se)->skip = se; | ||
2913 | } | ||
2914 | |||
2915 | /* | ||
2916 | * Preempt the current task with a newly woken task if needed: | ||
2917 | */ | ||
2918 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) | ||
2919 | { | ||
2920 | struct task_struct *curr = rq->curr; | ||
2921 | struct sched_entity *se = &curr->se, *pse = &p->se; | ||
2922 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
2923 | int scale = cfs_rq->nr_running >= sched_nr_latency; | ||
2924 | int next_buddy_marked = 0; | ||
2925 | |||
2926 | if (unlikely(se == pse)) | ||
2927 | return; | ||
2928 | |||
2929 | /* | ||
2930 | * This is possible from callers such as pull_task(), in which we | ||
2931 | * unconditionally check_preempt_curr() after an enqueue (which may have | ||
2932 | * led to a throttle). This both saves work and prevents false | ||
2933 | * next-buddy nomination below. | ||
2934 | */ | ||
2935 | if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) | ||
2936 | return; | ||
2937 | |||
2938 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { | ||
2939 | set_next_buddy(pse); | ||
2940 | next_buddy_marked = 1; | ||
2941 | } | ||
2942 | |||
2943 | /* | ||
2944 | * We can come here with TIF_NEED_RESCHED already set from new task | ||
2945 | * wake up path. | ||
2946 | * | ||
2947 | * Note: this also catches the edge-case of curr being in a throttled | ||
2948 | * group (e.g. via set_curr_task), since update_curr() (in the | ||
2949 | * enqueue of curr) will have resulted in resched being set. This | ||
2950 | * prevents us from potentially nominating it as a false LAST_BUDDY | ||
2951 | * below. | ||
2952 | */ | ||
2953 | if (test_tsk_need_resched(curr)) | ||
2954 | return; | ||
2955 | |||
2956 | /* Idle tasks are by definition preempted by non-idle tasks. */ | ||
2957 | if (unlikely(curr->policy == SCHED_IDLE) && | ||
2958 | likely(p->policy != SCHED_IDLE)) | ||
2959 | goto preempt; | ||
2960 | |||
2961 | /* | ||
2962 | * Batch and idle tasks do not preempt non-idle tasks (their preemption | ||
2963 | * is driven by the tick): | ||
2964 | */ | ||
2965 | if (unlikely(p->policy != SCHED_NORMAL)) | ||
2966 | return; | ||
2967 | |||
2968 | find_matching_se(&se, &pse); | ||
2969 | update_curr(cfs_rq_of(se)); | ||
2970 | BUG_ON(!pse); | ||
2971 | if (wakeup_preempt_entity(se, pse) == 1) { | ||
2972 | /* | ||
2973 | * Bias pick_next to pick the sched entity that is | ||
2974 | * triggering this preemption. | ||
2975 | */ | ||
2976 | if (!next_buddy_marked) | ||
2977 | set_next_buddy(pse); | ||
2978 | goto preempt; | ||
2979 | } | ||
2980 | |||
2981 | return; | ||
2982 | |||
2983 | preempt: | ||
2984 | resched_task(curr); | ||
2985 | /* | ||
2986 | * Only set the backward buddy when the current task is still | ||
2987 | * on the rq. This can happen when a wakeup gets interleaved | ||
2988 | * with schedule on the ->pre_schedule() or idle_balance() | ||
2989 | * point, either of which can drop the rq lock. | ||
2990 | * | ||
2991 | * Also, during early boot the idle thread is in the fair class, | ||
2992 | * for obvious reasons it's a bad idea to schedule back to it. | ||
2993 | */ | ||
2994 | if (unlikely(!se->on_rq || curr == rq->idle)) | ||
2995 | return; | ||
2996 | |||
2997 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | ||
2998 | set_last_buddy(se); | ||
2999 | } | ||
3000 | |||
3001 | static struct task_struct *pick_next_task_fair(struct rq *rq) | ||
3002 | { | ||
3003 | struct task_struct *p; | ||
3004 | struct cfs_rq *cfs_rq = &rq->cfs; | ||
3005 | struct sched_entity *se; | ||
3006 | |||
3007 | if (!cfs_rq->nr_running) | ||
3008 | return NULL; | ||
3009 | |||
3010 | do { | ||
3011 | se = pick_next_entity(cfs_rq); | ||
3012 | set_next_entity(cfs_rq, se); | ||
3013 | cfs_rq = group_cfs_rq(se); | ||
3014 | } while (cfs_rq); | ||
3015 | |||
3016 | p = task_of(se); | ||
3017 | hrtick_start_fair(rq, p); | ||
3018 | |||
3019 | return p; | ||
3020 | } | ||
3021 | |||
3022 | /* | ||
3023 | * Account for a descheduled task: | ||
3024 | */ | ||
3025 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) | ||
3026 | { | ||
3027 | struct sched_entity *se = &prev->se; | ||
3028 | struct cfs_rq *cfs_rq; | ||
3029 | |||
3030 | for_each_sched_entity(se) { | ||
3031 | cfs_rq = cfs_rq_of(se); | ||
3032 | put_prev_entity(cfs_rq, se); | ||
3033 | } | ||
3034 | } | ||
3035 | |||
3036 | /* | ||
3037 | * sched_yield() is very simple | ||
3038 | * | ||
3039 | * The magic of dealing with the ->skip buddy is in pick_next_entity. | ||
3040 | */ | ||
3041 | static void yield_task_fair(struct rq *rq) | ||
3042 | { | ||
3043 | struct task_struct *curr = rq->curr; | ||
3044 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
3045 | struct sched_entity *se = &curr->se; | ||
3046 | |||
3047 | /* | ||
3048 | * Are we the only task in the tree? | ||
3049 | */ | ||
3050 | if (unlikely(rq->nr_running == 1)) | ||
3051 | return; | ||
3052 | |||
3053 | clear_buddies(cfs_rq, se); | ||
3054 | |||
3055 | if (curr->policy != SCHED_BATCH) { | ||
3056 | update_rq_clock(rq); | ||
3057 | /* | ||
3058 | * Update run-time statistics of the 'current'. | ||
3059 | */ | ||
3060 | update_curr(cfs_rq); | ||
3061 | } | ||
3062 | |||
3063 | set_skip_buddy(se); | ||
3064 | } | ||
3065 | |||
3066 | static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) | ||
3067 | { | ||
3068 | struct sched_entity *se = &p->se; | ||
3069 | |||
3070 | /* throttled hierarchies are not runnable */ | ||
3071 | if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) | ||
3072 | return false; | ||
3073 | |||
3074 | /* Tell the scheduler that we'd really like pse to run next. */ | ||
3075 | set_next_buddy(se); | ||
3076 | |||
3077 | yield_task_fair(rq); | ||
3078 | |||
3079 | return true; | ||
3080 | } | ||
3081 | |||
3082 | #ifdef CONFIG_SMP | ||
3083 | /************************************************** | ||
3084 | * Fair scheduling class load-balancing methods: | ||
3085 | */ | ||
3086 | |||
3087 | /* | ||
3088 | * pull_task - move a task from a remote runqueue to the local runqueue. | ||
3089 | * Both runqueues must be locked. | ||
3090 | */ | ||
3091 | static void pull_task(struct rq *src_rq, struct task_struct *p, | ||
3092 | struct rq *this_rq, int this_cpu) | ||
3093 | { | ||
3094 | deactivate_task(src_rq, p, 0); | ||
3095 | set_task_cpu(p, this_cpu); | ||
3096 | activate_task(this_rq, p, 0); | ||
3097 | check_preempt_curr(this_rq, p, 0); | ||
3098 | } | ||
3099 | |||
3100 | /* | ||
3101 | * Is this task likely cache-hot: | ||
3102 | */ | ||
3103 | static int | ||
3104 | task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | ||
3105 | { | ||
3106 | s64 delta; | ||
3107 | |||
3108 | if (p->sched_class != &fair_sched_class) | ||
3109 | return 0; | ||
3110 | |||
3111 | if (unlikely(p->policy == SCHED_IDLE)) | ||
3112 | return 0; | ||
3113 | |||
3114 | /* | ||
3115 | * Buddy candidates are cache hot: | ||
3116 | */ | ||
3117 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && | ||
3118 | (&p->se == cfs_rq_of(&p->se)->next || | ||
3119 | &p->se == cfs_rq_of(&p->se)->last)) | ||
3120 | return 1; | ||
3121 | |||
3122 | if (sysctl_sched_migration_cost == -1) | ||
3123 | return 1; | ||
3124 | if (sysctl_sched_migration_cost == 0) | ||
3125 | return 0; | ||
3126 | |||
3127 | delta = now - p->se.exec_start; | ||
3128 | |||
3129 | return delta < (s64)sysctl_sched_migration_cost; | ||
3130 | } | ||
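A stripped-down version of the cache-hot test above, showing the two sysctl sentinel values (-1 forces "always hot", 0 forces "never hot") and the normal age comparison; values are illustrative apart from the 0.5 ms default migration cost:

#include <stdio.h>

static int task_hot(long long now, long long exec_start, long long migration_cost)
{
	if (migration_cost == -1)
		return 1;               /* always treat tasks as cache hot */
	if (migration_cost == 0)
		return 0;               /* never treat tasks as cache hot */

	return (now - exec_start) < migration_cost;
}

int main(void)
{
	long long cost = 500000;        /* 0.5 ms */

	printf("%d\n", task_hot(1000000, 900000, cost));  /* ran 0.1 ms ago -> hot (1) */
	printf("%d\n", task_hot(1000000, 100000, cost));  /* ran 0.9 ms ago -> cold (0) */
	printf("%d\n", task_hot(1000000, 900000, -1));    /* sentinel: forced hot (1) */
	return 0;
}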
3131 | |||
3132 | /* | ||
3133 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | ||
3134 | */ | ||
3135 | static | ||
3136 | int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | ||
3137 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3138 | int *all_pinned) | ||
3139 | { | ||
3140 | int tsk_cache_hot = 0; | ||
3141 | /* | ||
3142 | * We do not migrate tasks that are: | ||
3143 | * 1) running (obviously), or | ||
3144 | * 2) not allowed onto this CPU due to cpus_allowed, or | ||
3145 | * 3) cache-hot on their current CPU. | ||
3146 | */ | ||
3147 | if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) { | ||
3148 | schedstat_inc(p, se.statistics.nr_failed_migrations_affine); | ||
3149 | return 0; | ||
3150 | } | ||
3151 | *all_pinned = 0; | ||
3152 | |||
3153 | if (task_running(rq, p)) { | ||
3154 | schedstat_inc(p, se.statistics.nr_failed_migrations_running); | ||
3155 | return 0; | ||
3156 | } | ||
3157 | |||
3158 | /* | ||
3159 | * Aggressive migration if: | ||
3160 | * 1) task is cache cold, or | ||
3161 | * 2) too many balance attempts have failed. | ||
3162 | */ | ||
3163 | |||
3164 | tsk_cache_hot = task_hot(p, rq->clock_task, sd); | ||
3165 | if (!tsk_cache_hot || | ||
3166 | sd->nr_balance_failed > sd->cache_nice_tries) { | ||
3167 | #ifdef CONFIG_SCHEDSTATS | ||
3168 | if (tsk_cache_hot) { | ||
3169 | schedstat_inc(sd, lb_hot_gained[idle]); | ||
3170 | schedstat_inc(p, se.statistics.nr_forced_migrations); | ||
3171 | } | ||
3172 | #endif | ||
3173 | return 1; | ||
3174 | } | ||
3175 | |||
3176 | if (tsk_cache_hot) { | ||
3177 | schedstat_inc(p, se.statistics.nr_failed_migrations_hot); | ||
3178 | return 0; | ||
3179 | } | ||
3180 | return 1; | ||
3181 | } | ||
3182 | |||
3183 | /* | ||
3184 | * move_one_task tries to move exactly one task from busiest to this_rq, as | ||
3185 | * part of active balancing operations within "domain". | ||
3186 | * Returns 1 if successful and 0 otherwise. | ||
3187 | * | ||
3188 | * Called with both runqueues locked. | ||
3189 | */ | ||
3190 | static int | ||
3191 | move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3192 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
3193 | { | ||
3194 | struct task_struct *p, *n; | ||
3195 | struct cfs_rq *cfs_rq; | ||
3196 | int pinned = 0; | ||
3197 | |||
3198 | for_each_leaf_cfs_rq(busiest, cfs_rq) { | ||
3199 | list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) { | ||
3200 | if (throttled_lb_pair(task_group(p), | ||
3201 | busiest->cpu, this_cpu)) | ||
3202 | break; | ||
3203 | |||
3204 | if (!can_migrate_task(p, busiest, this_cpu, | ||
3205 | sd, idle, &pinned)) | ||
3206 | continue; | ||
3207 | |||
3208 | pull_task(busiest, p, this_rq, this_cpu); | ||
3209 | /* | ||
3210 | * Right now, this is only the second place pull_task() | ||
3211 | * is called, so we can safely collect pull_task() | ||
3212 | * stats here rather than inside pull_task(). | ||
3213 | */ | ||
3214 | schedstat_inc(sd, lb_gained[idle]); | ||
3215 | return 1; | ||
3216 | } | ||
3217 | } | ||
3218 | |||
3219 | return 0; | ||
3220 | } | ||
3221 | |||
3222 | static unsigned long | ||
3223 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3224 | unsigned long max_load_move, struct sched_domain *sd, | ||
3225 | enum cpu_idle_type idle, int *all_pinned, | ||
3226 | struct cfs_rq *busiest_cfs_rq) | ||
3227 | { | ||
3228 | int loops = 0, pulled = 0; | ||
3229 | long rem_load_move = max_load_move; | ||
3230 | struct task_struct *p, *n; | ||
3231 | |||
3232 | if (max_load_move == 0) | ||
3233 | goto out; | ||
3234 | |||
3235 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { | ||
3236 | if (loops++ > sysctl_sched_nr_migrate) | ||
3237 | break; | ||
3238 | |||
3239 | if ((p->se.load.weight >> 1) > rem_load_move || | ||
3240 | !can_migrate_task(p, busiest, this_cpu, sd, idle, | ||
3241 | all_pinned)) | ||
3242 | continue; | ||
3243 | |||
3244 | pull_task(busiest, p, this_rq, this_cpu); | ||
3245 | pulled++; | ||
3246 | rem_load_move -= p->se.load.weight; | ||
3247 | |||
3248 | #ifdef CONFIG_PREEMPT | ||
3249 | /* | ||
3250 | * NEWIDLE balancing is a source of latency, so preemptible | ||
3251 | * kernels will stop after the first task is pulled to minimize | ||
3252 | * the critical section. | ||
3253 | */ | ||
3254 | if (idle == CPU_NEWLY_IDLE) | ||
3255 | break; | ||
3256 | #endif | ||
3257 | |||
3258 | /* | ||
3259 | * We only want to steal up to the prescribed amount of | ||
3260 | * weighted load. | ||
3261 | */ | ||
3262 | if (rem_load_move <= 0) | ||
3263 | break; | ||
3264 | } | ||
3265 | out: | ||
3266 | /* | ||
3267 | * Right now, this is one of only two places pull_task() is called, | ||
3268 | * so we can safely collect pull_task() stats here rather than | ||
3269 | * inside pull_task(). | ||
3270 | */ | ||
3271 | schedstat_add(sd, lb_gained[idle], pulled); | ||
3272 | |||
3273 | return max_load_move - rem_load_move; | ||
3274 | } | ||
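
Editor's note: the (p->se.load.weight >> 1) > rem_load_move test in balance_tasks() above skips any task whose weight is more than twice the load still to be moved, so one oversized task cannot blow far past the requested imbalance. A hedged illustration with invented weights (a nice-0 task weighs 1024 in kernels of this era):

/* Hedged illustration of the "skip tasks heavier than 2x the remaining
 * load" filter in balance_tasks(); the weights are invented examples. */
#include <stdio.h>

int main(void)
{
	unsigned long rem_load_move = 800;          /* load still wanted    */
	unsigned long weights[] = { 1024, 3072 };   /* nice-0 and a 3x task */

	for (int i = 0; i < 2; i++) {
		if ((weights[i] >> 1) > rem_load_move)
			printf("weight %lu: skipped (more than 2x remaining)\n",
			       weights[i]);
		else
			printf("weight %lu: eligible to pull\n", weights[i]);
	}
	return 0;
}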
3275 | |||
3276 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
3277 | /* | ||
3278 | * update tg->load_weight by folding this cpu's load_avg | ||
3279 | */ | ||
3280 | static int update_shares_cpu(struct task_group *tg, int cpu) | ||
3281 | { | ||
3282 | struct cfs_rq *cfs_rq; | ||
3283 | unsigned long flags; | ||
3284 | struct rq *rq; | ||
3285 | |||
3286 | if (!tg->se[cpu]) | ||
3287 | return 0; | ||
3288 | |||
3289 | rq = cpu_rq(cpu); | ||
3290 | cfs_rq = tg->cfs_rq[cpu]; | ||
3291 | |||
3292 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
3293 | |||
3294 | update_rq_clock(rq); | ||
3295 | update_cfs_load(cfs_rq, 1); | ||
3296 | |||
3297 | /* | ||
3298 | * We need to update shares after updating tg->load_weight in | ||
3299 | * order to adjust the weight of groups with long running tasks. | ||
3300 | */ | ||
3301 | update_cfs_shares(cfs_rq); | ||
3302 | |||
3303 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
3304 | |||
3305 | return 0; | ||
3306 | } | ||
3307 | |||
3308 | static void update_shares(int cpu) | ||
3309 | { | ||
3310 | struct cfs_rq *cfs_rq; | ||
3311 | struct rq *rq = cpu_rq(cpu); | ||
3312 | |||
3313 | rcu_read_lock(); | ||
3314 | /* | ||
3315 | * Iterates the task_group tree in a bottom up fashion, see | ||
3316 | * list_add_leaf_cfs_rq() for details. | ||
3317 | */ | ||
3318 | for_each_leaf_cfs_rq(rq, cfs_rq) { | ||
3319 | /* throttled entities do not contribute to load */ | ||
3320 | if (throttled_hierarchy(cfs_rq)) | ||
3321 | continue; | ||
3322 | |||
3323 | update_shares_cpu(cfs_rq->tg, cpu); | ||
3324 | } | ||
3325 | rcu_read_unlock(); | ||
3326 | } | ||
3327 | |||
3328 | /* | ||
3329 | * Compute the cpu's hierarchical load factor for each task group. | ||
3330 | * This needs to be done in a top-down fashion because the load of a child | ||
3331 | * group is a fraction of its parent's load. | ||
3332 | */ | ||
3333 | static int tg_load_down(struct task_group *tg, void *data) | ||
3334 | { | ||
3335 | unsigned long load; | ||
3336 | long cpu = (long)data; | ||
3337 | |||
3338 | if (!tg->parent) { | ||
3339 | load = cpu_rq(cpu)->load.weight; | ||
3340 | } else { | ||
3341 | load = tg->parent->cfs_rq[cpu]->h_load; | ||
3342 | load *= tg->se[cpu]->load.weight; | ||
3343 | load /= tg->parent->cfs_rq[cpu]->load.weight + 1; | ||
3344 | } | ||
3345 | |||
3346 | tg->cfs_rq[cpu]->h_load = load; | ||
3347 | |||
3348 | return 0; | ||
3349 | } | ||
3350 | |||
3351 | static void update_h_load(long cpu) | ||
3352 | { | ||
3353 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); | ||
3354 | } | ||
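
Editor's note: tg_load_down() propagates load top-down: a child group's h_load is its parent's h_load scaled by the child entity's share of the parent cfs_rq weight (the +1 only guards against division by zero). A hedged, user-space rendering of that recurrence with made-up weights:

/* Hedged sketch of the h_load recurrence used by tg_load_down();
 * the weights below are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned long rq_weight    = 3072;  /* root: total runqueue weight  */
	unsigned long tg_se_weight = 1024;  /* group entity weight on root  */
	unsigned long root_h_load, tg_h_load;

	root_h_load = rq_weight;                             /* no parent   */
	tg_h_load   = root_h_load * tg_se_weight / (rq_weight + 1);

	/* The group owns 1024 of 3072 weight -> roughly 1/3 of the load. */
	printf("root h_load=%lu, group h_load=%lu\n", root_h_load, tg_h_load);
	return 0;
}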
3355 | |||
3356 | static unsigned long | ||
3357 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3358 | unsigned long max_load_move, | ||
3359 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3360 | int *all_pinned) | ||
3361 | { | ||
3362 | long rem_load_move = max_load_move; | ||
3363 | struct cfs_rq *busiest_cfs_rq; | ||
3364 | |||
3365 | rcu_read_lock(); | ||
3366 | update_h_load(cpu_of(busiest)); | ||
3367 | |||
3368 | for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) { | ||
3369 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; | ||
3370 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; | ||
3371 | u64 rem_load, moved_load; | ||
3372 | |||
3373 | /* | ||
3374 | * empty group or part of a throttled hierarchy | ||
3375 | */ | ||
3376 | if (!busiest_cfs_rq->task_weight || | ||
3377 | throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu)) | ||
3378 | continue; | ||
3379 | |||
3380 | rem_load = (u64)rem_load_move * busiest_weight; | ||
3381 | rem_load = div_u64(rem_load, busiest_h_load + 1); | ||
3382 | |||
3383 | moved_load = balance_tasks(this_rq, this_cpu, busiest, | ||
3384 | rem_load, sd, idle, all_pinned, | ||
3385 | busiest_cfs_rq); | ||
3386 | |||
3387 | if (!moved_load) | ||
3388 | continue; | ||
3389 | |||
3390 | moved_load *= busiest_h_load; | ||
3391 | moved_load = div_u64(moved_load, busiest_weight + 1); | ||
3392 | |||
3393 | rem_load_move -= moved_load; | ||
3394 | if (rem_load_move < 0) | ||
3395 | break; | ||
3396 | } | ||
3397 | rcu_read_unlock(); | ||
3398 | |||
3399 | return max_load_move - rem_load_move; | ||
3400 | } | ||
3401 | #else | ||
3402 | static inline void update_shares(int cpu) | ||
3403 | { | ||
3404 | } | ||
3405 | |||
3406 | static unsigned long | ||
3407 | load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3408 | unsigned long max_load_move, | ||
3409 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3410 | int *all_pinned) | ||
3411 | { | ||
3412 | return balance_tasks(this_rq, this_cpu, busiest, | ||
3413 | max_load_move, sd, idle, all_pinned, | ||
3414 | &busiest->cfs); | ||
3415 | } | ||
3416 | #endif | ||
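
Editor's note: the group-scheduling variant of load_balance_fair() above converts the hierarchical target into each leaf cfs_rq's local weight before calling balance_tasks(), then converts the weight actually moved back into hierarchical load. A hedged numeric round-trip, assuming the toy values below:

/* Hedged sketch of the h_load <-> group-weight conversion done per
 * leaf cfs_rq in load_balance_fair(); values are invented. */
#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 rem_load_move = 512;   /* hierarchical load we still want      */
	u64 group_weight  = 2048;  /* busiest_cfs_rq->load.weight          */
	u64 group_h_load  = 1024;  /* busiest_cfs_rq->h_load               */
	u64 rem_load, moved_weight, moved_load;

	/* Scale the target into this group's local weight terms. */
	rem_load = rem_load_move * group_weight / (group_h_load + 1);

	/* Pretend balance_tasks() pulled one nice-0 task of weight 1024. */
	moved_weight = 1024;

	/* Scale what was moved back into hierarchical load. */
	moved_load = moved_weight * group_h_load / (group_weight + 1);

	printf("local target=%llu, moved weight=%llu, counts as h_load=%llu\n",
	       rem_load, moved_weight, moved_load);
	return 0;
}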
3417 | |||
3418 | /* | ||
3419 | * move_tasks tries to move up to max_load_move weighted load from busiest to | ||
3420 | * this_rq, as part of a balancing operation within domain "sd". | ||
3421 | * Returns 1 if successful and 0 otherwise. | ||
3422 | * | ||
3423 | * Called with both runqueues locked. | ||
3424 | */ | ||
3425 | static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3426 | unsigned long max_load_move, | ||
3427 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3428 | int *all_pinned) | ||
3429 | { | ||
3430 | unsigned long total_load_moved = 0, load_moved; | ||
3431 | |||
3432 | do { | ||
3433 | load_moved = load_balance_fair(this_rq, this_cpu, busiest, | ||
3434 | max_load_move - total_load_moved, | ||
3435 | sd, idle, all_pinned); | ||
3436 | |||
3437 | total_load_moved += load_moved; | ||
3438 | |||
3439 | #ifdef CONFIG_PREEMPT | ||
3440 | /* | ||
3441 | * NEWIDLE balancing is a source of latency, so preemptible | ||
3442 | * kernels will stop after the first task is pulled to minimize | ||
3443 | * the critical section. | ||
3444 | */ | ||
3445 | if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) | ||
3446 | break; | ||
3447 | |||
3448 | if (raw_spin_is_contended(&this_rq->lock) || | ||
3449 | raw_spin_is_contended(&busiest->lock)) | ||
3450 | break; | ||
3451 | #endif | ||
3452 | } while (load_moved && max_load_move > total_load_moved); | ||
3453 | |||
3454 | return total_load_moved > 0; | ||
3455 | } | ||
3456 | |||
3457 | /********** Helpers for find_busiest_group ************************/ | ||
3458 | /* | ||
3459 | * sd_lb_stats - Structure to store the statistics of a sched_domain | ||
3460 | * during load balancing. | ||
3461 | */ | ||
3462 | struct sd_lb_stats { | ||
3463 | struct sched_group *busiest; /* Busiest group in this sd */ | ||
3464 | struct sched_group *this; /* Local group in this sd */ | ||
3465 | unsigned long total_load; /* Total load of all groups in sd */ | ||
3466 | unsigned long total_pwr; /* Total power of all groups in sd */ | ||
3467 | unsigned long avg_load; /* Average load across all groups in sd */ | ||
3468 | |||
3469 | /** Statistics of this group */ | ||
3470 | unsigned long this_load; | ||
3471 | unsigned long this_load_per_task; | ||
3472 | unsigned long this_nr_running; | ||
3473 | unsigned long this_has_capacity; | ||
3474 | unsigned int this_idle_cpus; | ||
3475 | |||
3476 | /* Statistics of the busiest group */ | ||
3477 | unsigned int busiest_idle_cpus; | ||
3478 | unsigned long max_load; | ||
3479 | unsigned long busiest_load_per_task; | ||
3480 | unsigned long busiest_nr_running; | ||
3481 | unsigned long busiest_group_capacity; | ||
3482 | unsigned long busiest_has_capacity; | ||
3483 | unsigned int busiest_group_weight; | ||
3484 | |||
3485 | int group_imb; /* Is there imbalance in this sd */ | ||
3486 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
3487 | int power_savings_balance; /* Is powersave balance needed for this sd */ | ||
3488 | struct sched_group *group_min; /* Least loaded group in sd */ | ||
3489 | struct sched_group *group_leader; /* Group which relieves group_min */ | ||
3490 | unsigned long min_load_per_task; /* load_per_task in group_min */ | ||
3491 | unsigned long leader_nr_running; /* Nr running of group_leader */ | ||
3492 | unsigned long min_nr_running; /* Nr running of group_min */ | ||
3493 | #endif | ||
3494 | }; | ||
3495 | |||
3496 | /* | ||
3497 | * sg_lb_stats - stats of a sched_group required for load_balancing | ||
3498 | */ | ||
3499 | struct sg_lb_stats { | ||
3500 | unsigned long avg_load; /*Avg load across the CPUs of the group */ | ||
3501 | unsigned long group_load; /* Total load over the CPUs of the group */ | ||
3502 | unsigned long sum_nr_running; /* Nr tasks running in the group */ | ||
3503 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | ||
3504 | unsigned long group_capacity; | ||
3505 | unsigned long idle_cpus; | ||
3506 | unsigned long group_weight; | ||
3507 | int group_imb; /* Is there an imbalance in the group ? */ | ||
3508 | int group_has_capacity; /* Is there extra capacity in the group? */ | ||
3509 | }; | ||
3510 | |||
3511 | /** | ||
3512 | * get_sd_load_idx - Obtain the load index for a given sched domain. | ||
3513 | * @sd: The sched_domain whose load_idx is to be obtained. | ||
3514 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. | ||
3515 | */ | ||
3516 | static inline int get_sd_load_idx(struct sched_domain *sd, | ||
3517 | enum cpu_idle_type idle) | ||
3518 | { | ||
3519 | int load_idx; | ||
3520 | |||
3521 | switch (idle) { | ||
3522 | case CPU_NOT_IDLE: | ||
3523 | load_idx = sd->busy_idx; | ||
3524 | break; | ||
3525 | |||
3526 | case CPU_NEWLY_IDLE: | ||
3527 | load_idx = sd->newidle_idx; | ||
3528 | break; | ||
3529 | default: | ||
3530 | load_idx = sd->idle_idx; | ||
3531 | break; | ||
3532 | } | ||
3533 | |||
3534 | return load_idx; | ||
3535 | } | ||
3536 | |||
3537 | |||
3538 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
3539 | /** | ||
3540 | * init_sd_power_savings_stats - Initialize power savings statistics for | ||
3541 | * the given sched_domain, during load balancing. | ||
3542 | * | ||
3543 | * @sd: Sched domain whose power-savings statistics are to be initialized. | ||
3544 | * @sds: Variable containing the statistics for sd. | ||
3545 | * @idle: Idle status of the CPU at which we're performing load-balancing. | ||
3546 | */ | ||
3547 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | ||
3548 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | ||
3549 | { | ||
3550 | /* | ||
3551 | * Busy processors will not participate in power savings | ||
3552 | * balance. | ||
3553 | */ | ||
3554 | if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
3555 | sds->power_savings_balance = 0; | ||
3556 | else { | ||
3557 | sds->power_savings_balance = 1; | ||
3558 | sds->min_nr_running = ULONG_MAX; | ||
3559 | sds->leader_nr_running = 0; | ||
3560 | } | ||
3561 | } | ||
3562 | |||
3563 | /** | ||
3564 | * update_sd_power_savings_stats - Update the power saving stats for a | ||
3565 | * sched_domain while performing load balancing. | ||
3566 | * | ||
3567 | * @group: sched_group belonging to the sched_domain under consideration. | ||
3568 | * @sds: Variable containing the statistics of the sched_domain | ||
3569 | * @local_group: Does group contain the CPU for which we're performing | ||
3570 | * load balancing ? | ||
3571 | * @sgs: Variable containing the statistics of the group. | ||
3572 | */ | ||
3573 | static inline void update_sd_power_savings_stats(struct sched_group *group, | ||
3574 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | ||
3575 | { | ||
3576 | |||
3577 | if (!sds->power_savings_balance) | ||
3578 | return; | ||
3579 | |||
3580 | /* | ||
3581 | * If the local group is idle or completely loaded, | ||
3582 | * there is no need to do power savings balance at this domain. | ||
3583 | */ | ||
3584 | if (local_group && (sds->this_nr_running >= sgs->group_capacity || | ||
3585 | !sds->this_nr_running)) | ||
3586 | sds->power_savings_balance = 0; | ||
3587 | |||
3588 | /* | ||
3589 | * If a group is already running at full capacity or idle, | ||
3590 | * don't include that group in power savings calculations | ||
3591 | */ | ||
3592 | if (!sds->power_savings_balance || | ||
3593 | sgs->sum_nr_running >= sgs->group_capacity || | ||
3594 | !sgs->sum_nr_running) | ||
3595 | return; | ||
3596 | |||
3597 | /* | ||
3598 | * Calculate the group which has the least non-idle load. | ||
3599 | * This is the group from which we need to pick up load | ||
3600 | * in order to save power. | ||
3601 | */ | ||
3602 | if ((sgs->sum_nr_running < sds->min_nr_running) || | ||
3603 | (sgs->sum_nr_running == sds->min_nr_running && | ||
3604 | group_first_cpu(group) > group_first_cpu(sds->group_min))) { | ||
3605 | sds->group_min = group; | ||
3606 | sds->min_nr_running = sgs->sum_nr_running; | ||
3607 | sds->min_load_per_task = sgs->sum_weighted_load / | ||
3608 | sgs->sum_nr_running; | ||
3609 | } | ||
3610 | |||
3611 | /* | ||
3612 | * Calculate the group which is close to its | ||
3613 | * capacity but still has some space to pick up load | ||
3614 | * from other groups and save more power. | ||
3615 | */ | ||
3616 | if (sgs->sum_nr_running + 1 > sgs->group_capacity) | ||
3617 | return; | ||
3618 | |||
3619 | if (sgs->sum_nr_running > sds->leader_nr_running || | ||
3620 | (sgs->sum_nr_running == sds->leader_nr_running && | ||
3621 | group_first_cpu(group) < group_first_cpu(sds->group_leader))) { | ||
3622 | sds->group_leader = group; | ||
3623 | sds->leader_nr_running = sgs->sum_nr_running; | ||
3624 | } | ||
3625 | } | ||
3626 | |||
3627 | /** | ||
3628 | * check_power_save_busiest_group - see if there is potential for some power-savings balance | ||
3629 | * @sds: Variable containing the statistics of the sched_domain | ||
3630 | * under consideration. | ||
3631 | * @this_cpu: Cpu at which we're currently performing load-balancing. | ||
3632 | * @imbalance: Variable to store the imbalance. | ||
3633 | * | ||
3634 | * Description: | ||
3635 | * Check if we have potential to perform some power-savings balance. | ||
3636 | * If yes, set the busiest group to be the least loaded group in the | ||
3637 | * sched_domain, so that its CPUs can be put to idle. | ||
3638 | * | ||
3639 | * Returns 1 if there is potential to perform power-savings balance. | ||
3640 | * Else returns 0. | ||
3641 | */ | ||
3642 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | ||
3643 | int this_cpu, unsigned long *imbalance) | ||
3644 | { | ||
3645 | if (!sds->power_savings_balance) | ||
3646 | return 0; | ||
3647 | |||
3648 | if (sds->this != sds->group_leader || | ||
3649 | sds->group_leader == sds->group_min) | ||
3650 | return 0; | ||
3651 | |||
3652 | *imbalance = sds->min_load_per_task; | ||
3653 | sds->busiest = sds->group_min; | ||
3654 | |||
3655 | return 1; | ||
3656 | |||
3657 | } | ||
3658 | #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | ||
3659 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | ||
3660 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | ||
3661 | { | ||
3662 | return; | ||
3663 | } | ||
3664 | |||
3665 | static inline void update_sd_power_savings_stats(struct sched_group *group, | ||
3666 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | ||
3667 | { | ||
3668 | return; | ||
3669 | } | ||
3670 | |||
3671 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | ||
3672 | int this_cpu, unsigned long *imbalance) | ||
3673 | { | ||
3674 | return 0; | ||
3675 | } | ||
3676 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | ||
3677 | |||
3678 | |||
3679 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) | ||
3680 | { | ||
3681 | return SCHED_POWER_SCALE; | ||
3682 | } | ||
3683 | |||
3684 | unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) | ||
3685 | { | ||
3686 | return default_scale_freq_power(sd, cpu); | ||
3687 | } | ||
3688 | |||
3689 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) | ||
3690 | { | ||
3691 | unsigned long weight = sd->span_weight; | ||
3692 | unsigned long smt_gain = sd->smt_gain; | ||
3693 | |||
3694 | smt_gain /= weight; | ||
3695 | |||
3696 | return smt_gain; | ||
3697 | } | ||
3698 | |||
3699 | unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | ||
3700 | { | ||
3701 | return default_scale_smt_power(sd, cpu); | ||
3702 | } | ||
3703 | |||
3704 | unsigned long scale_rt_power(int cpu) | ||
3705 | { | ||
3706 | struct rq *rq = cpu_rq(cpu); | ||
3707 | u64 total, available; | ||
3708 | |||
3709 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | ||
3710 | |||
3711 | if (unlikely(total < rq->rt_avg)) { | ||
3712 | /* Ensures that power won't end up being negative */ | ||
3713 | available = 0; | ||
3714 | } else { | ||
3715 | available = total - rq->rt_avg; | ||
3716 | } | ||
3717 | |||
3718 | if (unlikely((s64)total < SCHED_POWER_SCALE)) | ||
3719 | total = SCHED_POWER_SCALE; | ||
3720 | |||
3721 | total >>= SCHED_POWER_SHIFT; | ||
3722 | |||
3723 | return div_u64(available, total); | ||
3724 | } | ||
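
Editor's note: scale_rt_power() returns the fraction of the recent averaging period not consumed by RT/IRQ activity, expressed against SCHED_POWER_SCALE (1024). A hedged worked example with invented numbers:

/* Hedged sketch of the scale_rt_power() arithmetic; the period and
 * rt_avg values are made up (SCHED_POWER_SCALE is 1024). */
#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 total  = 1048576;          /* averaging period, in ns (toy)     */
	u64 rt_avg = 262144;           /* ns spent in RT/IRQ work (toy)     */
	u64 available = total - rt_avg;

	total >>= 10;                  /* SCHED_POWER_SHIFT                 */

	/* 25% RT time -> remaining CFS power of ~768/1024. */
	printf("scaled power = %llu\n", available / total);
	return 0;
}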
3725 | |||
3726 | static void update_cpu_power(struct sched_domain *sd, int cpu) | ||
3727 | { | ||
3728 | unsigned long weight = sd->span_weight; | ||
3729 | unsigned long power = SCHED_POWER_SCALE; | ||
3730 | struct sched_group *sdg = sd->groups; | ||
3731 | |||
3732 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { | ||
3733 | if (sched_feat(ARCH_POWER)) | ||
3734 | power *= arch_scale_smt_power(sd, cpu); | ||
3735 | else | ||
3736 | power *= default_scale_smt_power(sd, cpu); | ||
3737 | |||
3738 | power >>= SCHED_POWER_SHIFT; | ||
3739 | } | ||
3740 | |||
3741 | sdg->sgp->power_orig = power; | ||
3742 | |||
3743 | if (sched_feat(ARCH_POWER)) | ||
3744 | power *= arch_scale_freq_power(sd, cpu); | ||
3745 | else | ||
3746 | power *= default_scale_freq_power(sd, cpu); | ||
3747 | |||
3748 | power >>= SCHED_POWER_SHIFT; | ||
3749 | |||
3750 | power *= scale_rt_power(cpu); | ||
3751 | power >>= SCHED_POWER_SHIFT; | ||
3752 | |||
3753 | if (!power) | ||
3754 | power = 1; | ||
3755 | |||
3756 | cpu_rq(cpu)->cpu_power = power; | ||
3757 | sdg->sgp->power = power; | ||
3758 | } | ||
3759 | |||
3760 | void update_group_power(struct sched_domain *sd, int cpu) | ||
3761 | { | ||
3762 | struct sched_domain *child = sd->child; | ||
3763 | struct sched_group *group, *sdg = sd->groups; | ||
3764 | unsigned long power; | ||
3765 | |||
3766 | if (!child) { | ||
3767 | update_cpu_power(sd, cpu); | ||
3768 | return; | ||
3769 | } | ||
3770 | |||
3771 | power = 0; | ||
3772 | |||
3773 | group = child->groups; | ||
3774 | do { | ||
3775 | power += group->sgp->power; | ||
3776 | group = group->next; | ||
3777 | } while (group != child->groups); | ||
3778 | |||
3779 | sdg->sgp->power = power; | ||
3780 | } | ||
3781 | |||
3782 | /* | ||
3783 | * Try and fix up capacity for tiny siblings; this is needed when | ||
3784 | * things like SD_ASYM_PACKING need f_b_g to select another sibling | ||
3785 | * which on its own isn't powerful enough. | ||
3786 | * | ||
3787 | * See update_sd_pick_busiest() and check_asym_packing(). | ||
3788 | */ | ||
3789 | static inline int | ||
3790 | fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | ||
3791 | { | ||
3792 | /* | ||
3793 | * Only siblings can have significantly less than SCHED_POWER_SCALE | ||
3794 | */ | ||
3795 | if (!(sd->flags & SD_SHARE_CPUPOWER)) | ||
3796 | return 0; | ||
3797 | |||
3798 | /* | ||
3799 | * If ~90% of the cpu_power is still there, we're good. | ||
3800 | */ | ||
3801 | if (group->sgp->power * 32 > group->sgp->power_orig * 29) | ||
3802 | return 1; | ||
3803 | |||
3804 | return 0; | ||
3805 | } | ||
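
Editor's note: the power * 32 > power_orig * 29 test above is an integer-only way of asking whether power/power_orig exceeds 29/32 ≈ 90.6%, i.e. whether roughly 90% of the sibling's original cpu_power is still available. A hedged check with an invented power_orig:

/* Hedged check of the ~90% threshold used by fix_small_capacity();
 * power_orig of 600 is an invented example value. */
#include <stdio.h>

int main(void)
{
	unsigned long power_orig = 600;

	for (unsigned long power = 540; power <= 548; power += 4)
		printf("power=%lu -> capacity %s\n", power,
		       (power * 32 > power_orig * 29) ? "kept at 1" : "left at 0");
	return 0;
}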
3806 | |||
3807 | /** | ||
3808 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | ||
3809 | * @sd: The sched_domain whose statistics are to be updated. | ||
3810 | * @group: sched_group whose statistics are to be updated. | ||
3811 | * @this_cpu: Cpu for which load balance is currently performed. | ||
3812 | * @idle: Idle status of this_cpu | ||
3813 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | ||
3814 | * @local_group: Does group contain this_cpu. | ||
3815 | * @cpus: Set of cpus considered for load balancing. | ||
3816 | * @balance: Should we balance. | ||
3817 | * @sgs: variable to hold the statistics for this group. | ||
3818 | */ | ||
3819 | static inline void update_sg_lb_stats(struct sched_domain *sd, | ||
3820 | struct sched_group *group, int this_cpu, | ||
3821 | enum cpu_idle_type idle, int load_idx, | ||
3822 | int local_group, const struct cpumask *cpus, | ||
3823 | int *balance, struct sg_lb_stats *sgs) | ||
3824 | { | ||
3825 | unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; | ||
3826 | int i; | ||
3827 | unsigned int balance_cpu = -1, first_idle_cpu = 0; | ||
3828 | unsigned long avg_load_per_task = 0; | ||
3829 | |||
3830 | if (local_group) | ||
3831 | balance_cpu = group_first_cpu(group); | ||
3832 | |||
3833 | /* Tally up the load of all CPUs in the group */ | ||
3834 | max_cpu_load = 0; | ||
3835 | min_cpu_load = ~0UL; | ||
3836 | max_nr_running = 0; | ||
3837 | |||
3838 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { | ||
3839 | struct rq *rq = cpu_rq(i); | ||
3840 | |||
3841 | /* Bias balancing toward cpus of our domain */ | ||
3842 | if (local_group) { | ||
3843 | if (idle_cpu(i) && !first_idle_cpu) { | ||
3844 | first_idle_cpu = 1; | ||
3845 | balance_cpu = i; | ||
3846 | } | ||
3847 | |||
3848 | load = target_load(i, load_idx); | ||
3849 | } else { | ||
3850 | load = source_load(i, load_idx); | ||
3851 | if (load > max_cpu_load) { | ||
3852 | max_cpu_load = load; | ||
3853 | max_nr_running = rq->nr_running; | ||
3854 | } | ||
3855 | if (min_cpu_load > load) | ||
3856 | min_cpu_load = load; | ||
3857 | } | ||
3858 | |||
3859 | sgs->group_load += load; | ||
3860 | sgs->sum_nr_running += rq->nr_running; | ||
3861 | sgs->sum_weighted_load += weighted_cpuload(i); | ||
3862 | if (idle_cpu(i)) | ||
3863 | sgs->idle_cpus++; | ||
3864 | } | ||
3865 | |||
3866 | /* | ||
3867 | * The first idle cpu or the first cpu (busiest) in this sched group | ||
3868 | * is eligible for doing load balancing at this and higher | ||
3869 | * domains. In the newly idle case, we allow all the cpus | ||
3870 | * to do the newly idle load balance. | ||
3871 | */ | ||
3872 | if (idle != CPU_NEWLY_IDLE && local_group) { | ||
3873 | if (balance_cpu != this_cpu) { | ||
3874 | *balance = 0; | ||
3875 | return; | ||
3876 | } | ||
3877 | update_group_power(sd, this_cpu); | ||
3878 | } | ||
3879 | |||
3880 | /* Adjust by relative CPU power of the group */ | ||
3881 | sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power; | ||
3882 | |||
3883 | /* | ||
3884 | * Consider the group unbalanced when the imbalance is larger | ||
3885 | * than the average weight of a task. | ||
3886 | * | ||
3887 | * APZ: with cgroup the avg task weight can vary wildly and | ||
3888 | * might not be a suitable number - should we keep a | ||
3889 | * normalized nr_running number somewhere that negates | ||
3890 | * the hierarchy? | ||
3891 | */ | ||
3892 | if (sgs->sum_nr_running) | ||
3893 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | ||
3894 | |||
3895 | if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1) | ||
3896 | sgs->group_imb = 1; | ||
3897 | |||
3898 | sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power, | ||
3899 | SCHED_POWER_SCALE); | ||
3900 | if (!sgs->group_capacity) | ||
3901 | sgs->group_capacity = fix_small_capacity(sd, group); | ||
3902 | sgs->group_weight = group->group_weight; | ||
3903 | |||
3904 | if (sgs->group_capacity > sgs->sum_nr_running) | ||
3905 | sgs->group_has_capacity = 1; | ||
3906 | } | ||
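
Editor's note: update_sg_lb_stats() normalizes the raw group load by the group's cpu_power so that groups of different sizes and strengths compare on one scale, and rounds that power to a whole number of SCHED_POWER_SCALE units as the group capacity. A hedged worked example with invented inputs:

/* Hedged sketch of the avg_load / group_capacity arithmetic in
 * update_sg_lb_stats(); all inputs are invented. */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long group_load  = 3072; /* three nice-0 tasks worth      */
	unsigned long group_power = 2048; /* two full-strength CPUs        */

	unsigned long avg_load = group_load * SCHED_POWER_SCALE / group_power;
	unsigned long capacity = DIV_ROUND_CLOSEST(group_power,
						    SCHED_POWER_SCALE);

	/* avg_load 1536 on capacity 2 -> the group is over-committed. */
	printf("avg_load=%lu capacity=%lu\n", avg_load, capacity);
	return 0;
}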
3907 | |||
3908 | /** | ||
3909 | * update_sd_pick_busiest - return 1 on busiest group | ||
3910 | * @sd: sched_domain whose statistics are to be checked | ||
3911 | * @sds: sched_domain statistics | ||
3912 | * @sg: sched_group candidate to be checked for being the busiest | ||
3913 | * @sgs: sched_group statistics | ||
3914 | * @this_cpu: the current cpu | ||
3915 | * | ||
3916 | * Determine if @sg is a busier group than the previously selected | ||
3917 | * busiest group. | ||
3918 | */ | ||
3919 | static bool update_sd_pick_busiest(struct sched_domain *sd, | ||
3920 | struct sd_lb_stats *sds, | ||
3921 | struct sched_group *sg, | ||
3922 | struct sg_lb_stats *sgs, | ||
3923 | int this_cpu) | ||
3924 | { | ||
3925 | if (sgs->avg_load <= sds->max_load) | ||
3926 | return false; | ||
3927 | |||
3928 | if (sgs->sum_nr_running > sgs->group_capacity) | ||
3929 | return true; | ||
3930 | |||
3931 | if (sgs->group_imb) | ||
3932 | return true; | ||
3933 | |||
3934 | /* | ||
3935 | * ASYM_PACKING needs to move all the work to the lowest | ||
3936 | * numbered CPUs in the group, therefore mark all groups | ||
3937 | * higher than ourself as busy. | ||
3938 | */ | ||
3939 | if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running && | ||
3940 | this_cpu < group_first_cpu(sg)) { | ||
3941 | if (!sds->busiest) | ||
3942 | return true; | ||
3943 | |||
3944 | if (group_first_cpu(sds->busiest) > group_first_cpu(sg)) | ||
3945 | return true; | ||
3946 | } | ||
3947 | |||
3948 | return false; | ||
3949 | } | ||
3950 | |||
3951 | /** | ||
3952 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | ||
3953 | * @sd: sched_domain whose statistics are to be updated. | ||
3954 | * @this_cpu: Cpu for which load balance is currently performed. | ||
3955 | * @idle: Idle status of this_cpu | ||
3956 | * @cpus: Set of cpus considered for load balancing. | ||
3957 | * @balance: Should we balance. | ||
3958 | * @sds: variable to hold the statistics for this sched_domain. | ||
3959 | */ | ||
3960 | static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | ||
3961 | enum cpu_idle_type idle, const struct cpumask *cpus, | ||
3962 | int *balance, struct sd_lb_stats *sds) | ||
3963 | { | ||
3964 | struct sched_domain *child = sd->child; | ||
3965 | struct sched_group *sg = sd->groups; | ||
3966 | struct sg_lb_stats sgs; | ||
3967 | int load_idx, prefer_sibling = 0; | ||
3968 | |||
3969 | if (child && child->flags & SD_PREFER_SIBLING) | ||
3970 | prefer_sibling = 1; | ||
3971 | |||
3972 | init_sd_power_savings_stats(sd, sds, idle); | ||
3973 | load_idx = get_sd_load_idx(sd, idle); | ||
3974 | |||
3975 | do { | ||
3976 | int local_group; | ||
3977 | |||
3978 | local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg)); | ||
3979 | memset(&sgs, 0, sizeof(sgs)); | ||
3980 | update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, | ||
3981 | local_group, cpus, balance, &sgs); | ||
3982 | |||
3983 | if (local_group && !(*balance)) | ||
3984 | return; | ||
3985 | |||
3986 | sds->total_load += sgs.group_load; | ||
3987 | sds->total_pwr += sg->sgp->power; | ||
3988 | |||
3989 | /* | ||
3990 | * In case the child domain prefers tasks go to siblings | ||
3991 | * first, lower the sg capacity to one so that we'll try | ||
3992 | * and move all the excess tasks away. We lower the capacity | ||
3993 | * of a group only if the local group has the capacity to fit | ||
3994 | * these excess tasks, i.e. nr_running < group_capacity. The | ||
3995 | * extra check prevents the case where you always pull from the | ||
3996 | * heaviest group when it is already under-utilized (possible | ||
3997 | * when a single large-weight task outweighs the other tasks on the system). | ||
3998 | */ | ||
3999 | if (prefer_sibling && !local_group && sds->this_has_capacity) | ||
4000 | sgs.group_capacity = min(sgs.group_capacity, 1UL); | ||
4001 | |||
4002 | if (local_group) { | ||
4003 | sds->this_load = sgs.avg_load; | ||
4004 | sds->this = sg; | ||
4005 | sds->this_nr_running = sgs.sum_nr_running; | ||
4006 | sds->this_load_per_task = sgs.sum_weighted_load; | ||
4007 | sds->this_has_capacity = sgs.group_has_capacity; | ||
4008 | sds->this_idle_cpus = sgs.idle_cpus; | ||
4009 | } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) { | ||
4010 | sds->max_load = sgs.avg_load; | ||
4011 | sds->busiest = sg; | ||
4012 | sds->busiest_nr_running = sgs.sum_nr_running; | ||
4013 | sds->busiest_idle_cpus = sgs.idle_cpus; | ||
4014 | sds->busiest_group_capacity = sgs.group_capacity; | ||
4015 | sds->busiest_load_per_task = sgs.sum_weighted_load; | ||
4016 | sds->busiest_has_capacity = sgs.group_has_capacity; | ||
4017 | sds->busiest_group_weight = sgs.group_weight; | ||
4018 | sds->group_imb = sgs.group_imb; | ||
4019 | } | ||
4020 | |||
4021 | update_sd_power_savings_stats(sg, sds, local_group, &sgs); | ||
4022 | sg = sg->next; | ||
4023 | } while (sg != sd->groups); | ||
4024 | } | ||
4025 | |||
4026 | /** | ||
4027 | * check_asym_packing - Check to see if the group is packed into the | ||
4028 | * sched domain. | ||
4029 | * | ||
4030 | * This is primarily intended to be used at the sibling level. Some | ||
4031 | * cores like POWER7 prefer to use lower numbered SMT threads. In the | ||
4032 | * case of POWER7, it can move to lower SMT modes only when higher | ||
4033 | * threads are idle. When in lower SMT modes, the threads will | ||
4034 | * perform better since they share less core resources. Hence when we | ||
4035 | * have idle threads, we want them to be the higher ones. | ||
4036 | * | ||
4037 | * This packing function is run on idle threads. It checks to see if | ||
4038 | * the busiest CPU in this domain (core in the P7 case) has a higher | ||
4039 | * CPU number than the packing function is being run on. Here we are | ||
4040 | * assuming a lower CPU number will be equivalent to a lower SMT thread | ||
4041 | * number. | ||
4042 | * | ||
4043 | * Returns 1 when packing is required and a task should be moved to | ||
4044 | * this CPU. The amount of the imbalance is returned in *imbalance. | ||
4045 | * | ||
4046 | * @sd: The sched_domain whose packing is to be checked. | ||
4047 | * @sds: Statistics of the sched_domain which is to be packed | ||
4048 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
4049 | * @imbalance: returns the amount of imbalance due to packing. | ||
4050 | */ | ||
4051 | static int check_asym_packing(struct sched_domain *sd, | ||
4052 | struct sd_lb_stats *sds, | ||
4053 | int this_cpu, unsigned long *imbalance) | ||
4054 | { | ||
4055 | int busiest_cpu; | ||
4056 | |||
4057 | if (!(sd->flags & SD_ASYM_PACKING)) | ||
4058 | return 0; | ||
4059 | |||
4060 | if (!sds->busiest) | ||
4061 | return 0; | ||
4062 | |||
4063 | busiest_cpu = group_first_cpu(sds->busiest); | ||
4064 | if (this_cpu > busiest_cpu) | ||
4065 | return 0; | ||
4066 | |||
4067 | *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power, | ||
4068 | SCHED_POWER_SCALE); | ||
4069 | return 1; | ||
4070 | } | ||
4071 | |||
4072 | /** | ||
4073 | * fix_small_imbalance - Calculate the minor imbalance that exists | ||
4074 | * amongst the groups of a sched_domain, during | ||
4075 | * load balancing. | ||
4076 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | ||
4077 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
4078 | * @imbalance: Variable to store the imbalance. | ||
4079 | */ | ||
4080 | static inline void fix_small_imbalance(struct sd_lb_stats *sds, | ||
4081 | int this_cpu, unsigned long *imbalance) | ||
4082 | { | ||
4083 | unsigned long tmp, pwr_now = 0, pwr_move = 0; | ||
4084 | unsigned int imbn = 2; | ||
4085 | unsigned long scaled_busy_load_per_task; | ||
4086 | |||
4087 | if (sds->this_nr_running) { | ||
4088 | sds->this_load_per_task /= sds->this_nr_running; | ||
4089 | if (sds->busiest_load_per_task > | ||
4090 | sds->this_load_per_task) | ||
4091 | imbn = 1; | ||
4092 | } else | ||
4093 | sds->this_load_per_task = | ||
4094 | cpu_avg_load_per_task(this_cpu); | ||
4095 | |||
4096 | scaled_busy_load_per_task = sds->busiest_load_per_task | ||
4097 | * SCHED_POWER_SCALE; | ||
4098 | scaled_busy_load_per_task /= sds->busiest->sgp->power; | ||
4099 | |||
4100 | if (sds->max_load - sds->this_load + scaled_busy_load_per_task >= | ||
4101 | (scaled_busy_load_per_task * imbn)) { | ||
4102 | *imbalance = sds->busiest_load_per_task; | ||
4103 | return; | ||
4104 | } | ||
4105 | |||
4106 | /* | ||
4107 | * OK, we don't have enough imbalance to justify moving tasks; | ||
4108 | * however, we may be able to increase total CPU power used by | ||
4109 | * moving them. | ||
4110 | */ | ||
4111 | |||
4112 | pwr_now += sds->busiest->sgp->power * | ||
4113 | min(sds->busiest_load_per_task, sds->max_load); | ||
4114 | pwr_now += sds->this->sgp->power * | ||
4115 | min(sds->this_load_per_task, sds->this_load); | ||
4116 | pwr_now /= SCHED_POWER_SCALE; | ||
4117 | |||
4118 | /* Amount of load we'd subtract */ | ||
4119 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | ||
4120 | sds->busiest->sgp->power; | ||
4121 | if (sds->max_load > tmp) | ||
4122 | pwr_move += sds->busiest->sgp->power * | ||
4123 | min(sds->busiest_load_per_task, sds->max_load - tmp); | ||
4124 | |||
4125 | /* Amount of load we'd add */ | ||
4126 | if (sds->max_load * sds->busiest->sgp->power < | ||
4127 | sds->busiest_load_per_task * SCHED_POWER_SCALE) | ||
4128 | tmp = (sds->max_load * sds->busiest->sgp->power) / | ||
4129 | sds->this->sgp->power; | ||
4130 | else | ||
4131 | tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) / | ||
4132 | sds->this->sgp->power; | ||
4133 | pwr_move += sds->this->sgp->power * | ||
4134 | min(sds->this_load_per_task, sds->this_load + tmp); | ||
4135 | pwr_move /= SCHED_POWER_SCALE; | ||
4136 | |||
4137 | /* Move if we gain throughput */ | ||
4138 | if (pwr_move > pwr_now) | ||
4139 | *imbalance = sds->busiest_load_per_task; | ||
4140 | } | ||
4141 | |||
4142 | /** | ||
4143 | * calculate_imbalance - Calculate the amount of imbalance present within the | ||
4144 | * groups of a given sched_domain during load balance. | ||
4145 | * @sds: statistics of the sched_domain whose imbalance is to be calculated. | ||
4146 | * @this_cpu: Cpu for which currently load balance is being performed. | ||
4147 | * @imbalance: The variable to store the imbalance. | ||
4148 | */ | ||
4149 | static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, | ||
4150 | unsigned long *imbalance) | ||
4151 | { | ||
4152 | unsigned long max_pull, load_above_capacity = ~0UL; | ||
4153 | |||
4154 | sds->busiest_load_per_task /= sds->busiest_nr_running; | ||
4155 | if (sds->group_imb) { | ||
4156 | sds->busiest_load_per_task = | ||
4157 | min(sds->busiest_load_per_task, sds->avg_load); | ||
4158 | } | ||
4159 | |||
4160 | /* | ||
4161 | * In the presence of smp nice balancing, certain scenarios can have | ||
4162 | * max load less than avg load (as we skip the groups at or below | ||
4163 | * their cpu_power while calculating max_load). | ||
4164 | */ | ||
4165 | if (sds->max_load < sds->avg_load) { | ||
4166 | *imbalance = 0; | ||
4167 | return fix_small_imbalance(sds, this_cpu, imbalance); | ||
4168 | } | ||
4169 | |||
4170 | if (!sds->group_imb) { | ||
4171 | /* | ||
4172 | * Don't want to pull so many tasks that a group would go idle. | ||
4173 | */ | ||
4174 | load_above_capacity = (sds->busiest_nr_running - | ||
4175 | sds->busiest_group_capacity); | ||
4176 | |||
4177 | load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); | ||
4178 | |||
4179 | load_above_capacity /= sds->busiest->sgp->power; | ||
4180 | } | ||
4181 | |||
4182 | /* | ||
4183 | * We're trying to get all the cpus to the average_load, so we don't | ||
4184 | * want to push ourselves above the average load, nor do we wish to | ||
4185 | * reduce the max loaded cpu below the average load. At the same time, | ||
4186 | * we also don't want to reduce the group load below the group capacity | ||
4187 | * (so that we can implement power-savings policies etc). Thus we look | ||
4188 | * for the minimum possible imbalance. | ||
4189 | * Be careful of negative numbers as they'll appear as very large values | ||
4190 | * with unsigned longs. | ||
4191 | */ | ||
4192 | max_pull = min(sds->max_load - sds->avg_load, load_above_capacity); | ||
4193 | |||
4194 | /* How much load to actually move to equalise the imbalance */ | ||
4195 | *imbalance = min(max_pull * sds->busiest->sgp->power, | ||
4196 | (sds->avg_load - sds->this_load) * sds->this->sgp->power) | ||
4197 | / SCHED_POWER_SCALE; | ||
4198 | |||
4199 | /* | ||
4200 | * If *imbalance is less than the average load per runnable task, | ||
4201 | * there is no guarantee that any tasks will be moved, so we'll | ||
4202 | * bump its value to force at least one task to be | ||
4203 | * moved. | ||
4204 | */ | ||
4205 | if (*imbalance < sds->busiest_load_per_task) | ||
4206 | return fix_small_imbalance(sds, this_cpu, imbalance); | ||
4207 | |||
4208 | } | ||
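
Editor's note: in calculate_imbalance(), max_pull is capped both by how far the busiest group sits above the domain average and by the load it carries beyond its capacity, and the final imbalance is further limited by the local group's headroom below the average. A hedged numeric walk-through using toy statistics:

/* Hedged walk-through of calculate_imbalance() with invented numbers:
 * a 2-CPU domain, 3 nice-0 tasks on the busiest CPU, 1 on this CPU. */
#include <stdio.h>

#define SCALE 1024UL   /* both SCHED_LOAD_SCALE and SCHED_POWER_SCALE */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long busiest_power = SCALE, this_power = SCALE;
	unsigned long max_load = 3072, this_load = 1024, avg_load = 2048;
	unsigned long nr_running = 3, capacity = 1;

	unsigned long load_above_capacity =
		(nr_running - capacity) * SCALE * SCALE / busiest_power;
	unsigned long max_pull =
		min_ul(max_load - avg_load, load_above_capacity);
	unsigned long imbalance =
		min_ul(max_pull * busiest_power,
		       (avg_load - this_load) * this_power) / SCALE;

	/* Expect 1024: exactly one nice-0 task's worth of load to move. */
	printf("imbalance = %lu\n", imbalance);
	return 0;
}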
4209 | |||
4210 | /******* find_busiest_group() helpers end here *********************/ | ||
4211 | |||
4212 | /** | ||
4213 | * find_busiest_group - Returns the busiest group within the sched_domain | ||
4214 | * if there is an imbalance. If there isn't an imbalance, and | ||
4215 | * the user has opted for power-savings, it returns a group whose | ||
4216 | * CPUs can be put to idle by rebalancing those tasks elsewhere, if | ||
4217 | * such a group exists. | ||
4218 | * | ||
4219 | * Also calculates the amount of weighted load which should be moved | ||
4220 | * to restore balance. | ||
4221 | * | ||
4222 | * @sd: The sched_domain whose busiest group is to be returned. | ||
4223 | * @this_cpu: The cpu for which load balancing is currently being performed. | ||
4224 | * @imbalance: Variable which stores amount of weighted load which should | ||
4225 | * be moved to restore balance/put a group to idle. | ||
4226 | * @idle: The idle status of this_cpu. | ||
4227 | * @cpus: The set of CPUs under consideration for load-balancing. | ||
4228 | * @balance: Pointer to a variable indicating if this_cpu | ||
4229 | * is the appropriate cpu to perform load balancing at this_level. | ||
4230 | * | ||
4231 | * Returns: - the busiest group if imbalance exists. | ||
4232 | * - If no imbalance and user has opted for power-savings balance, | ||
4233 | * return the least loaded group whose CPUs can be | ||
4234 | * put to idle by rebalancing its tasks onto our group. | ||
4235 | */ | ||
4236 | static struct sched_group * | ||
4237 | find_busiest_group(struct sched_domain *sd, int this_cpu, | ||
4238 | unsigned long *imbalance, enum cpu_idle_type idle, | ||
4239 | const struct cpumask *cpus, int *balance) | ||
4240 | { | ||
4241 | struct sd_lb_stats sds; | ||
4242 | |||
4243 | memset(&sds, 0, sizeof(sds)); | ||
4244 | |||
4245 | /* | ||
4246 | * Compute the various statistics relevant for load balancing at | ||
4247 | * this level. | ||
4248 | */ | ||
4249 | update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds); | ||
4250 | |||
4251 | /* | ||
4252 | * this_cpu is not the appropriate cpu to perform load balancing at | ||
4253 | * this level. | ||
4254 | */ | ||
4255 | if (!(*balance)) | ||
4256 | goto ret; | ||
4257 | |||
4258 | if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) && | ||
4259 | check_asym_packing(sd, &sds, this_cpu, imbalance)) | ||
4260 | return sds.busiest; | ||
4261 | |||
4262 | /* There is no busy sibling group to pull tasks from */ | ||
4263 | if (!sds.busiest || sds.busiest_nr_running == 0) | ||
4264 | goto out_balanced; | ||
4265 | |||
4266 | sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr; | ||
4267 | |||
4268 | /* | ||
4269 | * If the busiest group is imbalanced, the below checks don't | ||
4270 | * work because they assume all things are equal, which typically | ||
4271 | * isn't true due to cpus_allowed constraints and the like. | ||
4272 | */ | ||
4273 | if (sds.group_imb) | ||
4274 | goto force_balance; | ||
4275 | |||
4276 | /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ | ||
4277 | if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && | ||
4278 | !sds.busiest_has_capacity) | ||
4279 | goto force_balance; | ||
4280 | |||
4281 | /* | ||
4282 | * If the local group is busier than the selected busiest group, | ||
4283 | * don't try and pull any tasks. | ||
4284 | */ | ||
4285 | if (sds.this_load >= sds.max_load) | ||
4286 | goto out_balanced; | ||
4287 | |||
4288 | /* | ||
4289 | * Don't pull any tasks if this group is already above the domain | ||
4290 | * average load. | ||
4291 | */ | ||
4292 | if (sds.this_load >= sds.avg_load) | ||
4293 | goto out_balanced; | ||
4294 | |||
4295 | if (idle == CPU_IDLE) { | ||
4296 | /* | ||
4297 | * This cpu is idle. If the busiest group doesn't | ||
4298 | * have more tasks than the number of available cpus and | ||
4299 | * there is no imbalance between this and the busiest group | ||
4300 | * wrt idle cpus, it is balanced. | ||
4301 | */ | ||
4302 | if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && | ||
4303 | sds.busiest_nr_running <= sds.busiest_group_weight) | ||
4304 | goto out_balanced; | ||
4305 | } else { | ||
4306 | /* | ||
4307 | * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use | ||
4308 | * imbalance_pct to be conservative. | ||
4309 | */ | ||
4310 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | ||
4311 | goto out_balanced; | ||
4312 | } | ||
4313 | |||
4314 | force_balance: | ||
4315 | /* Looks like there is an imbalance. Compute it */ | ||
4316 | calculate_imbalance(&sds, this_cpu, imbalance); | ||
4317 | return sds.busiest; | ||
4318 | |||
4319 | out_balanced: | ||
4320 | /* | ||
4321 | * There is no obvious imbalance. But check if we can do some balancing | ||
4322 | * to save power. | ||
4323 | */ | ||
4324 | if (check_power_save_busiest_group(&sds, this_cpu, imbalance)) | ||
4325 | return sds.busiest; | ||
4326 | ret: | ||
4327 | *imbalance = 0; | ||
4328 | return NULL; | ||
4329 | } | ||
4330 | |||
4331 | /* | ||
4332 | * find_busiest_queue - find the busiest runqueue among the cpus in group. | ||
4333 | */ | ||
4334 | static struct rq * | ||
4335 | find_busiest_queue(struct sched_domain *sd, struct sched_group *group, | ||
4336 | enum cpu_idle_type idle, unsigned long imbalance, | ||
4337 | const struct cpumask *cpus) | ||
4338 | { | ||
4339 | struct rq *busiest = NULL, *rq; | ||
4340 | unsigned long max_load = 0; | ||
4341 | int i; | ||
4342 | |||
4343 | for_each_cpu(i, sched_group_cpus(group)) { | ||
4344 | unsigned long power = power_of(i); | ||
4345 | unsigned long capacity = DIV_ROUND_CLOSEST(power, | ||
4346 | SCHED_POWER_SCALE); | ||
4347 | unsigned long wl; | ||
4348 | |||
4349 | if (!capacity) | ||
4350 | capacity = fix_small_capacity(sd, group); | ||
4351 | |||
4352 | if (!cpumask_test_cpu(i, cpus)) | ||
4353 | continue; | ||
4354 | |||
4355 | rq = cpu_rq(i); | ||
4356 | wl = weighted_cpuload(i); | ||
4357 | |||
4358 | /* | ||
4359 | * When comparing with imbalance, use weighted_cpuload() | ||
4360 | * which is not scaled with the cpu power. | ||
4361 | */ | ||
4362 | if (capacity && rq->nr_running == 1 && wl > imbalance) | ||
4363 | continue; | ||
4364 | |||
4365 | /* | ||
4366 | * For the load comparisons with the other cpu's, consider | ||
4367 | * the weighted_cpuload() scaled with the cpu power, so that | ||
4368 | * the load can be moved away from the cpu that is potentially | ||
4369 | * running at a lower capacity. | ||
4370 | */ | ||
4371 | wl = (wl * SCHED_POWER_SCALE) / power; | ||
4372 | |||
4373 | if (wl > max_load) { | ||
4374 | max_load = wl; | ||
4375 | busiest = rq; | ||
4376 | } | ||
4377 | } | ||
4378 | |||
4379 | return busiest; | ||
4380 | } | ||
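
Editor's note: find_busiest_queue() scales each runqueue's weighted load by its cpu_power, so a weaker CPU carrying the same weight looks busier and is preferred as the pull source. A hedged example assuming two invented runqueues:

/* Hedged sketch of the power-scaled load comparison in
 * find_busiest_queue(); the per-cpu numbers are invented. */
#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL

int main(void)
{
	/* Same raw weighted load, but CPU 1 has had its power eroded
	 * (e.g. by RT activity) to half of nominal. */
	unsigned long wl[2]    = { 2048, 2048 };
	unsigned long power[2] = { 1024, 512 };

	for (int i = 0; i < 2; i++)
		printf("cpu%d scaled load = %lu\n", i,
		       wl[i] * SCHED_POWER_SCALE / power[i]);
	/* cpu1 reports 4096 vs cpu0's 2048, so cpu1 is picked as busiest. */
	return 0;
}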
4381 | |||
4382 | /* | ||
4383 | * Max backoff if we encounter pinned tasks. Pretty arbitrary value; any | ||
4384 | * choice works so long as it is large enough. | ||
4385 | */ | ||
4386 | #define MAX_PINNED_INTERVAL 512 | ||
4387 | |||
4388 | /* Working cpumask for load_balance and load_balance_newidle. */ | ||
4389 | DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); | ||
4390 | |||
4391 | static int need_active_balance(struct sched_domain *sd, int idle, | ||
4392 | int busiest_cpu, int this_cpu) | ||
4393 | { | ||
4394 | if (idle == CPU_NEWLY_IDLE) { | ||
4395 | |||
4396 | /* | ||
4397 | * ASYM_PACKING needs to force migrate tasks from busy but | ||
4398 | * higher numbered CPUs in order to pack all tasks in the | ||
4399 | * lowest numbered CPUs. | ||
4400 | */ | ||
4401 | if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu) | ||
4402 | return 1; | ||
4403 | |||
4404 | /* | ||
4405 | * The only task running on a non-idle cpu can be moved to this | ||
4406 | * cpu in an attempt to completely free up the other CPU | ||
4407 | * package. | ||
4408 | * | ||
4409 | * The package power saving logic comes from | ||
4410 | * find_busiest_group(). If there is no imbalance, then | ||
4411 | * f_b_g() will return NULL. However when sched_mc={1,2} then | ||
4412 | * f_b_g() will select a group from which a running task may be | ||
4413 | * pulled to this cpu in order to make the other package idle. | ||
4414 | * If there is no opportunity to make a package idle and if | ||
4415 | * there is no imbalance, then f_b_g() will return NULL and no | ||
4416 | * action will be taken in load_balance_newidle(). | ||
4417 | * | ||
4418 | * Under normal task pull operation due to imbalance, there | ||
4419 | * will be more than one task in the source run queue and | ||
4420 | * move_tasks() will succeed. ld_moved will be true and this | ||
4421 | * active balance code will not be triggered. | ||
4422 | */ | ||
4423 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
4424 | return 0; | ||
4425 | } | ||
4426 | |||
4427 | return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); | ||
4428 | } | ||
4429 | |||
4430 | static int active_load_balance_cpu_stop(void *data); | ||
4431 | |||
4432 | /* | ||
4433 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | ||
4434 | * tasks if there is an imbalance. | ||
4435 | */ | ||
4436 | static int load_balance(int this_cpu, struct rq *this_rq, | ||
4437 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
4438 | int *balance) | ||
4439 | { | ||
4440 | int ld_moved, all_pinned = 0, active_balance = 0; | ||
4441 | struct sched_group *group; | ||
4442 | unsigned long imbalance; | ||
4443 | struct rq *busiest; | ||
4444 | unsigned long flags; | ||
4445 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | ||
4446 | |||
4447 | cpumask_copy(cpus, cpu_active_mask); | ||
4448 | |||
4449 | schedstat_inc(sd, lb_count[idle]); | ||
4450 | |||
4451 | redo: | ||
4452 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, | ||
4453 | cpus, balance); | ||
4454 | |||
4455 | if (*balance == 0) | ||
4456 | goto out_balanced; | ||
4457 | |||
4458 | if (!group) { | ||
4459 | schedstat_inc(sd, lb_nobusyg[idle]); | ||
4460 | goto out_balanced; | ||
4461 | } | ||
4462 | |||
4463 | busiest = find_busiest_queue(sd, group, idle, imbalance, cpus); | ||
4464 | if (!busiest) { | ||
4465 | schedstat_inc(sd, lb_nobusyq[idle]); | ||
4466 | goto out_balanced; | ||
4467 | } | ||
4468 | |||
4469 | BUG_ON(busiest == this_rq); | ||
4470 | |||
4471 | schedstat_add(sd, lb_imbalance[idle], imbalance); | ||
4472 | |||
4473 | ld_moved = 0; | ||
4474 | if (busiest->nr_running > 1) { | ||
4475 | /* | ||
4476 | * Attempt to move tasks. If find_busiest_group has found | ||
4477 | * an imbalance but busiest->nr_running <= 1, the group is | ||
4478 | * still unbalanced. ld_moved simply stays zero, so it is | ||
4479 | * correctly treated as an imbalance. | ||
4480 | */ | ||
4481 | all_pinned = 1; | ||
4482 | local_irq_save(flags); | ||
4483 | double_rq_lock(this_rq, busiest); | ||
4484 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | ||
4485 | imbalance, sd, idle, &all_pinned); | ||
4486 | double_rq_unlock(this_rq, busiest); | ||
4487 | local_irq_restore(flags); | ||
4488 | |||
4489 | /* | ||
4490 | * some other cpu did the load balance for us. | ||
4491 | */ | ||
4492 | if (ld_moved && this_cpu != smp_processor_id()) | ||
4493 | resched_cpu(this_cpu); | ||
4494 | |||
4495 | /* All tasks on this runqueue were pinned by CPU affinity */ | ||
4496 | if (unlikely(all_pinned)) { | ||
4497 | cpumask_clear_cpu(cpu_of(busiest), cpus); | ||
4498 | if (!cpumask_empty(cpus)) | ||
4499 | goto redo; | ||
4500 | goto out_balanced; | ||
4501 | } | ||
4502 | } | ||
4503 | |||
4504 | if (!ld_moved) { | ||
4505 | schedstat_inc(sd, lb_failed[idle]); | ||
4506 | /* | ||
4507 | * Increment the failure counter only on periodic balance. | ||
4508 | * We do not want newidle balance, which can be very | ||
4509 | * frequent, pollute the failure counter causing | ||
4510 | * excessive cache_hot migrations and active balances. | ||
4511 | */ | ||
4512 | if (idle != CPU_NEWLY_IDLE) | ||
4513 | sd->nr_balance_failed++; | ||
4514 | |||
4515 | if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) { | ||
4516 | raw_spin_lock_irqsave(&busiest->lock, flags); | ||
4517 | |||
4518 | /* don't kick the active_load_balance_cpu_stop | ||
4519 | * if the curr task on the busiest cpu can't be | ||
4520 | * moved to this_cpu | ||
4521 | */ | ||
4522 | if (!cpumask_test_cpu(this_cpu, | ||
4523 | tsk_cpus_allowed(busiest->curr))) { | ||
4524 | raw_spin_unlock_irqrestore(&busiest->lock, | ||
4525 | flags); | ||
4526 | all_pinned = 1; | ||
4527 | goto out_one_pinned; | ||
4528 | } | ||
4529 | |||
4530 | /* | ||
4531 | * ->active_balance synchronizes accesses to | ||
4532 | * ->active_balance_work. Once set, it's cleared | ||
4533 | * only after active load balance is finished. | ||
4534 | */ | ||
4535 | if (!busiest->active_balance) { | ||
4536 | busiest->active_balance = 1; | ||
4537 | busiest->push_cpu = this_cpu; | ||
4538 | active_balance = 1; | ||
4539 | } | ||
4540 | raw_spin_unlock_irqrestore(&busiest->lock, flags); | ||
4541 | |||
4542 | if (active_balance) | ||
4543 | stop_one_cpu_nowait(cpu_of(busiest), | ||
4544 | active_load_balance_cpu_stop, busiest, | ||
4545 | &busiest->active_balance_work); | ||
4546 | |||
4547 | /* | ||
4548 | * We've kicked active balancing, reset the failure | ||
4549 | * counter. | ||
4550 | */ | ||
4551 | sd->nr_balance_failed = sd->cache_nice_tries+1; | ||
4552 | } | ||
4553 | } else | ||
4554 | sd->nr_balance_failed = 0; | ||
4555 | |||
4556 | if (likely(!active_balance)) { | ||
4557 | /* We were unbalanced, so reset the balancing interval */ | ||
4558 | sd->balance_interval = sd->min_interval; | ||
4559 | } else { | ||
4560 | /* | ||
4561 | * If we've begun active balancing, start to back off. This | ||
4562 | * case may not be covered by the all_pinned logic if there | ||
4563 | * is only 1 task on the busy runqueue (because we don't call | ||
4564 | * move_tasks). | ||
4565 | */ | ||
4566 | if (sd->balance_interval < sd->max_interval) | ||
4567 | sd->balance_interval *= 2; | ||
4568 | } | ||
4569 | |||
4570 | goto out; | ||
4571 | |||
4572 | out_balanced: | ||
4573 | schedstat_inc(sd, lb_balanced[idle]); | ||
4574 | |||
4575 | sd->nr_balance_failed = 0; | ||
4576 | |||
4577 | out_one_pinned: | ||
4578 | /* tune up the balancing interval */ | ||
4579 | if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || | ||
4580 | (sd->balance_interval < sd->max_interval)) | ||
4581 | sd->balance_interval *= 2; | ||
4582 | |||
4583 | ld_moved = 0; | ||
4584 | out: | ||
4585 | return ld_moved; | ||
4586 | } | ||
4587 | |||
4588 | /* | ||
4589 | * idle_balance is called by schedule() if this_cpu is about to become | ||
4590 | * idle. Attempts to pull tasks from other CPUs. | ||
4591 | */ | ||
4592 | void idle_balance(int this_cpu, struct rq *this_rq) | ||
4593 | { | ||
4594 | struct sched_domain *sd; | ||
4595 | int pulled_task = 0; | ||
4596 | unsigned long next_balance = jiffies + HZ; | ||
4597 | |||
4598 | this_rq->idle_stamp = this_rq->clock; | ||
4599 | |||
4600 | if (this_rq->avg_idle < sysctl_sched_migration_cost) | ||
4601 | return; | ||
4602 | |||
4603 | /* | ||
4604 | * Drop the rq->lock, but keep IRQ/preempt disabled. | ||
4605 | */ | ||
4606 | raw_spin_unlock(&this_rq->lock); | ||
4607 | |||
4608 | update_shares(this_cpu); | ||
4609 | rcu_read_lock(); | ||
4610 | for_each_domain(this_cpu, sd) { | ||
4611 | unsigned long interval; | ||
4612 | int balance = 1; | ||
4613 | |||
4614 | if (!(sd->flags & SD_LOAD_BALANCE)) | ||
4615 | continue; | ||
4616 | |||
4617 | if (sd->flags & SD_BALANCE_NEWIDLE) { | ||
4618 | /* If we've pulled tasks over stop searching: */ | ||
4619 | pulled_task = load_balance(this_cpu, this_rq, | ||
4620 | sd, CPU_NEWLY_IDLE, &balance); | ||
4621 | } | ||
4622 | |||
4623 | interval = msecs_to_jiffies(sd->balance_interval); | ||
4624 | if (time_after(next_balance, sd->last_balance + interval)) | ||
4625 | next_balance = sd->last_balance + interval; | ||
4626 | if (pulled_task) { | ||
4627 | this_rq->idle_stamp = 0; | ||
4628 | break; | ||
4629 | } | ||
4630 | } | ||
4631 | rcu_read_unlock(); | ||
4632 | |||
4633 | raw_spin_lock(&this_rq->lock); | ||
4634 | |||
4635 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | ||
4636 | /* | ||
4637 | * We are going idle. next_balance may be set based on | ||
4638 | * a busy processor. So reset next_balance. | ||
4639 | */ | ||
4640 | this_rq->next_balance = next_balance; | ||
4641 | } | ||
4642 | } | ||
4643 | |||
4644 | /* | ||
4645 | * active_load_balance_cpu_stop is run by cpu stopper. It pushes | ||
4646 | * running tasks off the busiest CPU onto idle CPUs. It requires at | ||
4647 | * least 1 task to be running on each physical CPU where possible, and | ||
4648 | * avoids physical / logical imbalances. | ||
4649 | */ | ||
4650 | static int active_load_balance_cpu_stop(void *data) | ||
4651 | { | ||
4652 | struct rq *busiest_rq = data; | ||
4653 | int busiest_cpu = cpu_of(busiest_rq); | ||
4654 | int target_cpu = busiest_rq->push_cpu; | ||
4655 | struct rq *target_rq = cpu_rq(target_cpu); | ||
4656 | struct sched_domain *sd; | ||
4657 | |||
4658 | raw_spin_lock_irq(&busiest_rq->lock); | ||
4659 | |||
4660 | /* make sure the requested cpu hasn't gone down in the meantime */ | ||
4661 | if (unlikely(busiest_cpu != smp_processor_id() || | ||
4662 | !busiest_rq->active_balance)) | ||
4663 | goto out_unlock; | ||
4664 | |||
4665 | /* Is there any task to move? */ | ||
4666 | if (busiest_rq->nr_running <= 1) | ||
4667 | goto out_unlock; | ||
4668 | |||
4669 | /* | ||
4670 | * This condition is "impossible", if it occurs | ||
4671 | * we need to fix it. Originally reported by | ||
4672 | * Bjorn Helgaas on a 128-cpu setup. | ||
4673 | */ | ||
4674 | BUG_ON(busiest_rq == target_rq); | ||
4675 | |||
4676 | /* move a task from busiest_rq to target_rq */ | ||
4677 | double_lock_balance(busiest_rq, target_rq); | ||
4678 | |||
4679 | /* Search for an sd spanning us and the target CPU. */ | ||
4680 | rcu_read_lock(); | ||
4681 | for_each_domain(target_cpu, sd) { | ||
4682 | if ((sd->flags & SD_LOAD_BALANCE) && | ||
4683 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) | ||
4684 | break; | ||
4685 | } | ||
4686 | |||
4687 | if (likely(sd)) { | ||
4688 | schedstat_inc(sd, alb_count); | ||
4689 | |||
4690 | if (move_one_task(target_rq, target_cpu, busiest_rq, | ||
4691 | sd, CPU_IDLE)) | ||
4692 | schedstat_inc(sd, alb_pushed); | ||
4693 | else | ||
4694 | schedstat_inc(sd, alb_failed); | ||
4695 | } | ||
4696 | rcu_read_unlock(); | ||
4697 | double_unlock_balance(busiest_rq, target_rq); | ||
4698 | out_unlock: | ||
4699 | busiest_rq->active_balance = 0; | ||
4700 | raw_spin_unlock_irq(&busiest_rq->lock); | ||
4701 | return 0; | ||
4702 | } | ||
4703 | |||
4704 | #ifdef CONFIG_NO_HZ | ||
4705 | /* | ||
4706 | * idle load balancing details | ||
4707 | * - One of the idle CPUs nominates itself as idle load_balancer, while | ||
4708 | * entering idle. | ||
4709 | * - This idle load balancer CPU will also go into tickless mode when | ||
4710 | * it is idle, just like all other idle CPUs | ||
4711 | * - When one of the busy CPUs notices that idle rebalancing may be | ||
4712 | * needed, it kicks the idle load balancer, which then does idle | ||
4713 | * load balancing on behalf of all the idle CPUs. | ||
4714 | */ | ||
4715 | static struct { | ||
4716 | atomic_t load_balancer; | ||
4717 | atomic_t first_pick_cpu; | ||
4718 | atomic_t second_pick_cpu; | ||
4719 | cpumask_var_t idle_cpus_mask; | ||
4720 | cpumask_var_t grp_idle_mask; | ||
4721 | unsigned long next_balance; /* in jiffy units */ | ||
4722 | } nohz ____cacheline_aligned; | ||
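| /* | ||
|  * Convention (from the cmpxchg() users below and the initialization in | ||
|  * init_sched_fair_class()): each of the three atomics above holds a cpu | ||
|  * number while that role is claimed, and nr_cpu_ids while it is free. | ||
|  */ | ||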
4723 | |||
4724 | int get_nohz_load_balancer(void) | ||
4725 | { | ||
4726 | return atomic_read(&nohz.load_balancer); | ||
4727 | } | ||
4728 | |||
4729 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
4730 | /** | ||
4731 | * lowest_flag_domain - Return lowest sched_domain containing flag. | ||
4732 | * @cpu: The cpu whose lowest level of sched domain is to | ||
4733 | * be returned. | ||
4734 | * @flag: The flag to check for the lowest sched_domain | ||
4735 | * for the given cpu. | ||
4736 | * | ||
4737 | * Returns the lowest sched_domain of a cpu which contains the given flag. | ||
4738 | */ | ||
4739 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) | ||
4740 | { | ||
4741 | struct sched_domain *sd; | ||
4742 | |||
4743 | for_each_domain(cpu, sd) | ||
4744 | if (sd->flags & flag) | ||
4745 | break; | ||
4746 | |||
4747 | return sd; | ||
4748 | } | ||
4749 | |||
4750 | /** | ||
4751 | * for_each_flag_domain - Iterates over sched_domains containing the flag. | ||
4752 | * @cpu: The cpu whose domains we're iterating over. | ||
4753 | * @sd: iterator variable that holds the sched_domain currently | ||
4754 | * being visited for cpu. | ||
4755 | * @flag: The flag to filter the sched_domains to be iterated. | ||
4756 | * | ||
4757 | * Iterates over all the scheduler domains for a given cpu that have the 'flag' | ||
4758 | * set, starting from the lowest sched_domain to the highest. | ||
4759 | */ | ||
4760 | #define for_each_flag_domain(cpu, sd, flag) \ | ||
4761 | for (sd = lowest_flag_domain(cpu, flag); \ | ||
4762 | (sd && (sd->flags & flag)); sd = sd->parent) | ||
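| /* | ||
|  * Typical use, as in find_new_ilb() below: | ||
|  * | ||
|  *	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | ||
|  *		... walk sd->groups ... | ||
|  *	} | ||
|  */ | ||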
4763 | |||
4764 | /** | ||
4765 | * is_semi_idle_group - Checks if the given sched_group is semi-idle. | ||
4766 | * @ilb_group: group to be checked for semi-idleness | ||
4767 | * | ||
4768 | * Returns: 1 if the group is semi-idle. 0 otherwise. | ||
4769 | * | ||
4770 | * We define a sched_group to be semi-idle if it has at least one idle CPU | ||
4771 | * and at least one non-idle CPU. This helper function checks if the given | ||
4772 | * sched_group is semi-idle or not. | ||
4773 | */ | ||
4774 | static inline int is_semi_idle_group(struct sched_group *ilb_group) | ||
4775 | { | ||
4776 | cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask, | ||
4777 | sched_group_cpus(ilb_group)); | ||
4778 | |||
4779 | /* | ||
4780 | * A sched_group is semi-idle when it has at least one busy cpu | ||
4781 | * and at least one idle cpu. | ||
4782 | */ | ||
4783 | if (cpumask_empty(nohz.grp_idle_mask)) | ||
4784 | return 0; | ||
4785 | |||
4786 | if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group))) | ||
4787 | return 0; | ||
4788 | |||
4789 | return 1; | ||
4790 | } | ||
4791 | /** | ||
4792 | * find_new_ilb - Finds the optimum idle load balancer for nomination. | ||
4793 | * @cpu: The cpu which is nominating a new idle_load_balancer. | ||
4794 | * | ||
4795 | * Returns: The id of the new idle load balancer if one exists, | ||
4796 | * else a value >= nr_cpu_ids. | ||
4797 | * | ||
4798 | * This algorithm picks the idle load balancer such that it belongs to a | ||
4799 | * semi-idle powersavings sched_domain. The idea is to avoid waking up a | ||
4800 | * completely idle package/core just for the purpose of idle load balancing | ||
4801 | * when there are other idle cpus which are better suited for that job. | ||
4802 | */ | ||
4803 | static int find_new_ilb(int cpu) | ||
4804 | { | ||
4805 | struct sched_domain *sd; | ||
4806 | struct sched_group *ilb_group; | ||
4807 | int ilb = nr_cpu_ids; | ||
4808 | |||
4809 | /* | ||
4810 | * Restrict idle load balancer selection to semi-idle packages only | ||
4811 | * when power-aware load balancing is enabled. | ||
4812 | */ | ||
4813 | if (!(sched_smt_power_savings || sched_mc_power_savings)) | ||
4814 | goto out_done; | ||
4815 | |||
4816 | /* | ||
4817 | * Optimize for the case when we have no idle CPUs or only one | ||
4818 | * idle CPU. Don't walk the sched_domain hierarchy in such cases | ||
4819 | */ | ||
4820 | if (cpumask_weight(nohz.idle_cpus_mask) < 2) | ||
4821 | goto out_done; | ||
4822 | |||
4823 | rcu_read_lock(); | ||
4824 | for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | ||
4825 | ilb_group = sd->groups; | ||
4826 | |||
4827 | do { | ||
4828 | if (is_semi_idle_group(ilb_group)) { | ||
4829 | ilb = cpumask_first(nohz.grp_idle_mask); | ||
4830 | goto unlock; | ||
4831 | } | ||
4832 | |||
4833 | ilb_group = ilb_group->next; | ||
4834 | |||
4835 | } while (ilb_group != sd->groups); | ||
4836 | } | ||
4837 | unlock: | ||
4838 | rcu_read_unlock(); | ||
4839 | |||
4840 | out_done: | ||
4841 | return ilb; | ||
4842 | } | ||
4843 | #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ | ||
4844 | static inline int find_new_ilb(int call_cpu) | ||
4845 | { | ||
4846 | return nr_cpu_ids; | ||
4847 | } | ||
4848 | #endif | ||
4849 | |||
4850 | /* | ||
4851 | * Kick a CPU to do the nohz balancing, if it is time for it. We pick the | ||
4852 | * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle | ||
4853 | * CPU (if there is one). | ||
4854 | */ | ||
4855 | static void nohz_balancer_kick(int cpu) | ||
4856 | { | ||
4857 | int ilb_cpu; | ||
4858 | |||
4859 | nohz.next_balance++; | ||
4860 | |||
4861 | ilb_cpu = get_nohz_load_balancer(); | ||
4862 | |||
4863 | if (ilb_cpu >= nr_cpu_ids) { | ||
4864 | ilb_cpu = cpumask_first(nohz.idle_cpus_mask); | ||
4865 | if (ilb_cpu >= nr_cpu_ids) | ||
4866 | return; | ||
4867 | } | ||
4868 | |||
4869 | if (!cpu_rq(ilb_cpu)->nohz_balance_kick) { | ||
4870 | cpu_rq(ilb_cpu)->nohz_balance_kick = 1; | ||
4871 | |||
4872 | smp_mb(); | ||
4873 | /* | ||
4874 | * Use smp_send_reschedule() instead of resched_cpu(). | ||
4875 | * This way we generate a sched IPI on the target cpu which | ||
4876 | * is idle. And the softirq performing nohz idle load balance | ||
4877 | * will be run before returning from the IPI. | ||
4878 | */ | ||
4879 | smp_send_reschedule(ilb_cpu); | ||
4880 | } | ||
4881 | return; | ||
4882 | } | ||
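| /* | ||
|  * On the kicked cpu the resulting SCHED_SOFTIRQ runs | ||
|  * run_rebalance_domains(); nohz_idle_balance() below then notices | ||
|  * rq->nohz_balance_kick, balances on behalf of all idle cpus and | ||
|  * clears the flag again. | ||
|  */ | ||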
4883 | |||
4884 | /* | ||
4885 | * This routine tries to nominate the ilb (idle load balancing) | ||
4886 | * owner among the cpus whose ticks are stopped. The ilb owner does the idle | ||
4887 | * load balancing on behalf of all those cpus. | ||
4888 | * | ||
4889 | * When the ilb owner becomes busy, there is no new ilb owner until some | ||
4890 | * idle CPU wakes up and goes back to idle, or some busy CPU triggers | ||
4891 | * idle load balancing by kicking one of the idle CPUs. | ||
4892 | * | ||
4893 | * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this | ||
4894 | * ilb owner CPU in the future (when there is a need for idle load balancing | ||
4895 | * on behalf of all idle CPUs). | ||
4896 | */ | ||
4897 | void select_nohz_load_balancer(int stop_tick) | ||
4898 | { | ||
4899 | int cpu = smp_processor_id(); | ||
4900 | |||
4901 | if (stop_tick) { | ||
4902 | if (!cpu_active(cpu)) { | ||
4903 | if (atomic_read(&nohz.load_balancer) != cpu) | ||
4904 | return; | ||
4905 | |||
4906 | /* | ||
4907 | * If we are going offline and still the leader, | ||
4908 | * give up! | ||
4909 | */ | ||
4910 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, | ||
4911 | nr_cpu_ids) != cpu) | ||
4912 | BUG(); | ||
4913 | |||
4914 | return; | ||
4915 | } | ||
4916 | |||
4917 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); | ||
4918 | |||
4919 | if (atomic_read(&nohz.first_pick_cpu) == cpu) | ||
4920 | atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids); | ||
4921 | if (atomic_read(&nohz.second_pick_cpu) == cpu) | ||
4922 | atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); | ||
4923 | |||
4924 | if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) { | ||
4925 | int new_ilb; | ||
4926 | |||
4927 | /* make me the ilb owner */ | ||
4928 | if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids, | ||
4929 | cpu) != nr_cpu_ids) | ||
4930 | return; | ||
4931 | |||
4932 | /* | ||
4933 | * Check to see if there is a more power-efficient | ||
4934 | * ilb. | ||
4935 | */ | ||
4936 | new_ilb = find_new_ilb(cpu); | ||
4937 | if (new_ilb < nr_cpu_ids && new_ilb != cpu) { | ||
4938 | atomic_set(&nohz.load_balancer, nr_cpu_ids); | ||
4939 | resched_cpu(new_ilb); | ||
4940 | return; | ||
4941 | } | ||
4942 | return; | ||
4943 | } | ||
4944 | } else { | ||
4945 | if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) | ||
4946 | return; | ||
4947 | |||
4948 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | ||
4949 | |||
4950 | if (atomic_read(&nohz.load_balancer) == cpu) | ||
4951 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, | ||
4952 | nr_cpu_ids) != cpu) | ||
4953 | BUG(); | ||
4954 | } | ||
4955 | return; | ||
4956 | } | ||
4957 | #endif | ||
4958 | |||
4959 | static DEFINE_SPINLOCK(balancing); | ||
4960 | |||
4961 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; | ||
4962 | |||
4963 | /* | ||
4964 | * Scale the max load_balance interval with the number of CPUs in the system. | ||
4965 | * This trades load-balance latency on larger machines for less cross talk. | ||
4966 | */ | ||
4967 | void update_max_interval(void) | ||
4968 | { | ||
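| /* e.g. with HZ=1000 and 8 cpus online the cap becomes 800 jiffies (0.8s) */ | ||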
4969 | max_load_balance_interval = HZ*num_online_cpus()/10; | ||
4970 | } | ||
4971 | |||
4972 | /* | ||
4973 | * It checks each scheduling domain to see if it is due to be balanced, | ||
4974 | * and initiates a balancing operation if so. | ||
4975 | * | ||
4976 | * Balancing parameters are set up in arch_init_sched_domains. | ||
4977 | */ | ||
4978 | static void rebalance_domains(int cpu, enum cpu_idle_type idle) | ||
4979 | { | ||
4980 | int balance = 1; | ||
4981 | struct rq *rq = cpu_rq(cpu); | ||
4982 | unsigned long interval; | ||
4983 | struct sched_domain *sd; | ||
4984 | /* Earliest time when we have to do rebalance again */ | ||
4985 | unsigned long next_balance = jiffies + 60*HZ; | ||
4986 | int update_next_balance = 0; | ||
4987 | int need_serialize; | ||
4988 | |||
4989 | update_shares(cpu); | ||
4990 | |||
4991 | rcu_read_lock(); | ||
4992 | for_each_domain(cpu, sd) { | ||
4993 | if (!(sd->flags & SD_LOAD_BALANCE)) | ||
4994 | continue; | ||
4995 | |||
4996 | interval = sd->balance_interval; | ||
4997 | if (idle != CPU_IDLE) | ||
4998 | interval *= sd->busy_factor; | ||
4999 | |||
5000 | /* scale ms to jiffies */ | ||
5001 | interval = msecs_to_jiffies(interval); | ||
5002 | interval = clamp(interval, 1UL, max_load_balance_interval); | ||
5003 | |||
5004 | need_serialize = sd->flags & SD_SERIALIZE; | ||
5005 | |||
5006 | if (need_serialize) { | ||
5007 | if (!spin_trylock(&balancing)) | ||
5008 | goto out; | ||
5009 | } | ||
5010 | |||
5011 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | ||
5012 | if (load_balance(cpu, rq, sd, idle, &balance)) { | ||
5013 | /* | ||
5014 | * We've pulled tasks over, so we're no | ||
5015 | * longer idle. | ||
5016 | */ | ||
5017 | idle = CPU_NOT_IDLE; | ||
5018 | } | ||
5019 | sd->last_balance = jiffies; | ||
5020 | } | ||
5021 | if (need_serialize) | ||
5022 | spin_unlock(&balancing); | ||
5023 | out: | ||
5024 | if (time_after(next_balance, sd->last_balance + interval)) { | ||
5025 | next_balance = sd->last_balance + interval; | ||
5026 | update_next_balance = 1; | ||
5027 | } | ||
5028 | |||
5029 | /* | ||
5030 | * Stop the load balance at this level. There is another | ||
5031 | * CPU in our sched group which is doing load balancing more | ||
5032 | * actively. | ||
5033 | */ | ||
5034 | if (!balance) | ||
5035 | break; | ||
5036 | } | ||
5037 | rcu_read_unlock(); | ||
5038 | |||
5039 | /* | ||
5040 | * next_balance will be updated only when there is a need. | ||
5041 | * When the cpu is attached to a null domain, for example, it will not be | ||
5042 | * updated. | ||
5043 | */ | ||
5044 | if (likely(update_next_balance)) | ||
5045 | rq->next_balance = next_balance; | ||
5046 | } | ||
5047 | |||
5048 | #ifdef CONFIG_NO_HZ | ||
5049 | /* | ||
5050 | * In the CONFIG_NO_HZ case, the idle balance kickee will do the | ||
5051 | * rebalancing for all the cpus whose scheduler ticks are stopped. | ||
5052 | */ | ||
5053 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) | ||
5054 | { | ||
5055 | struct rq *this_rq = cpu_rq(this_cpu); | ||
5056 | struct rq *rq; | ||
5057 | int balance_cpu; | ||
5058 | |||
5059 | if (idle != CPU_IDLE || !this_rq->nohz_balance_kick) | ||
5060 | return; | ||
5061 | |||
5062 | for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { | ||
5063 | if (balance_cpu == this_cpu) | ||
5064 | continue; | ||
5065 | |||
5066 | /* | ||
5067 | * If this cpu gets work to do, stop the load balancing | ||
5068 | * work being done for other cpus. The next load | ||
5069 | * balancing owner will pick it up. | ||
5070 | */ | ||
5071 | if (need_resched()) { | ||
5072 | this_rq->nohz_balance_kick = 0; | ||
5073 | break; | ||
5074 | } | ||
5075 | |||
5076 | raw_spin_lock_irq(&this_rq->lock); | ||
5077 | update_rq_clock(this_rq); | ||
5078 | update_cpu_load(this_rq); | ||
5079 | raw_spin_unlock_irq(&this_rq->lock); | ||
5080 | |||
5081 | rebalance_domains(balance_cpu, CPU_IDLE); | ||
5082 | |||
5083 | rq = cpu_rq(balance_cpu); | ||
5084 | if (time_after(this_rq->next_balance, rq->next_balance)) | ||
5085 | this_rq->next_balance = rq->next_balance; | ||
5086 | } | ||
5087 | nohz.next_balance = this_rq->next_balance; | ||
5088 | this_rq->nohz_balance_kick = 0; | ||
5089 | } | ||
5090 | |||
5091 | /* | ||
5092 | * Current heuristic for kicking the idle load balancer | ||
5093 | * - first_pick_cpu is one of the busy CPUs. It will kick the | ||
5094 | * idle load balancer when it has more than one process active. This | ||
5095 | * eliminates the need for idle load balancing altogether when we have | ||
5096 | * only one running process in the system (common case). | ||
5097 | * - If there is more than one busy CPU, the idle load balancer may have | ||
5098 | * to run for active_load_balance to happen (i.e., two busy CPUs are | ||
5099 | * SMT or core siblings and can run better if they move to different | ||
5100 | * physical CPUs). So, second_pick_cpu is the second of the busy CPUs, | ||
5101 | * which will kick the idle load balancer as soon as it has any load. | ||
5102 | */ | ||
5103 | static inline int nohz_kick_needed(struct rq *rq, int cpu) | ||
5104 | { | ||
5105 | unsigned long now = jiffies; | ||
5106 | int ret; | ||
5107 | int first_pick_cpu, second_pick_cpu; | ||
5108 | |||
5109 | if (time_before(now, nohz.next_balance)) | ||
5110 | return 0; | ||
5111 | |||
5112 | if (idle_cpu(cpu)) | ||
5113 | return 0; | ||
5114 | |||
5115 | first_pick_cpu = atomic_read(&nohz.first_pick_cpu); | ||
5116 | second_pick_cpu = atomic_read(&nohz.second_pick_cpu); | ||
5117 | |||
5118 | if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu && | ||
5119 | second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu) | ||
5120 | return 0; | ||
5121 | |||
5122 | ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu); | ||
5123 | if (ret == nr_cpu_ids || ret == cpu) { | ||
5124 | atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids); | ||
5125 | if (rq->nr_running > 1) | ||
5126 | return 1; | ||
5127 | } else { | ||
5128 | ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu); | ||
5129 | if (ret == nr_cpu_ids || ret == cpu) { | ||
5130 | if (rq->nr_running) | ||
5131 | return 1; | ||
5132 | } | ||
5133 | } | ||
5134 | return 0; | ||
5135 | } | ||
5136 | #else | ||
5137 | static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { } | ||
5138 | #endif | ||
5139 | |||
5140 | /* | ||
5141 | * run_rebalance_domains is triggered when needed from the scheduler tick. | ||
5142 | * Also triggered for nohz idle balancing (with nohz_balance_kick set). | ||
5143 | */ | ||
5144 | static void run_rebalance_domains(struct softirq_action *h) | ||
5145 | { | ||
5146 | int this_cpu = smp_processor_id(); | ||
5147 | struct rq *this_rq = cpu_rq(this_cpu); | ||
5148 | enum cpu_idle_type idle = this_rq->idle_balance ? | ||
5149 | CPU_IDLE : CPU_NOT_IDLE; | ||
5150 | |||
5151 | rebalance_domains(this_cpu, idle); | ||
5152 | |||
5153 | /* | ||
5154 | * If this cpu has a pending nohz_balance_kick, then do the | ||
5155 | * balancing on behalf of the other idle cpus whose ticks are | ||
5156 | * stopped. | ||
5157 | */ | ||
5158 | nohz_idle_balance(this_cpu, idle); | ||
5159 | } | ||
5160 | |||
5161 | static inline int on_null_domain(int cpu) | ||
5162 | { | ||
5163 | return !rcu_dereference_sched(cpu_rq(cpu)->sd); | ||
5164 | } | ||
5165 | |||
5166 | /* | ||
5167 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. | ||
5168 | */ | ||
5169 | void trigger_load_balance(struct rq *rq, int cpu) | ||
5170 | { | ||
5171 | /* Don't need to rebalance while attached to NULL domain */ | ||
5172 | if (time_after_eq(jiffies, rq->next_balance) && | ||
5173 | likely(!on_null_domain(cpu))) | ||
5174 | raise_softirq(SCHED_SOFTIRQ); | ||
5175 | #ifdef CONFIG_NO_HZ | ||
5176 | else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu))) | ||
5177 | nohz_balancer_kick(cpu); | ||
5178 | #endif | ||
5179 | } | ||
5180 | |||
5181 | static void rq_online_fair(struct rq *rq) | ||
5182 | { | ||
5183 | update_sysctl(); | ||
5184 | } | ||
5185 | |||
5186 | static void rq_offline_fair(struct rq *rq) | ||
5187 | { | ||
5188 | update_sysctl(); | ||
5189 | } | ||
5190 | |||
5191 | #endif /* CONFIG_SMP */ | ||
5192 | |||
5193 | /* | ||
5194 | * scheduler tick hitting a task of our scheduling class: | ||
5195 | */ | ||
5196 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) | ||
5197 | { | ||
5198 | struct cfs_rq *cfs_rq; | ||
5199 | struct sched_entity *se = &curr->se; | ||
5200 | |||
5201 | for_each_sched_entity(se) { | ||
5202 | cfs_rq = cfs_rq_of(se); | ||
5203 | entity_tick(cfs_rq, se, queued); | ||
5204 | } | ||
5205 | } | ||
5206 | |||
5207 | /* | ||
5208 | * called on fork with the child task as argument from the parent's context | ||
5209 | * - child not yet on the tasklist | ||
5210 | * - preemption disabled | ||
5211 | */ | ||
5212 | static void task_fork_fair(struct task_struct *p) | ||
5213 | { | ||
5214 | struct cfs_rq *cfs_rq = task_cfs_rq(current); | ||
5215 | struct sched_entity *se = &p->se, *curr = cfs_rq->curr; | ||
5216 | int this_cpu = smp_processor_id(); | ||
5217 | struct rq *rq = this_rq(); | ||
5218 | unsigned long flags; | ||
5219 | |||
5220 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
5221 | |||
5222 | update_rq_clock(rq); | ||
5223 | |||
5224 | if (unlikely(task_cpu(p) != this_cpu)) { | ||
5225 | rcu_read_lock(); | ||
5226 | __set_task_cpu(p, this_cpu); | ||
5227 | rcu_read_unlock(); | ||
5228 | } | ||
5229 | |||
5230 | update_curr(cfs_rq); | ||
5231 | |||
5232 | if (curr) | ||
5233 | se->vruntime = curr->vruntime; | ||
5234 | place_entity(cfs_rq, se, 1); | ||
5235 | |||
5236 | if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { | ||
5237 | /* | ||
5238 | * Upon rescheduling, sched_class::put_prev_task() will place | ||
5239 | * 'current' within the tree based on its new key value. | ||
5240 | */ | ||
5241 | swap(curr->vruntime, se->vruntime); | ||
5242 | resched_task(rq->curr); | ||
5243 | } | ||
5244 | |||
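| /* | ||
|  * Make the child's vruntime relative to this cfs_rq's min_vruntime: | ||
|  * wake_up_new_task() may queue the child on a different cpu, and | ||
|  * enqueue_entity() adds that runqueue's min_vruntime back in. | ||
|  */ | ||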
5245 | se->vruntime -= cfs_rq->min_vruntime; | ||
5246 | |||
5247 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
5248 | } | ||
5249 | |||
5250 | /* | ||
5251 | * Priority of the task has changed. Check to see if we preempt | ||
5252 | * the current task. | ||
5253 | */ | ||
5254 | static void | ||
5255 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) | ||
5256 | { | ||
5257 | if (!p->se.on_rq) | ||
5258 | return; | ||
5259 | |||
5260 | /* | ||
5261 | * Reschedule if we are currently running on this runqueue and | ||
5262 | * our priority decreased, or if we are not currently running on | ||
5263 | * this runqueue and our priority is higher than the current task's. | ||
5264 | */ | ||
5265 | if (rq->curr == p) { | ||
5266 | if (p->prio > oldprio) | ||
5267 | resched_task(rq->curr); | ||
5268 | } else | ||
5269 | check_preempt_curr(rq, p, 0); | ||
5270 | } | ||
5271 | |||
5272 | static void switched_from_fair(struct rq *rq, struct task_struct *p) | ||
5273 | { | ||
5274 | struct sched_entity *se = &p->se; | ||
5275 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
5276 | |||
5277 | /* | ||
5278 | * Ensure the task's vruntime is normalized, so that when it is | ||
5279 | * switched back to the fair class the enqueue_entity(.flags=0) will | ||
5280 | * do the right thing. | ||
5281 | * | ||
5282 | * If it was on_rq, then the dequeue_entity(.flags=0) will already | ||
5283 | * have normalized the vruntime, if it was !on_rq, then only when | ||
5284 | * the task is sleeping will it still have non-normalized vruntime. | ||
5285 | */ | ||
5286 | if (!se->on_rq && p->state != TASK_RUNNING) { | ||
5287 | /* | ||
5288 | * Fix up our vruntime so that the current sleep doesn't | ||
5289 | * cause 'unlimited' sleep bonus. | ||
5290 | */ | ||
5291 | place_entity(cfs_rq, se, 0); | ||
5292 | se->vruntime -= cfs_rq->min_vruntime; | ||
5293 | } | ||
5294 | } | ||
5295 | |||
5296 | /* | ||
5297 | * We switched to the sched_fair class. | ||
5298 | */ | ||
5299 | static void switched_to_fair(struct rq *rq, struct task_struct *p) | ||
5300 | { | ||
5301 | if (!p->se.on_rq) | ||
5302 | return; | ||
5303 | |||
5304 | /* | ||
5305 | * We were most likely switched from sched_rt, so | ||
5306 | * kick off a reschedule if running, otherwise just see | ||
5307 | * if we can still preempt the current task. | ||
5308 | */ | ||
5309 | if (rq->curr == p) | ||
5310 | resched_task(rq->curr); | ||
5311 | else | ||
5312 | check_preempt_curr(rq, p, 0); | ||
5313 | } | ||
5314 | |||
5315 | /* Account for a task changing its policy or group. | ||
5316 | * | ||
5317 | * This routine is mostly called to set cfs_rq->curr field when a task | ||
5318 | * migrates between groups/classes. | ||
5319 | */ | ||
5320 | static void set_curr_task_fair(struct rq *rq) | ||
5321 | { | ||
5322 | struct sched_entity *se = &rq->curr->se; | ||
5323 | |||
5324 | for_each_sched_entity(se) { | ||
5325 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | ||
5326 | |||
5327 | set_next_entity(cfs_rq, se); | ||
5328 | /* ensure bandwidth has been allocated on our new cfs_rq */ | ||
5329 | account_cfs_rq_runtime(cfs_rq, 0); | ||
5330 | } | ||
5331 | } | ||
5332 | |||
5333 | void init_cfs_rq(struct cfs_rq *cfs_rq) | ||
5334 | { | ||
5335 | cfs_rq->tasks_timeline = RB_ROOT; | ||
5336 | INIT_LIST_HEAD(&cfs_rq->tasks); | ||
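| /* | ||
|  * Note: min_vruntime starts just below the u64 wrap point, presumably | ||
|  * so that the wraparound-safe comparisons used on vruntimes are | ||
|  * exercised soon after boot rather than only after long uptimes. | ||
|  */ | ||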
5337 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); | ||
5338 | #ifndef CONFIG_64BIT | ||
5339 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; | ||
5340 | #endif | ||
5341 | } | ||
5342 | |||
5343 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
5344 | static void task_move_group_fair(struct task_struct *p, int on_rq) | ||
5345 | { | ||
5346 | /* | ||
5347 | * If the task was not on the rq at the time of this cgroup movement | ||
5348 | * it must have been asleep, sleeping tasks keep their ->vruntime | ||
5349 | * absolute on their old rq until wakeup (needed for the fair sleeper | ||
5350 | * bonus in place_entity()). | ||
5351 | * | ||
5352 | * If it was on the rq, we've just 'preempted' it, which does convert | ||
5353 | * ->vruntime to a relative base. | ||
5354 | * | ||
5355 | * Make sure both cases convert their relative position when migrating | ||
5356 | * to another cgroup's rq. This does somewhat interfere with the | ||
5357 | * fair sleeper stuff for the first placement, but who cares. | ||
5358 | */ | ||
5359 | if (!on_rq) | ||
5360 | p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; | ||
5361 | set_task_rq(p, task_cpu(p)); | ||
5362 | if (!on_rq) | ||
5363 | p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; | ||
5364 | } | ||
5365 | |||
5366 | void free_fair_sched_group(struct task_group *tg) | ||
5367 | { | ||
5368 | int i; | ||
5369 | |||
5370 | destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); | ||
5371 | |||
5372 | for_each_possible_cpu(i) { | ||
5373 | if (tg->cfs_rq) | ||
5374 | kfree(tg->cfs_rq[i]); | ||
5375 | if (tg->se) | ||
5376 | kfree(tg->se[i]); | ||
5377 | } | ||
5378 | |||
5379 | kfree(tg->cfs_rq); | ||
5380 | kfree(tg->se); | ||
5381 | } | ||
5382 | |||
5383 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | ||
5384 | { | ||
5385 | struct cfs_rq *cfs_rq; | ||
5386 | struct sched_entity *se; | ||
5387 | int i; | ||
5388 | |||
5389 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); | ||
5390 | if (!tg->cfs_rq) | ||
5391 | goto err; | ||
5392 | tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL); | ||
5393 | if (!tg->se) | ||
5394 | goto err; | ||
5395 | |||
5396 | tg->shares = NICE_0_LOAD; | ||
5397 | |||
5398 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); | ||
5399 | |||
5400 | for_each_possible_cpu(i) { | ||
5401 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), | ||
5402 | GFP_KERNEL, cpu_to_node(i)); | ||
5403 | if (!cfs_rq) | ||
5404 | goto err; | ||
5405 | |||
5406 | se = kzalloc_node(sizeof(struct sched_entity), | ||
5407 | GFP_KERNEL, cpu_to_node(i)); | ||
5408 | if (!se) | ||
5409 | goto err_free_rq; | ||
5410 | |||
5411 | init_cfs_rq(cfs_rq); | ||
5412 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); | ||
5413 | } | ||
5414 | |||
5415 | return 1; | ||
5416 | |||
5417 | err_free_rq: | ||
5418 | kfree(cfs_rq); | ||
5419 | err: | ||
5420 | return 0; | ||
5421 | } | ||
5422 | |||
5423 | void unregister_fair_sched_group(struct task_group *tg, int cpu) | ||
5424 | { | ||
5425 | struct rq *rq = cpu_rq(cpu); | ||
5426 | unsigned long flags; | ||
5427 | |||
5428 | /* | ||
5429 | * Only empty task groups can be destroyed, so we can speculatively | ||
5430 | * check on_list without danger of it being re-added. | ||
5431 | */ | ||
5432 | if (!tg->cfs_rq[cpu]->on_list) | ||
5433 | return; | ||
5434 | |||
5435 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
5436 | list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); | ||
5437 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
5438 | } | ||
5439 | |||
5440 | void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, | ||
5441 | struct sched_entity *se, int cpu, | ||
5442 | struct sched_entity *parent) | ||
5443 | { | ||
5444 | struct rq *rq = cpu_rq(cpu); | ||
5445 | |||
5446 | cfs_rq->tg = tg; | ||
5447 | cfs_rq->rq = rq; | ||
5448 | #ifdef CONFIG_SMP | ||
5449 | /* allow initial update_cfs_load() to truncate */ | ||
5450 | cfs_rq->load_stamp = 1; | ||
5451 | #endif | ||
5452 | init_cfs_rq_runtime(cfs_rq); | ||
5453 | |||
5454 | tg->cfs_rq[cpu] = cfs_rq; | ||
5455 | tg->se[cpu] = se; | ||
5456 | |||
5457 | /* se could be NULL for root_task_group */ | ||
5458 | if (!se) | ||
5459 | return; | ||
5460 | |||
5461 | if (!parent) | ||
5462 | se->cfs_rq = &rq->cfs; | ||
5463 | else | ||
5464 | se->cfs_rq = parent->my_q; | ||
5465 | |||
5466 | se->my_q = cfs_rq; | ||
5467 | update_load_set(&se->load, 0); | ||
5468 | se->parent = parent; | ||
5469 | } | ||
5470 | |||
5471 | static DEFINE_MUTEX(shares_mutex); | ||
5472 | |||
5473 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) | ||
5474 | { | ||
5475 | int i; | ||
5476 | unsigned long flags; | ||
5477 | |||
5478 | /* | ||
5479 | * We can't change the weight of the root cgroup. | ||
5480 | */ | ||
5481 | if (!tg->se[0]) | ||
5482 | return -EINVAL; | ||
5483 | |||
5484 | shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); | ||
5485 | |||
5486 | mutex_lock(&shares_mutex); | ||
5487 | if (tg->shares == shares) | ||
5488 | goto done; | ||
5489 | |||
5490 | tg->shares = shares; | ||
5491 | for_each_possible_cpu(i) { | ||
5492 | struct rq *rq = cpu_rq(i); | ||
5493 | struct sched_entity *se; | ||
5494 | |||
5495 | se = tg->se[i]; | ||
5496 | /* Propagate contribution to hierarchy */ | ||
5497 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
5498 | for_each_sched_entity(se) | ||
5499 | update_cfs_shares(group_cfs_rq(se)); | ||
5500 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
5501 | } | ||
5502 | |||
5503 | done: | ||
5504 | mutex_unlock(&shares_mutex); | ||
5505 | return 0; | ||
5506 | } | ||
5507 | #else /* CONFIG_FAIR_GROUP_SCHED */ | ||
5508 | |||
5509 | void free_fair_sched_group(struct task_group *tg) { } | ||
5510 | |||
5511 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | ||
5512 | { | ||
5513 | return 1; | ||
5514 | } | ||
5515 | |||
5516 | void unregister_fair_sched_group(struct task_group *tg, int cpu) { } | ||
5517 | |||
5518 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
5519 | |||
5520 | |||
5521 | static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) | ||
5522 | { | ||
5523 | struct sched_entity *se = &task->se; | ||
5524 | unsigned int rr_interval = 0; | ||
5525 | |||
5526 | /* | ||
5527 | * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise | ||
5528 | * idle runqueue: | ||
5529 | */ | ||
5530 | if (rq->cfs.load.weight) | ||
5531 | rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); | ||
5532 | |||
5533 | return rr_interval; | ||
5534 | } | ||
5535 | |||
5536 | /* | ||
5537 | * All the scheduling class methods: | ||
5538 | */ | ||
5539 | const struct sched_class fair_sched_class = { | ||
5540 | .next = &idle_sched_class, | ||
5541 | .enqueue_task = enqueue_task_fair, | ||
5542 | .dequeue_task = dequeue_task_fair, | ||
5543 | .yield_task = yield_task_fair, | ||
5544 | .yield_to_task = yield_to_task_fair, | ||
5545 | |||
5546 | .check_preempt_curr = check_preempt_wakeup, | ||
5547 | |||
5548 | .pick_next_task = pick_next_task_fair, | ||
5549 | .put_prev_task = put_prev_task_fair, | ||
5550 | |||
5551 | #ifdef CONFIG_SMP | ||
5552 | .select_task_rq = select_task_rq_fair, | ||
5553 | |||
5554 | .rq_online = rq_online_fair, | ||
5555 | .rq_offline = rq_offline_fair, | ||
5556 | |||
5557 | .task_waking = task_waking_fair, | ||
5558 | #endif | ||
5559 | |||
5560 | .set_curr_task = set_curr_task_fair, | ||
5561 | .task_tick = task_tick_fair, | ||
5562 | .task_fork = task_fork_fair, | ||
5563 | |||
5564 | .prio_changed = prio_changed_fair, | ||
5565 | .switched_from = switched_from_fair, | ||
5566 | .switched_to = switched_to_fair, | ||
5567 | |||
5568 | .get_rr_interval = get_rr_interval_fair, | ||
5569 | |||
5570 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
5571 | .task_move_group = task_move_group_fair, | ||
5572 | #endif | ||
5573 | }; | ||
5574 | |||
5575 | #ifdef CONFIG_SCHED_DEBUG | ||
5576 | void print_cfs_stats(struct seq_file *m, int cpu) | ||
5577 | { | ||
5578 | struct cfs_rq *cfs_rq; | ||
5579 | |||
5580 | rcu_read_lock(); | ||
5581 | for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) | ||
5582 | print_cfs_rq(m, cpu, cfs_rq); | ||
5583 | rcu_read_unlock(); | ||
5584 | } | ||
5585 | #endif | ||
5586 | |||
5587 | __init void init_sched_fair_class(void) | ||
5588 | { | ||
5589 | #ifdef CONFIG_SMP | ||
5590 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); | ||
5591 | |||
5592 | #ifdef CONFIG_NO_HZ | ||
5593 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); | ||
5594 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); | ||
5595 | atomic_set(&nohz.load_balancer, nr_cpu_ids); | ||
5596 | atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); | ||
5597 | atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); | ||
5598 | #endif | ||
5599 | #endif /* SMP */ | ||
5600 | |||
5601 | } | ||