author    | Michal Marek <mmarek@suse.cz> | 2010-08-04 08:05:07 -0400
committer | Michal Marek <mmarek@suse.cz> | 2010-08-04 08:05:07 -0400
commit    | 7a996d3ab150bb0e1b71fa182f70199a703efdd1 (patch)
tree      | 96a36947d90c9b96580899abd38cb3b70cd9d40b /kernel/sched.c
parent    | 7cf3d73b4360e91b14326632ab1aeda4cb26308d (diff)
parent    | 9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kconfig
Conflicts:
scripts/kconfig/Makefile
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 3093
1 file changed, 524 insertions, 2569 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 4508fe7048be..f52a8801b7a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,9 +55,9 @@ | |||
55 | #include <linux/cpu.h> | 55 | #include <linux/cpu.h> |
56 | #include <linux/cpuset.h> | 56 | #include <linux/cpuset.h> |
57 | #include <linux/percpu.h> | 57 | #include <linux/percpu.h> |
58 | #include <linux/kthread.h> | ||
59 | #include <linux/proc_fs.h> | 58 | #include <linux/proc_fs.h> |
60 | #include <linux/seq_file.h> | 59 | #include <linux/seq_file.h> |
60 | #include <linux/stop_machine.h> | ||
61 | #include <linux/sysctl.h> | 61 | #include <linux/sysctl.h> |
62 | #include <linux/syscalls.h> | 62 | #include <linux/syscalls.h> |
63 | #include <linux/times.h> | 63 | #include <linux/times.h> |
@@ -71,6 +71,7 @@ | |||
71 | #include <linux/debugfs.h> | 71 | #include <linux/debugfs.h> |
72 | #include <linux/ctype.h> | 72 | #include <linux/ctype.h> |
73 | #include <linux/ftrace.h> | 73 | #include <linux/ftrace.h> |
74 | #include <linux/slab.h> | ||
74 | 75 | ||
75 | #include <asm/tlb.h> | 76 | #include <asm/tlb.h> |
76 | #include <asm/irq_regs.h> | 77 | #include <asm/irq_regs.h> |
@@ -233,7 +234,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
233 | */ | 234 | */ |
234 | static DEFINE_MUTEX(sched_domains_mutex); | 235 | static DEFINE_MUTEX(sched_domains_mutex); |
235 | 236 | ||
236 | #ifdef CONFIG_GROUP_SCHED | 237 | #ifdef CONFIG_CGROUP_SCHED |
237 | 238 | ||
238 | #include <linux/cgroup.h> | 239 | #include <linux/cgroup.h> |
239 | 240 | ||
@@ -243,13 +244,7 @@ static LIST_HEAD(task_groups); | |||
243 | 244 | ||
244 | /* task group related information */ | 245 | /* task group related information */ |
245 | struct task_group { | 246 | struct task_group { |
246 | #ifdef CONFIG_CGROUP_SCHED | ||
247 | struct cgroup_subsys_state css; | 247 | struct cgroup_subsys_state css; |
248 | #endif | ||
249 | |||
250 | #ifdef CONFIG_USER_SCHED | ||
251 | uid_t uid; | ||
252 | #endif | ||
253 | 248 | ||
254 | #ifdef CONFIG_FAIR_GROUP_SCHED | 249 | #ifdef CONFIG_FAIR_GROUP_SCHED |
255 | /* schedulable entities of this group on each cpu */ | 250 | /* schedulable entities of this group on each cpu */ |
@@ -274,35 +269,7 @@ struct task_group { | |||
274 | struct list_head children; | 269 | struct list_head children; |
275 | }; | 270 | }; |
276 | 271 | ||
277 | #ifdef CONFIG_USER_SCHED | ||
278 | |||
279 | /* Helper function to pass uid information to create_sched_user() */ | ||
280 | void set_tg_uid(struct user_struct *user) | ||
281 | { | ||
282 | user->tg->uid = user->uid; | ||
283 | } | ||
284 | |||
285 | /* | ||
286 | * Root task group. | ||
287 | * Every UID task group (including init_task_group aka UID-0) will | ||
288 | * be a child to this group. | ||
289 | */ | ||
290 | struct task_group root_task_group; | ||
291 | |||
292 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
293 | /* Default task group's sched entity on each cpu */ | ||
294 | static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); | ||
295 | /* Default task group's cfs_rq on each cpu */ | ||
296 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq); | ||
297 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | ||
298 | |||
299 | #ifdef CONFIG_RT_GROUP_SCHED | ||
300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | ||
301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var); | ||
302 | #endif /* CONFIG_RT_GROUP_SCHED */ | ||
303 | #else /* !CONFIG_USER_SCHED */ | ||
304 | #define root_task_group init_task_group | 272 | #define root_task_group init_task_group |
305 | #endif /* CONFIG_USER_SCHED */ | ||
306 | 273 | ||
307 | /* task_group_lock serializes add/remove of task groups and also changes to | 274 | /* task_group_lock serializes add/remove of task groups and also changes to |
308 | * a task group's cpu shares. | 275 | * a task group's cpu shares. |
@@ -318,11 +285,7 @@ static int root_task_group_empty(void) | |||
318 | } | 285 | } |
319 | #endif | 286 | #endif |
320 | 287 | ||
321 | #ifdef CONFIG_USER_SCHED | ||
322 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) | ||
323 | #else /* !CONFIG_USER_SCHED */ | ||
324 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD | 288 | # define INIT_TASK_GROUP_LOAD NICE_0_LOAD |
325 | #endif /* CONFIG_USER_SCHED */ | ||
326 | 289 | ||
327 | /* | 290 | /* |
328 | * A weight of 0 or 1 can cause arithmetics problems. | 291 | * A weight of 0 or 1 can cause arithmetics problems. |
@@ -343,47 +306,7 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD; | |||
343 | */ | 306 | */ |
344 | struct task_group init_task_group; | 307 | struct task_group init_task_group; |
345 | 308 | ||
346 | /* return group to which a task belongs */ | 309 | #endif /* CONFIG_CGROUP_SCHED */ |
347 | static inline struct task_group *task_group(struct task_struct *p) | ||
348 | { | ||
349 | struct task_group *tg; | ||
350 | |||
351 | #ifdef CONFIG_USER_SCHED | ||
352 | rcu_read_lock(); | ||
353 | tg = __task_cred(p)->user->tg; | ||
354 | rcu_read_unlock(); | ||
355 | #elif defined(CONFIG_CGROUP_SCHED) | ||
356 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | ||
357 | struct task_group, css); | ||
358 | #else | ||
359 | tg = &init_task_group; | ||
360 | #endif | ||
361 | return tg; | ||
362 | } | ||
363 | |||
364 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ | ||
365 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | ||
366 | { | ||
367 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
368 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; | ||
369 | p->se.parent = task_group(p)->se[cpu]; | ||
370 | #endif | ||
371 | |||
372 | #ifdef CONFIG_RT_GROUP_SCHED | ||
373 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; | ||
374 | p->rt.parent = task_group(p)->rt_se[cpu]; | ||
375 | #endif | ||
376 | } | ||
377 | |||
378 | #else | ||
379 | |||
380 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } | ||
381 | static inline struct task_group *task_group(struct task_struct *p) | ||
382 | { | ||
383 | return NULL; | ||
384 | } | ||
385 | |||
386 | #endif /* CONFIG_GROUP_SCHED */ | ||
387 | 310 | ||
388 | /* CFS-related fields in a runqueue */ | 311 | /* CFS-related fields in a runqueue */ |
389 | struct cfs_rq { | 312 | struct cfs_rq { |
@@ -478,7 +401,6 @@ struct rt_rq { | |||
478 | struct rq *rq; | 401 | struct rq *rq; |
479 | struct list_head leaf_rt_rq_list; | 402 | struct list_head leaf_rt_rq_list; |
480 | struct task_group *tg; | 403 | struct task_group *tg; |
481 | struct sched_rt_entity *rt_se; | ||
482 | #endif | 404 | #endif |
483 | }; | 405 | }; |
484 | 406 | ||
@@ -535,8 +457,11 @@ struct rq { | |||
535 | #define CPU_LOAD_IDX_MAX 5 | 457 | #define CPU_LOAD_IDX_MAX 5 |
536 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; | 458 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
537 | #ifdef CONFIG_NO_HZ | 459 | #ifdef CONFIG_NO_HZ |
460 | u64 nohz_stamp; | ||
538 | unsigned char in_nohz_recently; | 461 | unsigned char in_nohz_recently; |
539 | #endif | 462 | #endif |
463 | unsigned int skip_clock_update; | ||
464 | |||
540 | /* capture load from *all* tasks on this cpu: */ | 465 | /* capture load from *all* tasks on this cpu: */ |
541 | struct load_weight load; | 466 | struct load_weight load; |
542 | unsigned long nr_load_updates; | 467 | unsigned long nr_load_updates; |
@@ -573,20 +498,20 @@ struct rq { | |||
573 | struct root_domain *rd; | 498 | struct root_domain *rd; |
574 | struct sched_domain *sd; | 499 | struct sched_domain *sd; |
575 | 500 | ||
501 | unsigned long cpu_power; | ||
502 | |||
576 | unsigned char idle_at_tick; | 503 | unsigned char idle_at_tick; |
577 | /* For active balancing */ | 504 | /* For active balancing */ |
578 | int post_schedule; | 505 | int post_schedule; |
579 | int active_balance; | 506 | int active_balance; |
580 | int push_cpu; | 507 | int push_cpu; |
508 | struct cpu_stop_work active_balance_work; | ||
581 | /* cpu of this runqueue: */ | 509 | /* cpu of this runqueue: */ |
582 | int cpu; | 510 | int cpu; |
583 | int online; | 511 | int online; |
584 | 512 | ||
585 | unsigned long avg_load_per_task; | 513 | unsigned long avg_load_per_task; |
586 | 514 | ||
587 | struct task_struct *migration_thread; | ||
588 | struct list_head migration_queue; | ||
589 | |||
590 | u64 rt_avg; | 515 | u64 rt_avg; |
591 | u64 age_stamp; | 516 | u64 age_stamp; |
592 | u64 idle_stamp; | 517 | u64 idle_stamp; |
@@ -634,6 +559,13 @@ static inline | |||
634 | void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | 559 | void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) |
635 | { | 560 | { |
636 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); | 561 | rq->curr->sched_class->check_preempt_curr(rq, p, flags); |
562 | |||
563 | /* | ||
564 | * A queue event has occurred, and we're going to schedule. In | ||
565 | * this case, we can save a useless back to back clock update. | ||
566 | */ | ||
567 | if (test_tsk_need_resched(p)) | ||
568 | rq->skip_clock_update = 1; | ||
637 | } | 569 | } |
638 | 570 | ||
639 | static inline int cpu_of(struct rq *rq) | 571 | static inline int cpu_of(struct rq *rq) |
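The skip_clock_update flag set here pairs with the update_rq_clock() change further down in this diff: once a queue event has already marked the current task for rescheduling, the back-to-back clock update is redundant and can be skipped. A minimal sketch of that interplay, using a toy structure and a fake time source rather than the kernel's real struct rq:

```c
#include <stdio.h>

/* Toy stand-in for struct rq; the real kernel structure is far larger. */
struct toy_rq {
	unsigned long long clock;
	int skip_clock_update;
};

static unsigned long long read_clock(void)
{
	static unsigned long long t;
	return t += 1000;			/* pretend nanosecond clock */
}

static void toy_update_rq_clock(struct toy_rq *rq)
{
	if (!rq->skip_clock_update)		/* mirrors the new check in update_rq_clock() */
		rq->clock = read_clock();
}

static void toy_check_preempt(struct toy_rq *rq, int need_resched)
{
	if (need_resched)			/* a reschedule is already pending ... */
		rq->skip_clock_update = 1;	/* ... so skip the useless clock update */
}

int main(void)
{
	struct toy_rq rq = { 0, 0 };

	toy_update_rq_clock(&rq);		/* clock advances */
	toy_check_preempt(&rq, 1);
	toy_update_rq_clock(&rq);		/* skipped this time */
	printf("clock=%llu skip=%d\n", rq.clock, rq.skip_clock_update);
	return 0;
}
```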
@@ -645,6 +577,11 @@ static inline int cpu_of(struct rq *rq) | |||
645 | #endif | 577 | #endif |
646 | } | 578 | } |
647 | 579 | ||
580 | #define rcu_dereference_check_sched_domain(p) \ | ||
581 | rcu_dereference_check((p), \ | ||
582 | rcu_read_lock_sched_held() || \ | ||
583 | lockdep_is_held(&sched_domains_mutex)) | ||
584 | |||
648 | /* | 585 | /* |
649 | * The domain tree (rq->sd) is protected by RCU's quiescent state transition. | 586 | * The domain tree (rq->sd) is protected by RCU's quiescent state transition. |
650 | * See detach_destroy_domains: synchronize_sched for details. | 587 | * See detach_destroy_domains: synchronize_sched for details. |
@@ -653,7 +590,7 @@ static inline int cpu_of(struct rq *rq) | |||
653 | * preempt-disabled sections. | 590 | * preempt-disabled sections. |
654 | */ | 591 | */ |
655 | #define for_each_domain(cpu, __sd) \ | 592 | #define for_each_domain(cpu, __sd) \ |
656 | for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) | 593 | for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) |
657 | 594 | ||
658 | #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) | 595 | #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) |
659 | #define this_rq() (&__get_cpu_var(runqueues)) | 596 | #define this_rq() (&__get_cpu_var(runqueues)) |
@@ -661,9 +598,53 @@ static inline int cpu_of(struct rq *rq) | |||
661 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) | 598 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
662 | #define raw_rq() (&__raw_get_cpu_var(runqueues)) | 599 | #define raw_rq() (&__raw_get_cpu_var(runqueues)) |
663 | 600 | ||
601 | #ifdef CONFIG_CGROUP_SCHED | ||
602 | |||
603 | /* | ||
604 | * Return the group to which this tasks belongs. | ||
605 | * | ||
606 | * We use task_subsys_state_check() and extend the RCU verification | ||
607 | * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() | ||
608 | * holds that lock for each task it moves into the cgroup. Therefore | ||
609 | * by holding that lock, we pin the task to the current cgroup. | ||
610 | */ | ||
611 | static inline struct task_group *task_group(struct task_struct *p) | ||
612 | { | ||
613 | struct cgroup_subsys_state *css; | ||
614 | |||
615 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, | ||
616 | lockdep_is_held(&task_rq(p)->lock)); | ||
617 | return container_of(css, struct task_group, css); | ||
618 | } | ||
619 | |||
620 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ | ||
621 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | ||
622 | { | ||
623 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
624 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; | ||
625 | p->se.parent = task_group(p)->se[cpu]; | ||
626 | #endif | ||
627 | |||
628 | #ifdef CONFIG_RT_GROUP_SCHED | ||
629 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; | ||
630 | p->rt.parent = task_group(p)->rt_se[cpu]; | ||
631 | #endif | ||
632 | } | ||
633 | |||
634 | #else /* CONFIG_CGROUP_SCHED */ | ||
635 | |||
636 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } | ||
637 | static inline struct task_group *task_group(struct task_struct *p) | ||
638 | { | ||
639 | return NULL; | ||
640 | } | ||
641 | |||
642 | #endif /* CONFIG_CGROUP_SCHED */ | ||
643 | |||
664 | inline void update_rq_clock(struct rq *rq) | 644 | inline void update_rq_clock(struct rq *rq) |
665 | { | 645 | { |
666 | rq->clock = sched_clock_cpu(cpu_of(rq)); | 646 | if (!rq->skip_clock_update) |
647 | rq->clock = sched_clock_cpu(cpu_of(rq)); | ||
667 | } | 648 | } |
668 | 649 | ||
669 | /* | 650 | /* |
@@ -941,14 +922,25 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
941 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ | 922 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
942 | 923 | ||
943 | /* | 924 | /* |
925 | * Check whether the task is waking, we use this to synchronize ->cpus_allowed | ||
926 | * against ttwu(). | ||
927 | */ | ||
928 | static inline int task_is_waking(struct task_struct *p) | ||
929 | { | ||
930 | return unlikely(p->state == TASK_WAKING); | ||
931 | } | ||
932 | |||
933 | /* | ||
944 | * __task_rq_lock - lock the runqueue a given task resides on. | 934 | * __task_rq_lock - lock the runqueue a given task resides on. |
945 | * Must be called interrupts disabled. | 935 | * Must be called interrupts disabled. |
946 | */ | 936 | */ |
947 | static inline struct rq *__task_rq_lock(struct task_struct *p) | 937 | static inline struct rq *__task_rq_lock(struct task_struct *p) |
948 | __acquires(rq->lock) | 938 | __acquires(rq->lock) |
949 | { | 939 | { |
940 | struct rq *rq; | ||
941 | |||
950 | for (;;) { | 942 | for (;;) { |
951 | struct rq *rq = task_rq(p); | 943 | rq = task_rq(p); |
952 | raw_spin_lock(&rq->lock); | 944 | raw_spin_lock(&rq->lock); |
953 | if (likely(rq == task_rq(p))) | 945 | if (likely(rq == task_rq(p))) |
954 | return rq; | 946 | return rq; |
@@ -976,14 +968,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) | |||
976 | } | 968 | } |
977 | } | 969 | } |
978 | 970 | ||
979 | void task_rq_unlock_wait(struct task_struct *p) | ||
980 | { | ||
981 | struct rq *rq = task_rq(p); | ||
982 | |||
983 | smp_mb(); /* spin-unlock-wait is not a full memory barrier */ | ||
984 | raw_spin_unlock_wait(&rq->lock); | ||
985 | } | ||
986 | |||
987 | static void __task_rq_unlock(struct rq *rq) | 971 | static void __task_rq_unlock(struct rq *rq) |
988 | __releases(rq->lock) | 972 | __releases(rq->lock) |
989 | { | 973 | { |
@@ -1247,6 +1231,17 @@ void wake_up_idle_cpu(int cpu) | |||
1247 | if (!tsk_is_polling(rq->idle)) | 1231 | if (!tsk_is_polling(rq->idle)) |
1248 | smp_send_reschedule(cpu); | 1232 | smp_send_reschedule(cpu); |
1249 | } | 1233 | } |
1234 | |||
1235 | int nohz_ratelimit(int cpu) | ||
1236 | { | ||
1237 | struct rq *rq = cpu_rq(cpu); | ||
1238 | u64 diff = rq->clock - rq->nohz_stamp; | ||
1239 | |||
1240 | rq->nohz_stamp = rq->clock; | ||
1241 | |||
1242 | return diff < (NSEC_PER_SEC / HZ) >> 1; | ||
1243 | } | ||
1244 | |||
1250 | #endif /* CONFIG_NO_HZ */ | 1245 | #endif /* CONFIG_NO_HZ */ |
1251 | 1246 | ||
1252 | static u64 sched_avg_period(void) | 1247 | static u64 sched_avg_period(void) |
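The threshold used by the new nohz_ratelimit() is half of one scheduler tick, (NSEC_PER_SEC / HZ) >> 1; with HZ=1000 a tick is 1,000,000 ns, so a CPU whose last nohz stamp is less than 500,000 ns old is rate-limited. A standalone sketch of the same comparison (the HZ value is assumed for illustration):

```c
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		1000		/* assumed tick rate for this example */

/* Nonzero if 'now' is within half a tick of the previous stamp. */
static int nohz_ratelimited(unsigned long long now, unsigned long long stamp)
{
	unsigned long long diff = now - stamp;

	return diff < ((NSEC_PER_SEC / HZ) >> 1);	/* < 500000 ns with HZ=1000 */
}

int main(void)
{
	printf("%d\n", nohz_ratelimited(1200000, 900000));	/* 300000 ns apart -> 1 */
	printf("%d\n", nohz_ratelimited(2000000, 900000));	/* 1100000 ns apart -> 0 */
	return 0;
}
```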
@@ -1259,6 +1254,12 @@ static void sched_avg_update(struct rq *rq) | |||
1259 | s64 period = sched_avg_period(); | 1254 | s64 period = sched_avg_period(); |
1260 | 1255 | ||
1261 | while ((s64)(rq->clock - rq->age_stamp) > period) { | 1256 | while ((s64)(rq->clock - rq->age_stamp) > period) { |
1257 | /* | ||
1258 | * Inline assembly required to prevent the compiler | ||
1259 | * optimising this loop into a divmod call. | ||
1260 | * See __iter_div_u64_rem() for another example of this. | ||
1261 | */ | ||
1262 | asm("" : "+rm" (rq->age_stamp)); | ||
1262 | rq->age_stamp += period; | 1263 | rq->age_stamp += period; |
1263 | rq->rt_avg /= 2; | 1264 | rq->rt_avg /= 2; |
1264 | } | 1265 | } |
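The empty asm statement added to sched_avg_update() is a compiler-only barrier: listing rq->age_stamp as a read-write operand ("+rm") hides the loop's regular structure from the optimizer, so the repeated add-and-halve cannot be collapsed into a slow 64-bit division/modulo call, as the comment notes with its pointer to __iter_div_u64_rem(). The same idiom in isolation (GCC/Clang extended asm, illustrative function name):

```c
/*
 * Advance 'stamp' toward 'now' one period at a time; the loop must stay a
 * loop rather than be optimised into a 64-bit divmod helper call.
 */
static unsigned long long advance_stamp(unsigned long long stamp,
					unsigned long long now,
					unsigned long long period)
{
	while (now - stamp > period) {
		/*
		 * Empty asm marks 'stamp' as read and written ("+rm"), so the
		 * compiler can no longer prove the loop is a plain division.
		 */
		asm("" : "+rm" (stamp));
		stamp += period;
	}
	return stamp;
}
```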
@@ -1390,32 +1391,6 @@ static const u32 prio_to_wmult[40] = { | |||
1390 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, | 1391 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
1391 | }; | 1392 | }; |
1392 | 1393 | ||
1393 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup); | ||
1394 | |||
1395 | /* | ||
1396 | * runqueue iterator, to support SMP load-balancing between different | ||
1397 | * scheduling classes, without having to expose their internal data | ||
1398 | * structures to the load-balancing proper: | ||
1399 | */ | ||
1400 | struct rq_iterator { | ||
1401 | void *arg; | ||
1402 | struct task_struct *(*start)(void *); | ||
1403 | struct task_struct *(*next)(void *); | ||
1404 | }; | ||
1405 | |||
1406 | #ifdef CONFIG_SMP | ||
1407 | static unsigned long | ||
1408 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
1409 | unsigned long max_load_move, struct sched_domain *sd, | ||
1410 | enum cpu_idle_type idle, int *all_pinned, | ||
1411 | int *this_best_prio, struct rq_iterator *iterator); | ||
1412 | |||
1413 | static int | ||
1414 | iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
1415 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
1416 | struct rq_iterator *iterator); | ||
1417 | #endif | ||
1418 | |||
1419 | /* Time spent by the tasks of the cpu accounting group executing in ... */ | 1394 | /* Time spent by the tasks of the cpu accounting group executing in ... */ |
1420 | enum cpuacct_stat_index { | 1395 | enum cpuacct_stat_index { |
1421 | CPUACCT_STAT_USER, /* ... user mode */ | 1396 | CPUACCT_STAT_USER, /* ... user mode */ |
@@ -1529,24 +1504,9 @@ static unsigned long target_load(int cpu, int type) | |||
1529 | return max(rq->cpu_load[type-1], total); | 1504 | return max(rq->cpu_load[type-1], total); |
1530 | } | 1505 | } |
1531 | 1506 | ||
1532 | static struct sched_group *group_of(int cpu) | ||
1533 | { | ||
1534 | struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd); | ||
1535 | |||
1536 | if (!sd) | ||
1537 | return NULL; | ||
1538 | |||
1539 | return sd->groups; | ||
1540 | } | ||
1541 | |||
1542 | static unsigned long power_of(int cpu) | 1507 | static unsigned long power_of(int cpu) |
1543 | { | 1508 | { |
1544 | struct sched_group *group = group_of(cpu); | 1509 | return cpu_rq(cpu)->cpu_power; |
1545 | |||
1546 | if (!group) | ||
1547 | return SCHED_LOAD_SCALE; | ||
1548 | |||
1549 | return group->cpu_power; | ||
1550 | } | 1510 | } |
1551 | 1511 | ||
1552 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | 1512 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); |
@@ -1566,7 +1526,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1566 | 1526 | ||
1567 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1527 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1568 | 1528 | ||
1569 | static __read_mostly unsigned long *update_shares_data; | 1529 | static __read_mostly unsigned long __percpu *update_shares_data; |
1570 | 1530 | ||
1571 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1531 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1572 | 1532 | ||
@@ -1701,21 +1661,8 @@ static void update_shares(struct sched_domain *sd) | |||
1701 | } | 1661 | } |
1702 | } | 1662 | } |
1703 | 1663 | ||
1704 | static void update_shares_locked(struct rq *rq, struct sched_domain *sd) | ||
1705 | { | ||
1706 | if (root_task_group_empty()) | ||
1707 | return; | ||
1708 | |||
1709 | raw_spin_unlock(&rq->lock); | ||
1710 | update_shares(sd); | ||
1711 | raw_spin_lock(&rq->lock); | ||
1712 | } | ||
1713 | |||
1714 | static void update_h_load(long cpu) | 1664 | static void update_h_load(long cpu) |
1715 | { | 1665 | { |
1716 | if (root_task_group_empty()) | ||
1717 | return; | ||
1718 | |||
1719 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); | 1666 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
1720 | } | 1667 | } |
1721 | 1668 | ||
@@ -1725,10 +1672,6 @@ static inline void update_shares(struct sched_domain *sd) | |||
1725 | { | 1672 | { |
1726 | } | 1673 | } |
1727 | 1674 | ||
1728 | static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | ||
1729 | { | ||
1730 | } | ||
1731 | |||
1732 | #endif | 1675 | #endif |
1733 | 1676 | ||
1734 | #ifdef CONFIG_PREEMPT | 1677 | #ifdef CONFIG_PREEMPT |
@@ -1805,6 +1748,49 @@ static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | |||
1805 | raw_spin_unlock(&busiest->lock); | 1748 | raw_spin_unlock(&busiest->lock); |
1806 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | 1749 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
1807 | } | 1750 | } |
1751 | |||
1752 | /* | ||
1753 | * double_rq_lock - safely lock two runqueues | ||
1754 | * | ||
1755 | * Note this does not disable interrupts like task_rq_lock, | ||
1756 | * you need to do so manually before calling. | ||
1757 | */ | ||
1758 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) | ||
1759 | __acquires(rq1->lock) | ||
1760 | __acquires(rq2->lock) | ||
1761 | { | ||
1762 | BUG_ON(!irqs_disabled()); | ||
1763 | if (rq1 == rq2) { | ||
1764 | raw_spin_lock(&rq1->lock); | ||
1765 | __acquire(rq2->lock); /* Fake it out ;) */ | ||
1766 | } else { | ||
1767 | if (rq1 < rq2) { | ||
1768 | raw_spin_lock(&rq1->lock); | ||
1769 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); | ||
1770 | } else { | ||
1771 | raw_spin_lock(&rq2->lock); | ||
1772 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); | ||
1773 | } | ||
1774 | } | ||
1775 | } | ||
1776 | |||
1777 | /* | ||
1778 | * double_rq_unlock - safely unlock two runqueues | ||
1779 | * | ||
1780 | * Note this does not restore interrupts like task_rq_unlock, | ||
1781 | * you need to do so manually after calling. | ||
1782 | */ | ||
1783 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | ||
1784 | __releases(rq1->lock) | ||
1785 | __releases(rq2->lock) | ||
1786 | { | ||
1787 | raw_spin_unlock(&rq1->lock); | ||
1788 | if (rq1 != rq2) | ||
1789 | raw_spin_unlock(&rq2->lock); | ||
1790 | else | ||
1791 | __release(rq2->lock); | ||
1792 | } | ||
1793 | |||
1808 | #endif | 1794 | #endif |
1809 | 1795 | ||
1810 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1796 | #ifdef CONFIG_FAIR_GROUP_SCHED |
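double_rq_lock() and double_rq_unlock(), moved up here from their old location later in the file, avoid ABBA deadlock by always taking the lower-addressed runqueue lock first, and, as the comments say, leave interrupt disabling and restoring to the caller. A sketch of the ordering idea with pthread mutexes standing in for raw spinlocks (toy types, not kernel API):

```c
#include <pthread.h>

struct toy_rq {
	pthread_mutex_t lock;
	/* ... per-CPU state ... */
};

/*
 * Lock two runqueues in a fixed (address) order so that two CPUs doing
 * toy_double_lock(a, b) and toy_double_lock(b, a) can never deadlock.
 */
static void toy_double_lock(struct toy_rq *rq1, struct toy_rq *rq2)
{
	if (rq1 == rq2) {
		pthread_mutex_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		pthread_mutex_lock(&rq1->lock);
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
}

static void toy_double_unlock(struct toy_rq *rq1, struct toy_rq *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
}
```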
@@ -1816,7 +1802,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
1816 | } | 1802 | } |
1817 | #endif | 1803 | #endif |
1818 | 1804 | ||
1819 | static void calc_load_account_active(struct rq *this_rq); | 1805 | static void calc_load_account_idle(struct rq *this_rq); |
1820 | static void update_sysctl(void); | 1806 | static void update_sysctl(void); |
1821 | static int get_update_sysctl_factor(void); | 1807 | static int get_update_sysctl_factor(void); |
1822 | 1808 | ||
@@ -1834,18 +1820,14 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
1834 | #endif | 1820 | #endif |
1835 | } | 1821 | } |
1836 | 1822 | ||
1837 | #include "sched_stats.h" | 1823 | static const struct sched_class rt_sched_class; |
1838 | #include "sched_idletask.c" | ||
1839 | #include "sched_fair.c" | ||
1840 | #include "sched_rt.c" | ||
1841 | #ifdef CONFIG_SCHED_DEBUG | ||
1842 | # include "sched_debug.c" | ||
1843 | #endif | ||
1844 | 1824 | ||
1845 | #define sched_class_highest (&rt_sched_class) | 1825 | #define sched_class_highest (&rt_sched_class) |
1846 | #define for_each_class(class) \ | 1826 | #define for_each_class(class) \ |
1847 | for (class = sched_class_highest; class; class = class->next) | 1827 | for (class = sched_class_highest; class; class = class->next) |
1848 | 1828 | ||
1829 | #include "sched_stats.h" | ||
1830 | |||
1849 | static void inc_nr_running(struct rq *rq) | 1831 | static void inc_nr_running(struct rq *rq) |
1850 | { | 1832 | { |
1851 | rq->nr_running++; | 1833 | rq->nr_running++; |
@@ -1859,8 +1841,8 @@ static void dec_nr_running(struct rq *rq) | |||
1859 | static void set_load_weight(struct task_struct *p) | 1841 | static void set_load_weight(struct task_struct *p) |
1860 | { | 1842 | { |
1861 | if (task_has_rt_policy(p)) { | 1843 | if (task_has_rt_policy(p)) { |
1862 | p->se.load.weight = prio_to_weight[0] * 2; | 1844 | p->se.load.weight = 0; |
1863 | p->se.load.inv_weight = prio_to_wmult[0] >> 1; | 1845 | p->se.load.inv_weight = WMULT_CONST; |
1864 | return; | 1846 | return; |
1865 | } | 1847 | } |
1866 | 1848 | ||
@@ -1877,40 +1859,53 @@ static void set_load_weight(struct task_struct *p) | |||
1877 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; | 1859 | p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; |
1878 | } | 1860 | } |
1879 | 1861 | ||
1880 | static void update_avg(u64 *avg, u64 sample) | 1862 | static void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
1881 | { | 1863 | { |
1882 | s64 diff = sample - *avg; | 1864 | update_rq_clock(rq); |
1883 | *avg += diff >> 3; | 1865 | sched_info_queued(p); |
1866 | p->sched_class->enqueue_task(rq, p, flags); | ||
1867 | p->se.on_rq = 1; | ||
1884 | } | 1868 | } |
1885 | 1869 | ||
1886 | static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) | 1870 | static void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
1887 | { | 1871 | { |
1888 | if (wakeup) | 1872 | update_rq_clock(rq); |
1889 | p->se.start_runtime = p->se.sum_exec_runtime; | 1873 | sched_info_dequeued(p); |
1874 | p->sched_class->dequeue_task(rq, p, flags); | ||
1875 | p->se.on_rq = 0; | ||
1876 | } | ||
1890 | 1877 | ||
1891 | sched_info_queued(p); | 1878 | /* |
1892 | p->sched_class->enqueue_task(rq, p, wakeup); | 1879 | * activate_task - move a task to the runqueue. |
1893 | p->se.on_rq = 1; | 1880 | */ |
1881 | static void activate_task(struct rq *rq, struct task_struct *p, int flags) | ||
1882 | { | ||
1883 | if (task_contributes_to_load(p)) | ||
1884 | rq->nr_uninterruptible--; | ||
1885 | |||
1886 | enqueue_task(rq, p, flags); | ||
1887 | inc_nr_running(rq); | ||
1894 | } | 1888 | } |
1895 | 1889 | ||
1896 | static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) | 1890 | /* |
1891 | * deactivate_task - remove a task from the runqueue. | ||
1892 | */ | ||
1893 | static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) | ||
1897 | { | 1894 | { |
1898 | if (sleep) { | 1895 | if (task_contributes_to_load(p)) |
1899 | if (p->se.last_wakeup) { | 1896 | rq->nr_uninterruptible++; |
1900 | update_avg(&p->se.avg_overlap, | ||
1901 | p->se.sum_exec_runtime - p->se.last_wakeup); | ||
1902 | p->se.last_wakeup = 0; | ||
1903 | } else { | ||
1904 | update_avg(&p->se.avg_wakeup, | ||
1905 | sysctl_sched_wakeup_granularity); | ||
1906 | } | ||
1907 | } | ||
1908 | 1897 | ||
1909 | sched_info_dequeued(p); | 1898 | dequeue_task(rq, p, flags); |
1910 | p->sched_class->dequeue_task(rq, p, sleep); | 1899 | dec_nr_running(rq); |
1911 | p->se.on_rq = 0; | ||
1912 | } | 1900 | } |
1913 | 1901 | ||
1902 | #include "sched_idletask.c" | ||
1903 | #include "sched_fair.c" | ||
1904 | #include "sched_rt.c" | ||
1905 | #ifdef CONFIG_SCHED_DEBUG | ||
1906 | # include "sched_debug.c" | ||
1907 | #endif | ||
1908 | |||
1914 | /* | 1909 | /* |
1915 | * __normal_prio - return the priority that is based on the static prio | 1910 | * __normal_prio - return the priority that is based on the static prio |
1916 | */ | 1911 | */ |
@@ -1957,30 +1952,6 @@ static int effective_prio(struct task_struct *p) | |||
1957 | return p->prio; | 1952 | return p->prio; |
1958 | } | 1953 | } |
1959 | 1954 | ||
1960 | /* | ||
1961 | * activate_task - move a task to the runqueue. | ||
1962 | */ | ||
1963 | static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) | ||
1964 | { | ||
1965 | if (task_contributes_to_load(p)) | ||
1966 | rq->nr_uninterruptible--; | ||
1967 | |||
1968 | enqueue_task(rq, p, wakeup); | ||
1969 | inc_nr_running(rq); | ||
1970 | } | ||
1971 | |||
1972 | /* | ||
1973 | * deactivate_task - remove a task from the runqueue. | ||
1974 | */ | ||
1975 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) | ||
1976 | { | ||
1977 | if (task_contributes_to_load(p)) | ||
1978 | rq->nr_uninterruptible++; | ||
1979 | |||
1980 | dequeue_task(rq, p, sleep); | ||
1981 | dec_nr_running(rq); | ||
1982 | } | ||
1983 | |||
1984 | /** | 1955 | /** |
1985 | * task_curr - is this task currently executing on a CPU? | 1956 | * task_curr - is this task currently executing on a CPU? |
1986 | * @p: the task in question. | 1957 | * @p: the task in question. |
@@ -2053,21 +2024,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
2053 | __set_task_cpu(p, new_cpu); | 2024 | __set_task_cpu(p, new_cpu); |
2054 | } | 2025 | } |
2055 | 2026 | ||
2056 | struct migration_req { | 2027 | struct migration_arg { |
2057 | struct list_head list; | ||
2058 | |||
2059 | struct task_struct *task; | 2028 | struct task_struct *task; |
2060 | int dest_cpu; | 2029 | int dest_cpu; |
2061 | |||
2062 | struct completion done; | ||
2063 | }; | 2030 | }; |
2064 | 2031 | ||
2032 | static int migration_cpu_stop(void *data); | ||
2033 | |||
2065 | /* | 2034 | /* |
2066 | * The task's runqueue lock must be held. | 2035 | * The task's runqueue lock must be held. |
2067 | * Returns true if you have to wait for migration thread. | 2036 | * Returns true if you have to wait for migration thread. |
2068 | */ | 2037 | */ |
2069 | static int | 2038 | static bool migrate_task(struct task_struct *p, int dest_cpu) |
2070 | migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | ||
2071 | { | 2039 | { |
2072 | struct rq *rq = task_rq(p); | 2040 | struct rq *rq = task_rq(p); |
2073 | 2041 | ||
@@ -2075,58 +2043,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | |||
2075 | * If the task is not on a runqueue (and not running), then | 2043 | * If the task is not on a runqueue (and not running), then |
2076 | * the next wake-up will properly place the task. | 2044 | * the next wake-up will properly place the task. |
2077 | */ | 2045 | */ |
2078 | if (!p->se.on_rq && !task_running(rq, p)) | 2046 | return p->se.on_rq || task_running(rq, p); |
2079 | return 0; | ||
2080 | |||
2081 | init_completion(&req->done); | ||
2082 | req->task = p; | ||
2083 | req->dest_cpu = dest_cpu; | ||
2084 | list_add(&req->list, &rq->migration_queue); | ||
2085 | |||
2086 | return 1; | ||
2087 | } | ||
2088 | |||
2089 | /* | ||
2090 | * wait_task_context_switch - wait for a thread to complete at least one | ||
2091 | * context switch. | ||
2092 | * | ||
2093 | * @p must not be current. | ||
2094 | */ | ||
2095 | void wait_task_context_switch(struct task_struct *p) | ||
2096 | { | ||
2097 | unsigned long nvcsw, nivcsw, flags; | ||
2098 | int running; | ||
2099 | struct rq *rq; | ||
2100 | |||
2101 | nvcsw = p->nvcsw; | ||
2102 | nivcsw = p->nivcsw; | ||
2103 | for (;;) { | ||
2104 | /* | ||
2105 | * The runqueue is assigned before the actual context | ||
2106 | * switch. We need to take the runqueue lock. | ||
2107 | * | ||
2108 | * We could check initially without the lock but it is | ||
2109 | * very likely that we need to take the lock in every | ||
2110 | * iteration. | ||
2111 | */ | ||
2112 | rq = task_rq_lock(p, &flags); | ||
2113 | running = task_running(rq, p); | ||
2114 | task_rq_unlock(rq, &flags); | ||
2115 | |||
2116 | if (likely(!running)) | ||
2117 | break; | ||
2118 | /* | ||
2119 | * The switch count is incremented before the actual | ||
2120 | * context switch. We thus wait for two switches to be | ||
2121 | * sure at least one completed. | ||
2122 | */ | ||
2123 | if ((p->nvcsw - nvcsw) > 1) | ||
2124 | break; | ||
2125 | if ((p->nivcsw - nivcsw) > 1) | ||
2126 | break; | ||
2127 | |||
2128 | cpu_relax(); | ||
2129 | } | ||
2130 | } | 2047 | } |
2131 | 2048 | ||
2132 | /* | 2049 | /* |
@@ -2184,7 +2101,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
2184 | * just go back and repeat. | 2101 | * just go back and repeat. |
2185 | */ | 2102 | */ |
2186 | rq = task_rq_lock(p, &flags); | 2103 | rq = task_rq_lock(p, &flags); |
2187 | trace_sched_wait_task(rq, p); | 2104 | trace_sched_wait_task(p); |
2188 | running = task_running(rq, p); | 2105 | running = task_running(rq, p); |
2189 | on_rq = p->se.on_rq; | 2106 | on_rq = p->se.on_rq; |
2190 | ncsw = 0; | 2107 | ncsw = 0; |
@@ -2282,6 +2199,9 @@ void task_oncpu_function_call(struct task_struct *p, | |||
2282 | } | 2199 | } |
2283 | 2200 | ||
2284 | #ifdef CONFIG_SMP | 2201 | #ifdef CONFIG_SMP |
2202 | /* | ||
2203 | * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held. | ||
2204 | */ | ||
2285 | static int select_fallback_rq(int cpu, struct task_struct *p) | 2205 | static int select_fallback_rq(int cpu, struct task_struct *p) |
2286 | { | 2206 | { |
2287 | int dest_cpu; | 2207 | int dest_cpu; |
@@ -2298,12 +2218,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p) | |||
2298 | return dest_cpu; | 2218 | return dest_cpu; |
2299 | 2219 | ||
2300 | /* No more Mr. Nice Guy. */ | 2220 | /* No more Mr. Nice Guy. */ |
2301 | if (dest_cpu >= nr_cpu_ids) { | 2221 | if (unlikely(dest_cpu >= nr_cpu_ids)) { |
2302 | rcu_read_lock(); | 2222 | dest_cpu = cpuset_cpus_allowed_fallback(p); |
2303 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
2304 | rcu_read_unlock(); | ||
2305 | dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); | ||
2306 | |||
2307 | /* | 2223 | /* |
2308 | * Don't tell them about moving exiting tasks or | 2224 | * Don't tell them about moving exiting tasks or |
2309 | * kernel threads (both mm NULL), since they never | 2225 | * kernel threads (both mm NULL), since they never |
@@ -2320,19 +2236,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p) | |||
2320 | } | 2236 | } |
2321 | 2237 | ||
2322 | /* | 2238 | /* |
2323 | * Called from: | 2239 | * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable. |
2324 | * | ||
2325 | * - fork, @p is stable because it isn't on the tasklist yet | ||
2326 | * | ||
2327 | * - exec, @p is unstable, retry loop | ||
2328 | * | ||
2329 | * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so | ||
2330 | * we should be good. | ||
2331 | */ | 2240 | */ |
2332 | static inline | 2241 | static inline |
2333 | int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | 2242 | int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags) |
2334 | { | 2243 | { |
2335 | int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); | 2244 | int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags); |
2336 | 2245 | ||
2337 | /* | 2246 | /* |
2338 | * In order not to call set_task_cpu() on a blocking task we need | 2247 | * In order not to call set_task_cpu() on a blocking task we need |
@@ -2350,6 +2259,12 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) | |||
2350 | 2259 | ||
2351 | return cpu; | 2260 | return cpu; |
2352 | } | 2261 | } |
2262 | |||
2263 | static void update_avg(u64 *avg, u64 sample) | ||
2264 | { | ||
2265 | s64 diff = sample - *avg; | ||
2266 | *avg += diff >> 3; | ||
2267 | } | ||
2353 | #endif | 2268 | #endif |
2354 | 2269 | ||
2355 | /*** | 2270 | /*** |
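update_avg(), now placed under CONFIG_SMP, is an exponentially weighted moving average with weight 1/8: each call moves the stored average one eighth of the way toward the new sample. A quick standalone check of that behaviour (plain C types standing in for the kernel's u64/s64):

```c
#include <stdio.h>
#include <stdint.h>

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;

	*avg += diff >> 3;		/* move 1/8 of the way toward the sample */
}

int main(void)
{
	uint64_t avg = 0;
	int i;

	for (i = 0; i < 5; i++) {
		update_avg(&avg, 800);	/* feed a constant 800 ns sample */
		printf("step %d: avg=%llu\n", i, (unsigned long long)avg);
	}
	/* prints 100, 187, 263, 330, 388: converging toward 800 */
	return 0;
}
```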
@@ -2371,16 +2286,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2371 | { | 2286 | { |
2372 | int cpu, orig_cpu, this_cpu, success = 0; | 2287 | int cpu, orig_cpu, this_cpu, success = 0; |
2373 | unsigned long flags; | 2288 | unsigned long flags; |
2374 | struct rq *rq, *orig_rq; | 2289 | unsigned long en_flags = ENQUEUE_WAKEUP; |
2375 | 2290 | struct rq *rq; | |
2376 | if (!sched_feat(SYNC_WAKEUPS)) | ||
2377 | wake_flags &= ~WF_SYNC; | ||
2378 | 2291 | ||
2379 | this_cpu = get_cpu(); | 2292 | this_cpu = get_cpu(); |
2380 | 2293 | ||
2381 | smp_wmb(); | 2294 | smp_wmb(); |
2382 | rq = orig_rq = task_rq_lock(p, &flags); | 2295 | rq = task_rq_lock(p, &flags); |
2383 | update_rq_clock(rq); | ||
2384 | if (!(p->state & state)) | 2296 | if (!(p->state & state)) |
2385 | goto out; | 2297 | goto out; |
2386 | 2298 | ||
@@ -2400,24 +2312,35 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2400 | * | 2312 | * |
2401 | * First fix up the nr_uninterruptible count: | 2313 | * First fix up the nr_uninterruptible count: |
2402 | */ | 2314 | */ |
2403 | if (task_contributes_to_load(p)) | 2315 | if (task_contributes_to_load(p)) { |
2404 | rq->nr_uninterruptible--; | 2316 | if (likely(cpu_online(orig_cpu))) |
2317 | rq->nr_uninterruptible--; | ||
2318 | else | ||
2319 | this_rq()->nr_uninterruptible--; | ||
2320 | } | ||
2405 | p->state = TASK_WAKING; | 2321 | p->state = TASK_WAKING; |
2406 | 2322 | ||
2407 | if (p->sched_class->task_waking) | 2323 | if (p->sched_class->task_waking) { |
2408 | p->sched_class->task_waking(rq, p); | 2324 | p->sched_class->task_waking(rq, p); |
2325 | en_flags |= ENQUEUE_WAKING; | ||
2326 | } | ||
2409 | 2327 | ||
2410 | __task_rq_unlock(rq); | 2328 | cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); |
2411 | |||
2412 | cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); | ||
2413 | if (cpu != orig_cpu) | 2329 | if (cpu != orig_cpu) |
2414 | set_task_cpu(p, cpu); | 2330 | set_task_cpu(p, cpu); |
2331 | __task_rq_unlock(rq); | ||
2415 | 2332 | ||
2416 | rq = __task_rq_lock(p); | 2333 | rq = cpu_rq(cpu); |
2417 | update_rq_clock(rq); | 2334 | raw_spin_lock(&rq->lock); |
2418 | 2335 | ||
2336 | /* | ||
2337 | * We migrated the task without holding either rq->lock, however | ||
2338 | * since the task is not on the task list itself, nobody else | ||
2339 | * will try and migrate the task, hence the rq should match the | ||
2340 | * cpu we just moved it to. | ||
2341 | */ | ||
2342 | WARN_ON(task_cpu(p) != cpu); | ||
2419 | WARN_ON(p->state != TASK_WAKING); | 2343 | WARN_ON(p->state != TASK_WAKING); |
2420 | cpu = task_cpu(p); | ||
2421 | 2344 | ||
2422 | #ifdef CONFIG_SCHEDSTATS | 2345 | #ifdef CONFIG_SCHEDSTATS |
2423 | schedstat_inc(rq, ttwu_count); | 2346 | schedstat_inc(rq, ttwu_count); |
@@ -2436,36 +2359,20 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2436 | 2359 | ||
2437 | out_activate: | 2360 | out_activate: |
2438 | #endif /* CONFIG_SMP */ | 2361 | #endif /* CONFIG_SMP */ |
2439 | schedstat_inc(p, se.nr_wakeups); | 2362 | schedstat_inc(p, se.statistics.nr_wakeups); |
2440 | if (wake_flags & WF_SYNC) | 2363 | if (wake_flags & WF_SYNC) |
2441 | schedstat_inc(p, se.nr_wakeups_sync); | 2364 | schedstat_inc(p, se.statistics.nr_wakeups_sync); |
2442 | if (orig_cpu != cpu) | 2365 | if (orig_cpu != cpu) |
2443 | schedstat_inc(p, se.nr_wakeups_migrate); | 2366 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); |
2444 | if (cpu == this_cpu) | 2367 | if (cpu == this_cpu) |
2445 | schedstat_inc(p, se.nr_wakeups_local); | 2368 | schedstat_inc(p, se.statistics.nr_wakeups_local); |
2446 | else | 2369 | else |
2447 | schedstat_inc(p, se.nr_wakeups_remote); | 2370 | schedstat_inc(p, se.statistics.nr_wakeups_remote); |
2448 | activate_task(rq, p, 1); | 2371 | activate_task(rq, p, en_flags); |
2449 | success = 1; | 2372 | success = 1; |
2450 | 2373 | ||
2451 | /* | ||
2452 | * Only attribute actual wakeups done by this task. | ||
2453 | */ | ||
2454 | if (!in_interrupt()) { | ||
2455 | struct sched_entity *se = &current->se; | ||
2456 | u64 sample = se->sum_exec_runtime; | ||
2457 | |||
2458 | if (se->last_wakeup) | ||
2459 | sample -= se->last_wakeup; | ||
2460 | else | ||
2461 | sample -= se->start_runtime; | ||
2462 | update_avg(&se->avg_wakeup, sample); | ||
2463 | |||
2464 | se->last_wakeup = se->sum_exec_runtime; | ||
2465 | } | ||
2466 | |||
2467 | out_running: | 2374 | out_running: |
2468 | trace_sched_wakeup(rq, p, success); | 2375 | trace_sched_wakeup(p, success); |
2469 | check_preempt_curr(rq, p, wake_flags); | 2376 | check_preempt_curr(rq, p, wake_flags); |
2470 | 2377 | ||
2471 | p->state = TASK_RUNNING; | 2378 | p->state = TASK_RUNNING; |
@@ -2525,42 +2432,9 @@ static void __sched_fork(struct task_struct *p) | |||
2525 | p->se.sum_exec_runtime = 0; | 2432 | p->se.sum_exec_runtime = 0; |
2526 | p->se.prev_sum_exec_runtime = 0; | 2433 | p->se.prev_sum_exec_runtime = 0; |
2527 | p->se.nr_migrations = 0; | 2434 | p->se.nr_migrations = 0; |
2528 | p->se.last_wakeup = 0; | ||
2529 | p->se.avg_overlap = 0; | ||
2530 | p->se.start_runtime = 0; | ||
2531 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; | ||
2532 | 2435 | ||
2533 | #ifdef CONFIG_SCHEDSTATS | 2436 | #ifdef CONFIG_SCHEDSTATS |
2534 | p->se.wait_start = 0; | 2437 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
2535 | p->se.wait_max = 0; | ||
2536 | p->se.wait_count = 0; | ||
2537 | p->se.wait_sum = 0; | ||
2538 | |||
2539 | p->se.sleep_start = 0; | ||
2540 | p->se.sleep_max = 0; | ||
2541 | p->se.sum_sleep_runtime = 0; | ||
2542 | |||
2543 | p->se.block_start = 0; | ||
2544 | p->se.block_max = 0; | ||
2545 | p->se.exec_max = 0; | ||
2546 | p->se.slice_max = 0; | ||
2547 | |||
2548 | p->se.nr_migrations_cold = 0; | ||
2549 | p->se.nr_failed_migrations_affine = 0; | ||
2550 | p->se.nr_failed_migrations_running = 0; | ||
2551 | p->se.nr_failed_migrations_hot = 0; | ||
2552 | p->se.nr_forced_migrations = 0; | ||
2553 | |||
2554 | p->se.nr_wakeups = 0; | ||
2555 | p->se.nr_wakeups_sync = 0; | ||
2556 | p->se.nr_wakeups_migrate = 0; | ||
2557 | p->se.nr_wakeups_local = 0; | ||
2558 | p->se.nr_wakeups_remote = 0; | ||
2559 | p->se.nr_wakeups_affine = 0; | ||
2560 | p->se.nr_wakeups_affine_attempts = 0; | ||
2561 | p->se.nr_wakeups_passive = 0; | ||
2562 | p->se.nr_wakeups_idle = 0; | ||
2563 | |||
2564 | #endif | 2438 | #endif |
2565 | 2439 | ||
2566 | INIT_LIST_HEAD(&p->rt.run_list); | 2440 | INIT_LIST_HEAD(&p->rt.run_list); |
@@ -2581,11 +2455,11 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2581 | 2455 | ||
2582 | __sched_fork(p); | 2456 | __sched_fork(p); |
2583 | /* | 2457 | /* |
2584 | * We mark the process as waking here. This guarantees that | 2458 | * We mark the process as running here. This guarantees that |
2585 | * nobody will actually run it, and a signal or other external | 2459 | * nobody will actually run it, and a signal or other external |
2586 | * event cannot wake it up and insert it on the runqueue either. | 2460 | * event cannot wake it up and insert it on the runqueue either. |
2587 | */ | 2461 | */ |
2588 | p->state = TASK_WAKING; | 2462 | p->state = TASK_RUNNING; |
2589 | 2463 | ||
2590 | /* | 2464 | /* |
2591 | * Revert to default priority/policy on fork if requested. | 2465 | * Revert to default priority/policy on fork if requested. |
@@ -2620,10 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2620 | if (p->sched_class->task_fork) | 2494 | if (p->sched_class->task_fork) |
2621 | p->sched_class->task_fork(p); | 2495 | p->sched_class->task_fork(p); |
2622 | 2496 | ||
2623 | #ifdef CONFIG_SMP | 2497 | /* |
2624 | cpu = select_task_rq(p, SD_BALANCE_FORK, 0); | 2498 | * The child is not yet in the pid-hash so no cgroup attach races, |
2625 | #endif | 2499 | * and the cgroup is pinned to this child due to cgroup_fork() |
2500 | * is ran before sched_fork(). | ||
2501 | * | ||
2502 | * Silence PROVE_RCU. | ||
2503 | */ | ||
2504 | rcu_read_lock(); | ||
2626 | set_task_cpu(p, cpu); | 2505 | set_task_cpu(p, cpu); |
2506 | rcu_read_unlock(); | ||
2627 | 2507 | ||
2628 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2508 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2629 | if (likely(sched_info_on())) | 2509 | if (likely(sched_info_on())) |
@@ -2652,19 +2532,37 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2652 | { | 2532 | { |
2653 | unsigned long flags; | 2533 | unsigned long flags; |
2654 | struct rq *rq; | 2534 | struct rq *rq; |
2535 | int cpu __maybe_unused = get_cpu(); | ||
2655 | 2536 | ||
2537 | #ifdef CONFIG_SMP | ||
2656 | rq = task_rq_lock(p, &flags); | 2538 | rq = task_rq_lock(p, &flags); |
2657 | BUG_ON(p->state != TASK_WAKING); | 2539 | p->state = TASK_WAKING; |
2540 | |||
2541 | /* | ||
2542 | * Fork balancing, do it here and not earlier because: | ||
2543 | * - cpus_allowed can change in the fork path | ||
2544 | * - any previously selected cpu might disappear through hotplug | ||
2545 | * | ||
2546 | * We set TASK_WAKING so that select_task_rq() can drop rq->lock | ||
2547 | * without people poking at ->cpus_allowed. | ||
2548 | */ | ||
2549 | cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0); | ||
2550 | set_task_cpu(p, cpu); | ||
2551 | |||
2658 | p->state = TASK_RUNNING; | 2552 | p->state = TASK_RUNNING; |
2659 | update_rq_clock(rq); | 2553 | task_rq_unlock(rq, &flags); |
2554 | #endif | ||
2555 | |||
2556 | rq = task_rq_lock(p, &flags); | ||
2660 | activate_task(rq, p, 0); | 2557 | activate_task(rq, p, 0); |
2661 | trace_sched_wakeup_new(rq, p, 1); | 2558 | trace_sched_wakeup_new(p, 1); |
2662 | check_preempt_curr(rq, p, WF_FORK); | 2559 | check_preempt_curr(rq, p, WF_FORK); |
2663 | #ifdef CONFIG_SMP | 2560 | #ifdef CONFIG_SMP |
2664 | if (p->sched_class->task_woken) | 2561 | if (p->sched_class->task_woken) |
2665 | p->sched_class->task_woken(rq, p); | 2562 | p->sched_class->task_woken(rq, p); |
2666 | #endif | 2563 | #endif |
2667 | task_rq_unlock(rq, &flags); | 2564 | task_rq_unlock(rq, &flags); |
2565 | put_cpu(); | ||
2668 | } | 2566 | } |
2669 | 2567 | ||
2670 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 2568 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
@@ -2783,7 +2681,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
2783 | */ | 2681 | */ |
2784 | prev_state = prev->state; | 2682 | prev_state = prev->state; |
2785 | finish_arch_switch(prev); | 2683 | finish_arch_switch(prev); |
2786 | perf_event_task_sched_in(current, cpu_of(rq)); | 2684 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW |
2685 | local_irq_disable(); | ||
2686 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
2687 | perf_event_task_sched_in(current); | ||
2688 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
2689 | local_irq_enable(); | ||
2690 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
2787 | finish_lock_switch(rq, prev); | 2691 | finish_lock_switch(rq, prev); |
2788 | 2692 | ||
2789 | fire_sched_in_preempt_notifiers(current); | 2693 | fire_sched_in_preempt_notifiers(current); |
@@ -2871,7 +2775,7 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
2871 | struct mm_struct *mm, *oldmm; | 2775 | struct mm_struct *mm, *oldmm; |
2872 | 2776 | ||
2873 | prepare_task_switch(rq, prev, next); | 2777 | prepare_task_switch(rq, prev, next); |
2874 | trace_sched_switch(rq, prev, next); | 2778 | trace_sched_switch(prev, next); |
2875 | mm = next->mm; | 2779 | mm = next->mm; |
2876 | oldmm = prev->active_mm; | 2780 | oldmm = prev->active_mm; |
2877 | /* | 2781 | /* |
@@ -2969,9 +2873,9 @@ unsigned long nr_iowait(void) | |||
2969 | return sum; | 2873 | return sum; |
2970 | } | 2874 | } |
2971 | 2875 | ||
2972 | unsigned long nr_iowait_cpu(void) | 2876 | unsigned long nr_iowait_cpu(int cpu) |
2973 | { | 2877 | { |
2974 | struct rq *this = this_rq(); | 2878 | struct rq *this = cpu_rq(cpu); |
2975 | return atomic_read(&this->nr_iowait); | 2879 | return atomic_read(&this->nr_iowait); |
2976 | } | 2880 | } |
2977 | 2881 | ||
@@ -2988,6 +2892,61 @@ static unsigned long calc_load_update; | |||
2988 | unsigned long avenrun[3]; | 2892 | unsigned long avenrun[3]; |
2989 | EXPORT_SYMBOL(avenrun); | 2893 | EXPORT_SYMBOL(avenrun); |
2990 | 2894 | ||
2895 | static long calc_load_fold_active(struct rq *this_rq) | ||
2896 | { | ||
2897 | long nr_active, delta = 0; | ||
2898 | |||
2899 | nr_active = this_rq->nr_running; | ||
2900 | nr_active += (long) this_rq->nr_uninterruptible; | ||
2901 | |||
2902 | if (nr_active != this_rq->calc_load_active) { | ||
2903 | delta = nr_active - this_rq->calc_load_active; | ||
2904 | this_rq->calc_load_active = nr_active; | ||
2905 | } | ||
2906 | |||
2907 | return delta; | ||
2908 | } | ||
2909 | |||
2910 | #ifdef CONFIG_NO_HZ | ||
2911 | /* | ||
2912 | * For NO_HZ we delay the active fold to the next LOAD_FREQ update. | ||
2913 | * | ||
2914 | * When making the ILB scale, we should try to pull this in as well. | ||
2915 | */ | ||
2916 | static atomic_long_t calc_load_tasks_idle; | ||
2917 | |||
2918 | static void calc_load_account_idle(struct rq *this_rq) | ||
2919 | { | ||
2920 | long delta; | ||
2921 | |||
2922 | delta = calc_load_fold_active(this_rq); | ||
2923 | if (delta) | ||
2924 | atomic_long_add(delta, &calc_load_tasks_idle); | ||
2925 | } | ||
2926 | |||
2927 | static long calc_load_fold_idle(void) | ||
2928 | { | ||
2929 | long delta = 0; | ||
2930 | |||
2931 | /* | ||
2932 | * Its got a race, we don't care... | ||
2933 | */ | ||
2934 | if (atomic_long_read(&calc_load_tasks_idle)) | ||
2935 | delta = atomic_long_xchg(&calc_load_tasks_idle, 0); | ||
2936 | |||
2937 | return delta; | ||
2938 | } | ||
2939 | #else | ||
2940 | static void calc_load_account_idle(struct rq *this_rq) | ||
2941 | { | ||
2942 | } | ||
2943 | |||
2944 | static inline long calc_load_fold_idle(void) | ||
2945 | { | ||
2946 | return 0; | ||
2947 | } | ||
2948 | #endif | ||
2949 | |||
2991 | /** | 2950 | /** |
2992 | * get_avenrun - get the load average array | 2951 | * get_avenrun - get the load average array |
2993 | * @loads: pointer to dest load array | 2952 | * @loads: pointer to dest load array |
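Under NO_HZ, the new calc_load_account_idle() parks an idling CPU's active-count delta in calc_load_tasks_idle instead of touching calc_load_tasks directly; the next periodic update on a busy CPU folds the parked delta in together with its own. A compressed model of that two-stage fold, using C11 atomics in place of the kernel's atomic_long_t (illustrative names and values):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_long calc_load_tasks;		/* global load contribution */
static atomic_long calc_load_tasks_idle;	/* deltas parked by idling CPUs */

/* CPU goes idle: stash its delta for a later fold. */
static void account_idle(long delta)
{
	if (delta)
		atomic_fetch_add(&calc_load_tasks_idle, delta);
}

/* Periodic LOAD_FREQ update on a busy CPU: fold own delta plus parked idle deltas. */
static void account_active(long own_delta)
{
	long delta = own_delta + atomic_exchange(&calc_load_tasks_idle, 0);

	if (delta)
		atomic_fetch_add(&calc_load_tasks, delta);
}

int main(void)
{
	account_idle(-2);	/* two tasks left the runqueue while this CPU went idle */
	account_active(3);	/* a busy CPU later adds 3 and folds the parked -2 */
	printf("%ld\n", atomic_load(&calc_load_tasks));		/* 1 */
	return 0;
}
```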
@@ -3034,20 +2993,22 @@ void calc_global_load(void) | |||
3034 | } | 2993 | } |
3035 | 2994 | ||
3036 | /* | 2995 | /* |
3037 | * Either called from update_cpu_load() or from a cpu going idle | 2996 | * Called from update_cpu_load() to periodically update this CPU's |
2997 | * active count. | ||
3038 | */ | 2998 | */ |
3039 | static void calc_load_account_active(struct rq *this_rq) | 2999 | static void calc_load_account_active(struct rq *this_rq) |
3040 | { | 3000 | { |
3041 | long nr_active, delta; | 3001 | long delta; |
3042 | 3002 | ||
3043 | nr_active = this_rq->nr_running; | 3003 | if (time_before(jiffies, this_rq->calc_load_update)) |
3044 | nr_active += (long) this_rq->nr_uninterruptible; | 3004 | return; |
3045 | 3005 | ||
3046 | if (nr_active != this_rq->calc_load_active) { | 3006 | delta = calc_load_fold_active(this_rq); |
3047 | delta = nr_active - this_rq->calc_load_active; | 3007 | delta += calc_load_fold_idle(); |
3048 | this_rq->calc_load_active = nr_active; | 3008 | if (delta) |
3049 | atomic_long_add(delta, &calc_load_tasks); | 3009 | atomic_long_add(delta, &calc_load_tasks); |
3050 | } | 3010 | |
3011 | this_rq->calc_load_update += LOAD_FREQ; | ||
3051 | } | 3012 | } |
3052 | 3013 | ||
3053 | /* | 3014 | /* |
@@ -3079,1871 +3040,42 @@ static void update_cpu_load(struct rq *this_rq) | |||
3079 | this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; | 3040 | this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; |
3080 | } | 3041 | } |
3081 | 3042 | ||
3082 | if (time_after_eq(jiffies, this_rq->calc_load_update)) { | 3043 | calc_load_account_active(this_rq); |
3083 | this_rq->calc_load_update += LOAD_FREQ; | ||
3084 | calc_load_account_active(this_rq); | ||
3085 | } | ||
3086 | } | 3044 | } |
3087 | 3045 | ||
3088 | #ifdef CONFIG_SMP | 3046 | #ifdef CONFIG_SMP |
3089 | 3047 | ||
3090 | /* | 3048 | /* |
3091 | * double_rq_lock - safely lock two runqueues | ||
3092 | * | ||
3093 | * Note this does not disable interrupts like task_rq_lock, | ||
3094 | * you need to do so manually before calling. | ||
3095 | */ | ||
3096 | static void double_rq_lock(struct rq *rq1, struct rq *rq2) | ||
3097 | __acquires(rq1->lock) | ||
3098 | __acquires(rq2->lock) | ||
3099 | { | ||
3100 | BUG_ON(!irqs_disabled()); | ||
3101 | if (rq1 == rq2) { | ||
3102 | raw_spin_lock(&rq1->lock); | ||
3103 | __acquire(rq2->lock); /* Fake it out ;) */ | ||
3104 | } else { | ||
3105 | if (rq1 < rq2) { | ||
3106 | raw_spin_lock(&rq1->lock); | ||
3107 | raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); | ||
3108 | } else { | ||
3109 | raw_spin_lock(&rq2->lock); | ||
3110 | raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); | ||
3111 | } | ||
3112 | } | ||
3113 | update_rq_clock(rq1); | ||
3114 | update_rq_clock(rq2); | ||
3115 | } | ||
3116 | |||
3117 | /* | ||
3118 | * double_rq_unlock - safely unlock two runqueues | ||
3119 | * | ||
3120 | * Note this does not restore interrupts like task_rq_unlock, | ||
3121 | * you need to do so manually after calling. | ||
3122 | */ | ||
3123 | static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | ||
3124 | __releases(rq1->lock) | ||
3125 | __releases(rq2->lock) | ||
3126 | { | ||
3127 | raw_spin_unlock(&rq1->lock); | ||
3128 | if (rq1 != rq2) | ||
3129 | raw_spin_unlock(&rq2->lock); | ||
3130 | else | ||
3131 | __release(rq2->lock); | ||
3132 | } | ||
3133 | |||
3134 | /* | ||
3135 | * sched_exec - execve() is a valuable balancing opportunity, because at | 3049 | * sched_exec - execve() is a valuable balancing opportunity, because at |
3136 | * this point the task has the smallest effective memory and cache footprint. | 3050 | * this point the task has the smallest effective memory and cache footprint. |
3137 | */ | 3051 | */ |
3138 | void sched_exec(void) | 3052 | void sched_exec(void) |
3139 | { | 3053 | { |
3140 | struct task_struct *p = current; | 3054 | struct task_struct *p = current; |
3141 | struct migration_req req; | ||
3142 | int dest_cpu, this_cpu; | ||
3143 | unsigned long flags; | 3055 | unsigned long flags; |
3144 | struct rq *rq; | 3056 | struct rq *rq; |
3145 | 3057 | int dest_cpu; | |
3146 | again: | ||
3147 | this_cpu = get_cpu(); | ||
3148 | dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0); | ||
3149 | if (dest_cpu == this_cpu) { | ||
3150 | put_cpu(); | ||
3151 | return; | ||
3152 | } | ||
3153 | 3058 | ||
3154 | rq = task_rq_lock(p, &flags); | 3059 | rq = task_rq_lock(p, &flags); |
3155 | put_cpu(); | 3060 | dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0); |
3061 | if (dest_cpu == smp_processor_id()) | ||
3062 | goto unlock; | ||
3156 | 3063 | ||
3157 | /* | 3064 | /* |
3158 | * select_task_rq() can race against ->cpus_allowed | 3065 | * select_task_rq() can race against ->cpus_allowed |
3159 | */ | 3066 | */ |
3160 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) | 3067 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) && |
3161 | || unlikely(!cpu_active(dest_cpu))) { | 3068 | likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) { |
3162 | task_rq_unlock(rq, &flags); | 3069 | struct migration_arg arg = { p, dest_cpu }; |
3163 | goto again; | ||
3164 | } | ||
3165 | 3070 | ||
3166 | /* force the process onto the specified CPU */ | ||
3167 | if (migrate_task(p, dest_cpu, &req)) { | ||
3168 | /* Need to wait for migration thread (might exit: take ref). */ | ||
3169 | struct task_struct *mt = rq->migration_thread; | ||
3170 | |||
3171 | get_task_struct(mt); | ||
3172 | task_rq_unlock(rq, &flags); | 3071 | task_rq_unlock(rq, &flags); |
3173 | wake_up_process(mt); | 3072 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
3174 | put_task_struct(mt); | ||
3175 | wait_for_completion(&req.done); | ||
3176 | |||
3177 | return; | 3073 | return; |
3178 | } | 3074 | } |
3075 | unlock: | ||
3179 | task_rq_unlock(rq, &flags); | 3076 | task_rq_unlock(rq, &flags); |
3180 | } | 3077 | } |
3181 | 3078 | ||
3182 | /* | ||
3183 | * pull_task - move a task from a remote runqueue to the local runqueue. | ||
3184 | * Both runqueues must be locked. | ||
3185 | */ | ||
3186 | static void pull_task(struct rq *src_rq, struct task_struct *p, | ||
3187 | struct rq *this_rq, int this_cpu) | ||
3188 | { | ||
3189 | deactivate_task(src_rq, p, 0); | ||
3190 | set_task_cpu(p, this_cpu); | ||
3191 | activate_task(this_rq, p, 0); | ||
3192 | check_preempt_curr(this_rq, p, 0); | ||
3193 | } | ||
3194 | |||
3195 | /* | ||
3196 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | ||
3197 | */ | ||
3198 | static | ||
3199 | int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | ||
3200 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3201 | int *all_pinned) | ||
3202 | { | ||
3203 | int tsk_cache_hot = 0; | ||
3204 | /* | ||
3205 | * We do not migrate tasks that: | ||
3206 | * 1) are running (obviously), or | ||
3207 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | ||
3208 | * 3) are cache-hot on their current CPU. | ||
3209 | */ | ||
3210 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { | ||
3211 | schedstat_inc(p, se.nr_failed_migrations_affine); | ||
3212 | return 0; | ||
3213 | } | ||
3214 | *all_pinned = 0; | ||
3215 | |||
3216 | if (task_running(rq, p)) { | ||
3217 | schedstat_inc(p, se.nr_failed_migrations_running); | ||
3218 | return 0; | ||
3219 | } | ||
3220 | |||
3221 | /* | ||
3222 | * Aggressive migration if: | ||
3223 | * 1) task is cache cold, or | ||
3224 | * 2) too many balance attempts have failed. | ||
3225 | */ | ||
3226 | |||
3227 | tsk_cache_hot = task_hot(p, rq->clock, sd); | ||
3228 | if (!tsk_cache_hot || | ||
3229 | sd->nr_balance_failed > sd->cache_nice_tries) { | ||
3230 | #ifdef CONFIG_SCHEDSTATS | ||
3231 | if (tsk_cache_hot) { | ||
3232 | schedstat_inc(sd, lb_hot_gained[idle]); | ||
3233 | schedstat_inc(p, se.nr_forced_migrations); | ||
3234 | } | ||
3235 | #endif | ||
3236 | return 1; | ||
3237 | } | ||
3238 | |||
3239 | if (tsk_cache_hot) { | ||
3240 | schedstat_inc(p, se.nr_failed_migrations_hot); | ||
3241 | return 0; | ||
3242 | } | ||
3243 | return 1; | ||
3244 | } | ||
3245 | |||
3246 | static unsigned long | ||
3247 | balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3248 | unsigned long max_load_move, struct sched_domain *sd, | ||
3249 | enum cpu_idle_type idle, int *all_pinned, | ||
3250 | int *this_best_prio, struct rq_iterator *iterator) | ||
3251 | { | ||
3252 | int loops = 0, pulled = 0, pinned = 0; | ||
3253 | struct task_struct *p; | ||
3254 | long rem_load_move = max_load_move; | ||
3255 | |||
3256 | if (max_load_move == 0) | ||
3257 | goto out; | ||
3258 | |||
3259 | pinned = 1; | ||
3260 | |||
3261 | /* | ||
3262 | * Start the load-balancing iterator: | ||
3263 | */ | ||
3264 | p = iterator->start(iterator->arg); | ||
3265 | next: | ||
3266 | if (!p || loops++ > sysctl_sched_nr_migrate) | ||
3267 | goto out; | ||
3268 | |||
3269 | if ((p->se.load.weight >> 1) > rem_load_move || | ||
3270 | !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { | ||
3271 | p = iterator->next(iterator->arg); | ||
3272 | goto next; | ||
3273 | } | ||
3274 | |||
3275 | pull_task(busiest, p, this_rq, this_cpu); | ||
3276 | pulled++; | ||
3277 | rem_load_move -= p->se.load.weight; | ||
3278 | |||
3279 | #ifdef CONFIG_PREEMPT | ||
3280 | /* | ||
3281 | * NEWIDLE balancing is a source of latency, so preemptible kernels | ||
3282 | * will stop after the first task is pulled to minimize the critical | ||
3283 | * section. | ||
3284 | */ | ||
3285 | if (idle == CPU_NEWLY_IDLE) | ||
3286 | goto out; | ||
3287 | #endif | ||
3288 | |||
3289 | /* | ||
3290 | * We only want to steal up to the prescribed amount of weighted load. | ||
3291 | */ | ||
3292 | if (rem_load_move > 0) { | ||
3293 | if (p->prio < *this_best_prio) | ||
3294 | *this_best_prio = p->prio; | ||
3295 | p = iterator->next(iterator->arg); | ||
3296 | goto next; | ||
3297 | } | ||
3298 | out: | ||
3299 | /* | ||
3300 | * Right now, this is one of only two places pull_task() is called, | ||
3301 | * so we can safely collect pull_task() stats here rather than | ||
3302 | * inside pull_task(). | ||
3303 | */ | ||
3304 | schedstat_add(sd, lb_gained[idle], pulled); | ||
3305 | |||
3306 | if (all_pinned) | ||
3307 | *all_pinned = pinned; | ||
3308 | |||
3309 | return max_load_move - rem_load_move; | ||
3310 | } | ||
3311 | |||
3312 | /* | ||
3313 | * move_tasks tries to move up to max_load_move weighted load from busiest to | ||
3314 | * this_rq, as part of a balancing operation within domain "sd". | ||
3315 | * Returns 1 if successful and 0 otherwise. | ||
3316 | * | ||
3317 | * Called with both runqueues locked. | ||
3318 | */ | ||
3319 | static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3320 | unsigned long max_load_move, | ||
3321 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3322 | int *all_pinned) | ||
3323 | { | ||
3324 | const struct sched_class *class = sched_class_highest; | ||
3325 | unsigned long total_load_moved = 0; | ||
3326 | int this_best_prio = this_rq->curr->prio; | ||
3327 | |||
3328 | do { | ||
3329 | total_load_moved += | ||
3330 | class->load_balance(this_rq, this_cpu, busiest, | ||
3331 | max_load_move - total_load_moved, | ||
3332 | sd, idle, all_pinned, &this_best_prio); | ||
3333 | class = class->next; | ||
3334 | |||
3335 | #ifdef CONFIG_PREEMPT | ||
3336 | /* | ||
3337 | * NEWIDLE balancing is a source of latency, so preemptible | ||
3338 | * kernels will stop after the first task is pulled to minimize | ||
3339 | * the critical section. | ||
3340 | */ | ||
3341 | if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) | ||
3342 | break; | ||
3343 | #endif | ||
3344 | } while (class && max_load_move > total_load_moved); | ||
3345 | |||
3346 | return total_load_moved > 0; | ||
3347 | } | ||
3348 | |||
3349 | static int | ||
3350 | iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3351 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
3352 | struct rq_iterator *iterator) | ||
3353 | { | ||
3354 | struct task_struct *p = iterator->start(iterator->arg); | ||
3355 | int pinned = 0; | ||
3356 | |||
3357 | while (p) { | ||
3358 | if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { | ||
3359 | pull_task(busiest, p, this_rq, this_cpu); | ||
3360 | /* | ||
3361 | * Right now, this is only the second place pull_task() | ||
3362 | * is called, so we can safely collect pull_task() | ||
3363 | * stats here rather than inside pull_task(). | ||
3364 | */ | ||
3365 | schedstat_inc(sd, lb_gained[idle]); | ||
3366 | |||
3367 | return 1; | ||
3368 | } | ||
3369 | p = iterator->next(iterator->arg); | ||
3370 | } | ||
3371 | |||
3372 | return 0; | ||
3373 | } | ||
3374 | |||
3375 | /* | ||
3376 | * move_one_task tries to move exactly one task from busiest to this_rq, as | ||
3377 | * part of active balancing operations within "domain". | ||
3378 | * Returns 1 if successful and 0 otherwise. | ||
3379 | * | ||
3380 | * Called with both runqueues locked. | ||
3381 | */ | ||
3382 | static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
3383 | struct sched_domain *sd, enum cpu_idle_type idle) | ||
3384 | { | ||
3385 | const struct sched_class *class; | ||
3386 | |||
3387 | for_each_class(class) { | ||
3388 | if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) | ||
3389 | return 1; | ||
3390 | } | ||
3391 | |||
3392 | return 0; | ||
3393 | } | ||
3394 | /********** Helpers for find_busiest_group ************************/ | ||
3395 | /* | ||
3396 | * sd_lb_stats - Structure to store the statistics of a sched_domain | ||
3397 | * during load balancing. | ||
3398 | */ | ||
3399 | struct sd_lb_stats { | ||
3400 | struct sched_group *busiest; /* Busiest group in this sd */ | ||
3401 | struct sched_group *this; /* Local group in this sd */ | ||
3402 | unsigned long total_load; /* Total load of all groups in sd */ | ||
3403 | unsigned long total_pwr; /* Total power of all groups in sd */ | ||
3404 | unsigned long avg_load; /* Average load across all groups in sd */ | ||
3405 | |||
3406 | /** Statistics of this group */ | ||
3407 | unsigned long this_load; | ||
3408 | unsigned long this_load_per_task; | ||
3409 | unsigned long this_nr_running; | ||
3410 | |||
3411 | /* Statistics of the busiest group */ | ||
3412 | unsigned long max_load; | ||
3413 | unsigned long busiest_load_per_task; | ||
3414 | unsigned long busiest_nr_running; | ||
3415 | |||
3416 | int group_imb; /* Is there an imbalance in this sd? */ | ||
3417 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
3418 | int power_savings_balance; /* Is powersave balance needed for this sd */ | ||
3419 | struct sched_group *group_min; /* Least loaded group in sd */ | ||
3420 | struct sched_group *group_leader; /* Group which relieves group_min */ | ||
3421 | unsigned long min_load_per_task; /* load_per_task in group_min */ | ||
3422 | unsigned long leader_nr_running; /* Nr running of group_leader */ | ||
3423 | unsigned long min_nr_running; /* Nr running of group_min */ | ||
3424 | #endif | ||
3425 | }; | ||
3426 | |||
3427 | /* | ||
3428 | * sg_lb_stats - stats of a sched_group required for load_balancing | ||
3429 | */ | ||
3430 | struct sg_lb_stats { | ||
3431 | unsigned long avg_load; /* Avg load across the CPUs of the group */ | ||
3432 | unsigned long group_load; /* Total load over the CPUs of the group */ | ||
3433 | unsigned long sum_nr_running; /* Nr tasks running in the group */ | ||
3434 | unsigned long sum_weighted_load; /* Weighted load of group's tasks */ | ||
3435 | unsigned long group_capacity; | ||
3436 | int group_imb; /* Is there an imbalance in the group? */ | ||
3437 | }; | ||
3438 | |||
3439 | /** | ||
3440 | * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. | ||
3441 | * @group: The group whose first cpu is to be returned. | ||
3442 | */ | ||
3443 | static inline unsigned int group_first_cpu(struct sched_group *group) | ||
3444 | { | ||
3445 | return cpumask_first(sched_group_cpus(group)); | ||
3446 | } | ||
3447 | |||
3448 | /** | ||
3449 | * get_sd_load_idx - Obtain the load index for a given sched domain. | ||
3450 | * @sd: The sched_domain whose load_idx is to be obtained. | ||
3451 | * @idle: The idle status of the CPU whose sd load_idx is obtained. | ||
3452 | */ | ||
3453 | static inline int get_sd_load_idx(struct sched_domain *sd, | ||
3454 | enum cpu_idle_type idle) | ||
3455 | { | ||
3456 | int load_idx; | ||
3457 | |||
3458 | switch (idle) { | ||
3459 | case CPU_NOT_IDLE: | ||
3460 | load_idx = sd->busy_idx; | ||
3461 | break; | ||
3462 | |||
3463 | case CPU_NEWLY_IDLE: | ||
3464 | load_idx = sd->newidle_idx; | ||
3465 | break; | ||
3466 | default: | ||
3467 | load_idx = sd->idle_idx; | ||
3468 | break; | ||
3469 | } | ||
3470 | |||
3471 | return load_idx; | ||
3472 | } | ||
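As an aside for readers of this hunk: load_idx selects one of the per-runqueue cpu_load[] slots, and higher indices hold more heavily damped averages, which is why busy balancing reacts more slowly than newly-idle balancing. A simplified stand-alone sketch of that damping (sample numbers invented; rounding details of the kernel's update_cpu_load() omitted):

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

/*
 * Each index i holds an exponentially damped load average:
 *   new[i] = (old[i] * (2^i - 1) + instantaneous) / 2^i
 * so cpu_load[0] tracks the raw load and cpu_load[4] moves slowly.
 */
static void update_cpu_load_sketch(unsigned long cpu_load[CPU_LOAD_IDX_MAX],
				   unsigned long this_load)
{
	int i;

	cpu_load[0] = this_load;
	for (i = 1; i < CPU_LOAD_IDX_MAX; i++) {
		unsigned long scale = 1UL << i;

		cpu_load[i] = (cpu_load[i] * (scale - 1) + this_load) >> i;
	}
}

int main(void)
{
	unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 0 };
	int tick;

	/* A burst of load: low indices jump, high indices creep up. */
	for (tick = 0; tick < 8; tick++)
		update_cpu_load_sketch(cpu_load, 2048);

	printf("cpu_load[0]=%lu cpu_load[2]=%lu cpu_load[4]=%lu\n",
	       cpu_load[0], cpu_load[2], cpu_load[4]);
	return 0;
}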
3473 | |||
3474 | |||
3475 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
3476 | /** | ||
3477 | * init_sd_power_savings_stats - Initialize power savings statistics for | ||
3478 | * the given sched_domain, during load balancing. | ||
3479 | * | ||
3480 | * @sd: Sched domain whose power-savings statistics are to be initialized. | ||
3481 | * @sds: Variable containing the statistics for sd. | ||
3482 | * @idle: Idle status of the CPU at which we're performing load-balancing. | ||
3483 | */ | ||
3484 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | ||
3485 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | ||
3486 | { | ||
3487 | /* | ||
3488 | * Busy processors will not participate in power savings | ||
3489 | * balance. | ||
3490 | */ | ||
3491 | if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE)) | ||
3492 | sds->power_savings_balance = 0; | ||
3493 | else { | ||
3494 | sds->power_savings_balance = 1; | ||
3495 | sds->min_nr_running = ULONG_MAX; | ||
3496 | sds->leader_nr_running = 0; | ||
3497 | } | ||
3498 | } | ||
3499 | |||
3500 | /** | ||
3501 | * update_sd_power_savings_stats - Update the power saving stats for a | ||
3502 | * sched_domain while performing load balancing. | ||
3503 | * | ||
3504 | * @group: sched_group belonging to the sched_domain under consideration. | ||
3505 | * @sds: Variable containing the statistics of the sched_domain | ||
3506 | * @local_group: Does group contain the CPU for which we're performing | ||
3507 | * load balancing ? | ||
3508 | * @sgs: Variable containing the statistics of the group. | ||
3509 | */ | ||
3510 | static inline void update_sd_power_savings_stats(struct sched_group *group, | ||
3511 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | ||
3512 | { | ||
3513 | |||
3514 | if (!sds->power_savings_balance) | ||
3515 | return; | ||
3516 | |||
3517 | /* | ||
3518 | * If the local group is idle or completely loaded, | ||
3519 | * there is no need to do power savings balance at this domain. | ||
3520 | */ | ||
3521 | if (local_group && (sds->this_nr_running >= sgs->group_capacity || | ||
3522 | !sds->this_nr_running)) | ||
3523 | sds->power_savings_balance = 0; | ||
3524 | |||
3525 | /* | ||
3526 | * If a group is already running at full capacity or idle, | ||
3527 | * don't include that group in power savings calculations | ||
3528 | */ | ||
3529 | if (!sds->power_savings_balance || | ||
3530 | sgs->sum_nr_running >= sgs->group_capacity || | ||
3531 | !sgs->sum_nr_running) | ||
3532 | return; | ||
3533 | |||
3534 | /* | ||
3535 | * Calculate the group which has the least non-idle load. | ||
3536 | * This is the group from which we need to pick up the load | ||
3537 | * in order to save power. | ||
3538 | */ | ||
3539 | if ((sgs->sum_nr_running < sds->min_nr_running) || | ||
3540 | (sgs->sum_nr_running == sds->min_nr_running && | ||
3541 | group_first_cpu(group) > group_first_cpu(sds->group_min))) { | ||
3542 | sds->group_min = group; | ||
3543 | sds->min_nr_running = sgs->sum_nr_running; | ||
3544 | sds->min_load_per_task = sgs->sum_weighted_load / | ||
3545 | sgs->sum_nr_running; | ||
3546 | } | ||
3547 | |||
3548 | /* | ||
3549 | * Calculate the group which is nearly at its | ||
3550 | * capacity but still has some space to pick up some load | ||
3551 | * from another group and save more power. | ||
3552 | */ | ||
3553 | if (sgs->sum_nr_running + 1 > sgs->group_capacity) | ||
3554 | return; | ||
3555 | |||
3556 | if (sgs->sum_nr_running > sds->leader_nr_running || | ||
3557 | (sgs->sum_nr_running == sds->leader_nr_running && | ||
3558 | group_first_cpu(group) < group_first_cpu(sds->group_leader))) { | ||
3559 | sds->group_leader = group; | ||
3560 | sds->leader_nr_running = sgs->sum_nr_running; | ||
3561 | } | ||
3562 | } | ||
3563 | |||
3564 | /** | ||
3565 | * check_power_save_busiest_group - see if there is potential for some power-savings balance | ||
3566 | * @sds: Variable containing the statistics of the sched_domain | ||
3567 | * under consideration. | ||
3568 | * @this_cpu: Cpu at which we're currently performing load-balancing. | ||
3569 | * @imbalance: Variable to store the imbalance. | ||
3570 | * | ||
3571 | * Description: | ||
3572 | * Check if we have potential to perform some power-savings balance. | ||
3573 | * If yes, set the busiest group to be the least loaded group in the | ||
3574 | * sched_domain, so that its CPUs can be put to idle. | ||
3575 | * | ||
3576 | * Returns 1 if there is potential to perform power-savings balance. | ||
3577 | * Else returns 0. | ||
3578 | */ | ||
3579 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | ||
3580 | int this_cpu, unsigned long *imbalance) | ||
3581 | { | ||
3582 | if (!sds->power_savings_balance) | ||
3583 | return 0; | ||
3584 | |||
3585 | if (sds->this != sds->group_leader || | ||
3586 | sds->group_leader == sds->group_min) | ||
3587 | return 0; | ||
3588 | |||
3589 | *imbalance = sds->min_load_per_task; | ||
3590 | sds->busiest = sds->group_min; | ||
3591 | |||
3592 | return 1; | ||
3593 | |||
3594 | } | ||
3595 | #else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | ||
3596 | static inline void init_sd_power_savings_stats(struct sched_domain *sd, | ||
3597 | struct sd_lb_stats *sds, enum cpu_idle_type idle) | ||
3598 | { | ||
3599 | return; | ||
3600 | } | ||
3601 | |||
3602 | static inline void update_sd_power_savings_stats(struct sched_group *group, | ||
3603 | struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs) | ||
3604 | { | ||
3605 | return; | ||
3606 | } | ||
3607 | |||
3608 | static inline int check_power_save_busiest_group(struct sd_lb_stats *sds, | ||
3609 | int this_cpu, unsigned long *imbalance) | ||
3610 | { | ||
3611 | return 0; | ||
3612 | } | ||
3613 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | ||
3614 | |||
3615 | |||
3616 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) | ||
3617 | { | ||
3618 | return SCHED_LOAD_SCALE; | ||
3619 | } | ||
3620 | |||
3621 | unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) | ||
3622 | { | ||
3623 | return default_scale_freq_power(sd, cpu); | ||
3624 | } | ||
3625 | |||
3626 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) | ||
3627 | { | ||
3628 | unsigned long weight = cpumask_weight(sched_domain_span(sd)); | ||
3629 | unsigned long smt_gain = sd->smt_gain; | ||
3630 | |||
3631 | smt_gain /= weight; | ||
3632 | |||
3633 | return smt_gain; | ||
3634 | } | ||
3635 | |||
3636 | unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) | ||
3637 | { | ||
3638 | return default_scale_smt_power(sd, cpu); | ||
3639 | } | ||
3640 | |||
3641 | unsigned long scale_rt_power(int cpu) | ||
3642 | { | ||
3643 | struct rq *rq = cpu_rq(cpu); | ||
3644 | u64 total, available; | ||
3645 | |||
3646 | sched_avg_update(rq); | ||
3647 | |||
3648 | total = sched_avg_period() + (rq->clock - rq->age_stamp); | ||
3649 | available = total - rq->rt_avg; | ||
3650 | |||
3651 | if (unlikely((s64)total < SCHED_LOAD_SCALE)) | ||
3652 | total = SCHED_LOAD_SCALE; | ||
3653 | |||
3654 | total >>= SCHED_LOAD_SHIFT; | ||
3655 | |||
3656 | return div_u64(available, total); | ||
3657 | } | ||
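A rough worked example of the arithmetic above (user-space sketch, figures invented): scale_rt_power() returns roughly available/total expressed in SCHED_LOAD_SCALE units, i.e. the fraction of the CPU left over once real-time activity is accounted for.

#include <stdint.h>
#include <stdio.h>

#define SCHED_LOAD_SHIFT 10	/* assumed, as in this file */
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

/* Mirror of the scale_rt_power() arithmetic on plain integers. */
static uint64_t scale_rt_power_sketch(uint64_t period_ns, uint64_t since_age_ns,
				      uint64_t rt_avg_ns)
{
	uint64_t total = period_ns + since_age_ns;
	uint64_t available = total - rt_avg_ns;

	if ((int64_t)total < (int64_t)SCHED_LOAD_SCALE)
		total = SCHED_LOAD_SCALE;

	total >>= SCHED_LOAD_SHIFT;

	return available / total;	/* roughly available/total * 1024 */
}

int main(void)
{
	/* e.g. 500ms averaging period, 100ms since age_stamp, 150ms of RT time */
	uint64_t scale = scale_rt_power_sketch(500000000ULL, 100000000ULL,
					       150000000ULL);

	/* (600ms - 150ms) / 600ms ~= 0.75, so roughly 768 out of 1024 */
	printf("rt scale factor: %llu / %lu\n",
	       (unsigned long long)scale, SCHED_LOAD_SCALE);
	return 0;
}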
3658 | |||
3659 | static void update_cpu_power(struct sched_domain *sd, int cpu) | ||
3660 | { | ||
3661 | unsigned long weight = cpumask_weight(sched_domain_span(sd)); | ||
3662 | unsigned long power = SCHED_LOAD_SCALE; | ||
3663 | struct sched_group *sdg = sd->groups; | ||
3664 | |||
3665 | if (sched_feat(ARCH_POWER)) | ||
3666 | power *= arch_scale_freq_power(sd, cpu); | ||
3667 | else | ||
3668 | power *= default_scale_freq_power(sd, cpu); | ||
3669 | |||
3670 | power >>= SCHED_LOAD_SHIFT; | ||
3671 | |||
3672 | if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { | ||
3673 | if (sched_feat(ARCH_POWER)) | ||
3674 | power *= arch_scale_smt_power(sd, cpu); | ||
3675 | else | ||
3676 | power *= default_scale_smt_power(sd, cpu); | ||
3677 | |||
3678 | power >>= SCHED_LOAD_SHIFT; | ||
3679 | } | ||
3680 | |||
3681 | power *= scale_rt_power(cpu); | ||
3682 | power >>= SCHED_LOAD_SHIFT; | ||
3683 | |||
3684 | if (!power) | ||
3685 | power = 1; | ||
3686 | |||
3687 | sdg->cpu_power = power; | ||
3688 | } | ||
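To make the cascaded scaling concrete, here is a hedged user-space sketch with invented factors: each contribution is expressed in SCHED_LOAD_SCALE units, multiplied in, and shifted back down, so an SMT sibling that also loses a quarter of its time to RT work ends up rated well below the nominal 1024.

#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;	/* nominal capacity              */
	unsigned long freq_scale = 1024;	/* no frequency scaling          */
	unsigned long smt_scale  = 589;		/* ~smt_gain shared by 2 siblings */
	unsigned long rt_scale   = 768;		/* ~75% of time left for CFS     */

	power = (power * freq_scale) >> SCHED_LOAD_SHIFT;
	power = (power * smt_scale)  >> SCHED_LOAD_SHIFT;
	power = (power * rt_scale)   >> SCHED_LOAD_SHIFT;
	if (!power)
		power = 1;

	/* 1024 -> 1024 -> 589 -> ~441: the sibling counts as less than half a CPU */
	printf("effective cpu_power: %lu\n", power);
	return 0;
}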
3689 | |||
3690 | static void update_group_power(struct sched_domain *sd, int cpu) | ||
3691 | { | ||
3692 | struct sched_domain *child = sd->child; | ||
3693 | struct sched_group *group, *sdg = sd->groups; | ||
3694 | unsigned long power; | ||
3695 | |||
3696 | if (!child) { | ||
3697 | update_cpu_power(sd, cpu); | ||
3698 | return; | ||
3699 | } | ||
3700 | |||
3701 | power = 0; | ||
3702 | |||
3703 | group = child->groups; | ||
3704 | do { | ||
3705 | power += group->cpu_power; | ||
3706 | group = group->next; | ||
3707 | } while (group != child->groups); | ||
3708 | |||
3709 | sdg->cpu_power = power; | ||
3710 | } | ||
3711 | |||
3712 | /** | ||
3713 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. | ||
3714 | * @sd: The sched_domain whose statistics are to be updated. | ||
3715 | * @group: sched_group whose statistics are to be updated. | ||
3716 | * @this_cpu: Cpu for which load balance is currently performed. | ||
3717 | * @idle: Idle status of this_cpu | ||
3718 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | ||
3719 | * @sd_idle: Idle status of the sched_domain containing group. | ||
3720 | * @local_group: Does group contain this_cpu. | ||
3721 | * @cpus: Set of cpus considered for load balancing. | ||
3722 | * @balance: Should we balance. | ||
3723 | * @sgs: variable to hold the statistics for this group. | ||
3724 | */ | ||
3725 | static inline void update_sg_lb_stats(struct sched_domain *sd, | ||
3726 | struct sched_group *group, int this_cpu, | ||
3727 | enum cpu_idle_type idle, int load_idx, int *sd_idle, | ||
3728 | int local_group, const struct cpumask *cpus, | ||
3729 | int *balance, struct sg_lb_stats *sgs) | ||
3730 | { | ||
3731 | unsigned long load, max_cpu_load, min_cpu_load; | ||
3732 | int i; | ||
3733 | unsigned int balance_cpu = -1, first_idle_cpu = 0; | ||
3734 | unsigned long sum_avg_load_per_task; | ||
3735 | unsigned long avg_load_per_task; | ||
3736 | |||
3737 | if (local_group) { | ||
3738 | balance_cpu = group_first_cpu(group); | ||
3739 | if (balance_cpu == this_cpu) | ||
3740 | update_group_power(sd, this_cpu); | ||
3741 | } | ||
3742 | |||
3743 | /* Tally up the load of all CPUs in the group */ | ||
3744 | sum_avg_load_per_task = avg_load_per_task = 0; | ||
3745 | max_cpu_load = 0; | ||
3746 | min_cpu_load = ~0UL; | ||
3747 | |||
3748 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { | ||
3749 | struct rq *rq = cpu_rq(i); | ||
3750 | |||
3751 | if (*sd_idle && rq->nr_running) | ||
3752 | *sd_idle = 0; | ||
3753 | |||
3754 | /* Bias balancing toward cpus of our domain */ | ||
3755 | if (local_group) { | ||
3756 | if (idle_cpu(i) && !first_idle_cpu) { | ||
3757 | first_idle_cpu = 1; | ||
3758 | balance_cpu = i; | ||
3759 | } | ||
3760 | |||
3761 | load = target_load(i, load_idx); | ||
3762 | } else { | ||
3763 | load = source_load(i, load_idx); | ||
3764 | if (load > max_cpu_load) | ||
3765 | max_cpu_load = load; | ||
3766 | if (min_cpu_load > load) | ||
3767 | min_cpu_load = load; | ||
3768 | } | ||
3769 | |||
3770 | sgs->group_load += load; | ||
3771 | sgs->sum_nr_running += rq->nr_running; | ||
3772 | sgs->sum_weighted_load += weighted_cpuload(i); | ||
3773 | |||
3774 | sum_avg_load_per_task += cpu_avg_load_per_task(i); | ||
3775 | } | ||
3776 | |||
3777 | /* | ||
3778 | * The first idle cpu or the first cpu (busiest) in this sched group | ||
3779 | * is eligible for doing load balancing at this and above | ||
3780 | * domains. In the newly idle case, we will allow all the cpus | ||
3781 | * to do the newly idle load balance. | ||
3782 | */ | ||
3783 | if (idle != CPU_NEWLY_IDLE && local_group && | ||
3784 | balance_cpu != this_cpu && balance) { | ||
3785 | *balance = 0; | ||
3786 | return; | ||
3787 | } | ||
3788 | |||
3789 | /* Adjust by relative CPU power of the group */ | ||
3790 | sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power; | ||
3791 | |||
3792 | |||
3793 | /* | ||
3794 | * Consider the group unbalanced when the imbalance is larger | ||
3795 | * than the average weight of two tasks. | ||
3796 | * | ||
3797 | * APZ: with cgroup the avg task weight can vary wildly and | ||
3798 | * might not be a suitable number - should we keep a | ||
3799 | * normalized nr_running number somewhere that negates | ||
3800 | * the hierarchy? | ||
3801 | */ | ||
3802 | avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) / | ||
3803 | group->cpu_power; | ||
3804 | |||
3805 | if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) | ||
3806 | sgs->group_imb = 1; | ||
3807 | |||
3808 | sgs->group_capacity = | ||
3809 | DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); | ||
3810 | } | ||
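A small illustration of the power adjustment above (stand-alone, sample figures only): dividing the raw group load by the group's cpu_power means a weak group is reported as proportionally more loaded than a full-strength group carrying the same weight, and group_imb flags a lopsided spread inside the group.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

int main(void)
{
	/* Hypothetical group: two SMT siblings rated at 512 "power" each. */
	unsigned long group_load = 3072;	/* summed weighted load         */
	unsigned long cpu_power  = 1024;	/* group->cpu_power (512 + 512) */
	unsigned long max_cpu_load = 2048, min_cpu_load = 1024;
	unsigned long avg_load_per_task = 512;

	unsigned long avg_load = group_load * SCHED_LOAD_SCALE / cpu_power;
	int group_imb = (max_cpu_load - min_cpu_load) > 2 * avg_load_per_task;

	/* avg_load == 3072: the pair looks like three nominal CPUs' worth of load */
	printf("avg_load=%lu group_imb=%d\n", avg_load, group_imb);
	return 0;
}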
3811 | |||
3812 | /** | ||
3813 | * update_sd_lb_stats - Update sched_group's statistics for load balancing. | ||
3814 | * @sd: sched_domain whose statistics are to be updated. | ||
3815 | * @this_cpu: Cpu for which load balance is currently performed. | ||
3816 | * @idle: Idle status of this_cpu | ||
3817 | * @sd_idle: Idle status of the sched_domain containing group. | ||
3818 | * @cpus: Set of cpus considered for load balancing. | ||
3819 | * @balance: Should we balance. | ||
3820 | * @sds: variable to hold the statistics for this sched_domain. | ||
3821 | */ | ||
3822 | static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, | ||
3823 | enum cpu_idle_type idle, int *sd_idle, | ||
3824 | const struct cpumask *cpus, int *balance, | ||
3825 | struct sd_lb_stats *sds) | ||
3826 | { | ||
3827 | struct sched_domain *child = sd->child; | ||
3828 | struct sched_group *group = sd->groups; | ||
3829 | struct sg_lb_stats sgs; | ||
3830 | int load_idx, prefer_sibling = 0; | ||
3831 | |||
3832 | if (child && child->flags & SD_PREFER_SIBLING) | ||
3833 | prefer_sibling = 1; | ||
3834 | |||
3835 | init_sd_power_savings_stats(sd, sds, idle); | ||
3836 | load_idx = get_sd_load_idx(sd, idle); | ||
3837 | |||
3838 | do { | ||
3839 | int local_group; | ||
3840 | |||
3841 | local_group = cpumask_test_cpu(this_cpu, | ||
3842 | sched_group_cpus(group)); | ||
3843 | memset(&sgs, 0, sizeof(sgs)); | ||
3844 | update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle, | ||
3845 | local_group, cpus, balance, &sgs); | ||
3846 | |||
3847 | if (local_group && balance && !(*balance)) | ||
3848 | return; | ||
3849 | |||
3850 | sds->total_load += sgs.group_load; | ||
3851 | sds->total_pwr += group->cpu_power; | ||
3852 | |||
3853 | /* | ||
3854 | * In case the child domain prefers that tasks go to siblings | ||
3855 | * first, lower the group capacity to one so that we'll try | ||
3856 | * to move all the excess tasks away. | ||
3857 | */ | ||
3858 | if (prefer_sibling) | ||
3859 | sgs.group_capacity = min(sgs.group_capacity, 1UL); | ||
3860 | |||
3861 | if (local_group) { | ||
3862 | sds->this_load = sgs.avg_load; | ||
3863 | sds->this = group; | ||
3864 | sds->this_nr_running = sgs.sum_nr_running; | ||
3865 | sds->this_load_per_task = sgs.sum_weighted_load; | ||
3866 | } else if (sgs.avg_load > sds->max_load && | ||
3867 | (sgs.sum_nr_running > sgs.group_capacity || | ||
3868 | sgs.group_imb)) { | ||
3869 | sds->max_load = sgs.avg_load; | ||
3870 | sds->busiest = group; | ||
3871 | sds->busiest_nr_running = sgs.sum_nr_running; | ||
3872 | sds->busiest_load_per_task = sgs.sum_weighted_load; | ||
3873 | sds->group_imb = sgs.group_imb; | ||
3874 | } | ||
3875 | |||
3876 | update_sd_power_savings_stats(group, sds, local_group, &sgs); | ||
3877 | group = group->next; | ||
3878 | } while (group != sd->groups); | ||
3879 | } | ||
3880 | |||
3881 | /** | ||
3882 | * fix_small_imbalance - Calculate the minor imbalance that exists | ||
3883 | * amongst the groups of a sched_domain, during | ||
3884 | * load balancing. | ||
3885 | * @sds: Statistics of the sched_domain whose imbalance is to be calculated. | ||
3886 | * @this_cpu: The cpu at whose sched_domain we're performing load-balance. | ||
3887 | * @imbalance: Variable to store the imbalance. | ||
3888 | */ | ||
3889 | static inline void fix_small_imbalance(struct sd_lb_stats *sds, | ||
3890 | int this_cpu, unsigned long *imbalance) | ||
3891 | { | ||
3892 | unsigned long tmp, pwr_now = 0, pwr_move = 0; | ||
3893 | unsigned int imbn = 2; | ||
3894 | |||
3895 | if (sds->this_nr_running) { | ||
3896 | sds->this_load_per_task /= sds->this_nr_running; | ||
3897 | if (sds->busiest_load_per_task > | ||
3898 | sds->this_load_per_task) | ||
3899 | imbn = 1; | ||
3900 | } else | ||
3901 | sds->this_load_per_task = | ||
3902 | cpu_avg_load_per_task(this_cpu); | ||
3903 | |||
3904 | if (sds->max_load - sds->this_load + sds->busiest_load_per_task >= | ||
3905 | sds->busiest_load_per_task * imbn) { | ||
3906 | *imbalance = sds->busiest_load_per_task; | ||
3907 | return; | ||
3908 | } | ||
3909 | |||
3910 | /* | ||
3911 | * OK, we don't have enough imbalance to justify moving tasks, | ||
3912 | * however we may be able to increase total CPU power used by | ||
3913 | * moving them. | ||
3914 | */ | ||
3915 | |||
3916 | pwr_now += sds->busiest->cpu_power * | ||
3917 | min(sds->busiest_load_per_task, sds->max_load); | ||
3918 | pwr_now += sds->this->cpu_power * | ||
3919 | min(sds->this_load_per_task, sds->this_load); | ||
3920 | pwr_now /= SCHED_LOAD_SCALE; | ||
3921 | |||
3922 | /* Amount of load we'd subtract */ | ||
3923 | tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) / | ||
3924 | sds->busiest->cpu_power; | ||
3925 | if (sds->max_load > tmp) | ||
3926 | pwr_move += sds->busiest->cpu_power * | ||
3927 | min(sds->busiest_load_per_task, sds->max_load - tmp); | ||
3928 | |||
3929 | /* Amount of load we'd add */ | ||
3930 | if (sds->max_load * sds->busiest->cpu_power < | ||
3931 | sds->busiest_load_per_task * SCHED_LOAD_SCALE) | ||
3932 | tmp = (sds->max_load * sds->busiest->cpu_power) / | ||
3933 | sds->this->cpu_power; | ||
3934 | else | ||
3935 | tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) / | ||
3936 | sds->this->cpu_power; | ||
3937 | pwr_move += sds->this->cpu_power * | ||
3938 | min(sds->this_load_per_task, sds->this_load + tmp); | ||
3939 | pwr_move /= SCHED_LOAD_SCALE; | ||
3940 | |||
3941 | /* Move if we gain throughput */ | ||
3942 | if (pwr_move > pwr_now) | ||
3943 | *imbalance = sds->busiest_load_per_task; | ||
3944 | } | ||
3945 | |||
3946 | /** | ||
3947 | * calculate_imbalance - Calculate the amount of imbalance present within the | ||
3948 | * groups of a given sched_domain during load balance. | ||
3949 | * @sds: statistics of the sched_domain whose imbalance is to be calculated. | ||
3950 | * @this_cpu: Cpu for which currently load balance is being performed. | ||
3951 | * @imbalance: The variable to store the imbalance. | ||
3952 | */ | ||
3953 | static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, | ||
3954 | unsigned long *imbalance) | ||
3955 | { | ||
3956 | unsigned long max_pull; | ||
3957 | /* | ||
3958 | * In the presence of smp nice balancing, certain scenarios can have | ||
3959 | * max load less than avg load (as we skip the groups at or below | ||
3960 | * its cpu_power while calculating max_load). | ||
3961 | */ | ||
3962 | if (sds->max_load < sds->avg_load) { | ||
3963 | *imbalance = 0; | ||
3964 | return fix_small_imbalance(sds, this_cpu, imbalance); | ||
3965 | } | ||
3966 | |||
3967 | /* Don't want to pull so many tasks that a group would go idle */ | ||
3968 | max_pull = min(sds->max_load - sds->avg_load, | ||
3969 | sds->max_load - sds->busiest_load_per_task); | ||
3970 | |||
3971 | /* How much load to actually move to equalise the imbalance */ | ||
3972 | *imbalance = min(max_pull * sds->busiest->cpu_power, | ||
3973 | (sds->avg_load - sds->this_load) * sds->this->cpu_power) | ||
3974 | / SCHED_LOAD_SCALE; | ||
3975 | |||
3976 | /* | ||
3977 | * If *imbalance is less than the average load per runnable task, | ||
3978 | * there is no guarantee that any tasks will be moved, so we have | ||
3979 | * to think about bumping its value to force at least one task to be | ||
3980 | * moved. | ||
3981 | */ | ||
3982 | if (*imbalance < sds->busiest_load_per_task) | ||
3983 | return fix_small_imbalance(sds, this_cpu, imbalance); | ||
3984 | |||
3985 | } | ||
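A hedged numeric walk-through of the formula above (all values invented, nominal cpu_power of 1024 on both sides):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Invented sched_domain snapshot, loads in weighted-load units. */
	unsigned long max_load = 2048, avg_load = 1280, this_load = 512;
	unsigned long busiest_load_per_task = 512;
	unsigned long busiest_power = 1024, this_power = 1024;

	/* Don't pull so much that the busiest group drops below the average. */
	unsigned long max_pull = min_ul(max_load - avg_load,
					max_load - busiest_load_per_task);

	unsigned long imbalance = min_ul(max_pull * busiest_power,
					 (avg_load - this_load) * this_power)
				  / SCHED_LOAD_SCALE;

	/* max_pull = min(768, 1536) = 768; imbalance = min(768, 768) = 768 */
	printf("imbalance=%lu (>= one %lu-weight task, no fixup needed)\n",
	       imbalance, busiest_load_per_task);
	return 0;
}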
3986 | /******* find_busiest_group() helpers end here *********************/ | ||
3987 | |||
3988 | /** | ||
3989 | * find_busiest_group - Returns the busiest group within the sched_domain | ||
3990 | * if there is an imbalance. If there isn't an imbalance, and | ||
3991 | * the user has opted for power-savings, it returns a group whose | ||
3992 | * CPUs can be put to idle by rebalancing those tasks elsewhere, if | ||
3993 | * such a group exists. | ||
3994 | * | ||
3995 | * Also calculates the amount of weighted load which should be moved | ||
3996 | * to restore balance. | ||
3997 | * | ||
3998 | * @sd: The sched_domain whose busiest group is to be returned. | ||
3999 | * @this_cpu: The cpu for which load balancing is currently being performed. | ||
4000 | * @imbalance: Variable which stores amount of weighted load which should | ||
4001 | * be moved to restore balance/put a group to idle. | ||
4002 | * @idle: The idle status of this_cpu. | ||
4003 | * @sd_idle: The idleness of sd | ||
4004 | * @cpus: The set of CPUs under consideration for load-balancing. | ||
4005 | * @balance: Pointer to a variable indicating if this_cpu | ||
4006 | * is the appropriate cpu to perform load balancing at this_level. | ||
4007 | * | ||
4008 | * Returns: - the busiest group if imbalance exists. | ||
4009 | * - If no imbalance and user has opted for power-savings balance, | ||
4010 | * return the least loaded group whose CPUs can be | ||
4011 | * put to idle by rebalancing its tasks onto our group. | ||
4012 | */ | ||
4013 | static struct sched_group * | ||
4014 | find_busiest_group(struct sched_domain *sd, int this_cpu, | ||
4015 | unsigned long *imbalance, enum cpu_idle_type idle, | ||
4016 | int *sd_idle, const struct cpumask *cpus, int *balance) | ||
4017 | { | ||
4018 | struct sd_lb_stats sds; | ||
4019 | |||
4020 | memset(&sds, 0, sizeof(sds)); | ||
4021 | |||
4022 | /* | ||
4023 | * Compute the various statistics relevant for load balancing at | ||
4024 | * this level. | ||
4025 | */ | ||
4026 | update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus, | ||
4027 | balance, &sds); | ||
4028 | |||
4029 | /* Cases where imbalance does not exist from POV of this_cpu */ | ||
4030 | /* 1) this_cpu is not the appropriate cpu to perform load balancing | ||
4031 | * at this level. | ||
4032 | * 2) There is no busy sibling group to pull from. | ||
4033 | * 3) This group is the busiest group. | ||
4034 | * 4) This group is busier than the average busyness at this | ||
4035 | * sched_domain. | ||
4036 | * 5) The imbalance is within the specified limit. | ||
4037 | * 6) Any rebalance would lead to ping-pong | ||
4038 | */ | ||
4039 | if (balance && !(*balance)) | ||
4040 | goto ret; | ||
4041 | |||
4042 | if (!sds.busiest || sds.busiest_nr_running == 0) | ||
4043 | goto out_balanced; | ||
4044 | |||
4045 | if (sds.this_load >= sds.max_load) | ||
4046 | goto out_balanced; | ||
4047 | |||
4048 | sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; | ||
4049 | |||
4050 | if (sds.this_load >= sds.avg_load) | ||
4051 | goto out_balanced; | ||
4052 | |||
4053 | if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) | ||
4054 | goto out_balanced; | ||
4055 | |||
4056 | sds.busiest_load_per_task /= sds.busiest_nr_running; | ||
4057 | if (sds.group_imb) | ||
4058 | sds.busiest_load_per_task = | ||
4059 | min(sds.busiest_load_per_task, sds.avg_load); | ||
4060 | |||
4061 | /* | ||
4062 | * We're trying to get all the cpus to the average_load, so we don't | ||
4063 | * want to push ourselves above the average load, nor do we wish to | ||
4064 | * reduce the max loaded cpu below the average load, as either of these | ||
4065 | * actions would just result in more rebalancing later, and ping-pong | ||
4066 | * tasks around. Thus we look for the minimum possible imbalance. | ||
4067 | * Negative imbalances (*we* are more loaded than anyone else) will | ||
4068 | * be counted as no imbalance for these purposes -- we can't fix that | ||
4069 | * by pulling tasks to us. Be careful of negative numbers as they'll | ||
4070 | * appear as very large values with unsigned longs. | ||
4071 | */ | ||
4072 | if (sds.max_load <= sds.busiest_load_per_task) | ||
4073 | goto out_balanced; | ||
4074 | |||
4075 | /* Looks like there is an imbalance. Compute it */ | ||
4076 | calculate_imbalance(&sds, this_cpu, imbalance); | ||
4077 | return sds.busiest; | ||
4078 | |||
4079 | out_balanced: | ||
4080 | /* | ||
4081 | * There is no obvious imbalance. But check if we can do some balancing | ||
4082 | * to save power. | ||
4083 | */ | ||
4084 | if (check_power_save_busiest_group(&sds, this_cpu, imbalance)) | ||
4085 | return sds.busiest; | ||
4086 | ret: | ||
4087 | *imbalance = 0; | ||
4088 | return NULL; | ||
4089 | } | ||
4090 | |||
4091 | /* | ||
4092 | * find_busiest_queue - find the busiest runqueue among the cpus in group. | ||
4093 | */ | ||
4094 | static struct rq * | ||
4095 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | ||
4096 | unsigned long imbalance, const struct cpumask *cpus) | ||
4097 | { | ||
4098 | struct rq *busiest = NULL, *rq; | ||
4099 | unsigned long max_load = 0; | ||
4100 | int i; | ||
4101 | |||
4102 | for_each_cpu(i, sched_group_cpus(group)) { | ||
4103 | unsigned long power = power_of(i); | ||
4104 | unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE); | ||
4105 | unsigned long wl; | ||
4106 | |||
4107 | if (!cpumask_test_cpu(i, cpus)) | ||
4108 | continue; | ||
4109 | |||
4110 | rq = cpu_rq(i); | ||
4111 | wl = weighted_cpuload(i) * SCHED_LOAD_SCALE; | ||
4112 | wl /= power; | ||
4113 | |||
4114 | if (capacity && rq->nr_running == 1 && wl > imbalance) | ||
4115 | continue; | ||
4116 | |||
4117 | if (wl > max_load) { | ||
4118 | max_load = wl; | ||
4119 | busiest = rq; | ||
4120 | } | ||
4121 | } | ||
4122 | |||
4123 | return busiest; | ||
4124 | } | ||
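The comparison above normalizes each runqueue's weighted load by its cpu_power, so a weaker CPU can be the busiest queue even though it carries less raw load. A tiny stand-alone illustration (figures invented):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

int main(void)
{
	/* Two hypothetical runqueues in the busiest group. */
	unsigned long load_a = 1024, power_a = 1024;	/* full core          */
	unsigned long load_b = 800,  power_b = 512;	/* weaker SMT sibling */

	unsigned long wl_a = load_a * SCHED_LOAD_SCALE / power_a;	/* 1024 */
	unsigned long wl_b = load_b * SCHED_LOAD_SCALE / power_b;	/* 1600 */

	/* Relative to capacity, the sibling is busier despite less raw load. */
	printf("wl_a=%lu wl_b=%lu -> busiest is %s\n",
	       wl_a, wl_b, wl_b > wl_a ? "b" : "a");
	return 0;
}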
4125 | |||
4126 | /* | ||
4127 | * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but | ||
4128 | * it works so long as it is large enough. | ||
4129 | */ | ||
4130 | #define MAX_PINNED_INTERVAL 512 | ||
4131 | |||
4132 | /* Working cpumask for load_balance and load_balance_newidle. */ | ||
4133 | static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask); | ||
4134 | |||
4135 | /* | ||
4136 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | ||
4137 | * tasks if there is an imbalance. | ||
4138 | */ | ||
4139 | static int load_balance(int this_cpu, struct rq *this_rq, | ||
4140 | struct sched_domain *sd, enum cpu_idle_type idle, | ||
4141 | int *balance) | ||
4142 | { | ||
4143 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | ||
4144 | struct sched_group *group; | ||
4145 | unsigned long imbalance; | ||
4146 | struct rq *busiest; | ||
4147 | unsigned long flags; | ||
4148 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | ||
4149 | |||
4150 | cpumask_copy(cpus, cpu_active_mask); | ||
4151 | |||
4152 | /* | ||
4153 | * When power savings policy is enabled for the parent domain, an idle | ||
4154 | * sibling can pick up load irrespective of busy siblings. In this case, | ||
4155 | * let the state of idle sibling percolate up as CPU_IDLE, instead of | ||
4156 | * portraying it as CPU_NOT_IDLE. | ||
4157 | */ | ||
4158 | if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && | ||
4159 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4160 | sd_idle = 1; | ||
4161 | |||
4162 | schedstat_inc(sd, lb_count[idle]); | ||
4163 | |||
4164 | redo: | ||
4165 | update_shares(sd); | ||
4166 | group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, | ||
4167 | cpus, balance); | ||
4168 | |||
4169 | if (*balance == 0) | ||
4170 | goto out_balanced; | ||
4171 | |||
4172 | if (!group) { | ||
4173 | schedstat_inc(sd, lb_nobusyg[idle]); | ||
4174 | goto out_balanced; | ||
4175 | } | ||
4176 | |||
4177 | busiest = find_busiest_queue(group, idle, imbalance, cpus); | ||
4178 | if (!busiest) { | ||
4179 | schedstat_inc(sd, lb_nobusyq[idle]); | ||
4180 | goto out_balanced; | ||
4181 | } | ||
4182 | |||
4183 | BUG_ON(busiest == this_rq); | ||
4184 | |||
4185 | schedstat_add(sd, lb_imbalance[idle], imbalance); | ||
4186 | |||
4187 | ld_moved = 0; | ||
4188 | if (busiest->nr_running > 1) { | ||
4189 | /* | ||
4190 | * Attempt to move tasks. If find_busiest_group has found | ||
4191 | * an imbalance but busiest->nr_running <= 1, the group is | ||
4192 | * still unbalanced. ld_moved simply stays zero, so it is | ||
4193 | * correctly treated as an imbalance. | ||
4194 | */ | ||
4195 | local_irq_save(flags); | ||
4196 | double_rq_lock(this_rq, busiest); | ||
4197 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | ||
4198 | imbalance, sd, idle, &all_pinned); | ||
4199 | double_rq_unlock(this_rq, busiest); | ||
4200 | local_irq_restore(flags); | ||
4201 | |||
4202 | /* | ||
4203 | * some other cpu did the load balance for us. | ||
4204 | */ | ||
4205 | if (ld_moved && this_cpu != smp_processor_id()) | ||
4206 | resched_cpu(this_cpu); | ||
4207 | |||
4208 | /* All tasks on this runqueue were pinned by CPU affinity */ | ||
4209 | if (unlikely(all_pinned)) { | ||
4210 | cpumask_clear_cpu(cpu_of(busiest), cpus); | ||
4211 | if (!cpumask_empty(cpus)) | ||
4212 | goto redo; | ||
4213 | goto out_balanced; | ||
4214 | } | ||
4215 | } | ||
4216 | |||
4217 | if (!ld_moved) { | ||
4218 | schedstat_inc(sd, lb_failed[idle]); | ||
4219 | sd->nr_balance_failed++; | ||
4220 | |||
4221 | if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { | ||
4222 | |||
4223 | raw_spin_lock_irqsave(&busiest->lock, flags); | ||
4224 | |||
4225 | /* don't kick the migration_thread if the curr | ||
4226 | * task on the busiest cpu can't be moved to this_cpu | ||
4227 | */ | ||
4228 | if (!cpumask_test_cpu(this_cpu, | ||
4229 | &busiest->curr->cpus_allowed)) { | ||
4230 | raw_spin_unlock_irqrestore(&busiest->lock, | ||
4231 | flags); | ||
4232 | all_pinned = 1; | ||
4233 | goto out_one_pinned; | ||
4234 | } | ||
4235 | |||
4236 | if (!busiest->active_balance) { | ||
4237 | busiest->active_balance = 1; | ||
4238 | busiest->push_cpu = this_cpu; | ||
4239 | active_balance = 1; | ||
4240 | } | ||
4241 | raw_spin_unlock_irqrestore(&busiest->lock, flags); | ||
4242 | if (active_balance) | ||
4243 | wake_up_process(busiest->migration_thread); | ||
4244 | |||
4245 | /* | ||
4246 | * We've kicked active balancing, reset the failure | ||
4247 | * counter. | ||
4248 | */ | ||
4249 | sd->nr_balance_failed = sd->cache_nice_tries+1; | ||
4250 | } | ||
4251 | } else | ||
4252 | sd->nr_balance_failed = 0; | ||
4253 | |||
4254 | if (likely(!active_balance)) { | ||
4255 | /* We were unbalanced, so reset the balancing interval */ | ||
4256 | sd->balance_interval = sd->min_interval; | ||
4257 | } else { | ||
4258 | /* | ||
4259 | * If we've begun active balancing, start to back off. This | ||
4260 | * case may not be covered by the all_pinned logic if there | ||
4261 | * is only 1 task on the busy runqueue (because we don't call | ||
4262 | * move_tasks). | ||
4263 | */ | ||
4264 | if (sd->balance_interval < sd->max_interval) | ||
4265 | sd->balance_interval *= 2; | ||
4266 | } | ||
4267 | |||
4268 | if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && | ||
4269 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4270 | ld_moved = -1; | ||
4271 | |||
4272 | goto out; | ||
4273 | |||
4274 | out_balanced: | ||
4275 | schedstat_inc(sd, lb_balanced[idle]); | ||
4276 | |||
4277 | sd->nr_balance_failed = 0; | ||
4278 | |||
4279 | out_one_pinned: | ||
4280 | /* tune up the balancing interval */ | ||
4281 | if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) || | ||
4282 | (sd->balance_interval < sd->max_interval)) | ||
4283 | sd->balance_interval *= 2; | ||
4284 | |||
4285 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | ||
4286 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4287 | ld_moved = -1; | ||
4288 | else | ||
4289 | ld_moved = 0; | ||
4290 | out: | ||
4291 | if (ld_moved) | ||
4292 | update_shares(sd); | ||
4293 | return ld_moved; | ||
4294 | } | ||
4295 | |||
4296 | /* | ||
4297 | * Check this_cpu to ensure it is balanced within domain. Attempt to move | ||
4298 | * tasks if there is an imbalance. | ||
4299 | * | ||
4300 | * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE). | ||
4301 | * this_rq is locked. | ||
4302 | */ | ||
4303 | static int | ||
4304 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | ||
4305 | { | ||
4306 | struct sched_group *group; | ||
4307 | struct rq *busiest = NULL; | ||
4308 | unsigned long imbalance; | ||
4309 | int ld_moved = 0; | ||
4310 | int sd_idle = 0; | ||
4311 | int all_pinned = 0; | ||
4312 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | ||
4313 | |||
4314 | cpumask_copy(cpus, cpu_active_mask); | ||
4315 | |||
4316 | /* | ||
4317 | * When power savings policy is enabled for the parent domain, an idle | ||
4318 | * sibling can pick up load irrespective of busy siblings. In this case, | ||
4319 | * let the state of idle sibling percolate up as IDLE, instead of | ||
4320 | * portraying it as CPU_NOT_IDLE. | ||
4321 | */ | ||
4322 | if (sd->flags & SD_SHARE_CPUPOWER && | ||
4323 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4324 | sd_idle = 1; | ||
4325 | |||
4326 | schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]); | ||
4327 | redo: | ||
4328 | update_shares_locked(this_rq, sd); | ||
4329 | group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE, | ||
4330 | &sd_idle, cpus, NULL); | ||
4331 | if (!group) { | ||
4332 | schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]); | ||
4333 | goto out_balanced; | ||
4334 | } | ||
4335 | |||
4336 | busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus); | ||
4337 | if (!busiest) { | ||
4338 | schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]); | ||
4339 | goto out_balanced; | ||
4340 | } | ||
4341 | |||
4342 | BUG_ON(busiest == this_rq); | ||
4343 | |||
4344 | schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance); | ||
4345 | |||
4346 | ld_moved = 0; | ||
4347 | if (busiest->nr_running > 1) { | ||
4348 | /* Attempt to move tasks */ | ||
4349 | double_lock_balance(this_rq, busiest); | ||
4350 | /* this_rq->clock is already updated */ | ||
4351 | update_rq_clock(busiest); | ||
4352 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | ||
4353 | imbalance, sd, CPU_NEWLY_IDLE, | ||
4354 | &all_pinned); | ||
4355 | double_unlock_balance(this_rq, busiest); | ||
4356 | |||
4357 | if (unlikely(all_pinned)) { | ||
4358 | cpumask_clear_cpu(cpu_of(busiest), cpus); | ||
4359 | if (!cpumask_empty(cpus)) | ||
4360 | goto redo; | ||
4361 | } | ||
4362 | } | ||
4363 | |||
4364 | if (!ld_moved) { | ||
4365 | int active_balance = 0; | ||
4366 | |||
4367 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | ||
4368 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | ||
4369 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4370 | return -1; | ||
4371 | |||
4372 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
4373 | return -1; | ||
4374 | |||
4375 | if (sd->nr_balance_failed++ < 2) | ||
4376 | return -1; | ||
4377 | |||
4378 | /* | ||
4379 | * The only task running on a non-idle cpu can be moved to this | ||
4380 | * cpu in an attempt to completely free up the other CPU | ||
4381 | * package. The same method used to move a task in load_balance() | ||
4382 | * has been extended for load_balance_newidle() to speed up | ||
4383 | * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2). | ||
4384 | * | ||
4385 | * The package power saving logic comes from | ||
4386 | * find_busiest_group(). If there is no imbalance, then | ||
4387 | * f_b_g() will return NULL. However, when sched_mc={1,2}, | ||
4388 | * f_b_g() will select a group from which a running task may be | ||
4389 | * pulled to this cpu in order to make the other package idle. | ||
4390 | * If there is no opportunity to make a package idle and | ||
4391 | * there is no imbalance, then f_b_g() will return NULL and no | ||
4392 | * action will be taken in load_balance_newidle(). | ||
4393 | * | ||
4394 | * Under normal task pull operation due to imbalance, there | ||
4395 | * will be more than one task in the source run queue and | ||
4396 | * move_tasks() will succeed. ld_moved will be true and this | ||
4397 | * active balance code will not be triggered. | ||
4398 | */ | ||
4399 | |||
4400 | /* Lock busiest in correct order while this_rq is held */ | ||
4401 | double_lock_balance(this_rq, busiest); | ||
4402 | |||
4403 | /* | ||
4404 | * don't kick the migration_thread if the curr | ||
4405 | * task on the busiest cpu can't be moved to this_cpu | ||
4406 | */ | ||
4407 | if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { | ||
4408 | double_unlock_balance(this_rq, busiest); | ||
4409 | all_pinned = 1; | ||
4410 | return ld_moved; | ||
4411 | } | ||
4412 | |||
4413 | if (!busiest->active_balance) { | ||
4414 | busiest->active_balance = 1; | ||
4415 | busiest->push_cpu = this_cpu; | ||
4416 | active_balance = 1; | ||
4417 | } | ||
4418 | |||
4419 | double_unlock_balance(this_rq, busiest); | ||
4420 | /* | ||
4421 | * Should not call ttwu while holding a rq->lock | ||
4422 | */ | ||
4423 | raw_spin_unlock(&this_rq->lock); | ||
4424 | if (active_balance) | ||
4425 | wake_up_process(busiest->migration_thread); | ||
4426 | raw_spin_lock(&this_rq->lock); | ||
4427 | |||
4428 | } else | ||
4429 | sd->nr_balance_failed = 0; | ||
4430 | |||
4431 | update_shares_locked(this_rq, sd); | ||
4432 | return ld_moved; | ||
4433 | |||
4434 | out_balanced: | ||
4435 | schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]); | ||
4436 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | ||
4437 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | ||
4438 | return -1; | ||
4439 | sd->nr_balance_failed = 0; | ||
4440 | |||
4441 | return 0; | ||
4442 | } | ||
4443 | |||
4444 | /* | ||
4445 | * idle_balance is called by schedule() if this_cpu is about to become | ||
4446 | * idle. Attempts to pull tasks from other CPUs. | ||
4447 | */ | ||
4448 | static void idle_balance(int this_cpu, struct rq *this_rq) | ||
4449 | { | ||
4450 | struct sched_domain *sd; | ||
4451 | int pulled_task = 0; | ||
4452 | unsigned long next_balance = jiffies + HZ; | ||
4453 | |||
4454 | this_rq->idle_stamp = this_rq->clock; | ||
4455 | |||
4456 | if (this_rq->avg_idle < sysctl_sched_migration_cost) | ||
4457 | return; | ||
4458 | |||
4459 | for_each_domain(this_cpu, sd) { | ||
4460 | unsigned long interval; | ||
4461 | |||
4462 | if (!(sd->flags & SD_LOAD_BALANCE)) | ||
4463 | continue; | ||
4464 | |||
4465 | if (sd->flags & SD_BALANCE_NEWIDLE) | ||
4466 | /* If we've pulled tasks over stop searching: */ | ||
4467 | pulled_task = load_balance_newidle(this_cpu, this_rq, | ||
4468 | sd); | ||
4469 | |||
4470 | interval = msecs_to_jiffies(sd->balance_interval); | ||
4471 | if (time_after(next_balance, sd->last_balance + interval)) | ||
4472 | next_balance = sd->last_balance + interval; | ||
4473 | if (pulled_task) { | ||
4474 | this_rq->idle_stamp = 0; | ||
4475 | break; | ||
4476 | } | ||
4477 | } | ||
4478 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | ||
4479 | /* | ||
4480 | * We are going idle. next_balance may be set based on | ||
4481 | * a busy processor. So reset next_balance. | ||
4482 | */ | ||
4483 | this_rq->next_balance = next_balance; | ||
4484 | } | ||
4485 | } | ||
4486 | |||
4487 | /* | ||
4488 | * active_load_balance is run by migration threads. It pushes running tasks | ||
4489 | * off the busiest CPU onto idle CPUs. It requires at least 1 task to be | ||
4490 | * running on each physical CPU where possible, and avoids physical / | ||
4491 | * logical imbalances. | ||
4492 | * | ||
4493 | * Called with busiest_rq locked. | ||
4494 | */ | ||
4495 | static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | ||
4496 | { | ||
4497 | int target_cpu = busiest_rq->push_cpu; | ||
4498 | struct sched_domain *sd; | ||
4499 | struct rq *target_rq; | ||
4500 | |||
4501 | /* Is there any task to move? */ | ||
4502 | if (busiest_rq->nr_running <= 1) | ||
4503 | return; | ||
4504 | |||
4505 | target_rq = cpu_rq(target_cpu); | ||
4506 | |||
4507 | /* | ||
4508 | * This condition is "impossible"; if it occurs | ||
4509 | * we need to fix it. Originally reported by | ||
4510 | * Bjorn Helgaas on a 128-cpu setup. | ||
4511 | */ | ||
4512 | BUG_ON(busiest_rq == target_rq); | ||
4513 | |||
4514 | /* move a task from busiest_rq to target_rq */ | ||
4515 | double_lock_balance(busiest_rq, target_rq); | ||
4516 | update_rq_clock(busiest_rq); | ||
4517 | update_rq_clock(target_rq); | ||
4518 | |||
4519 | /* Search for an sd spanning us and the target CPU. */ | ||
4520 | for_each_domain(target_cpu, sd) { | ||
4521 | if ((sd->flags & SD_LOAD_BALANCE) && | ||
4522 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) | ||
4523 | break; | ||
4524 | } | ||
4525 | |||
4526 | if (likely(sd)) { | ||
4527 | schedstat_inc(sd, alb_count); | ||
4528 | |||
4529 | if (move_one_task(target_rq, target_cpu, busiest_rq, | ||
4530 | sd, CPU_IDLE)) | ||
4531 | schedstat_inc(sd, alb_pushed); | ||
4532 | else | ||
4533 | schedstat_inc(sd, alb_failed); | ||
4534 | } | ||
4535 | double_unlock_balance(busiest_rq, target_rq); | ||
4536 | } | ||
4537 | |||
4538 | #ifdef CONFIG_NO_HZ | ||
4539 | static struct { | ||
4540 | atomic_t load_balancer; | ||
4541 | cpumask_var_t cpu_mask; | ||
4542 | cpumask_var_t ilb_grp_nohz_mask; | ||
4543 | } nohz ____cacheline_aligned = { | ||
4544 | .load_balancer = ATOMIC_INIT(-1), | ||
4545 | }; | ||
4546 | |||
4547 | int get_nohz_load_balancer(void) | ||
4548 | { | ||
4549 | return atomic_read(&nohz.load_balancer); | ||
4550 | } | ||
4551 | |||
4552 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
4553 | /** | ||
4554 | * lowest_flag_domain - Return lowest sched_domain containing flag. | ||
4555 | * @cpu: The cpu whose lowest level of sched domain is to | ||
4556 | * be returned. | ||
4557 | * @flag: The flag to check for the lowest sched_domain | ||
4558 | * for the given cpu. | ||
4559 | * | ||
4560 | * Returns the lowest sched_domain of a cpu which contains the given flag. | ||
4561 | */ | ||
4562 | static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) | ||
4563 | { | ||
4564 | struct sched_domain *sd; | ||
4565 | |||
4566 | for_each_domain(cpu, sd) | ||
4567 | if (sd && (sd->flags & flag)) | ||
4568 | break; | ||
4569 | |||
4570 | return sd; | ||
4571 | } | ||
4572 | |||
4573 | /** | ||
4574 | * for_each_flag_domain - Iterates over sched_domains containing the flag. | ||
4575 | * @cpu: The cpu whose domains we're iterating over. | ||
4576 | * @sd: variable holding the value of the power_savings_sd | ||
4577 | * for cpu. | ||
4578 | * @flag: The flag to filter the sched_domains to be iterated. | ||
4579 | * | ||
4580 | * Iterates over all the scheduler domains for a given cpu that has the 'flag' | ||
4581 | * set, starting from the lowest sched_domain to the highest. | ||
4582 | */ | ||
4583 | #define for_each_flag_domain(cpu, sd, flag) \ | ||
4584 | for (sd = lowest_flag_domain(cpu, flag); \ | ||
4585 | (sd && (sd->flags & flag)); sd = sd->parent) | ||
4586 | |||
4587 | /** | ||
4588 | * is_semi_idle_group - Checks if the given sched_group is semi-idle. | ||
4589 | * @ilb_group: group to be checked for semi-idleness | ||
4590 | * | ||
4591 | * Returns: 1 if the group is semi-idle. 0 otherwise. | ||
4592 | * | ||
4593 | * We define a sched_group to be semi-idle if it has at least one idle CPU | ||
4594 | * and at least one non-idle CPU. This helper function checks if the given | ||
4595 | * sched_group is semi-idle or not. | ||
4596 | */ | ||
4597 | static inline int is_semi_idle_group(struct sched_group *ilb_group) | ||
4598 | { | ||
4599 | cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask, | ||
4600 | sched_group_cpus(ilb_group)); | ||
4601 | |||
4602 | /* | ||
4603 | * A sched_group is semi-idle when it has at least one busy cpu | ||
4604 | * and at least one idle cpu. | ||
4605 | */ | ||
4606 | if (cpumask_empty(nohz.ilb_grp_nohz_mask)) | ||
4607 | return 0; | ||
4608 | |||
4609 | if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group))) | ||
4610 | return 0; | ||
4611 | |||
4612 | return 1; | ||
4613 | } | ||
4614 | /** | ||
4615 | * find_new_ilb - Finds the optimum idle load balancer for nomination. | ||
4616 | * @cpu: The cpu which is nominating a new idle_load_balancer. | ||
4617 | * | ||
4618 | * Returns: the id of the idle load balancer if it exists; | ||
4619 | * else, returns >= nr_cpu_ids. | ||
4620 | * | ||
4621 | * This algorithm picks the idle load balancer such that it belongs to a | ||
4622 | * semi-idle powersavings sched_domain. The idea is to try to avoid | ||
4623 | * completely idle packages/cores just for the purpose of idle load balancing | ||
4624 | * when there are other idle cpus which are better suited for that job. | ||
4625 | */ | ||
4626 | static int find_new_ilb(int cpu) | ||
4627 | { | ||
4628 | struct sched_domain *sd; | ||
4629 | struct sched_group *ilb_group; | ||
4630 | |||
4631 | /* | ||
4632 | * Select the idle load balancer from semi-idle packages only | ||
4633 | * when power-aware load balancing is enabled. | ||
4634 | */ | ||
4635 | if (!(sched_smt_power_savings || sched_mc_power_savings)) | ||
4636 | goto out_done; | ||
4637 | |||
4638 | /* | ||
4639 | * Optimize for the case when we have no idle CPUs or only one | ||
4640 | * idle CPU. Don't walk the sched_domain hierarchy in such cases | ||
4641 | */ | ||
4642 | if (cpumask_weight(nohz.cpu_mask) < 2) | ||
4643 | goto out_done; | ||
4644 | |||
4645 | for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) { | ||
4646 | ilb_group = sd->groups; | ||
4647 | |||
4648 | do { | ||
4649 | if (is_semi_idle_group(ilb_group)) | ||
4650 | return cpumask_first(nohz.ilb_grp_nohz_mask); | ||
4651 | |||
4652 | ilb_group = ilb_group->next; | ||
4653 | |||
4654 | } while (ilb_group != sd->groups); | ||
4655 | } | ||
4656 | |||
4657 | out_done: | ||
4658 | return cpumask_first(nohz.cpu_mask); | ||
4659 | } | ||
4660 | #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */ | ||
4661 | static inline int find_new_ilb(int call_cpu) | ||
4662 | { | ||
4663 | return cpumask_first(nohz.cpu_mask); | ||
4664 | } | ||
4665 | #endif | ||
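Under the same bit-mask assumption as above, the selection can be sketched as: walk the power-savings domains from lowest to highest, scan each domain's groups for the first semi-idle one and return that group's first idle CPU, otherwise fall back to the first idle CPU overall. The types below are illustrative, not the kernel's (and the real code additionally bails out early when power savings are off or fewer than two CPUs are idle).

    #include <stdint.h>
    #include <stdio.h>

    struct grp { uint64_t cpus; };
    struct dom { struct grp *groups; int nr_groups; struct dom *parent; };

    static int first_bit(uint64_t m)
    {
        int i;

        for (i = 0; i < 64; i++)
            if (m & (1ULL << i))
                return i;
        return 64;                        /* models ">= nr_cpu_ids" */
    }

    static int pick_ilb(struct dom *lowest, uint64_t nohz_cpus)
    {
        struct dom *sd;

        for (sd = lowest; sd; sd = sd->parent) {
            int i;

            for (i = 0; i < sd->nr_groups; i++) {
                uint64_t idle = sd->groups[i].cpus & nohz_cpus;

                /* semi-idle: some, but not all, CPUs of the group are idle */
                if (idle && idle != sd->groups[i].cpus)
                    return first_bit(idle);
            }
        }
        return first_bit(nohz_cpus);      /* fallback: any idle CPU */
    }

    int main(void)
    {
        struct grp core_groups[2] = { { 0x3 }, { 0xC } };
        struct dom core = { core_groups, 2, NULL };

        /* CPUs 1 and 3 idle: group {0,1} is semi-idle, so CPU 1 is chosen */
        printf("ilb = %d\n", pick_ilb(&core, 0xA));
        return 0;
    }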
4666 | |||
4667 | /* | ||
4668 | * This routine will try to nominate the ilb (idle load balancing) | ||
4669 | * owner among the cpus whose ticks are stopped. ilb owner will do the idle | ||
4670 | * load balancing on behalf of all those cpus. If all the cpus in the system | ||
4671 | * go into this tickless mode, then there will be no ilb owner (as there is | ||
4672 | * no need for one) and all the cpus will sleep till the next wakeup event | ||
4673 | * arrives... | ||
4674 | * | ||
4675 | * For the ilb owner, the tick is not stopped, and that tick will be used | ||
4676 | * for idle load balancing. The ilb owner will still be part of | ||
4677 | * nohz.cpu_mask. | ||
4678 | * | ||
4679 | * While stopping the tick, this cpu will become the ilb owner if there | ||
4680 | * is no other owner, and it remains the owner until that cpu becomes busy | ||
4681 | * or until all cpus in the system stop their ticks, at which point | ||
4682 | * there is no need for an ilb owner. | ||
4683 | * | ||
4684 | * When the ilb owner becomes busy, it nominates another owner during the | ||
4685 | * next busy scheduler_tick(). | ||
4686 | */ | ||
4687 | int select_nohz_load_balancer(int stop_tick) | ||
4688 | { | ||
4689 | int cpu = smp_processor_id(); | ||
4690 | |||
4691 | if (stop_tick) { | ||
4692 | cpu_rq(cpu)->in_nohz_recently = 1; | ||
4693 | |||
4694 | if (!cpu_active(cpu)) { | ||
4695 | if (atomic_read(&nohz.load_balancer) != cpu) | ||
4696 | return 0; | ||
4697 | |||
4698 | /* | ||
4699 | * If we are going offline and still the leader, | ||
4700 | * give up! | ||
4701 | */ | ||
4702 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | ||
4703 | BUG(); | ||
4704 | |||
4705 | return 0; | ||
4706 | } | ||
4707 | |||
4708 | cpumask_set_cpu(cpu, nohz.cpu_mask); | ||
4709 | |||
4710 | /* time for ilb owner also to sleep */ | ||
4711 | if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { | ||
4712 | if (atomic_read(&nohz.load_balancer) == cpu) | ||
4713 | atomic_set(&nohz.load_balancer, -1); | ||
4714 | return 0; | ||
4715 | } | ||
4716 | |||
4717 | if (atomic_read(&nohz.load_balancer) == -1) { | ||
4718 | /* make me the ilb owner */ | ||
4719 | if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1) | ||
4720 | return 1; | ||
4721 | } else if (atomic_read(&nohz.load_balancer) == cpu) { | ||
4722 | int new_ilb; | ||
4723 | |||
4724 | if (!(sched_smt_power_savings || | ||
4725 | sched_mc_power_savings)) | ||
4726 | return 1; | ||
4727 | /* | ||
4728 | * Check to see if there is a more power-efficient | ||
4729 | * ilb. | ||
4730 | */ | ||
4731 | new_ilb = find_new_ilb(cpu); | ||
4732 | if (new_ilb < nr_cpu_ids && new_ilb != cpu) { | ||
4733 | atomic_set(&nohz.load_balancer, -1); | ||
4734 | resched_cpu(new_ilb); | ||
4735 | return 0; | ||
4736 | } | ||
4737 | return 1; | ||
4738 | } | ||
4739 | } else { | ||
4740 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) | ||
4741 | return 0; | ||
4742 | |||
4743 | cpumask_clear_cpu(cpu, nohz.cpu_mask); | ||
4744 | |||
4745 | if (atomic_read(&nohz.load_balancer) == cpu) | ||
4746 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | ||
4747 | BUG(); | ||
4748 | } | ||
4749 | return 0; | ||
4750 | } | ||
4751 | #endif | ||
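The ownership handoff above hinges on an atomic compare-and-swap against a single load_balancer slot: a CPU claims the role only if the slot is -1, and gives it up only if the slot still names itself. A small C11 model of that claim/release protocol (process-local and purely illustrative, not the kernel's atomics):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int load_balancer = ATOMIC_VAR_INIT(-1);

    /* try to become the idle-load-balance owner; returns 1 on success */
    static int claim_ilb(int cpu)
    {
        int expected = -1;
        return atomic_compare_exchange_strong(&load_balancer, &expected, cpu);
    }

    /* give up ownership; only the current owner succeeds */
    static int release_ilb(int cpu)
    {
        int expected = cpu;
        return atomic_compare_exchange_strong(&load_balancer, &expected, -1);
    }

    int main(void)
    {
        printf("cpu2 claims:   %d\n", claim_ilb(2));   /* 1: slot was -1        */
        printf("cpu3 claims:   %d\n", claim_ilb(3));   /* 0: cpu2 already owns  */
        printf("cpu3 releases: %d\n", release_ilb(3)); /* 0: not the owner      */
        printf("cpu2 releases: %d\n", release_ilb(2)); /* 1: owner steps down   */
        return 0;
    }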
4752 | |||
4753 | static DEFINE_SPINLOCK(balancing); | ||
4754 | |||
4755 | /* | ||
4756 | * It checks each scheduling domain to see if it is due to be balanced, | ||
4757 | * and initiates a balancing operation if so. | ||
4758 | * | ||
4759 | * Balancing parameters are set up in arch_init_sched_domains. | ||
4760 | */ | ||
4761 | static void rebalance_domains(int cpu, enum cpu_idle_type idle) | ||
4762 | { | ||
4763 | int balance = 1; | ||
4764 | struct rq *rq = cpu_rq(cpu); | ||
4765 | unsigned long interval; | ||
4766 | struct sched_domain *sd; | ||
4767 | /* Earliest time when we have to do rebalance again */ | ||
4768 | unsigned long next_balance = jiffies + 60*HZ; | ||
4769 | int update_next_balance = 0; | ||
4770 | int need_serialize; | ||
4771 | |||
4772 | for_each_domain(cpu, sd) { | ||
4773 | if (!(sd->flags & SD_LOAD_BALANCE)) | ||
4774 | continue; | ||
4775 | |||
4776 | interval = sd->balance_interval; | ||
4777 | if (idle != CPU_IDLE) | ||
4778 | interval *= sd->busy_factor; | ||
4779 | |||
4780 | /* scale ms to jiffies */ | ||
4781 | interval = msecs_to_jiffies(interval); | ||
4782 | if (unlikely(!interval)) | ||
4783 | interval = 1; | ||
4784 | if (interval > HZ*NR_CPUS/10) | ||
4785 | interval = HZ*NR_CPUS/10; | ||
4786 | |||
4787 | need_serialize = sd->flags & SD_SERIALIZE; | ||
4788 | |||
4789 | if (need_serialize) { | ||
4790 | if (!spin_trylock(&balancing)) | ||
4791 | goto out; | ||
4792 | } | ||
4793 | |||
4794 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | ||
4795 | if (load_balance(cpu, rq, sd, idle, &balance)) { | ||
4796 | /* | ||
4797 | * We've pulled tasks over so either we're no | ||
4798 | * longer idle, or one of our SMT siblings is | ||
4799 | * not idle. | ||
4800 | */ | ||
4801 | idle = CPU_NOT_IDLE; | ||
4802 | } | ||
4803 | sd->last_balance = jiffies; | ||
4804 | } | ||
4805 | if (need_serialize) | ||
4806 | spin_unlock(&balancing); | ||
4807 | out: | ||
4808 | if (time_after(next_balance, sd->last_balance + interval)) { | ||
4809 | next_balance = sd->last_balance + interval; | ||
4810 | update_next_balance = 1; | ||
4811 | } | ||
4812 | |||
4813 | /* | ||
4814 | * Stop the load balance at this level. There is another | ||
4815 | * CPU in our sched group which is doing load balancing more | ||
4816 | * actively. | ||
4817 | */ | ||
4818 | if (!balance) | ||
4819 | break; | ||
4820 | } | ||
4821 | |||
4822 | /* | ||
4823 | * next_balance will be updated only when there is a need. | ||
4824 | * When the cpu is attached to a null domain, for example, it will not be | ||
4825 | * updated. | ||
4826 | */ | ||
4827 | if (likely(update_next_balance)) | ||
4828 | rq->next_balance = next_balance; | ||
4829 | } | ||
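The effective balance interval computed above is the domain's balance_interval (in ms), multiplied by busy_factor when the CPU is not idle, converted to jiffies, and clamped to [1, HZ*NR_CPUS/10]. A hedged arithmetic sketch; the HZ and NR_CPUS values are assumptions for the example, and the conversion helper only approximates the kernel's msecs_to_jiffies():

    #include <stdio.h>

    #define HZ      250     /* assumed tick rate for the example */
    #define NR_CPUS 64      /* assumed CPU count for the example */

    static unsigned long msecs_to_jiffies_model(unsigned long ms)
    {
        return (ms * HZ + 999) / 1000;        /* round up, roughly */
    }

    static unsigned long balance_interval(unsigned long interval_ms,
                                          unsigned int busy_factor, int cpu_idle)
    {
        unsigned long interval = interval_ms;

        if (!cpu_idle)
            interval *= busy_factor;          /* balance less often when busy */

        interval = msecs_to_jiffies_model(interval);
        if (!interval)
            interval = 1;                     /* never a zero interval        */
        if (interval > HZ * NR_CPUS / 10)
            interval = HZ * NR_CPUS / 10;     /* cap the backoff              */
        return interval;
    }

    int main(void)
    {
        printf("idle: %lu jiffies\n", balance_interval(8, 32, 1));
        printf("busy: %lu jiffies\n", balance_interval(8, 32, 0));
        return 0;
    }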
4830 | |||
4831 | /* | ||
4832 | * run_rebalance_domains is triggered when needed from the scheduler tick. | ||
4833 | * In CONFIG_NO_HZ case, the idle load balance owner will do the | ||
4834 | * rebalancing for all the cpus for whom scheduler ticks are stopped. | ||
4835 | */ | ||
4836 | static void run_rebalance_domains(struct softirq_action *h) | ||
4837 | { | ||
4838 | int this_cpu = smp_processor_id(); | ||
4839 | struct rq *this_rq = cpu_rq(this_cpu); | ||
4840 | enum cpu_idle_type idle = this_rq->idle_at_tick ? | ||
4841 | CPU_IDLE : CPU_NOT_IDLE; | ||
4842 | |||
4843 | rebalance_domains(this_cpu, idle); | ||
4844 | |||
4845 | #ifdef CONFIG_NO_HZ | ||
4846 | /* | ||
4847 | * If this cpu is the owner for idle load balancing, then do the | ||
4848 | * balancing on behalf of the other idle cpus whose ticks are | ||
4849 | * stopped. | ||
4850 | */ | ||
4851 | if (this_rq->idle_at_tick && | ||
4852 | atomic_read(&nohz.load_balancer) == this_cpu) { | ||
4853 | struct rq *rq; | ||
4854 | int balance_cpu; | ||
4855 | |||
4856 | for_each_cpu(balance_cpu, nohz.cpu_mask) { | ||
4857 | if (balance_cpu == this_cpu) | ||
4858 | continue; | ||
4859 | |||
4860 | /* | ||
4861 | * If this cpu gets work to do, stop the load balancing | ||
4862 | * work being done for other cpus. Next load | ||
4863 | * balancing owner will pick it up. | ||
4864 | */ | ||
4865 | if (need_resched()) | ||
4866 | break; | ||
4867 | |||
4868 | rebalance_domains(balance_cpu, CPU_IDLE); | ||
4869 | |||
4870 | rq = cpu_rq(balance_cpu); | ||
4871 | if (time_after(this_rq->next_balance, rq->next_balance)) | ||
4872 | this_rq->next_balance = rq->next_balance; | ||
4873 | } | ||
4874 | } | ||
4875 | #endif | ||
4876 | } | ||
4877 | |||
4878 | static inline int on_null_domain(int cpu) | ||
4879 | { | ||
4880 | return !rcu_dereference(cpu_rq(cpu)->sd); | ||
4881 | } | ||
4882 | |||
4883 | /* | ||
4884 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. | ||
4885 | * | ||
4886 | * In case of CONFIG_NO_HZ, this is the place where we nominate a new | ||
4887 | * idle load balancing owner or decide to stop the periodic load balancing, | ||
4888 | * if the whole system is idle. | ||
4889 | */ | ||
4890 | static inline void trigger_load_balance(struct rq *rq, int cpu) | ||
4891 | { | ||
4892 | #ifdef CONFIG_NO_HZ | ||
4893 | /* | ||
4894 | * If we were in the nohz mode recently and busy at the current | ||
4895 | * scheduler tick, then check if we need to nominate new idle | ||
4896 | * load balancer. | ||
4897 | */ | ||
4898 | if (rq->in_nohz_recently && !rq->idle_at_tick) { | ||
4899 | rq->in_nohz_recently = 0; | ||
4900 | |||
4901 | if (atomic_read(&nohz.load_balancer) == cpu) { | ||
4902 | cpumask_clear_cpu(cpu, nohz.cpu_mask); | ||
4903 | atomic_set(&nohz.load_balancer, -1); | ||
4904 | } | ||
4905 | |||
4906 | if (atomic_read(&nohz.load_balancer) == -1) { | ||
4907 | int ilb = find_new_ilb(cpu); | ||
4908 | |||
4909 | if (ilb < nr_cpu_ids) | ||
4910 | resched_cpu(ilb); | ||
4911 | } | ||
4912 | } | ||
4913 | |||
4914 | /* | ||
4915 | * If this cpu is idle and doing idle load balancing for all the | ||
4916 | * cpus with ticks stopped, is it time for that to stop? | ||
4917 | */ | ||
4918 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | ||
4919 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { | ||
4920 | resched_cpu(cpu); | ||
4921 | return; | ||
4922 | } | ||
4923 | |||
4924 | /* | ||
4925 | * If this cpu is idle and the idle load balancing is done by | ||
4926 | * someone else, then there is no need to raise the SCHED_SOFTIRQ | ||
4927 | */ | ||
4928 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | ||
4929 | cpumask_test_cpu(cpu, nohz.cpu_mask)) | ||
4930 | return; | ||
4931 | #endif | ||
4932 | /* Don't need to rebalance while attached to NULL domain */ | ||
4933 | if (time_after_eq(jiffies, rq->next_balance) && | ||
4934 | likely(!on_null_domain(cpu))) | ||
4935 | raise_softirq(SCHED_SOFTIRQ); | ||
4936 | } | ||
4937 | |||
4938 | #else /* CONFIG_SMP */ | ||
4939 | |||
4940 | /* | ||
4941 | * on UP we do not need to balance between CPUs: | ||
4942 | */ | ||
4943 | static inline void idle_balance(int cpu, struct rq *rq) | ||
4944 | { | ||
4945 | } | ||
4946 | |||
4947 | #endif | 3079 | #endif |
4948 | 3080 | ||
4949 | DEFINE_PER_CPU(struct kernel_stat, kstat); | 3081 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
@@ -5298,7 +3430,7 @@ void scheduler_tick(void) | |||
5298 | curr->sched_class->task_tick(rq, curr, 0); | 3430 | curr->sched_class->task_tick(rq, curr, 0); |
5299 | raw_spin_unlock(&rq->lock); | 3431 | raw_spin_unlock(&rq->lock); |
5300 | 3432 | ||
5301 | perf_event_task_tick(curr, cpu); | 3433 | perf_event_task_tick(curr); |
5302 | 3434 | ||
5303 | #ifdef CONFIG_SMP | 3435 | #ifdef CONFIG_SMP |
5304 | rq->idle_at_tick = idle_cpu(cpu); | 3436 | rq->idle_at_tick = idle_cpu(cpu); |
@@ -5412,23 +3544,9 @@ static inline void schedule_debug(struct task_struct *prev) | |||
5412 | 3544 | ||
5413 | static void put_prev_task(struct rq *rq, struct task_struct *prev) | 3545 | static void put_prev_task(struct rq *rq, struct task_struct *prev) |
5414 | { | 3546 | { |
5415 | if (prev->state == TASK_RUNNING) { | 3547 | if (prev->se.on_rq) |
5416 | u64 runtime = prev->se.sum_exec_runtime; | 3548 | update_rq_clock(rq); |
5417 | 3549 | rq->skip_clock_update = 0; | |
5418 | runtime -= prev->se.prev_sum_exec_runtime; | ||
5419 | runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); | ||
5420 | |||
5421 | /* | ||
5422 | * In order to avoid avg_overlap growing stale when we are | ||
5423 | * indeed overlapping and hence not getting put to sleep, grow | ||
5424 | * the avg_overlap on preemption. | ||
5425 | * | ||
5426 | * We use the average preemption runtime because that | ||
5427 | * correlates to the amount of cache footprint a task can | ||
5428 | * build up. | ||
5429 | */ | ||
5430 | update_avg(&prev->se.avg_overlap, runtime); | ||
5431 | } | ||
5432 | prev->sched_class->put_prev_task(rq, prev); | 3550 | prev->sched_class->put_prev_task(rq, prev); |
5433 | } | 3551 | } |
5434 | 3552 | ||
@@ -5478,7 +3596,7 @@ need_resched: | |||
5478 | preempt_disable(); | 3596 | preempt_disable(); |
5479 | cpu = smp_processor_id(); | 3597 | cpu = smp_processor_id(); |
5480 | rq = cpu_rq(cpu); | 3598 | rq = cpu_rq(cpu); |
5481 | rcu_sched_qs(cpu); | 3599 | rcu_note_context_switch(cpu); |
5482 | prev = rq->curr; | 3600 | prev = rq->curr; |
5483 | switch_count = &prev->nivcsw; | 3601 | switch_count = &prev->nivcsw; |
5484 | 3602 | ||
@@ -5491,14 +3609,13 @@ need_resched_nonpreemptible: | |||
5491 | hrtick_clear(rq); | 3609 | hrtick_clear(rq); |
5492 | 3610 | ||
5493 | raw_spin_lock_irq(&rq->lock); | 3611 | raw_spin_lock_irq(&rq->lock); |
5494 | update_rq_clock(rq); | ||
5495 | clear_tsk_need_resched(prev); | 3612 | clear_tsk_need_resched(prev); |
5496 | 3613 | ||
5497 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | 3614 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
5498 | if (unlikely(signal_pending_state(prev->state, prev))) | 3615 | if (unlikely(signal_pending_state(prev->state, prev))) |
5499 | prev->state = TASK_RUNNING; | 3616 | prev->state = TASK_RUNNING; |
5500 | else | 3617 | else |
5501 | deactivate_task(rq, prev, 1); | 3618 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
5502 | switch_count = &prev->nvcsw; | 3619 | switch_count = &prev->nvcsw; |
5503 | } | 3620 | } |
5504 | 3621 | ||
@@ -5512,7 +3629,7 @@ need_resched_nonpreemptible: | |||
5512 | 3629 | ||
5513 | if (likely(prev != next)) { | 3630 | if (likely(prev != next)) { |
5514 | sched_info_switch(prev, next); | 3631 | sched_info_switch(prev, next); |
5515 | perf_event_task_sched_out(prev, next, cpu); | 3632 | perf_event_task_sched_out(prev, next); |
5516 | 3633 | ||
5517 | rq->nr_switches++; | 3634 | rq->nr_switches++; |
5518 | rq->curr = next; | 3635 | rq->curr = next; |
@@ -5562,7 +3679,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
5562 | * the mutex owner just released it and exited. | 3679 | * the mutex owner just released it and exited. |
5563 | */ | 3680 | */ |
5564 | if (probe_kernel_address(&owner->cpu, cpu)) | 3681 | if (probe_kernel_address(&owner->cpu, cpu)) |
5565 | goto out; | 3682 | return 0; |
5566 | #else | 3683 | #else |
5567 | cpu = owner->cpu; | 3684 | cpu = owner->cpu; |
5568 | #endif | 3685 | #endif |
@@ -5572,14 +3689,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
5572 | * the cpu field may no longer be valid. | 3689 | * the cpu field may no longer be valid. |
5573 | */ | 3690 | */ |
5574 | if (cpu >= nr_cpumask_bits) | 3691 | if (cpu >= nr_cpumask_bits) |
5575 | goto out; | 3692 | return 0; |
5576 | 3693 | ||
5577 | /* | 3694 | /* |
5578 | * We need to validate that we can do a | 3695 | * We need to validate that we can do a |
5579 | * get_cpu() and that we have the percpu area. | 3696 | * get_cpu() and that we have the percpu area. |
5580 | */ | 3697 | */ |
5581 | if (!cpu_online(cpu)) | 3698 | if (!cpu_online(cpu)) |
5582 | goto out; | 3699 | return 0; |
5583 | 3700 | ||
5584 | rq = cpu_rq(cpu); | 3701 | rq = cpu_rq(cpu); |
5585 | 3702 | ||
@@ -5598,7 +3715,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
5598 | 3715 | ||
5599 | cpu_relax(); | 3716 | cpu_relax(); |
5600 | } | 3717 | } |
5601 | out: | 3718 | |
5602 | return 1; | 3719 | return 1; |
5603 | } | 3720 | } |
5604 | #endif | 3721 | #endif |
@@ -5722,6 +3839,7 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) | |||
5722 | { | 3839 | { |
5723 | __wake_up_common(q, mode, 1, 0, NULL); | 3840 | __wake_up_common(q, mode, 1, 0, NULL); |
5724 | } | 3841 | } |
3842 | EXPORT_SYMBOL_GPL(__wake_up_locked); | ||
5725 | 3843 | ||
5726 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) | 3844 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) |
5727 | { | 3845 | { |
@@ -5821,8 +3939,7 @@ do_wait_for_common(struct completion *x, long timeout, int state) | |||
5821 | if (!x->done) { | 3939 | if (!x->done) { |
5822 | DECLARE_WAITQUEUE(wait, current); | 3940 | DECLARE_WAITQUEUE(wait, current); |
5823 | 3941 | ||
5824 | wait.flags |= WQ_FLAG_EXCLUSIVE; | 3942 | __add_wait_queue_tail_exclusive(&x->wait, &wait); |
5825 | __add_wait_queue_tail(&x->wait, &wait); | ||
5826 | do { | 3943 | do { |
5827 | if (signal_pending_state(state, current)) { | 3944 | if (signal_pending_state(state, current)) { |
5828 | timeout = -ERESTARTSYS; | 3945 | timeout = -ERESTARTSYS; |
@@ -5933,6 +4050,23 @@ int __sched wait_for_completion_killable(struct completion *x) | |||
5933 | EXPORT_SYMBOL(wait_for_completion_killable); | 4050 | EXPORT_SYMBOL(wait_for_completion_killable); |
5934 | 4051 | ||
5935 | /** | 4052 | /** |
4053 | * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) | ||
4054 | * @x: holds the state of this particular completion | ||
4055 | * @timeout: timeout value in jiffies | ||
4056 | * | ||
4057 | * This waits for either a completion of a specific task to be | ||
4058 | * signaled or for a specified timeout to expire. It can be | ||
4059 | * interrupted by a kill signal. The timeout is in jiffies. | ||
4060 | */ | ||
4061 | unsigned long __sched | ||
4062 | wait_for_completion_killable_timeout(struct completion *x, | ||
4063 | unsigned long timeout) | ||
4064 | { | ||
4065 | return wait_for_common(x, timeout, TASK_KILLABLE); | ||
4066 | } | ||
4067 | EXPORT_SYMBOL(wait_for_completion_killable_timeout); | ||
4068 | |||
4069 | /** | ||
5936 | * try_wait_for_completion - try to decrement a completion without blocking | 4070 | * try_wait_for_completion - try to decrement a completion without blocking |
5937 | * @x: completion structure | 4071 | * @x: completion structure |
5938 | * | 4072 | * |
@@ -6043,14 +4177,14 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
6043 | unsigned long flags; | 4177 | unsigned long flags; |
6044 | int oldprio, on_rq, running; | 4178 | int oldprio, on_rq, running; |
6045 | struct rq *rq; | 4179 | struct rq *rq; |
6046 | const struct sched_class *prev_class = p->sched_class; | 4180 | const struct sched_class *prev_class; |
6047 | 4181 | ||
6048 | BUG_ON(prio < 0 || prio > MAX_PRIO); | 4182 | BUG_ON(prio < 0 || prio > MAX_PRIO); |
6049 | 4183 | ||
6050 | rq = task_rq_lock(p, &flags); | 4184 | rq = task_rq_lock(p, &flags); |
6051 | update_rq_clock(rq); | ||
6052 | 4185 | ||
6053 | oldprio = p->prio; | 4186 | oldprio = p->prio; |
4187 | prev_class = p->sched_class; | ||
6054 | on_rq = p->se.on_rq; | 4188 | on_rq = p->se.on_rq; |
6055 | running = task_current(rq, p); | 4189 | running = task_current(rq, p); |
6056 | if (on_rq) | 4190 | if (on_rq) |
@@ -6068,7 +4202,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
6068 | if (running) | 4202 | if (running) |
6069 | p->sched_class->set_curr_task(rq); | 4203 | p->sched_class->set_curr_task(rq); |
6070 | if (on_rq) { | 4204 | if (on_rq) { |
6071 | enqueue_task(rq, p, 0); | 4205 | enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0); |
6072 | 4206 | ||
6073 | check_class_changed(rq, p, prev_class, oldprio, running); | 4207 | check_class_changed(rq, p, prev_class, oldprio, running); |
6074 | } | 4208 | } |
@@ -6090,7 +4224,6 @@ void set_user_nice(struct task_struct *p, long nice) | |||
6090 | * the task might be in the middle of scheduling on another CPU. | 4224 | * the task might be in the middle of scheduling on another CPU. |
6091 | */ | 4225 | */ |
6092 | rq = task_rq_lock(p, &flags); | 4226 | rq = task_rq_lock(p, &flags); |
6093 | update_rq_clock(rq); | ||
6094 | /* | 4227 | /* |
6095 | * The RT priorities are set via sched_setscheduler(), but we still | 4228 | * The RT priorities are set via sched_setscheduler(), but we still |
6096 | * allow the 'normal' nice value to be set - but as expected | 4229 | * allow the 'normal' nice value to be set - but as expected |
@@ -6135,7 +4268,7 @@ int can_nice(const struct task_struct *p, const int nice) | |||
6135 | /* convert nice value [19,-20] to rlimit style value [1,40] */ | 4268 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
6136 | int nice_rlim = 20 - nice; | 4269 | int nice_rlim = 20 - nice; |
6137 | 4270 | ||
6138 | return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || | 4271 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
6139 | capable(CAP_SYS_NICE)); | 4272 | capable(CAP_SYS_NICE)); |
6140 | } | 4273 | } |
6141 | 4274 | ||
@@ -6270,7 +4403,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy, | |||
6270 | { | 4403 | { |
6271 | int retval, oldprio, oldpolicy = -1, on_rq, running; | 4404 | int retval, oldprio, oldpolicy = -1, on_rq, running; |
6272 | unsigned long flags; | 4405 | unsigned long flags; |
6273 | const struct sched_class *prev_class = p->sched_class; | 4406 | const struct sched_class *prev_class; |
6274 | struct rq *rq; | 4407 | struct rq *rq; |
6275 | int reset_on_fork; | 4408 | int reset_on_fork; |
6276 | 4409 | ||
@@ -6312,7 +4445,7 @@ recheck: | |||
6312 | 4445 | ||
6313 | if (!lock_task_sighand(p, &flags)) | 4446 | if (!lock_task_sighand(p, &flags)) |
6314 | return -ESRCH; | 4447 | return -ESRCH; |
6315 | rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; | 4448 | rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); |
6316 | unlock_task_sighand(p, &flags); | 4449 | unlock_task_sighand(p, &flags); |
6317 | 4450 | ||
6318 | /* can't set/change the rt policy */ | 4451 | /* can't set/change the rt policy */ |
@@ -6341,16 +4474,6 @@ recheck: | |||
6341 | } | 4474 | } |
6342 | 4475 | ||
6343 | if (user) { | 4476 | if (user) { |
6344 | #ifdef CONFIG_RT_GROUP_SCHED | ||
6345 | /* | ||
6346 | * Do not allow realtime tasks into groups that have no runtime | ||
6347 | * assigned. | ||
6348 | */ | ||
6349 | if (rt_bandwidth_enabled() && rt_policy(policy) && | ||
6350 | task_group(p)->rt_bandwidth.rt_runtime == 0) | ||
6351 | return -EPERM; | ||
6352 | #endif | ||
6353 | |||
6354 | retval = security_task_setscheduler(p, policy, param); | 4477 | retval = security_task_setscheduler(p, policy, param); |
6355 | if (retval) | 4478 | if (retval) |
6356 | return retval; | 4479 | return retval; |
@@ -6366,6 +4489,22 @@ recheck: | |||
6366 | * runqueue lock must be held. | 4489 | * runqueue lock must be held. |
6367 | */ | 4490 | */ |
6368 | rq = __task_rq_lock(p); | 4491 | rq = __task_rq_lock(p); |
4492 | |||
4493 | #ifdef CONFIG_RT_GROUP_SCHED | ||
4494 | if (user) { | ||
4495 | /* | ||
4496 | * Do not allow realtime tasks into groups that have no runtime | ||
4497 | * assigned. | ||
4498 | */ | ||
4499 | if (rt_bandwidth_enabled() && rt_policy(policy) && | ||
4500 | task_group(p)->rt_bandwidth.rt_runtime == 0) { | ||
4501 | __task_rq_unlock(rq); | ||
4502 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
4503 | return -EPERM; | ||
4504 | } | ||
4505 | } | ||
4506 | #endif | ||
4507 | |||
6369 | /* recheck policy now with rq lock held */ | 4508 | /* recheck policy now with rq lock held */ |
6370 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 4509 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
6371 | policy = oldpolicy = -1; | 4510 | policy = oldpolicy = -1; |
@@ -6373,7 +4512,6 @@ recheck: | |||
6373 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | 4512 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
6374 | goto recheck; | 4513 | goto recheck; |
6375 | } | 4514 | } |
6376 | update_rq_clock(rq); | ||
6377 | on_rq = p->se.on_rq; | 4515 | on_rq = p->se.on_rq; |
6378 | running = task_current(rq, p); | 4516 | running = task_current(rq, p); |
6379 | if (on_rq) | 4517 | if (on_rq) |
@@ -6384,6 +4522,7 @@ recheck: | |||
6384 | p->sched_reset_on_fork = reset_on_fork; | 4522 | p->sched_reset_on_fork = reset_on_fork; |
6385 | 4523 | ||
6386 | oldprio = p->prio; | 4524 | oldprio = p->prio; |
4525 | prev_class = p->sched_class; | ||
6387 | __setscheduler(rq, p, policy, param->sched_priority); | 4526 | __setscheduler(rq, p, policy, param->sched_priority); |
6388 | 4527 | ||
6389 | if (running) | 4528 | if (running) |
@@ -6683,7 +4822,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
6683 | int ret; | 4822 | int ret; |
6684 | cpumask_var_t mask; | 4823 | cpumask_var_t mask; |
6685 | 4824 | ||
6686 | if (len < cpumask_size()) | 4825 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
4826 | return -EINVAL; | ||
4827 | if (len & (sizeof(unsigned long)-1)) | ||
6687 | return -EINVAL; | 4828 | return -EINVAL; |
6688 | 4829 | ||
6689 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) | 4830 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
@@ -6691,10 +4832,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
6691 | 4832 | ||
6692 | ret = sched_getaffinity(pid, mask); | 4833 | ret = sched_getaffinity(pid, mask); |
6693 | if (ret == 0) { | 4834 | if (ret == 0) { |
6694 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | 4835 | size_t retlen = min_t(size_t, len, cpumask_size()); |
4836 | |||
4837 | if (copy_to_user(user_mask_ptr, mask, retlen)) | ||
6695 | ret = -EFAULT; | 4838 | ret = -EFAULT; |
6696 | else | 4839 | else |
6697 | ret = cpumask_size(); | 4840 | ret = retlen; |
6698 | } | 4841 | } |
6699 | free_cpumask_var(mask); | 4842 | free_cpumask_var(mask); |
6700 | 4843 | ||
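The two -EINVAL checks added above encode: the user buffer must have at least one bit per possible CPU id, and its length must be a multiple of sizeof(unsigned long); on success only min(len, cpumask_size()) bytes are copied back. A small userspace model of the length validation (the nr_cpu_ids value is an assumed example):

    #include <stddef.h>
    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define NR_CPU_IDS    72        /* assumed for the example */

    static int check_affinity_len(size_t len)
    {
        if (len * BITS_PER_BYTE < NR_CPU_IDS)
            return -1;              /* buffer too small for all CPU ids   */
        if (len & (sizeof(unsigned long) - 1))
            return -1;              /* not a multiple of sizeof(long)     */
        return 0;
    }

    int main(void)
    {
        printf("len=8  -> %d\n", check_affinity_len(8));  /* -1: only 64 bits   */
        printf("len=10 -> %d\n", check_affinity_len(10)); /* -1: not long-sized */
        printf("len=16 -> %d\n", check_affinity_len(16)); /*  0: 128 bits, OK   */
        return 0;
    }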
@@ -7105,17 +5248,15 @@ static inline void sched_init_granularity(void) | |||
7105 | /* | 5248 | /* |
7106 | * This is how migration works: | 5249 | * This is how migration works: |
7107 | * | 5250 | * |
7108 | * 1) we queue a struct migration_req structure in the source CPU's | 5251 | * 1) we invoke migration_cpu_stop() on the target CPU using |
7109 | * runqueue and wake up that CPU's migration thread. | 5252 | * stop_one_cpu(). |
7110 | * 2) we down() the locked semaphore => thread blocks. | 5253 | * 2) stopper starts to run (implicitly forcing the migrated thread |
7111 | * 3) migration thread wakes up (implicitly it forces the migrated | 5254 | * off the CPU) |
7112 | * thread off the CPU) | 5255 | * 3) it checks whether the migrated task is still in the wrong runqueue. |
7113 | * 4) it gets the migration request and checks whether the migrated | 5256 | * 4) if it's in the wrong runqueue then the migration thread removes |
7114 | * task is still in the wrong runqueue. | ||
7115 | * 5) if it's in the wrong runqueue then the migration thread removes | ||
7116 | * it and puts it into the right queue. | 5257 | * it and puts it into the right queue. |
7117 | * 6) migration thread up()s the semaphore. | 5258 | * 5) stopper completes and stop_one_cpu() returns and the migration |
7118 | * 7) we wake up and the migration is done. | 5259 | * is done. |
7119 | */ | 5260 | */ |
7120 | 5261 | ||
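In the new scheme the caller packs {task, dest_cpu} into a migration_arg and runs migration_cpu_stop() on the task's CPU via stop_one_cpu(), which preempts whatever is running there. A hedged sketch of the calling shape, following the set_cpus_allowed_ptr() path shown below; it is not a drop-in addition to this file, and locking plus error handling are elided:

    /* sketch only: how the new path dispatches the move */
    static void migrate_to_sketch(struct task_struct *p, int dest_cpu)
    {
        struct migration_arg arg = { p, dest_cpu };

        /*
         * Run migration_cpu_stop(&arg) on p's CPU at stopper priority,
         * forcing p off that CPU so it can be enqueued on dest_cpu.
         */
        stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
    }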
7121 | /* | 5262 | /* |
@@ -7129,24 +5270,20 @@ static inline void sched_init_granularity(void) | |||
7129 | */ | 5270 | */ |
7130 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | 5271 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
7131 | { | 5272 | { |
7132 | struct migration_req req; | ||
7133 | unsigned long flags; | 5273 | unsigned long flags; |
7134 | struct rq *rq; | 5274 | struct rq *rq; |
5275 | unsigned int dest_cpu; | ||
7135 | int ret = 0; | 5276 | int ret = 0; |
7136 | 5277 | ||
7137 | /* | 5278 | /* |
7138 | * Since we rely on wake-ups to migrate sleeping tasks, don't change | 5279 | * Serialize against TASK_WAKING so that ttwu() and wunt() can |
7139 | * the ->cpus_allowed mask from under waking tasks, which would be | 5280 | * drop the rq->lock and still rely on ->cpus_allowed. |
7140 | * possible when we change rq->lock in ttwu(), so synchronize against | ||
7141 | * TASK_WAKING to avoid that. | ||
7142 | */ | 5281 | */ |
7143 | again: | 5282 | again: |
7144 | while (p->state == TASK_WAKING) | 5283 | while (task_is_waking(p)) |
7145 | cpu_relax(); | 5284 | cpu_relax(); |
7146 | |||
7147 | rq = task_rq_lock(p, &flags); | 5285 | rq = task_rq_lock(p, &flags); |
7148 | 5286 | if (task_is_waking(p)) { | |
7149 | if (p->state == TASK_WAKING) { | ||
7150 | task_rq_unlock(rq, &flags); | 5287 | task_rq_unlock(rq, &flags); |
7151 | goto again; | 5288 | goto again; |
7152 | } | 5289 | } |
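The again: loop above is a classic check / lock / re-check pattern: spin outside the lock while the task is in the transient waking state, take the runqueue lock, and if the state changed in between, drop the lock and start over. A hedged pthread model of that shape; the names and the mutex standing in for the runqueue lock are illustrative assumptions:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int task_waking = ATOMIC_VAR_INIT(0);

    static void change_affinity(void)
    {
    again:
        /* cheap wait outside the lock while the transient state holds */
        while (atomic_load(&task_waking))
            ;   /* a cpu_relax() equivalent would go here */

        pthread_mutex_lock(&rq_lock);
        if (atomic_load(&task_waking)) {
            /* state changed between the check and the lock: retry */
            pthread_mutex_unlock(&rq_lock);
            goto again;
        }

        /* ... safe to inspect and modify the affinity under the lock ... */
        pthread_mutex_unlock(&rq_lock);
    }

    int main(void)
    {
        change_affinity();
        printf("affinity updated\n");
        return 0;
    }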
@@ -7173,15 +5310,12 @@ again: | |||
7173 | if (cpumask_test_cpu(task_cpu(p), new_mask)) | 5310 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
7174 | goto out; | 5311 | goto out; |
7175 | 5312 | ||
7176 | if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { | 5313 | dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); |
5314 | if (migrate_task(p, dest_cpu)) { | ||
5315 | struct migration_arg arg = { p, dest_cpu }; | ||
7177 | /* Need help from migration thread: drop lock and wait. */ | 5316 | /* Need help from migration thread: drop lock and wait. */ |
7178 | struct task_struct *mt = rq->migration_thread; | ||
7179 | |||
7180 | get_task_struct(mt); | ||
7181 | task_rq_unlock(rq, &flags); | 5317 | task_rq_unlock(rq, &flags); |
7182 | wake_up_process(rq->migration_thread); | 5318 | stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); |
7183 | put_task_struct(mt); | ||
7184 | wait_for_completion(&req.done); | ||
7185 | tlb_migrate_finish(p->mm); | 5319 | tlb_migrate_finish(p->mm); |
7186 | return 0; | 5320 | return 0; |
7187 | } | 5321 | } |
@@ -7239,98 +5373,49 @@ fail: | |||
7239 | return ret; | 5373 | return ret; |
7240 | } | 5374 | } |
7241 | 5375 | ||
7242 | #define RCU_MIGRATION_IDLE 0 | ||
7243 | #define RCU_MIGRATION_NEED_QS 1 | ||
7244 | #define RCU_MIGRATION_GOT_QS 2 | ||
7245 | #define RCU_MIGRATION_MUST_SYNC 3 | ||
7246 | |||
7247 | /* | 5376 | /* |
7248 | * migration_thread - this is a highprio system thread that performs | 5377 | * migration_cpu_stop - this will be executed by a highprio stopper thread |
7249 | * thread migration by bumping thread off CPU then 'pushing' onto | 5378 | * and performs thread migration by bumping thread off CPU then |
7250 | * another runqueue. | 5379 | * 'pushing' onto another runqueue. |
7251 | */ | 5380 | */ |
7252 | static int migration_thread(void *data) | 5381 | static int migration_cpu_stop(void *data) |
7253 | { | 5382 | { |
7254 | int badcpu; | 5383 | struct migration_arg *arg = data; |
7255 | int cpu = (long)data; | ||
7256 | struct rq *rq; | ||
7257 | |||
7258 | rq = cpu_rq(cpu); | ||
7259 | BUG_ON(rq->migration_thread != current); | ||
7260 | |||
7261 | set_current_state(TASK_INTERRUPTIBLE); | ||
7262 | while (!kthread_should_stop()) { | ||
7263 | struct migration_req *req; | ||
7264 | struct list_head *head; | ||
7265 | |||
7266 | raw_spin_lock_irq(&rq->lock); | ||
7267 | |||
7268 | if (cpu_is_offline(cpu)) { | ||
7269 | raw_spin_unlock_irq(&rq->lock); | ||
7270 | break; | ||
7271 | } | ||
7272 | |||
7273 | if (rq->active_balance) { | ||
7274 | active_load_balance(rq, cpu); | ||
7275 | rq->active_balance = 0; | ||
7276 | } | ||
7277 | |||
7278 | head = &rq->migration_queue; | ||
7279 | |||
7280 | if (list_empty(head)) { | ||
7281 | raw_spin_unlock_irq(&rq->lock); | ||
7282 | schedule(); | ||
7283 | set_current_state(TASK_INTERRUPTIBLE); | ||
7284 | continue; | ||
7285 | } | ||
7286 | req = list_entry(head->next, struct migration_req, list); | ||
7287 | list_del_init(head->next); | ||
7288 | |||
7289 | if (req->task != NULL) { | ||
7290 | raw_spin_unlock(&rq->lock); | ||
7291 | __migrate_task(req->task, cpu, req->dest_cpu); | ||
7292 | } else if (likely(cpu == (badcpu = smp_processor_id()))) { | ||
7293 | req->dest_cpu = RCU_MIGRATION_GOT_QS; | ||
7294 | raw_spin_unlock(&rq->lock); | ||
7295 | } else { | ||
7296 | req->dest_cpu = RCU_MIGRATION_MUST_SYNC; | ||
7297 | raw_spin_unlock(&rq->lock); | ||
7298 | WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); | ||
7299 | } | ||
7300 | local_irq_enable(); | ||
7301 | |||
7302 | complete(&req->done); | ||
7303 | } | ||
7304 | __set_current_state(TASK_RUNNING); | ||
7305 | |||
7306 | return 0; | ||
7307 | } | ||
7308 | |||
7309 | #ifdef CONFIG_HOTPLUG_CPU | ||
7310 | |||
7311 | static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | ||
7312 | { | ||
7313 | int ret; | ||
7314 | 5384 | ||
5385 | /* | ||
5386 | * The original target cpu might have gone down and we might | ||
5387 | * be on another cpu but it doesn't matter. | ||
5388 | */ | ||
7315 | local_irq_disable(); | 5389 | local_irq_disable(); |
7316 | ret = __migrate_task(p, src_cpu, dest_cpu); | 5390 | __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu); |
7317 | local_irq_enable(); | 5391 | local_irq_enable(); |
7318 | return ret; | 5392 | return 0; |
7319 | } | 5393 | } |
7320 | 5394 | ||
5395 | #ifdef CONFIG_HOTPLUG_CPU | ||
7321 | /* | 5396 | /* |
7322 | * Figure out where task on dead CPU should go, use force if necessary. | 5397 | * Figure out where task on dead CPU should go, use force if necessary. |
7323 | */ | 5398 | */ |
7324 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 5399 | void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
7325 | { | 5400 | { |
7326 | int dest_cpu; | 5401 | struct rq *rq = cpu_rq(dead_cpu); |
5402 | int needs_cpu, uninitialized_var(dest_cpu); | ||
5403 | unsigned long flags; | ||
7327 | 5404 | ||
7328 | again: | 5405 | local_irq_save(flags); |
7329 | dest_cpu = select_fallback_rq(dead_cpu, p); | ||
7330 | 5406 | ||
7331 | /* It can have affinity changed while we were choosing. */ | 5407 | raw_spin_lock(&rq->lock); |
7332 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | 5408 | needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING); |
7333 | goto again; | 5409 | if (needs_cpu) |
5410 | dest_cpu = select_fallback_rq(dead_cpu, p); | ||
5411 | raw_spin_unlock(&rq->lock); | ||
5412 | /* | ||
5413 | * It can only fail if we race with set_cpus_allowed(), | ||
5414 | * in which case the racer should migrate the task anyway. | ||
5415 | */ | ||
5416 | if (needs_cpu) | ||
5417 | __migrate_task(p, dead_cpu, dest_cpu); | ||
5418 | local_irq_restore(flags); | ||
7334 | } | 5419 | } |
7335 | 5420 | ||
7336 | /* | 5421 | /* |
@@ -7394,7 +5479,6 @@ void sched_idle_next(void) | |||
7394 | 5479 | ||
7395 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | 5480 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); |
7396 | 5481 | ||
7397 | update_rq_clock(rq); | ||
7398 | activate_task(rq, p, 0); | 5482 | activate_task(rq, p, 0); |
7399 | 5483 | ||
7400 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 5484 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
@@ -7449,7 +5533,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
7449 | for ( ; ; ) { | 5533 | for ( ; ; ) { |
7450 | if (!rq->nr_running) | 5534 | if (!rq->nr_running) |
7451 | break; | 5535 | break; |
7452 | update_rq_clock(rq); | ||
7453 | next = pick_next_task(rq); | 5536 | next = pick_next_task(rq); |
7454 | if (!next) | 5537 | if (!next) |
7455 | break; | 5538 | break; |
@@ -7672,35 +5755,20 @@ static void set_rq_offline(struct rq *rq) | |||
7672 | static int __cpuinit | 5755 | static int __cpuinit |
7673 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | 5756 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
7674 | { | 5757 | { |
7675 | struct task_struct *p; | ||
7676 | int cpu = (long)hcpu; | 5758 | int cpu = (long)hcpu; |
7677 | unsigned long flags; | 5759 | unsigned long flags; |
7678 | struct rq *rq; | 5760 | struct rq *rq = cpu_rq(cpu); |
7679 | 5761 | ||
7680 | switch (action) { | 5762 | switch (action) { |
7681 | 5763 | ||
7682 | case CPU_UP_PREPARE: | 5764 | case CPU_UP_PREPARE: |
7683 | case CPU_UP_PREPARE_FROZEN: | 5765 | case CPU_UP_PREPARE_FROZEN: |
7684 | p = kthread_create(migration_thread, hcpu, "migration/%d", cpu); | ||
7685 | if (IS_ERR(p)) | ||
7686 | return NOTIFY_BAD; | ||
7687 | kthread_bind(p, cpu); | ||
7688 | /* Must be high prio: stop_machine expects to yield to it. */ | ||
7689 | rq = task_rq_lock(p, &flags); | ||
7690 | __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); | ||
7691 | task_rq_unlock(rq, &flags); | ||
7692 | get_task_struct(p); | ||
7693 | cpu_rq(cpu)->migration_thread = p; | ||
7694 | rq->calc_load_update = calc_load_update; | 5766 | rq->calc_load_update = calc_load_update; |
7695 | break; | 5767 | break; |
7696 | 5768 | ||
7697 | case CPU_ONLINE: | 5769 | case CPU_ONLINE: |
7698 | case CPU_ONLINE_FROZEN: | 5770 | case CPU_ONLINE_FROZEN: |
7699 | /* Strictly unnecessary, as first user will wake it. */ | ||
7700 | wake_up_process(cpu_rq(cpu)->migration_thread); | ||
7701 | |||
7702 | /* Update our root-domain */ | 5771 | /* Update our root-domain */ |
7703 | rq = cpu_rq(cpu); | ||
7704 | raw_spin_lock_irqsave(&rq->lock, flags); | 5772 | raw_spin_lock_irqsave(&rq->lock, flags); |
7705 | if (rq->rd) { | 5773 | if (rq->rd) { |
7706 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 5774 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
@@ -7711,61 +5779,24 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
7711 | break; | 5779 | break; |
7712 | 5780 | ||
7713 | #ifdef CONFIG_HOTPLUG_CPU | 5781 | #ifdef CONFIG_HOTPLUG_CPU |
7714 | case CPU_UP_CANCELED: | ||
7715 | case CPU_UP_CANCELED_FROZEN: | ||
7716 | if (!cpu_rq(cpu)->migration_thread) | ||
7717 | break; | ||
7718 | /* Unbind it from offline cpu so it can run. Fall thru. */ | ||
7719 | kthread_bind(cpu_rq(cpu)->migration_thread, | ||
7720 | cpumask_any(cpu_online_mask)); | ||
7721 | kthread_stop(cpu_rq(cpu)->migration_thread); | ||
7722 | put_task_struct(cpu_rq(cpu)->migration_thread); | ||
7723 | cpu_rq(cpu)->migration_thread = NULL; | ||
7724 | break; | ||
7725 | |||
7726 | case CPU_DEAD: | 5782 | case CPU_DEAD: |
7727 | case CPU_DEAD_FROZEN: | 5783 | case CPU_DEAD_FROZEN: |
7728 | cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */ | ||
7729 | migrate_live_tasks(cpu); | 5784 | migrate_live_tasks(cpu); |
7730 | rq = cpu_rq(cpu); | ||
7731 | kthread_stop(rq->migration_thread); | ||
7732 | put_task_struct(rq->migration_thread); | ||
7733 | rq->migration_thread = NULL; | ||
7734 | /* Idle task back to normal (off runqueue, low prio) */ | 5785 | /* Idle task back to normal (off runqueue, low prio) */ |
7735 | raw_spin_lock_irq(&rq->lock); | 5786 | raw_spin_lock_irq(&rq->lock); |
7736 | update_rq_clock(rq); | ||
7737 | deactivate_task(rq, rq->idle, 0); | 5787 | deactivate_task(rq, rq->idle, 0); |
7738 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | 5788 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); |
7739 | rq->idle->sched_class = &idle_sched_class; | 5789 | rq->idle->sched_class = &idle_sched_class; |
7740 | migrate_dead_tasks(cpu); | 5790 | migrate_dead_tasks(cpu); |
7741 | raw_spin_unlock_irq(&rq->lock); | 5791 | raw_spin_unlock_irq(&rq->lock); |
7742 | cpuset_unlock(); | ||
7743 | migrate_nr_uninterruptible(rq); | 5792 | migrate_nr_uninterruptible(rq); |
7744 | BUG_ON(rq->nr_running != 0); | 5793 | BUG_ON(rq->nr_running != 0); |
7745 | calc_global_load_remove(rq); | 5794 | calc_global_load_remove(rq); |
7746 | /* | ||
7747 | * No need to migrate the tasks: it was best-effort if | ||
7748 | * they didn't take sched_hotcpu_mutex. Just wake up | ||
7749 | * the requestors. | ||
7750 | */ | ||
7751 | raw_spin_lock_irq(&rq->lock); | ||
7752 | while (!list_empty(&rq->migration_queue)) { | ||
7753 | struct migration_req *req; | ||
7754 | |||
7755 | req = list_entry(rq->migration_queue.next, | ||
7756 | struct migration_req, list); | ||
7757 | list_del_init(&req->list); | ||
7758 | raw_spin_unlock_irq(&rq->lock); | ||
7759 | complete(&req->done); | ||
7760 | raw_spin_lock_irq(&rq->lock); | ||
7761 | } | ||
7762 | raw_spin_unlock_irq(&rq->lock); | ||
7763 | break; | 5795 | break; |
7764 | 5796 | ||
7765 | case CPU_DYING: | 5797 | case CPU_DYING: |
7766 | case CPU_DYING_FROZEN: | 5798 | case CPU_DYING_FROZEN: |
7767 | /* Update our root-domain */ | 5799 | /* Update our root-domain */ |
7768 | rq = cpu_rq(cpu); | ||
7769 | raw_spin_lock_irqsave(&rq->lock, flags); | 5800 | raw_spin_lock_irqsave(&rq->lock, flags); |
7770 | if (rq->rd) { | 5801 | if (rq->rd) { |
7771 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | 5802 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
@@ -8096,6 +6127,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
8096 | struct rq *rq = cpu_rq(cpu); | 6127 | struct rq *rq = cpu_rq(cpu); |
8097 | struct sched_domain *tmp; | 6128 | struct sched_domain *tmp; |
8098 | 6129 | ||
6130 | for (tmp = sd; tmp; tmp = tmp->parent) | ||
6131 | tmp->span_weight = cpumask_weight(sched_domain_span(tmp)); | ||
6132 | |||
8099 | /* Remove the sched domains which do not contribute to scheduling. */ | 6133 | /* Remove the sched domains which do not contribute to scheduling. */ |
8100 | for (tmp = sd; tmp; ) { | 6134 | for (tmp = sd; tmp; ) { |
8101 | struct sched_domain *parent = tmp->parent; | 6135 | struct sched_domain *parent = tmp->parent; |
@@ -9202,11 +7236,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | |||
9202 | 7236 | ||
9203 | #ifdef CONFIG_SCHED_MC | 7237 | #ifdef CONFIG_SCHED_MC |
9204 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, | 7238 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, |
7239 | struct sysdev_class_attribute *attr, | ||
9205 | char *page) | 7240 | char *page) |
9206 | { | 7241 | { |
9207 | return sprintf(page, "%u\n", sched_mc_power_savings); | 7242 | return sprintf(page, "%u\n", sched_mc_power_savings); |
9208 | } | 7243 | } |
9209 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, | 7244 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, |
7245 | struct sysdev_class_attribute *attr, | ||
9210 | const char *buf, size_t count) | 7246 | const char *buf, size_t count) |
9211 | { | 7247 | { |
9212 | return sched_power_savings_store(buf, count, 0); | 7248 | return sched_power_savings_store(buf, count, 0); |
@@ -9218,11 +7254,13 @@ static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, | |||
9218 | 7254 | ||
9219 | #ifdef CONFIG_SCHED_SMT | 7255 | #ifdef CONFIG_SCHED_SMT |
9220 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, | 7256 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, |
7257 | struct sysdev_class_attribute *attr, | ||
9221 | char *page) | 7258 | char *page) |
9222 | { | 7259 | { |
9223 | return sprintf(page, "%u\n", sched_smt_power_savings); | 7260 | return sprintf(page, "%u\n", sched_smt_power_savings); |
9224 | } | 7261 | } |
9225 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, | 7262 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, |
7263 | struct sysdev_class_attribute *attr, | ||
9226 | const char *buf, size_t count) | 7264 | const char *buf, size_t count) |
9227 | { | 7265 | { |
9228 | return sched_power_savings_store(buf, count, 1); | 7266 | return sched_power_savings_store(buf, count, 1); |
@@ -9437,7 +7475,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, | |||
9437 | tg->rt_rq[cpu] = rt_rq; | 7475 | tg->rt_rq[cpu] = rt_rq; |
9438 | init_rt_rq(rt_rq, rq); | 7476 | init_rt_rq(rt_rq, rq); |
9439 | rt_rq->tg = tg; | 7477 | rt_rq->tg = tg; |
9440 | rt_rq->rt_se = rt_se; | ||
9441 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; | 7478 | rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; |
9442 | if (add) | 7479 | if (add) |
9443 | list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); | 7480 | list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); |
@@ -9468,9 +7505,6 @@ void __init sched_init(void) | |||
9468 | #ifdef CONFIG_RT_GROUP_SCHED | 7505 | #ifdef CONFIG_RT_GROUP_SCHED |
9469 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); | 7506 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
9470 | #endif | 7507 | #endif |
9471 | #ifdef CONFIG_USER_SCHED | ||
9472 | alloc_size *= 2; | ||
9473 | #endif | ||
9474 | #ifdef CONFIG_CPUMASK_OFFSTACK | 7508 | #ifdef CONFIG_CPUMASK_OFFSTACK |
9475 | alloc_size += num_possible_cpus() * cpumask_size(); | 7509 | alloc_size += num_possible_cpus() * cpumask_size(); |
9476 | #endif | 7510 | #endif |
@@ -9484,13 +7518,6 @@ void __init sched_init(void) | |||
9484 | init_task_group.cfs_rq = (struct cfs_rq **)ptr; | 7518 | init_task_group.cfs_rq = (struct cfs_rq **)ptr; |
9485 | ptr += nr_cpu_ids * sizeof(void **); | 7519 | ptr += nr_cpu_ids * sizeof(void **); |
9486 | 7520 | ||
9487 | #ifdef CONFIG_USER_SCHED | ||
9488 | root_task_group.se = (struct sched_entity **)ptr; | ||
9489 | ptr += nr_cpu_ids * sizeof(void **); | ||
9490 | |||
9491 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; | ||
9492 | ptr += nr_cpu_ids * sizeof(void **); | ||
9493 | #endif /* CONFIG_USER_SCHED */ | ||
9494 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 7521 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
9495 | #ifdef CONFIG_RT_GROUP_SCHED | 7522 | #ifdef CONFIG_RT_GROUP_SCHED |
9496 | init_task_group.rt_se = (struct sched_rt_entity **)ptr; | 7523 | init_task_group.rt_se = (struct sched_rt_entity **)ptr; |
@@ -9499,13 +7526,6 @@ void __init sched_init(void) | |||
9499 | init_task_group.rt_rq = (struct rt_rq **)ptr; | 7526 | init_task_group.rt_rq = (struct rt_rq **)ptr; |
9500 | ptr += nr_cpu_ids * sizeof(void **); | 7527 | ptr += nr_cpu_ids * sizeof(void **); |
9501 | 7528 | ||
9502 | #ifdef CONFIG_USER_SCHED | ||
9503 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; | ||
9504 | ptr += nr_cpu_ids * sizeof(void **); | ||
9505 | |||
9506 | root_task_group.rt_rq = (struct rt_rq **)ptr; | ||
9507 | ptr += nr_cpu_ids * sizeof(void **); | ||
9508 | #endif /* CONFIG_USER_SCHED */ | ||
9509 | #endif /* CONFIG_RT_GROUP_SCHED */ | 7529 | #endif /* CONFIG_RT_GROUP_SCHED */ |
9510 | #ifdef CONFIG_CPUMASK_OFFSTACK | 7530 | #ifdef CONFIG_CPUMASK_OFFSTACK |
9511 | for_each_possible_cpu(i) { | 7531 | for_each_possible_cpu(i) { |
@@ -9525,22 +7545,13 @@ void __init sched_init(void) | |||
9525 | #ifdef CONFIG_RT_GROUP_SCHED | 7545 | #ifdef CONFIG_RT_GROUP_SCHED |
9526 | init_rt_bandwidth(&init_task_group.rt_bandwidth, | 7546 | init_rt_bandwidth(&init_task_group.rt_bandwidth, |
9527 | global_rt_period(), global_rt_runtime()); | 7547 | global_rt_period(), global_rt_runtime()); |
9528 | #ifdef CONFIG_USER_SCHED | ||
9529 | init_rt_bandwidth(&root_task_group.rt_bandwidth, | ||
9530 | global_rt_period(), RUNTIME_INF); | ||
9531 | #endif /* CONFIG_USER_SCHED */ | ||
9532 | #endif /* CONFIG_RT_GROUP_SCHED */ | 7548 | #endif /* CONFIG_RT_GROUP_SCHED */ |
9533 | 7549 | ||
9534 | #ifdef CONFIG_GROUP_SCHED | 7550 | #ifdef CONFIG_CGROUP_SCHED |
9535 | list_add(&init_task_group.list, &task_groups); | 7551 | list_add(&init_task_group.list, &task_groups); |
9536 | INIT_LIST_HEAD(&init_task_group.children); | 7552 | INIT_LIST_HEAD(&init_task_group.children); |
9537 | 7553 | ||
9538 | #ifdef CONFIG_USER_SCHED | 7554 | #endif /* CONFIG_CGROUP_SCHED */ |
9539 | INIT_LIST_HEAD(&root_task_group.children); | ||
9540 | init_task_group.parent = &root_task_group; | ||
9541 | list_add(&init_task_group.siblings, &root_task_group.children); | ||
9542 | #endif /* CONFIG_USER_SCHED */ | ||
9543 | #endif /* CONFIG_GROUP_SCHED */ | ||
9544 | 7555 | ||
9545 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | 7556 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP |
9546 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | 7557 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), |
@@ -9580,25 +7591,6 @@ void __init sched_init(void) | |||
9580 | * directly in rq->cfs (i.e init_task_group->se[] = NULL). | 7591 | * directly in rq->cfs (i.e init_task_group->se[] = NULL). |
9581 | */ | 7592 | */ |
9582 | init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); | 7593 | init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); |
9583 | #elif defined CONFIG_USER_SCHED | ||
9584 | root_task_group.shares = NICE_0_LOAD; | ||
9585 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL); | ||
9586 | /* | ||
9587 | * In case of task-groups formed thr' the user id of tasks, | ||
9588 | * init_task_group represents tasks belonging to root user. | ||
9589 | * Hence it forms a sibling of all subsequent groups formed. | ||
9590 | * In this case, init_task_group gets only a fraction of overall | ||
9591 | * system cpu resource, based on the weight assigned to root | ||
9592 | * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished | ||
9593 | * by letting tasks of init_task_group sit in a separate cfs_rq | ||
9594 | * (init_tg_cfs_rq) and having one entity represent this group of | ||
9595 | * tasks in rq->cfs (i.e init_task_group->se[] != NULL). | ||
9596 | */ | ||
9597 | init_tg_cfs_entry(&init_task_group, | ||
9598 | &per_cpu(init_tg_cfs_rq, i), | ||
9599 | &per_cpu(init_sched_entity, i), i, 1, | ||
9600 | root_task_group.se[i]); | ||
9601 | |||
9602 | #endif | 7594 | #endif |
9603 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 7595 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
9604 | 7596 | ||
@@ -9607,12 +7599,6 @@ void __init sched_init(void) | |||
9607 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); | 7599 | INIT_LIST_HEAD(&rq->leaf_rt_rq_list); |
9608 | #ifdef CONFIG_CGROUP_SCHED | 7600 | #ifdef CONFIG_CGROUP_SCHED |
9609 | init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); | 7601 | init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); |
9610 | #elif defined CONFIG_USER_SCHED | ||
9611 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | ||
9612 | init_tg_rt_entry(&init_task_group, | ||
9613 | &per_cpu(init_rt_rq_var, i), | ||
9614 | &per_cpu(init_sched_rt_entity, i), i, 1, | ||
9615 | root_task_group.rt_se[i]); | ||
9616 | #endif | 7602 | #endif |
9617 | #endif | 7603 | #endif |
9618 | 7604 | ||
@@ -9621,16 +7607,15 @@ void __init sched_init(void) | |||
9621 | #ifdef CONFIG_SMP | 7607 | #ifdef CONFIG_SMP |
9622 | rq->sd = NULL; | 7608 | rq->sd = NULL; |
9623 | rq->rd = NULL; | 7609 | rq->rd = NULL; |
7610 | rq->cpu_power = SCHED_LOAD_SCALE; | ||
9624 | rq->post_schedule = 0; | 7611 | rq->post_schedule = 0; |
9625 | rq->active_balance = 0; | 7612 | rq->active_balance = 0; |
9626 | rq->next_balance = jiffies; | 7613 | rq->next_balance = jiffies; |
9627 | rq->push_cpu = 0; | 7614 | rq->push_cpu = 0; |
9628 | rq->cpu = i; | 7615 | rq->cpu = i; |
9629 | rq->online = 0; | 7616 | rq->online = 0; |
9630 | rq->migration_thread = NULL; | ||
9631 | rq->idle_stamp = 0; | 7617 | rq->idle_stamp = 0; |
9632 | rq->avg_idle = 2*sysctl_sched_migration_cost; | 7618 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
9633 | INIT_LIST_HEAD(&rq->migration_queue); | ||
9634 | rq_attach_root(rq, &def_root_domain); | 7619 | rq_attach_root(rq, &def_root_domain); |
9635 | #endif | 7620 | #endif |
9636 | init_rq_hrtick(rq); | 7621 | init_rq_hrtick(rq); |
@@ -9697,7 +7682,7 @@ static inline int preempt_count_equals(int preempt_offset) | |||
9697 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); | 7682 | return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); |
9698 | } | 7683 | } |
9699 | 7684 | ||
9700 | void __might_sleep(char *file, int line, int preempt_offset) | 7685 | void __might_sleep(const char *file, int line, int preempt_offset) |
9701 | { | 7686 | { |
9702 | #ifdef in_atomic | 7687 | #ifdef in_atomic |
9703 | static unsigned long prev_jiffy; /* ratelimiting */ | 7688 | static unsigned long prev_jiffy; /* ratelimiting */ |
@@ -9731,7 +7716,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p) | |||
9731 | { | 7716 | { |
9732 | int on_rq; | 7717 | int on_rq; |
9733 | 7718 | ||
9734 | update_rq_clock(rq); | ||
9735 | on_rq = p->se.on_rq; | 7719 | on_rq = p->se.on_rq; |
9736 | if (on_rq) | 7720 | if (on_rq) |
9737 | deactivate_task(rq, p, 0); | 7721 | deactivate_task(rq, p, 0); |
@@ -9758,9 +7742,9 @@ void normalize_rt_tasks(void) | |||
9758 | 7742 | ||
9759 | p->se.exec_start = 0; | 7743 | p->se.exec_start = 0; |
9760 | #ifdef CONFIG_SCHEDSTATS | 7744 | #ifdef CONFIG_SCHEDSTATS |
9761 | p->se.wait_start = 0; | 7745 | p->se.statistics.wait_start = 0; |
9762 | p->se.sleep_start = 0; | 7746 | p->se.statistics.sleep_start = 0; |
9763 | p->se.block_start = 0; | 7747 | p->se.statistics.block_start = 0; |
9764 | #endif | 7748 | #endif |
9765 | 7749 | ||
9766 | if (!rt_task(p)) { | 7750 | if (!rt_task(p)) { |
@@ -9787,9 +7771,9 @@ void normalize_rt_tasks(void) | |||
9787 | 7771 | ||
9788 | #endif /* CONFIG_MAGIC_SYSRQ */ | 7772 | #endif /* CONFIG_MAGIC_SYSRQ */ |
9789 | 7773 | ||
9790 | #ifdef CONFIG_IA64 | 7774 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
9791 | /* | 7775 | /* |
9792 | * These functions are only useful for the IA64 MCA handling. | 7776 | * These functions are only useful for the IA64 MCA handling, or kdb. |
9793 | * | 7777 | * |
9794 | * They can only be called when the whole system has been | 7778 | * They can only be called when the whole system has been |
9795 | * stopped - every CPU needs to be quiescent, and no scheduling | 7779 | * stopped - every CPU needs to be quiescent, and no scheduling |
@@ -9809,6 +7793,9 @@ struct task_struct *curr_task(int cpu) | |||
9809 | return cpu_curr(cpu); | 7793 | return cpu_curr(cpu); |
9810 | } | 7794 | } |
9811 | 7795 | ||
7796 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ | ||
7797 | |||
7798 | #ifdef CONFIG_IA64 | ||
9812 | /** | 7799 | /** |
9813 | * set_curr_task - set the current task for a given cpu. | 7800 | * set_curr_task - set the current task for a given cpu. |
9814 | * @cpu: the processor in question. | 7801 | * @cpu: the processor in question. |
@@ -10008,7 +7995,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) | |||
10008 | } | 7995 | } |
10009 | #endif /* CONFIG_RT_GROUP_SCHED */ | 7996 | #endif /* CONFIG_RT_GROUP_SCHED */ |
10010 | 7997 | ||
10011 | #ifdef CONFIG_GROUP_SCHED | 7998 | #ifdef CONFIG_CGROUP_SCHED |
10012 | static void free_sched_group(struct task_group *tg) | 7999 | static void free_sched_group(struct task_group *tg) |
10013 | { | 8000 | { |
10014 | free_fair_sched_group(tg); | 8001 | free_fair_sched_group(tg); |
@@ -10093,8 +8080,6 @@ void sched_move_task(struct task_struct *tsk) | |||
10093 | 8080 | ||
10094 | rq = task_rq_lock(tsk, &flags); | 8081 | rq = task_rq_lock(tsk, &flags); |
10095 | 8082 | ||
10096 | update_rq_clock(rq); | ||
10097 | |||
10098 | running = task_current(rq, tsk); | 8083 | running = task_current(rq, tsk); |
10099 | on_rq = tsk->se.on_rq; | 8084 | on_rq = tsk->se.on_rq; |
10100 | 8085 | ||
@@ -10117,7 +8102,7 @@ void sched_move_task(struct task_struct *tsk) | |||
10117 | 8102 | ||
10118 | task_rq_unlock(rq, &flags); | 8103 | task_rq_unlock(rq, &flags); |
10119 | } | 8104 | } |
10120 | #endif /* CONFIG_GROUP_SCHED */ | 8105 | #endif /* CONFIG_CGROUP_SCHED */ |
10121 | 8106 | ||
10122 | #ifdef CONFIG_FAIR_GROUP_SCHED | 8107 | #ifdef CONFIG_FAIR_GROUP_SCHED |
10123 | static void __set_se_shares(struct sched_entity *se, unsigned long shares) | 8108 | static void __set_se_shares(struct sched_entity *se, unsigned long shares) |
@@ -10259,13 +8244,6 @@ static int tg_schedulable(struct task_group *tg, void *data) | |||
10259 | runtime = d->rt_runtime; | 8244 | runtime = d->rt_runtime; |
10260 | } | 8245 | } |
10261 | 8246 | ||
10262 | #ifdef CONFIG_USER_SCHED | ||
10263 | if (tg == &root_task_group) { | ||
10264 | period = global_rt_period(); | ||
10265 | runtime = global_rt_runtime(); | ||
10266 | } | ||
10267 | #endif | ||
10268 | |||
10269 | /* | 8247 | /* |
10270 | * Cannot have more runtime than the period. | 8248 | * Cannot have more runtime than the period. |
10271 | */ | 8249 | */ |
@@ -10668,7 +8646,7 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
10668 | struct cpuacct { | 8646 | struct cpuacct { |
10669 | struct cgroup_subsys_state css; | 8647 | struct cgroup_subsys_state css; |
10670 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 8648 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
10671 | u64 *cpuusage; | 8649 | u64 __percpu *cpuusage; |
10672 | struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; | 8650 | struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; |
10673 | struct cpuacct *parent; | 8651 | struct cpuacct *parent; |
10674 | }; | 8652 | }; |
@@ -10885,12 +8863,30 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | |||
10885 | } | 8863 | } |
10886 | 8864 | ||
10887 | /* | 8865 | /* |
8866 | * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large | ||
8867 | * in cputime_t units. As a result, cpuacct_update_stats calls | ||
8868 | * percpu_counter_add with values large enough to always overflow the | ||
8869 | * per cpu batch limit causing bad SMP scalability. | ||
8870 | * | ||
8871 | * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we | ||
8872 | * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled | ||
8873 | * and enabled. We cap it at INT_MAX which is the largest allowed batch value. | ||
8874 | */ | ||
8875 | #ifdef CONFIG_SMP | ||
8876 | #define CPUACCT_BATCH \ | ||
8877 | min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX) | ||
8878 | #else | ||
8879 | #define CPUACCT_BATCH 0 | ||
8880 | #endif | ||
8881 | |||
8882 | /* | ||
10888 | * Charge the system/user time to the task's accounting group. | 8883 | * Charge the system/user time to the task's accounting group. |
10889 | */ | 8884 | */ |
10890 | static void cpuacct_update_stats(struct task_struct *tsk, | 8885 | static void cpuacct_update_stats(struct task_struct *tsk, |
10891 | enum cpuacct_stat_index idx, cputime_t val) | 8886 | enum cpuacct_stat_index idx, cputime_t val) |
10892 | { | 8887 | { |
10893 | struct cpuacct *ca; | 8888 | struct cpuacct *ca; |
8889 | int batch = CPUACCT_BATCH; | ||
10894 | 8890 | ||
10895 | if (unlikely(!cpuacct_subsys.active)) | 8891 | if (unlikely(!cpuacct_subsys.active)) |
10896 | return; | 8892 | return; |
@@ -10899,7 +8895,7 @@ static void cpuacct_update_stats(struct task_struct *tsk, | |||
10899 | ca = task_ca(tsk); | 8895 | ca = task_ca(tsk); |
10900 | 8896 | ||
10901 | do { | 8897 | do { |
10902 | percpu_counter_add(&ca->cpustat[idx], val); | 8898 | __percpu_counter_add(&ca->cpustat[idx], val, batch); |
10903 | ca = ca->parent; | 8899 | ca = ca->parent; |
10904 | } while (ca); | 8900 | } while (ca); |
10905 | rcu_read_unlock(); | 8901 | rcu_read_unlock(); |
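The block comment added in this hunk explains the fix: with CONFIG_VIRT_CPU_ACCOUNTING, a single jiffy expressed in cputime_t units is already larger than the default percpu_counter batch, so every cpuacct_update_stats() call spills straight into the shared counter and its lock. Scaling the batch by cputime_one_jiffy (capped at INT_MAX) restores the intended batching. A self-contained userspace model of the effect, with made-up numbers; it only illustrates how a larger batch reduces shared-counter updates and is not kernel code:

	/* Userspace model of the batching idea behind __percpu_counter_add():
	 * per-CPU deltas accumulate locally and are folded into the shared
	 * count only once |delta| reaches the batch, so a larger batch means
	 * fewer lock-protected updates. */
	#include <stdio.h>
	#include <limits.h>

	struct model_counter {
		long long shared;	/* protected by a spinlock in the kernel */
		long long local;	/* this CPU's unflushed delta */
		long folds;		/* how often the shared count was touched */
	};

	static void model_add(struct model_counter *c, long long amount, long batch)
	{
		c->local += amount;
		if (c->local >= batch || c->local <= -batch) {
			c->shared += c->local;	/* lock + update in the kernel */
			c->local = 0;
			c->folds++;
		}
	}

	int main(void)
	{
		/* Hypothetical: 1000 ticks charged, one jiffy per tick. */
		const long long one_jiffy = 1000000;	/* cputime units, made up */
		const long percpu_counter_batch = 32;

		struct model_counter unscaled = {0}, scaled = {0};
		long scaled_batch = percpu_counter_batch * one_jiffy;
		if (scaled_batch > INT_MAX)
			scaled_batch = INT_MAX;	/* mirrors the INT_MAX cap */

		for (int i = 0; i < 1000; i++) {
			model_add(&unscaled, one_jiffy, percpu_counter_batch);
			model_add(&scaled, one_jiffy, scaled_batch);
		}
		printf("default batch: %ld shared updates\n", unscaled.folds);
		printf("scaled batch:  %ld shared updates\n", scaled.folds);
		return 0;
	}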
@@ -10916,43 +8912,32 @@ struct cgroup_subsys cpuacct_subsys = { | |||
10916 | 8912 | ||
10917 | #ifndef CONFIG_SMP | 8913 | #ifndef CONFIG_SMP |
10918 | 8914 | ||
10919 | int rcu_expedited_torture_stats(char *page) | ||
10920 | { | ||
10921 | return 0; | ||
10922 | } | ||
10923 | EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats); | ||
10924 | |||
10925 | void synchronize_sched_expedited(void) | 8915 | void synchronize_sched_expedited(void) |
10926 | { | 8916 | { |
8917 | barrier(); | ||
10927 | } | 8918 | } |
10928 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | 8919 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
10929 | 8920 | ||
10930 | #else /* #ifndef CONFIG_SMP */ | 8921 | #else /* #ifndef CONFIG_SMP */ |
10931 | 8922 | ||
10932 | static DEFINE_PER_CPU(struct migration_req, rcu_migration_req); | 8923 | static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0); |
10933 | static DEFINE_MUTEX(rcu_sched_expedited_mutex); | ||
10934 | |||
10935 | #define RCU_EXPEDITED_STATE_POST -2 | ||
10936 | #define RCU_EXPEDITED_STATE_IDLE -1 | ||
10937 | |||
10938 | static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | ||
10939 | 8924 | ||
10940 | int rcu_expedited_torture_stats(char *page) | 8925 | static int synchronize_sched_expedited_cpu_stop(void *data) |
10941 | { | 8926 | { |
10942 | int cnt = 0; | 8927 | /* |
10943 | int cpu; | 8928 | * There must be a full memory barrier on each affected CPU |
10944 | 8929 | * between the time that try_stop_cpus() is called and the | |
10945 | cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state); | 8930 | * time that it returns. |
10946 | for_each_online_cpu(cpu) { | 8931 | * |
10947 | cnt += sprintf(&page[cnt], " %d:%d", | 8932 | * In the current initial implementation of cpu_stop, the |
10948 | cpu, per_cpu(rcu_migration_req, cpu).dest_cpu); | 8933 | * above condition is already met when the control reaches |
10949 | } | 8934 | * this point and the following smp_mb() is not strictly |
10950 | cnt += sprintf(&page[cnt], "\n"); | 8935 | * necessary. Do smp_mb() anyway for documentation and |
10951 | return cnt; | 8936 | * robustness against future implementation changes. |
8937 | */ | ||
8938 | smp_mb(); /* See above comment block. */ | ||
8939 | return 0; | ||
10952 | } | 8940 | } |
10953 | EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats); | ||
10954 | |||
10955 | static long synchronize_sched_expedited_count; | ||
10956 | 8941 | ||
10957 | /* | 8942 | /* |
10958 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" | 8943 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" |
@@ -10966,18 +8951,14 @@ static long synchronize_sched_expedited_count; | |||
10966 | */ | 8951 | */ |
10967 | void synchronize_sched_expedited(void) | 8952 | void synchronize_sched_expedited(void) |
10968 | { | 8953 | { |
10969 | int cpu; | 8954 | int snap, trycount = 0; |
10970 | unsigned long flags; | ||
10971 | bool need_full_sync = 0; | ||
10972 | struct rq *rq; | ||
10973 | struct migration_req *req; | ||
10974 | long snap; | ||
10975 | int trycount = 0; | ||
10976 | 8955 | ||
10977 | smp_mb(); /* ensure prior mod happens before capturing snap. */ | 8956 | smp_mb(); /* ensure prior mod happens before capturing snap. */ |
10978 | snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1; | 8957 | snap = atomic_read(&synchronize_sched_expedited_count) + 1; |
10979 | get_online_cpus(); | 8958 | get_online_cpus(); |
10980 | while (!mutex_trylock(&rcu_sched_expedited_mutex)) { | 8959 | while (try_stop_cpus(cpu_online_mask, |
8960 | synchronize_sched_expedited_cpu_stop, | ||
8961 | NULL) == -EAGAIN) { | ||
10981 | put_online_cpus(); | 8962 | put_online_cpus(); |
10982 | if (trycount++ < 10) | 8963 | if (trycount++ < 10) |
10983 | udelay(trycount * num_online_cpus()); | 8964 | udelay(trycount * num_online_cpus()); |
@@ -10985,41 +8966,15 @@ void synchronize_sched_expedited(void) | |||
10985 | synchronize_sched(); | 8966 | synchronize_sched(); |
10986 | return; | 8967 | return; |
10987 | } | 8968 | } |
10988 | if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) { | 8969 | if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) { |
10989 | smp_mb(); /* ensure test happens before caller kfree */ | 8970 | smp_mb(); /* ensure test happens before caller kfree */ |
10990 | return; | 8971 | return; |
10991 | } | 8972 | } |
10992 | get_online_cpus(); | 8973 | get_online_cpus(); |
10993 | } | 8974 | } |
10994 | rcu_expedited_state = RCU_EXPEDITED_STATE_POST; | 8975 | atomic_inc(&synchronize_sched_expedited_count); |
10995 | for_each_online_cpu(cpu) { | 8976 | smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */ |
10996 | rq = cpu_rq(cpu); | ||
10997 | req = &per_cpu(rcu_migration_req, cpu); | ||
10998 | init_completion(&req->done); | ||
10999 | req->task = NULL; | ||
11000 | req->dest_cpu = RCU_MIGRATION_NEED_QS; | ||
11001 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
11002 | list_add(&req->list, &rq->migration_queue); | ||
11003 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
11004 | wake_up_process(rq->migration_thread); | ||
11005 | } | ||
11006 | for_each_online_cpu(cpu) { | ||
11007 | rcu_expedited_state = cpu; | ||
11008 | req = &per_cpu(rcu_migration_req, cpu); | ||
11009 | rq = cpu_rq(cpu); | ||
11010 | wait_for_completion(&req->done); | ||
11011 | raw_spin_lock_irqsave(&rq->lock, flags); | ||
11012 | if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) | ||
11013 | need_full_sync = 1; | ||
11014 | req->dest_cpu = RCU_MIGRATION_IDLE; | ||
11015 | raw_spin_unlock_irqrestore(&rq->lock, flags); | ||
11016 | } | ||
11017 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | ||
11018 | synchronize_sched_expedited_count++; | ||
11019 | mutex_unlock(&rcu_sched_expedited_mutex); | ||
11020 | put_online_cpus(); | 8977 | put_online_cpus(); |
11021 | if (need_full_sync) | ||
11022 | synchronize_sched(); | ||
11023 | } | 8978 | } |
11024 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | 8979 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); |
11025 | 8980 | ||
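Taken together, the last three hunks replace the migration-thread based expedited grace period with one built on the generic cpu_stop machinery: snapshot a generation counter, try to run a no-op stopper on every online CPU (forcing a context switch on each, which is what completes the grace period), and on contention either back off, piggy-back on a concurrent caller whose pass already finished, or fall back to synchronize_sched(). An annotated sketch of that control flow follows; it mirrors the hunks above, with the else branch that falls between the last two hunks reconstructed, so treat it as a reading aid rather than a verbatim copy:

	static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);

	static int synchronize_sched_expedited_cpu_stop(void *data)
	{
		smp_mb();	/* full barrier; see the comment block in the hunk */
		return 0;
	}

	void synchronize_sched_expedited_sketch(void)
	{
		int snap, trycount = 0;

		smp_mb();	/* order prior modifications before the snapshot */
		snap = atomic_read(&synchronize_sched_expedited_count) + 1;
		get_online_cpus();

		/* Run the stopper (hence a context switch) on every online CPU. */
		while (try_stop_cpus(cpu_online_mask,
				     synchronize_sched_expedited_cpu_stop,
				     NULL) == -EAGAIN) {
			put_online_cpus();

			if (trycount++ < 10) {
				/* Brief back-off, then try again. */
				udelay(trycount * num_online_cpus());
			} else {
				/* Too much contention: take the slow path. */
				synchronize_sched();
				return;
			}

			/* A concurrent caller completed an expedited pass after
			 * our snapshot, so its grace period covers us too. */
			if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
				smp_mb();	/* test before the caller's kfree */
				return;
			}
			get_online_cpus();
		}

		/* Every online CPU ran the stopper: the grace period is over. */
		atomic_inc(&synchronize_sched_expedited_count);
		smp_mb__after_atomic_inc();	/* post-GP actions seen after GP */
		put_online_cpus();
	}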