Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 3147
1 file changed, 717 insertions(+), 2430 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..6af210a7de70 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/debugfs.h> 71#include <linux/debugfs.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/ftrace.h> 73#include <linux/ftrace.h>
74#include <linux/slab.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -141,7 +142,7 @@ struct rt_prio_array {
141 142
142struct rt_bandwidth { 143struct rt_bandwidth {
143 /* nests inside the rq lock: */ 144 /* nests inside the rq lock: */
144 spinlock_t rt_runtime_lock; 145 raw_spinlock_t rt_runtime_lock;
145 ktime_t rt_period; 146 ktime_t rt_period;
146 u64 rt_runtime; 147 u64 rt_runtime;
147 struct hrtimer rt_period_timer; 148 struct hrtimer rt_period_timer;
@@ -178,7 +179,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
178 rt_b->rt_period = ns_to_ktime(period); 179 rt_b->rt_period = ns_to_ktime(period);
179 rt_b->rt_runtime = runtime; 180 rt_b->rt_runtime = runtime;
180 181
181 spin_lock_init(&rt_b->rt_runtime_lock); 182 raw_spin_lock_init(&rt_b->rt_runtime_lock);
182 183
183 hrtimer_init(&rt_b->rt_period_timer, 184 hrtimer_init(&rt_b->rt_period_timer,
184 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 185 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +201,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
200 if (hrtimer_active(&rt_b->rt_period_timer)) 201 if (hrtimer_active(&rt_b->rt_period_timer))
201 return; 202 return;
202 203
203 spin_lock(&rt_b->rt_runtime_lock); 204 raw_spin_lock(&rt_b->rt_runtime_lock);
204 for (;;) { 205 for (;;) {
205 unsigned long delta; 206 unsigned long delta;
206 ktime_t soft, hard; 207 ktime_t soft, hard;
@@ -217,7 +218,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, 218 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
218 HRTIMER_MODE_ABS_PINNED, 0); 219 HRTIMER_MODE_ABS_PINNED, 0);
219 } 220 }
220 spin_unlock(&rt_b->rt_runtime_lock); 221 raw_spin_unlock(&rt_b->rt_runtime_lock);
221} 222}
222 223
223#ifdef CONFIG_RT_GROUP_SCHED 224#ifdef CONFIG_RT_GROUP_SCHED
@@ -233,7 +234,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
233 */ 234 */
234static DEFINE_MUTEX(sched_domains_mutex); 235static DEFINE_MUTEX(sched_domains_mutex);
235 236
236#ifdef CONFIG_GROUP_SCHED 237#ifdef CONFIG_CGROUP_SCHED
237 238
238#include <linux/cgroup.h> 239#include <linux/cgroup.h>
239 240
@@ -243,13 +244,7 @@ static LIST_HEAD(task_groups);
243 244
244/* task group related information */ 245/* task group related information */
245struct task_group { 246struct task_group {
246#ifdef CONFIG_CGROUP_SCHED
247 struct cgroup_subsys_state css; 247 struct cgroup_subsys_state css;
248#endif
249
250#ifdef CONFIG_USER_SCHED
251 uid_t uid;
252#endif
253 248
254#ifdef CONFIG_FAIR_GROUP_SCHED 249#ifdef CONFIG_FAIR_GROUP_SCHED
255 /* schedulable entities of this group on each cpu */ 250 /* schedulable entities of this group on each cpu */
@@ -274,35 +269,7 @@ struct task_group {
274 struct list_head children; 269 struct list_head children;
275}; 270};
276 271
277#ifdef CONFIG_USER_SCHED
278
279/* Helper function to pass uid information to create_sched_user() */
280void set_tg_uid(struct user_struct *user)
281{
282 user->tg->uid = user->uid;
283}
284
285/*
286 * Root task group.
287 * Every UID task group (including init_task_group aka UID-0) will
288 * be a child to this group.
289 */
290struct task_group root_task_group;
291
292#ifdef CONFIG_FAIR_GROUP_SCHED
293/* Default task group's sched entity on each cpu */
294static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
295/* Default task group's cfs_rq on each cpu */
296static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
297#endif /* CONFIG_FAIR_GROUP_SCHED */
298
299#ifdef CONFIG_RT_GROUP_SCHED
300static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
301static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
302#endif /* CONFIG_RT_GROUP_SCHED */
303#else /* !CONFIG_USER_SCHED */
304#define root_task_group init_task_group 272#define root_task_group init_task_group
305#endif /* CONFIG_USER_SCHED */
306 273
307/* task_group_lock serializes add/remove of task groups and also changes to 274/* task_group_lock serializes add/remove of task groups and also changes to
308 * a task group's cpu shares. 275 * a task group's cpu shares.
@@ -318,11 +285,7 @@ static int root_task_group_empty(void)
318} 285}
319#endif 286#endif
320 287
321#ifdef CONFIG_USER_SCHED
322# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
323#else /* !CONFIG_USER_SCHED */
324# define INIT_TASK_GROUP_LOAD NICE_0_LOAD 288# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
325#endif /* CONFIG_USER_SCHED */
326 289
327/* 290/*
328 * A weight of 0 or 1 can cause arithmetics problems. 291 * A weight of 0 or 1 can cause arithmetics problems.
@@ -348,11 +311,7 @@ static inline struct task_group *task_group(struct task_struct *p)
348{ 311{
349 struct task_group *tg; 312 struct task_group *tg;
350 313
351#ifdef CONFIG_USER_SCHED 314#ifdef CONFIG_CGROUP_SCHED
352 rcu_read_lock();
353 tg = __task_cred(p)->user->tg;
354 rcu_read_unlock();
355#elif defined(CONFIG_CGROUP_SCHED)
356 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), 315 tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
357 struct task_group, css); 316 struct task_group, css);
358#else 317#else
@@ -383,7 +342,7 @@ static inline struct task_group *task_group(struct task_struct *p)
383 return NULL; 342 return NULL;
384} 343}
385 344
386#endif /* CONFIG_GROUP_SCHED */ 345#endif /* CONFIG_CGROUP_SCHED */
387 346
388/* CFS-related fields in a runqueue */ 347/* CFS-related fields in a runqueue */
389struct cfs_rq { 348struct cfs_rq {
@@ -470,7 +429,7 @@ struct rt_rq {
470 u64 rt_time; 429 u64 rt_time;
471 u64 rt_runtime; 430 u64 rt_runtime;
472 /* Nests inside the rq lock: */ 431 /* Nests inside the rq lock: */
473 spinlock_t rt_runtime_lock; 432 raw_spinlock_t rt_runtime_lock;
474 433
475#ifdef CONFIG_RT_GROUP_SCHED 434#ifdef CONFIG_RT_GROUP_SCHED
476 unsigned long rt_nr_boosted; 435 unsigned long rt_nr_boosted;
@@ -478,7 +437,6 @@ struct rt_rq {
478 struct rq *rq; 437 struct rq *rq;
479 struct list_head leaf_rt_rq_list; 438 struct list_head leaf_rt_rq_list;
480 struct task_group *tg; 439 struct task_group *tg;
481 struct sched_rt_entity *rt_se;
482#endif 440#endif
483}; 441};
484 442
@@ -525,7 +483,7 @@ static struct root_domain def_root_domain;
525 */ 483 */
526struct rq { 484struct rq {
527 /* runqueue lock: */ 485 /* runqueue lock: */
528 spinlock_t lock; 486 raw_spinlock_t lock;
529 487
530 /* 488 /*
531 * nr_running and cpu_load should be in the same cacheline because 489 * nr_running and cpu_load should be in the same cacheline because
@@ -535,14 +493,12 @@ struct rq {
535 #define CPU_LOAD_IDX_MAX 5 493 #define CPU_LOAD_IDX_MAX 5
536 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 494 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
537#ifdef CONFIG_NO_HZ 495#ifdef CONFIG_NO_HZ
538 unsigned long last_tick_seen;
539 unsigned char in_nohz_recently; 496 unsigned char in_nohz_recently;
540#endif 497#endif
541 /* capture load from *all* tasks on this cpu: */ 498 /* capture load from *all* tasks on this cpu: */
542 struct load_weight load; 499 struct load_weight load;
543 unsigned long nr_load_updates; 500 unsigned long nr_load_updates;
544 u64 nr_switches; 501 u64 nr_switches;
545 u64 nr_migrations_in;
546 502
547 struct cfs_rq cfs; 503 struct cfs_rq cfs;
548 struct rt_rq rt; 504 struct rt_rq rt;
@@ -591,6 +547,8 @@ struct rq {
591 547
592 u64 rt_avg; 548 u64 rt_avg;
593 u64 age_stamp; 549 u64 age_stamp;
550 u64 idle_stamp;
551 u64 avg_idle;
594#endif 552#endif
595 553
596 /* calc_load related fields */ 554 /* calc_load related fields */
@@ -645,6 +603,11 @@ static inline int cpu_of(struct rq *rq)
645#endif 603#endif
646} 604}
647 605
606#define rcu_dereference_check_sched_domain(p) \
607 rcu_dereference_check((p), \
608 rcu_read_lock_sched_held() || \
609 lockdep_is_held(&sched_domains_mutex))
610
648/* 611/*
649 * The domain tree (rq->sd) is protected by RCU's quiescent state transition. 612 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
650 * See detach_destroy_domains: synchronize_sched for details. 613 * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +616,7 @@ static inline int cpu_of(struct rq *rq)
653 * preempt-disabled sections. 616 * preempt-disabled sections.
654 */ 617 */
655#define for_each_domain(cpu, __sd) \ 618#define for_each_domain(cpu, __sd) \
656 for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) 619 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
657 620
658#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 621#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
659#define this_rq() (&__get_cpu_var(runqueues)) 622#define this_rq() (&__get_cpu_var(runqueues))
@@ -685,7 +648,7 @@ inline void update_rq_clock(struct rq *rq)
685 */ 648 */
686int runqueue_is_locked(int cpu) 649int runqueue_is_locked(int cpu)
687{ 650{
688 return spin_is_locked(&cpu_rq(cpu)->lock); 651 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
689} 652}
690 653
691/* 654/*
@@ -772,7 +735,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
772 if (!sched_feat_names[i]) 735 if (!sched_feat_names[i])
773 return -EINVAL; 736 return -EINVAL;
774 737
775 filp->f_pos += cnt; 738 *ppos += cnt;
776 739
777 return cnt; 740 return cnt;
778} 741}
@@ -814,6 +777,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
814 * default: 0.25ms 777 * default: 0.25ms
815 */ 778 */
816unsigned int sysctl_sched_shares_ratelimit = 250000; 779unsigned int sysctl_sched_shares_ratelimit = 250000;
780unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
817 781
818/* 782/*
819 * Inject some fuzzyness into changing the per-cpu group shares 783 * Inject some fuzzyness into changing the per-cpu group shares
@@ -892,7 +856,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
892 */ 856 */
893 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 857 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
894 858
895 spin_unlock_irq(&rq->lock); 859 raw_spin_unlock_irq(&rq->lock);
896} 860}
897 861
898#else /* __ARCH_WANT_UNLOCKED_CTXSW */ 862#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -916,9 +880,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
916 next->oncpu = 1; 880 next->oncpu = 1;
917#endif 881#endif
918#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 882#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
919 spin_unlock_irq(&rq->lock); 883 raw_spin_unlock_irq(&rq->lock);
920#else 884#else
921 spin_unlock(&rq->lock); 885 raw_spin_unlock(&rq->lock);
922#endif 886#endif
923} 887}
924 888
@@ -940,18 +904,35 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
940#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 904#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
941 905
942/* 906/*
907 * Check whether the task is waking, we use this to synchronize against
908 * ttwu() so that task_cpu() reports a stable number.
909 *
910 * We need to make an exception for PF_STARTING tasks because the fork
911 * path might require task_rq_lock() to work, eg. it can call
912 * set_cpus_allowed_ptr() from the cpuset clone_ns code.
913 */
914static inline int task_is_waking(struct task_struct *p)
915{
916 return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
917}
918
919/*
943 * __task_rq_lock - lock the runqueue a given task resides on. 920 * __task_rq_lock - lock the runqueue a given task resides on.
944 * Must be called interrupts disabled. 921 * Must be called interrupts disabled.
945 */ 922 */
946static inline struct rq *__task_rq_lock(struct task_struct *p) 923static inline struct rq *__task_rq_lock(struct task_struct *p)
947 __acquires(rq->lock) 924 __acquires(rq->lock)
948{ 925{
926 struct rq *rq;
927
949 for (;;) { 928 for (;;) {
950 struct rq *rq = task_rq(p); 929 while (task_is_waking(p))
951 spin_lock(&rq->lock); 930 cpu_relax();
952 if (likely(rq == task_rq(p))) 931 rq = task_rq(p);
932 raw_spin_lock(&rq->lock);
933 if (likely(rq == task_rq(p) && !task_is_waking(p)))
953 return rq; 934 return rq;
954 spin_unlock(&rq->lock); 935 raw_spin_unlock(&rq->lock);
955 } 936 }
956} 937}
957 938
@@ -966,12 +947,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
966 struct rq *rq; 947 struct rq *rq;
967 948
968 for (;;) { 949 for (;;) {
950 while (task_is_waking(p))
951 cpu_relax();
969 local_irq_save(*flags); 952 local_irq_save(*flags);
970 rq = task_rq(p); 953 rq = task_rq(p);
971 spin_lock(&rq->lock); 954 raw_spin_lock(&rq->lock);
972 if (likely(rq == task_rq(p))) 955 if (likely(rq == task_rq(p) && !task_is_waking(p)))
973 return rq; 956 return rq;
974 spin_unlock_irqrestore(&rq->lock, *flags); 957 raw_spin_unlock_irqrestore(&rq->lock, *flags);
975 } 958 }
976} 959}
977 960
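The two retry loops above follow a lock-by-indirection pattern: the lock protecting a task lives in whichever runqueue the task currently points at, so the locker has to re-check task_rq(p) after acquiring the lock and spin while the task is in the transient TASK_WAKING state. A minimal userspace sketch of the same idea, using pthreads and hypothetical names rather than kernel primitives:

	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>

	struct container {
		pthread_mutex_t lock;
		/* data protected by lock */
	};

	struct object {
		_Atomic(struct container *) home;	/* may be changed by a migrator */
		atomic_bool migrating;			/* analogous to TASK_WAKING */
	};

	/*
	 * Lock the container an object currently lives in.  A concurrent migrator
	 * may move the object between the load of ->home and the lock acquisition,
	 * so re-check after locking and retry on a mismatch.
	 */
	static struct container *object_lock_home(struct object *obj)
	{
		struct container *c;

		for (;;) {
			while (atomic_load(&obj->migrating))
				sched_yield();			/* stand-in for cpu_relax() */
			c = atomic_load(&obj->home);
			pthread_mutex_lock(&c->lock);
			if (c == atomic_load(&obj->home) && !atomic_load(&obj->migrating))
				return c;			/* pointer still valid, lock held */
			pthread_mutex_unlock(&c->lock);		/* lost the race, try again */
		}
	}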
@@ -980,19 +963,19 @@ void task_rq_unlock_wait(struct task_struct *p)
980 struct rq *rq = task_rq(p); 963 struct rq *rq = task_rq(p);
981 964
982 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 965 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
983 spin_unlock_wait(&rq->lock); 966 raw_spin_unlock_wait(&rq->lock);
984} 967}
985 968
986static void __task_rq_unlock(struct rq *rq) 969static void __task_rq_unlock(struct rq *rq)
987 __releases(rq->lock) 970 __releases(rq->lock)
988{ 971{
989 spin_unlock(&rq->lock); 972 raw_spin_unlock(&rq->lock);
990} 973}
991 974
992static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 975static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
993 __releases(rq->lock) 976 __releases(rq->lock)
994{ 977{
995 spin_unlock_irqrestore(&rq->lock, *flags); 978 raw_spin_unlock_irqrestore(&rq->lock, *flags);
996} 979}
997 980
998/* 981/*
@@ -1005,7 +988,7 @@ static struct rq *this_rq_lock(void)
1005 988
1006 local_irq_disable(); 989 local_irq_disable();
1007 rq = this_rq(); 990 rq = this_rq();
1008 spin_lock(&rq->lock); 991 raw_spin_lock(&rq->lock);
1009 992
1010 return rq; 993 return rq;
1011} 994}
@@ -1052,10 +1035,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1052 1035
1053 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1036 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1054 1037
1055 spin_lock(&rq->lock); 1038 raw_spin_lock(&rq->lock);
1056 update_rq_clock(rq); 1039 update_rq_clock(rq);
1057 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1040 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
1058 spin_unlock(&rq->lock); 1041 raw_spin_unlock(&rq->lock);
1059 1042
1060 return HRTIMER_NORESTART; 1043 return HRTIMER_NORESTART;
1061} 1044}
@@ -1068,10 +1051,10 @@ static void __hrtick_start(void *arg)
1068{ 1051{
1069 struct rq *rq = arg; 1052 struct rq *rq = arg;
1070 1053
1071 spin_lock(&rq->lock); 1054 raw_spin_lock(&rq->lock);
1072 hrtimer_restart(&rq->hrtick_timer); 1055 hrtimer_restart(&rq->hrtick_timer);
1073 rq->hrtick_csd_pending = 0; 1056 rq->hrtick_csd_pending = 0;
1074 spin_unlock(&rq->lock); 1057 raw_spin_unlock(&rq->lock);
1075} 1058}
1076 1059
1077/* 1060/*
@@ -1178,7 +1161,7 @@ static void resched_task(struct task_struct *p)
1178{ 1161{
1179 int cpu; 1162 int cpu;
1180 1163
1181 assert_spin_locked(&task_rq(p)->lock); 1164 assert_raw_spin_locked(&task_rq(p)->lock);
1182 1165
1183 if (test_tsk_need_resched(p)) 1166 if (test_tsk_need_resched(p))
1184 return; 1167 return;
@@ -1200,10 +1183,10 @@ static void resched_cpu(int cpu)
1200 struct rq *rq = cpu_rq(cpu); 1183 struct rq *rq = cpu_rq(cpu);
1201 unsigned long flags; 1184 unsigned long flags;
1202 1185
1203 if (!spin_trylock_irqsave(&rq->lock, flags)) 1186 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
1204 return; 1187 return;
1205 resched_task(cpu_curr(cpu)); 1188 resched_task(cpu_curr(cpu));
1206 spin_unlock_irqrestore(&rq->lock, flags); 1189 raw_spin_unlock_irqrestore(&rq->lock, flags);
1207} 1190}
1208 1191
1209#ifdef CONFIG_NO_HZ 1192#ifdef CONFIG_NO_HZ
@@ -1272,7 +1255,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1272#else /* !CONFIG_SMP */ 1255#else /* !CONFIG_SMP */
1273static void resched_task(struct task_struct *p) 1256static void resched_task(struct task_struct *p)
1274{ 1257{
1275 assert_spin_locked(&task_rq(p)->lock); 1258 assert_raw_spin_locked(&task_rq(p)->lock);
1276 set_tsk_need_resched(p); 1259 set_tsk_need_resched(p);
1277} 1260}
1278 1261
@@ -1389,32 +1372,6 @@ static const u32 prio_to_wmult[40] = {
1389 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, 1372 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1390}; 1373};
1391 1374
1392static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
1393
1394/*
1395 * runqueue iterator, to support SMP load-balancing between different
1396 * scheduling classes, without having to expose their internal data
1397 * structures to the load-balancing proper:
1398 */
1399struct rq_iterator {
1400 void *arg;
1401 struct task_struct *(*start)(void *);
1402 struct task_struct *(*next)(void *);
1403};
1404
1405#ifdef CONFIG_SMP
1406static unsigned long
1407balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1408 unsigned long max_load_move, struct sched_domain *sd,
1409 enum cpu_idle_type idle, int *all_pinned,
1410 int *this_best_prio, struct rq_iterator *iterator);
1411
1412static int
1413iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1414 struct sched_domain *sd, enum cpu_idle_type idle,
1415 struct rq_iterator *iterator);
1416#endif
1417
1418/* Time spent by the tasks of the cpu accounting group executing in ... */ 1375/* Time spent by the tasks of the cpu accounting group executing in ... */
1419enum cpuacct_stat_index { 1376enum cpuacct_stat_index {
1420 CPUACCT_STAT_USER, /* ... user mode */ 1377 CPUACCT_STAT_USER, /* ... user mode */
@@ -1530,7 +1487,7 @@ static unsigned long target_load(int cpu, int type)
1530 1487
1531static struct sched_group *group_of(int cpu) 1488static struct sched_group *group_of(int cpu)
1532{ 1489{
1533 struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd); 1490 struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
1534 1491
1535 if (!sd) 1492 if (!sd)
1536 return NULL; 1493 return NULL;
@@ -1565,7 +1522,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
1565 1522
1566#ifdef CONFIG_FAIR_GROUP_SCHED 1523#ifdef CONFIG_FAIR_GROUP_SCHED
1567 1524
1568static __read_mostly unsigned long *update_shares_data; 1525static __read_mostly unsigned long __percpu *update_shares_data;
1569 1526
1570static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1527static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1571 1528
@@ -1599,11 +1556,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1599 struct rq *rq = cpu_rq(cpu); 1556 struct rq *rq = cpu_rq(cpu);
1600 unsigned long flags; 1557 unsigned long flags;
1601 1558
1602 spin_lock_irqsave(&rq->lock, flags); 1559 raw_spin_lock_irqsave(&rq->lock, flags);
1603 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1560 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1604 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1561 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1605 __set_se_shares(tg->se[cpu], shares); 1562 __set_se_shares(tg->se[cpu], shares);
1606 spin_unlock_irqrestore(&rq->lock, flags); 1563 raw_spin_unlock_irqrestore(&rq->lock, flags);
1607 } 1564 }
1608} 1565}
1609 1566
@@ -1614,7 +1571,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1614 */ 1571 */
1615static int tg_shares_up(struct task_group *tg, void *data) 1572static int tg_shares_up(struct task_group *tg, void *data)
1616{ 1573{
1617 unsigned long weight, rq_weight = 0, shares = 0; 1574 unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
1618 unsigned long *usd_rq_weight; 1575 unsigned long *usd_rq_weight;
1619 struct sched_domain *sd = data; 1576 struct sched_domain *sd = data;
1620 unsigned long flags; 1577 unsigned long flags;
@@ -1630,6 +1587,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1630 weight = tg->cfs_rq[i]->load.weight; 1587 weight = tg->cfs_rq[i]->load.weight;
1631 usd_rq_weight[i] = weight; 1588 usd_rq_weight[i] = weight;
1632 1589
1590 rq_weight += weight;
1633 /* 1591 /*
1634 * If there are currently no tasks on the cpu pretend there 1592 * If there are currently no tasks on the cpu pretend there
1635 * is one of average load so that when a new task gets to 1593 * is one of average load so that when a new task gets to
@@ -1638,10 +1596,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
1638 if (!weight) 1596 if (!weight)
1639 weight = NICE_0_LOAD; 1597 weight = NICE_0_LOAD;
1640 1598
1641 rq_weight += weight; 1599 sum_weight += weight;
1642 shares += tg->cfs_rq[i]->shares; 1600 shares += tg->cfs_rq[i]->shares;
1643 } 1601 }
1644 1602
1603 if (!rq_weight)
1604 rq_weight = sum_weight;
1605
1645 if ((!shares && rq_weight) || shares > tg->shares) 1606 if ((!shares && rq_weight) || shares > tg->shares)
1646 shares = tg->shares; 1607 shares = tg->shares;
1647 1608
@@ -1696,16 +1657,6 @@ static void update_shares(struct sched_domain *sd)
1696 } 1657 }
1697} 1658}
1698 1659
1699static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1700{
1701 if (root_task_group_empty())
1702 return;
1703
1704 spin_unlock(&rq->lock);
1705 update_shares(sd);
1706 spin_lock(&rq->lock);
1707}
1708
1709static void update_h_load(long cpu) 1660static void update_h_load(long cpu)
1710{ 1661{
1711 if (root_task_group_empty()) 1662 if (root_task_group_empty())
@@ -1720,10 +1671,6 @@ static inline void update_shares(struct sched_domain *sd)
1720{ 1671{
1721} 1672}
1722 1673
1723static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1724{
1725}
1726
1727#endif 1674#endif
1728 1675
1729#ifdef CONFIG_PREEMPT 1676#ifdef CONFIG_PREEMPT
@@ -1743,7 +1690,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1743 __acquires(busiest->lock) 1690 __acquires(busiest->lock)
1744 __acquires(this_rq->lock) 1691 __acquires(this_rq->lock)
1745{ 1692{
1746 spin_unlock(&this_rq->lock); 1693 raw_spin_unlock(&this_rq->lock);
1747 double_rq_lock(this_rq, busiest); 1694 double_rq_lock(this_rq, busiest);
1748 1695
1749 return 1; 1696 return 1;
@@ -1764,14 +1711,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1764{ 1711{
1765 int ret = 0; 1712 int ret = 0;
1766 1713
1767 if (unlikely(!spin_trylock(&busiest->lock))) { 1714 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1768 if (busiest < this_rq) { 1715 if (busiest < this_rq) {
1769 spin_unlock(&this_rq->lock); 1716 raw_spin_unlock(&this_rq->lock);
1770 spin_lock(&busiest->lock); 1717 raw_spin_lock(&busiest->lock);
1771 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 1718 raw_spin_lock_nested(&this_rq->lock,
1719 SINGLE_DEPTH_NESTING);
1772 ret = 1; 1720 ret = 1;
1773 } else 1721 } else
1774 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 1722 raw_spin_lock_nested(&busiest->lock,
1723 SINGLE_DEPTH_NESTING);
1775 } 1724 }
1776 return ret; 1725 return ret;
1777} 1726}
@@ -1785,7 +1734,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1785{ 1734{
1786 if (unlikely(!irqs_disabled())) { 1735 if (unlikely(!irqs_disabled())) {
1787 /* printk() doesn't work good under rq->lock */ 1736 /* printk() doesn't work good under rq->lock */
1788 spin_unlock(&this_rq->lock); 1737 raw_spin_unlock(&this_rq->lock);
1789 BUG_ON(1); 1738 BUG_ON(1);
1790 } 1739 }
1791 1740
@@ -1795,9 +1744,54 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1795static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1744static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1796 __releases(busiest->lock) 1745 __releases(busiest->lock)
1797{ 1746{
1798 spin_unlock(&busiest->lock); 1747 raw_spin_unlock(&busiest->lock);
1799 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1748 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1800} 1749}
1750
1751/*
1752 * double_rq_lock - safely lock two runqueues
1753 *
1754 * Note this does not disable interrupts like task_rq_lock,
1755 * you need to do so manually before calling.
1756 */
1757static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1758 __acquires(rq1->lock)
1759 __acquires(rq2->lock)
1760{
1761 BUG_ON(!irqs_disabled());
1762 if (rq1 == rq2) {
1763 raw_spin_lock(&rq1->lock);
1764 __acquire(rq2->lock); /* Fake it out ;) */
1765 } else {
1766 if (rq1 < rq2) {
1767 raw_spin_lock(&rq1->lock);
1768 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1769 } else {
1770 raw_spin_lock(&rq2->lock);
1771 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1772 }
1773 }
1774 update_rq_clock(rq1);
1775 update_rq_clock(rq2);
1776}
1777
1778/*
1779 * double_rq_unlock - safely unlock two runqueues
1780 *
1781 * Note this does not restore interrupts like task_rq_unlock,
1782 * you need to do so manually after calling.
1783 */
1784static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1785 __releases(rq1->lock)
1786 __releases(rq2->lock)
1787{
1788 raw_spin_unlock(&rq1->lock);
1789 if (rq1 != rq2)
1790 raw_spin_unlock(&rq2->lock);
1791 else
1792 __release(rq2->lock);
1793}
1794
1801#endif 1795#endif
1802 1796
1803#ifdef CONFIG_FAIR_GROUP_SCHED 1797#ifdef CONFIG_FAIR_GROUP_SCHED
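double_rq_lock() above avoids an AB-BA deadlock by always taking the two runqueue locks in a fixed order (by address), and _double_lock_balance() relies on the same ordering rule when it has to drop and retake this_rq->lock. A minimal pthread sketch of that ordering discipline, not kernel code:

	#include <pthread.h>

	/*
	 * Lock two mutexes in a globally consistent (address) order, so two
	 * threads locking the same pair with swapped arguments cannot deadlock;
	 * the same mutex passed twice is locked only once, mirroring the
	 * rq1 == rq2 case above.
	 */
	static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a == b) {
			pthread_mutex_lock(a);
		} else if (a < b) {
			pthread_mutex_lock(a);
			pthread_mutex_lock(b);
		} else {
			pthread_mutex_lock(b);
			pthread_mutex_lock(a);
		}
	}

	static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		pthread_mutex_unlock(a);
		if (a != b)
			pthread_mutex_unlock(b);
	}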
@@ -1810,19 +1804,31 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1810#endif 1804#endif
1811 1805
1812static void calc_load_account_active(struct rq *this_rq); 1806static void calc_load_account_active(struct rq *this_rq);
1807static void update_sysctl(void);
1808static int get_update_sysctl_factor(void);
1813 1809
1814#include "sched_stats.h" 1810static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1815#include "sched_idletask.c" 1811{
1816#include "sched_fair.c" 1812 set_task_rq(p, cpu);
1817#include "sched_rt.c" 1813#ifdef CONFIG_SMP
1818#ifdef CONFIG_SCHED_DEBUG 1814 /*
1819# include "sched_debug.c" 1815 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1816 * successfuly executed on another CPU. We must ensure that updates of
1817 * per-task data have been completed by this moment.
1818 */
1819 smp_wmb();
1820 task_thread_info(p)->cpu = cpu;
1820#endif 1821#endif
1822}
1823
1824static const struct sched_class rt_sched_class;
1821 1825
1822#define sched_class_highest (&rt_sched_class) 1826#define sched_class_highest (&rt_sched_class)
1823#define for_each_class(class) \ 1827#define for_each_class(class) \
1824 for (class = sched_class_highest; class; class = class->next) 1828 for (class = sched_class_highest; class; class = class->next)
1825 1829
1830#include "sched_stats.h"
1831
1826static void inc_nr_running(struct rq *rq) 1832static void inc_nr_running(struct rq *rq)
1827{ 1833{
1828 rq->nr_running++; 1834 rq->nr_running++;
@@ -1860,13 +1866,14 @@ static void update_avg(u64 *avg, u64 sample)
1860 *avg += diff >> 3; 1866 *avg += diff >> 3;
1861} 1867}
1862 1868
1863static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1869static void
1870enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1864{ 1871{
1865 if (wakeup) 1872 if (wakeup)
1866 p->se.start_runtime = p->se.sum_exec_runtime; 1873 p->se.start_runtime = p->se.sum_exec_runtime;
1867 1874
1868 sched_info_queued(p); 1875 sched_info_queued(p);
1869 p->sched_class->enqueue_task(rq, p, wakeup); 1876 p->sched_class->enqueue_task(rq, p, wakeup, head);
1870 p->se.on_rq = 1; 1877 p->se.on_rq = 1;
1871} 1878}
1872 1879
@@ -1889,6 +1896,37 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1889} 1896}
1890 1897
1891/* 1898/*
1899 * activate_task - move a task to the runqueue.
1900 */
1901static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1902{
1903 if (task_contributes_to_load(p))
1904 rq->nr_uninterruptible--;
1905
1906 enqueue_task(rq, p, wakeup, false);
1907 inc_nr_running(rq);
1908}
1909
1910/*
1911 * deactivate_task - remove a task from the runqueue.
1912 */
1913static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1914{
1915 if (task_contributes_to_load(p))
1916 rq->nr_uninterruptible++;
1917
1918 dequeue_task(rq, p, sleep);
1919 dec_nr_running(rq);
1920}
1921
1922#include "sched_idletask.c"
1923#include "sched_fair.c"
1924#include "sched_rt.c"
1925#ifdef CONFIG_SCHED_DEBUG
1926# include "sched_debug.c"
1927#endif
1928
1929/*
1892 * __normal_prio - return the priority that is based on the static prio 1930 * __normal_prio - return the priority that is based on the static prio
1893 */ 1931 */
1894static inline int __normal_prio(struct task_struct *p) 1932static inline int __normal_prio(struct task_struct *p)
@@ -1934,30 +1972,6 @@ static int effective_prio(struct task_struct *p)
1934 return p->prio; 1972 return p->prio;
1935} 1973}
1936 1974
1937/*
1938 * activate_task - move a task to the runqueue.
1939 */
1940static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
1941{
1942 if (task_contributes_to_load(p))
1943 rq->nr_uninterruptible--;
1944
1945 enqueue_task(rq, p, wakeup);
1946 inc_nr_running(rq);
1947}
1948
1949/*
1950 * deactivate_task - remove a task from the runqueue.
1951 */
1952static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
1953{
1954 if (task_contributes_to_load(p))
1955 rq->nr_uninterruptible++;
1956
1957 dequeue_task(rq, p, sleep);
1958 dec_nr_running(rq);
1959}
1960
1961/** 1975/**
1962 * task_curr - is this task currently executing on a CPU? 1976 * task_curr - is this task currently executing on a CPU?
1963 * @p: the task in question. 1977 * @p: the task in question.
@@ -1967,20 +1981,6 @@ inline int task_curr(const struct task_struct *p)
1967 return cpu_curr(task_cpu(p)) == p; 1981 return cpu_curr(task_cpu(p)) == p;
1968} 1982}
1969 1983
1970static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1971{
1972 set_task_rq(p, cpu);
1973#ifdef CONFIG_SMP
1974 /*
1975 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1976 * successfuly executed on another CPU. We must ensure that updates of
1977 * per-task data have been completed by this moment.
1978 */
1979 smp_wmb();
1980 task_thread_info(p)->cpu = cpu;
1981#endif
1982}
1983
1984static inline void check_class_changed(struct rq *rq, struct task_struct *p, 1984static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1985 const struct sched_class *prev_class, 1985 const struct sched_class *prev_class,
1986 int oldprio, int running) 1986 int oldprio, int running)
@@ -1993,38 +1993,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1993 p->sched_class->prio_changed(rq, p, oldprio, running); 1993 p->sched_class->prio_changed(rq, p, oldprio, running);
1994} 1994}
1995 1995
1996/**
1997 * kthread_bind - bind a just-created kthread to a cpu.
1998 * @p: thread created by kthread_create().
1999 * @cpu: cpu (might not be online, must be possible) for @k to run on.
2000 *
2001 * Description: This function is equivalent to set_cpus_allowed(),
2002 * except that @cpu doesn't need to be online, and the thread must be
2003 * stopped (i.e., just returned from kthread_create()).
2004 *
2005 * Function lives here instead of kthread.c because it messes with
2006 * scheduler internals which require locking.
2007 */
2008void kthread_bind(struct task_struct *p, unsigned int cpu)
2009{
2010 struct rq *rq = cpu_rq(cpu);
2011 unsigned long flags;
2012
2013 /* Must have done schedule() in kthread() before we set_task_cpu */
2014 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2015 WARN_ON(1);
2016 return;
2017 }
2018
2019 spin_lock_irqsave(&rq->lock, flags);
2020 set_task_cpu(p, cpu);
2021 p->cpus_allowed = cpumask_of_cpu(cpu);
2022 p->rt.nr_cpus_allowed = 1;
2023 p->flags |= PF_THREAD_BOUND;
2024 spin_unlock_irqrestore(&rq->lock, flags);
2025}
2026EXPORT_SYMBOL(kthread_bind);
2027
2028#ifdef CONFIG_SMP 1996#ifdef CONFIG_SMP
2029/* 1997/*
2030 * Is this task likely cache-hot: 1998 * Is this task likely cache-hot:
@@ -2034,6 +2002,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2034{ 2002{
2035 s64 delta; 2003 s64 delta;
2036 2004
2005 if (p->sched_class != &fair_sched_class)
2006 return 0;
2007
2037 /* 2008 /*
2038 * Buddy candidates are cache hot: 2009 * Buddy candidates are cache hot:
2039 */ 2010 */
@@ -2042,9 +2013,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2042 &p->se == cfs_rq_of(&p->se)->last)) 2013 &p->se == cfs_rq_of(&p->se)->last))
2043 return 1; 2014 return 1;
2044 2015
2045 if (p->sched_class != &fair_sched_class)
2046 return 0;
2047
2048 if (sysctl_sched_migration_cost == -1) 2016 if (sysctl_sched_migration_cost == -1)
2049 return 1; 2017 return 1;
2050 if (sysctl_sched_migration_cost == 0) 2018 if (sysctl_sched_migration_cost == 0)
@@ -2055,39 +2023,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2055 return delta < (s64)sysctl_sched_migration_cost; 2023 return delta < (s64)sysctl_sched_migration_cost;
2056} 2024}
2057 2025
2058
2059void set_task_cpu(struct task_struct *p, unsigned int new_cpu) 2026void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2060{ 2027{
2061 int old_cpu = task_cpu(p); 2028#ifdef CONFIG_SCHED_DEBUG
2062 struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu); 2029 /*
2063 struct cfs_rq *old_cfsrq = task_cfs_rq(p), 2030 * We should never call set_task_cpu() on a blocked task,
2064 *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu); 2031 * ttwu() will sort out the placement.
2065 u64 clock_offset; 2032 */
2066 2033 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2067 clock_offset = old_rq->clock - new_rq->clock; 2034 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2035#endif
2068 2036
2069 trace_sched_migrate_task(p, new_cpu); 2037 trace_sched_migrate_task(p, new_cpu);
2070 2038
2071#ifdef CONFIG_SCHEDSTATS 2039 if (task_cpu(p) != new_cpu) {
2072 if (p->se.wait_start)
2073 p->se.wait_start -= clock_offset;
2074 if (p->se.sleep_start)
2075 p->se.sleep_start -= clock_offset;
2076 if (p->se.block_start)
2077 p->se.block_start -= clock_offset;
2078#endif
2079 if (old_cpu != new_cpu) {
2080 p->se.nr_migrations++; 2040 p->se.nr_migrations++;
2081 new_rq->nr_migrations_in++; 2041 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2082#ifdef CONFIG_SCHEDSTATS
2083 if (task_hot(p, old_rq->clock, NULL))
2084 schedstat_inc(p, se.nr_forced2_migrations);
2085#endif
2086 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
2087 1, 1, NULL, 0);
2088 } 2042 }
2089 p->se.vruntime -= old_cfsrq->min_vruntime -
2090 new_cfsrq->min_vruntime;
2091 2043
2092 __set_task_cpu(p, new_cpu); 2044 __set_task_cpu(p, new_cpu);
2093} 2045}
@@ -2112,12 +2064,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2112 2064
2113 /* 2065 /*
2114 * If the task is not on a runqueue (and not running), then 2066 * If the task is not on a runqueue (and not running), then
2115 * it is sufficient to simply update the task's cpu field. 2067 * the next wake-up will properly place the task.
2116 */ 2068 */
2117 if (!p->se.on_rq && !task_running(rq, p)) { 2069 if (!p->se.on_rq && !task_running(rq, p))
2118 set_task_cpu(p, dest_cpu);
2119 return 0; 2070 return 0;
2120 }
2121 2071
2122 init_completion(&req->done); 2072 init_completion(&req->done);
2123 req->task = p; 2073 req->task = p;
@@ -2322,6 +2272,75 @@ void task_oncpu_function_call(struct task_struct *p,
2322 preempt_enable(); 2272 preempt_enable();
2323} 2273}
2324 2274
2275#ifdef CONFIG_SMP
2276static int select_fallback_rq(int cpu, struct task_struct *p)
2277{
2278 int dest_cpu;
2279 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2280
2281 /* Look for allowed, online CPU in same node. */
2282 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2283 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2284 return dest_cpu;
2285
2286 /* Any allowed, online CPU? */
2287 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2288 if (dest_cpu < nr_cpu_ids)
2289 return dest_cpu;
2290
2291 /* No more Mr. Nice Guy. */
2292 if (dest_cpu >= nr_cpu_ids) {
2293 rcu_read_lock();
2294 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2295 rcu_read_unlock();
2296 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2297
2298 /*
2299 * Don't tell them about moving exiting tasks or
2300 * kernel threads (both mm NULL), since they never
2301 * leave kernel.
2302 */
2303 if (p->mm && printk_ratelimit()) {
2304 printk(KERN_INFO "process %d (%s) no "
2305 "longer affine to cpu%d\n",
2306 task_pid_nr(p), p->comm, cpu);
2307 }
2308 }
2309
2310 return dest_cpu;
2311}
2312
2313/*
2314 * Gets called from 3 sites (exec, fork, wakeup), since it is called without
2315 * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
2316 * by:
2317 *
2318 * exec: is unstable, retry loop
2319 * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
2320 */
2321static inline
2322int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2323{
2324 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
2325
2326 /*
2327 * In order not to call set_task_cpu() on a blocking task we need
2328 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2329 * cpu.
2330 *
2331 * Since this is common to all placement strategies, this lives here.
2332 *
2333 * [ this allows ->select_task() to simply return task_cpu(p) and
2334 * not worry about this generic constraint ]
2335 */
2336 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2337 !cpu_online(cpu)))
2338 cpu = select_fallback_rq(task_cpu(p), p);
2339
2340 return cpu;
2341}
2342#endif
2343
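select_fallback_rq() above widens its search in stages when a task's chosen CPU is no longer allowed or online: first an allowed CPU on the same node, then any allowed online CPU, and as a last resort it resets the affinity mask and picks again. A small userspace model of that cascade, using plain bitmasks and made-up names instead of the kernel's cpumask API:

	#include <stdint.h>

	struct fake_task {
		uint64_t cpus_allowed;		/* hypothetical affinity bitmask */
	};

	/* Lowest set bit, or -1 if the mask is empty. */
	static int first_cpu(uint64_t mask)
	{
		return mask ? __builtin_ctzll(mask) : -1;
	}

	/*
	 * Three-stage fallback, mirroring the order above:
	 *   1. allowed and online on the preferred node,
	 *   2. any allowed and online CPU,
	 *   3. forget the old affinity and take any online CPU.
	 */
	static int fallback_cpu(struct fake_task *p, uint64_t node_mask, uint64_t online_mask)
	{
		int cpu;

		cpu = first_cpu(p->cpus_allowed & node_mask & online_mask);
		if (cpu >= 0)
			return cpu;

		cpu = first_cpu(p->cpus_allowed & online_mask);
		if (cpu >= 0)
			return cpu;

		/* No more Mr. Nice Guy. */
		p->cpus_allowed = online_mask;
		return first_cpu(p->cpus_allowed);
	}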
2325/*** 2344/***
2326 * try_to_wake_up - wake up a thread 2345 * try_to_wake_up - wake up a thread
2327 * @p: the to-be-woken-up thread 2346 * @p: the to-be-woken-up thread
@@ -2341,7 +2360,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2341{ 2360{
2342 int cpu, orig_cpu, this_cpu, success = 0; 2361 int cpu, orig_cpu, this_cpu, success = 0;
2343 unsigned long flags; 2362 unsigned long flags;
2344 struct rq *rq, *orig_rq; 2363 struct rq *rq;
2345 2364
2346 if (!sched_feat(SYNC_WAKEUPS)) 2365 if (!sched_feat(SYNC_WAKEUPS))
2347 wake_flags &= ~WF_SYNC; 2366 wake_flags &= ~WF_SYNC;
@@ -2349,7 +2368,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2349 this_cpu = get_cpu(); 2368 this_cpu = get_cpu();
2350 2369
2351 smp_wmb(); 2370 smp_wmb();
2352 rq = orig_rq = task_rq_lock(p, &flags); 2371 rq = task_rq_lock(p, &flags);
2353 update_rq_clock(rq); 2372 update_rq_clock(rq);
2354 if (!(p->state & state)) 2373 if (!(p->state & state))
2355 goto out; 2374 goto out;
@@ -2373,19 +2392,34 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2373 if (task_contributes_to_load(p)) 2392 if (task_contributes_to_load(p))
2374 rq->nr_uninterruptible--; 2393 rq->nr_uninterruptible--;
2375 p->state = TASK_WAKING; 2394 p->state = TASK_WAKING;
2376 task_rq_unlock(rq, &flags);
2377 2395
2378 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2396 if (p->sched_class->task_waking)
2379 if (cpu != orig_cpu) 2397 p->sched_class->task_waking(rq, p);
2380 set_task_cpu(p, cpu);
2381 2398
2382 rq = task_rq_lock(p, &flags); 2399 __task_rq_unlock(rq);
2383 2400
2384 if (rq != orig_rq) 2401 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2385 update_rq_clock(rq); 2402 if (cpu != orig_cpu) {
2403 /*
2404 * Since we migrate the task without holding any rq->lock,
2405 * we need to be careful with task_rq_lock(), since that
2406 * might end up locking an invalid rq.
2407 */
2408 set_task_cpu(p, cpu);
2409 }
2386 2410
2411 rq = cpu_rq(cpu);
2412 raw_spin_lock(&rq->lock);
2413 update_rq_clock(rq);
2414
2415 /*
2416 * We migrated the task without holding either rq->lock, however
2417 * since the task is not on the task list itself, nobody else
2418 * will try and migrate the task, hence the rq should match the
2419 * cpu we just moved it to.
2420 */
2421 WARN_ON(task_cpu(p) != cpu);
2387 WARN_ON(p->state != TASK_WAKING); 2422 WARN_ON(p->state != TASK_WAKING);
2388 cpu = task_cpu(p);
2389 2423
2390#ifdef CONFIG_SCHEDSTATS 2424#ifdef CONFIG_SCHEDSTATS
2391 schedstat_inc(rq, ttwu_count); 2425 schedstat_inc(rq, ttwu_count);
@@ -2438,8 +2472,19 @@ out_running:
2438 2472
2439 p->state = TASK_RUNNING; 2473 p->state = TASK_RUNNING;
2440#ifdef CONFIG_SMP 2474#ifdef CONFIG_SMP
2441 if (p->sched_class->task_wake_up) 2475 if (p->sched_class->task_woken)
2442 p->sched_class->task_wake_up(rq, p); 2476 p->sched_class->task_woken(rq, p);
2477
2478 if (unlikely(rq->idle_stamp)) {
2479 u64 delta = rq->clock - rq->idle_stamp;
2480 u64 max = 2*sysctl_sched_migration_cost;
2481
2482 if (delta > max)
2483 rq->avg_idle = max;
2484 else
2485 update_avg(&rq->avg_idle, delta);
2486 rq->idle_stamp = 0;
2487 }
2443#endif 2488#endif
2444out: 2489out:
2445 task_rq_unlock(rq, &flags); 2490 task_rq_unlock(rq, &flags);
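The idle_stamp/avg_idle bookkeeping added to try_to_wake_up() above maintains a clamped moving average of how long the CPU sat idle before a wakeup; update_avg() weights each new sample by 1/8. A standalone sketch of the arithmetic with made-up numbers (the 0.5ms migration cost is only an example value), not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	/* 1/8-weight exponential moving average, as in update_avg() above. */
	static void update_avg(uint64_t *avg, uint64_t sample)
	{
		int64_t diff = (int64_t)(sample - *avg);

		*avg += diff >> 3;
	}

	/* Clamp long idle periods to 2 * migration_cost, otherwise fold the sample in. */
	static void note_idle_period(uint64_t *avg_idle, uint64_t idle_ns, uint64_t migration_cost_ns)
	{
		uint64_t max = 2 * migration_cost_ns;

		if (idle_ns > max)
			*avg_idle = max;
		else
			update_avg(avg_idle, idle_ns);
	}

	int main(void)
	{
		uint64_t avg_idle = 400000;		/* 0.4ms current average */

		note_idle_period(&avg_idle, 800000, 500000);
		printf("short sample: %llu\n", (unsigned long long)avg_idle);	/* 450000 */

		note_idle_period(&avg_idle, 1200000, 500000);
		printf("clamped sample: %llu\n", (unsigned long long)avg_idle);	/* 1000000 */

		return 0;
	}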
@@ -2486,7 +2531,6 @@ static void __sched_fork(struct task_struct *p)
2486 p->se.avg_overlap = 0; 2531 p->se.avg_overlap = 0;
2487 p->se.start_runtime = 0; 2532 p->se.start_runtime = 0;
2488 p->se.avg_wakeup = sysctl_sched_wakeup_granularity; 2533 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2489 p->se.avg_running = 0;
2490 2534
2491#ifdef CONFIG_SCHEDSTATS 2535#ifdef CONFIG_SCHEDSTATS
2492 p->se.wait_start = 0; 2536 p->se.wait_start = 0;
@@ -2508,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
2508 p->se.nr_failed_migrations_running = 0; 2552 p->se.nr_failed_migrations_running = 0;
2509 p->se.nr_failed_migrations_hot = 0; 2553 p->se.nr_failed_migrations_hot = 0;
2510 p->se.nr_forced_migrations = 0; 2554 p->se.nr_forced_migrations = 0;
2511 p->se.nr_forced2_migrations = 0;
2512 2555
2513 p->se.nr_wakeups = 0; 2556 p->se.nr_wakeups = 0;
2514 p->se.nr_wakeups_sync = 0; 2557 p->se.nr_wakeups_sync = 0;
@@ -2529,14 +2572,6 @@ static void __sched_fork(struct task_struct *p)
2529#ifdef CONFIG_PREEMPT_NOTIFIERS 2572#ifdef CONFIG_PREEMPT_NOTIFIERS
2530 INIT_HLIST_HEAD(&p->preempt_notifiers); 2573 INIT_HLIST_HEAD(&p->preempt_notifiers);
2531#endif 2574#endif
2532
2533 /*
2534 * We mark the process as running here, but have not actually
2535 * inserted it onto the runqueue yet. This guarantees that
2536 * nobody will actually run it, and a signal or other external
2537 * event cannot wake it up and insert it on the runqueue either.
2538 */
2539 p->state = TASK_RUNNING;
2540} 2575}
2541 2576
2542/* 2577/*
@@ -2547,6 +2582,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
2547 int cpu = get_cpu(); 2582 int cpu = get_cpu();
2548 2583
2549 __sched_fork(p); 2584 __sched_fork(p);
2585 /*
2586 * We mark the process as waking here. This guarantees that
2587 * nobody will actually run it, and a signal or other external
2588 * event cannot wake it up and insert it on the runqueue either.
2589 */
2590 p->state = TASK_WAKING;
2550 2591
2551 /* 2592 /*
2552 * Revert to default priority/policy on fork if requested. 2593 * Revert to default priority/policy on fork if requested.
@@ -2578,9 +2619,9 @@ void sched_fork(struct task_struct *p, int clone_flags)
2578 if (!rt_prio(p->prio)) 2619 if (!rt_prio(p->prio))
2579 p->sched_class = &fair_sched_class; 2620 p->sched_class = &fair_sched_class;
2580 2621
2581#ifdef CONFIG_SMP 2622 if (p->sched_class->task_fork)
2582 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); 2623 p->sched_class->task_fork(p);
2583#endif 2624
2584 set_task_cpu(p, cpu); 2625 set_task_cpu(p, cpu);
2585 2626
2586#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 2627#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2610,28 +2651,41 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2610{ 2651{
2611 unsigned long flags; 2652 unsigned long flags;
2612 struct rq *rq; 2653 struct rq *rq;
2654 int cpu __maybe_unused = get_cpu();
2613 2655
2614 rq = task_rq_lock(p, &flags); 2656#ifdef CONFIG_SMP
2615 BUG_ON(p->state != TASK_RUNNING); 2657 /*
2616 update_rq_clock(rq); 2658 * Fork balancing, do it here and not earlier because:
2659 * - cpus_allowed can change in the fork path
2660 * - any previously selected cpu might disappear through hotplug
2661 *
2662 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
2663 * ->cpus_allowed is stable, we have preemption disabled, meaning
2664 * cpu_online_mask is stable.
2665 */
2666 cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
2667 set_task_cpu(p, cpu);
2668#endif
2617 2669
2618 if (!p->sched_class->task_new || !current->se.on_rq) { 2670 /*
2619 activate_task(rq, p, 0); 2671 * Since the task is not on the rq and we still have TASK_WAKING set
2620 } else { 2672 * nobody else will migrate this task.
2621 /* 2673 */
2622 * Let the scheduling class do new task startup 2674 rq = cpu_rq(cpu);
2623 * management (if any): 2675 raw_spin_lock_irqsave(&rq->lock, flags);
2624 */ 2676
2625 p->sched_class->task_new(rq, p); 2677 BUG_ON(p->state != TASK_WAKING);
2626 inc_nr_running(rq); 2678 p->state = TASK_RUNNING;
2627 } 2679 update_rq_clock(rq);
2680 activate_task(rq, p, 0);
2628 trace_sched_wakeup_new(rq, p, 1); 2681 trace_sched_wakeup_new(rq, p, 1);
2629 check_preempt_curr(rq, p, WF_FORK); 2682 check_preempt_curr(rq, p, WF_FORK);
2630#ifdef CONFIG_SMP 2683#ifdef CONFIG_SMP
2631 if (p->sched_class->task_wake_up) 2684 if (p->sched_class->task_woken)
2632 p->sched_class->task_wake_up(rq, p); 2685 p->sched_class->task_woken(rq, p);
2633#endif 2686#endif
2634 task_rq_unlock(rq, &flags); 2687 task_rq_unlock(rq, &flags);
2688 put_cpu();
2635} 2689}
2636 2690
2637#ifdef CONFIG_PREEMPT_NOTIFIERS 2691#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -2750,7 +2804,13 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2750 */ 2804 */
2751 prev_state = prev->state; 2805 prev_state = prev->state;
2752 finish_arch_switch(prev); 2806 finish_arch_switch(prev);
2753 perf_event_task_sched_in(current, cpu_of(rq)); 2807#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2808 local_irq_disable();
2809#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2810 perf_event_task_sched_in(current);
2811#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2812 local_irq_enable();
2813#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2754 finish_lock_switch(rq, prev); 2814 finish_lock_switch(rq, prev);
2755 2815
2756 fire_sched_in_preempt_notifiers(current); 2816 fire_sched_in_preempt_notifiers(current);
@@ -2781,10 +2841,10 @@ static inline void post_schedule(struct rq *rq)
2781 if (rq->post_schedule) { 2841 if (rq->post_schedule) {
2782 unsigned long flags; 2842 unsigned long flags;
2783 2843
2784 spin_lock_irqsave(&rq->lock, flags); 2844 raw_spin_lock_irqsave(&rq->lock, flags);
2785 if (rq->curr->sched_class->post_schedule) 2845 if (rq->curr->sched_class->post_schedule)
2786 rq->curr->sched_class->post_schedule(rq); 2846 rq->curr->sched_class->post_schedule(rq);
2787 spin_unlock_irqrestore(&rq->lock, flags); 2847 raw_spin_unlock_irqrestore(&rq->lock, flags);
2788 2848
2789 rq->post_schedule = 0; 2849 rq->post_schedule = 0;
2790 } 2850 }
@@ -2848,14 +2908,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
2848 */ 2908 */
2849 arch_start_context_switch(prev); 2909 arch_start_context_switch(prev);
2850 2910
2851 if (unlikely(!mm)) { 2911 if (likely(!mm)) {
2852 next->active_mm = oldmm; 2912 next->active_mm = oldmm;
2853 atomic_inc(&oldmm->mm_count); 2913 atomic_inc(&oldmm->mm_count);
2854 enter_lazy_tlb(oldmm, next); 2914 enter_lazy_tlb(oldmm, next);
2855 } else 2915 } else
2856 switch_mm(oldmm, mm, next); 2916 switch_mm(oldmm, mm, next);
2857 2917
2858 if (unlikely(!prev->mm)) { 2918 if (likely(!prev->mm)) {
2859 prev->active_mm = NULL; 2919 prev->active_mm = NULL;
2860 rq->prev_mm = oldmm; 2920 rq->prev_mm = oldmm;
2861 } 2921 }
@@ -3018,15 +3078,6 @@ static void calc_load_account_active(struct rq *this_rq)
3018} 3078}
3019 3079
3020/* 3080/*
3021 * Externally visible per-cpu scheduler statistics:
3022 * cpu_nr_migrations(cpu) - number of migrations into that cpu
3023 */
3024u64 cpu_nr_migrations(int cpu)
3025{
3026 return cpu_rq(cpu)->nr_migrations_in;
3027}
3028
3029/*
3030 * Update rq->cpu_load[] statistics. This function is usually called every 3081 * Update rq->cpu_load[] statistics. This function is usually called every
3031 * scheduler tick (TICK_NSEC). 3082 * scheduler tick (TICK_NSEC).
3032 */ 3083 */
@@ -3064,65 +3115,36 @@ static void update_cpu_load(struct rq *this_rq)
3064#ifdef CONFIG_SMP 3115#ifdef CONFIG_SMP
3065 3116
3066/* 3117/*
3067 * double_rq_lock - safely lock two runqueues 3118 * sched_exec - execve() is a valuable balancing opportunity, because at
3068 * 3119 * this point the task has the smallest effective memory and cache footprint.
3069 * Note this does not disable interrupts like task_rq_lock,
3070 * you need to do so manually before calling.
3071 */
3072static void double_rq_lock(struct rq *rq1, struct rq *rq2)
3073 __acquires(rq1->lock)
3074 __acquires(rq2->lock)
3075{
3076 BUG_ON(!irqs_disabled());
3077 if (rq1 == rq2) {
3078 spin_lock(&rq1->lock);
3079 __acquire(rq2->lock); /* Fake it out ;) */
3080 } else {
3081 if (rq1 < rq2) {
3082 spin_lock(&rq1->lock);
3083 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
3084 } else {
3085 spin_lock(&rq2->lock);
3086 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
3087 }
3088 }
3089 update_rq_clock(rq1);
3090 update_rq_clock(rq2);
3091}
3092
3093/*
3094 * double_rq_unlock - safely unlock two runqueues
3095 *
3096 * Note this does not restore interrupts like task_rq_unlock,
3097 * you need to do so manually after calling.
3098 */
3099static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
3100 __releases(rq1->lock)
3101 __releases(rq2->lock)
3102{
3103 spin_unlock(&rq1->lock);
3104 if (rq1 != rq2)
3105 spin_unlock(&rq2->lock);
3106 else
3107 __release(rq2->lock);
3108}
3109
3110/*
3111 * If dest_cpu is allowed for this process, migrate the task to it.
3112 * This is accomplished by forcing the cpu_allowed mask to only
3113 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
3114 * the cpu_allowed mask is restored.
3115 */ 3120 */
3116static void sched_migrate_task(struct task_struct *p, int dest_cpu) 3121void sched_exec(void)
3117{ 3122{
3123 struct task_struct *p = current;
3118 struct migration_req req; 3124 struct migration_req req;
3125 int dest_cpu, this_cpu;
3119 unsigned long flags; 3126 unsigned long flags;
3120 struct rq *rq; 3127 struct rq *rq;
3121 3128
3129again:
3130 this_cpu = get_cpu();
3131 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3132 if (dest_cpu == this_cpu) {
3133 put_cpu();
3134 return;
3135 }
3136
3122 rq = task_rq_lock(p, &flags); 3137 rq = task_rq_lock(p, &flags);
3138 put_cpu();
3139
3140 /*
3141 * select_task_rq() can race against ->cpus_allowed
3142 */
3123 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3143 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
3124 || unlikely(!cpu_active(dest_cpu))) 3144 || unlikely(!cpu_active(dest_cpu))) {
3125 goto out; 3145 task_rq_unlock(rq, &flags);
3146 goto again;
3147 }
3126 3148
3127 /* force the process onto the specified CPU */ 3149 /* force the process onto the specified CPU */
3128 if (migrate_task(p, dest_cpu, &req)) { 3150 if (migrate_task(p, dest_cpu, &req)) {
@@ -3137,1784 +3159,9 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
3137 3159
3138 return; 3160 return;
3139 } 3161 }
3140out:
3141 task_rq_unlock(rq, &flags); 3162 task_rq_unlock(rq, &flags);
3142} 3163}
3143 3164
3144/*
3145 * sched_exec - execve() is a valuable balancing opportunity, because at
3146 * this point the task has the smallest effective memory and cache footprint.
3147 */
3148void sched_exec(void)
3149{
3150 int new_cpu, this_cpu = get_cpu();
3151 new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
3152 put_cpu();
3153 if (new_cpu != this_cpu)
3154 sched_migrate_task(current, new_cpu);
3155}
3156
3157/*
3158 * pull_task - move a task from a remote runqueue to the local runqueue.
3159 * Both runqueues must be locked.
3160 */
3161static void pull_task(struct rq *src_rq, struct task_struct *p,
3162 struct rq *this_rq, int this_cpu)
3163{
3164 deactivate_task(src_rq, p, 0);
3165 set_task_cpu(p, this_cpu);
3166 activate_task(this_rq, p, 0);
3167 /*
3168 * Note that idle threads have a prio of MAX_PRIO, for this test
3169 * to be always true for them.
3170 */
3171 check_preempt_curr(this_rq, p, 0);
3172}
3173
3174/*
3175 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3176 */
3177static
3178int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
3179 struct sched_domain *sd, enum cpu_idle_type idle,
3180 int *all_pinned)
3181{
3182 int tsk_cache_hot = 0;
3183 /*
3184 * We do not migrate tasks that are:
3185 * 1) running (obviously), or
3186 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3187 * 3) are cache-hot on their current CPU.
3188 */
3189 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
3190 schedstat_inc(p, se.nr_failed_migrations_affine);
3191 return 0;
3192 }
3193 *all_pinned = 0;
3194
3195 if (task_running(rq, p)) {
3196 schedstat_inc(p, se.nr_failed_migrations_running);
3197 return 0;
3198 }
3199
3200 /*
3201 * Aggressive migration if:
3202 * 1) task is cache cold, or
3203 * 2) too many balance attempts have failed.
3204 */
3205
3206 tsk_cache_hot = task_hot(p, rq->clock, sd);
3207 if (!tsk_cache_hot ||
3208 sd->nr_balance_failed > sd->cache_nice_tries) {
3209#ifdef CONFIG_SCHEDSTATS
3210 if (tsk_cache_hot) {
3211 schedstat_inc(sd, lb_hot_gained[idle]);
3212 schedstat_inc(p, se.nr_forced_migrations);
3213 }
3214#endif
3215 return 1;
3216 }
3217
3218 if (tsk_cache_hot) {
3219 schedstat_inc(p, se.nr_failed_migrations_hot);
3220 return 0;
3221 }
3222 return 1;
3223}
3224
3225static unsigned long
3226balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3227 unsigned long max_load_move, struct sched_domain *sd,
3228 enum cpu_idle_type idle, int *all_pinned,
3229 int *this_best_prio, struct rq_iterator *iterator)
3230{
3231 int loops = 0, pulled = 0, pinned = 0;
3232 struct task_struct *p;
3233 long rem_load_move = max_load_move;
3234
3235 if (max_load_move == 0)
3236 goto out;
3237
3238 pinned = 1;
3239
3240 /*
3241 * Start the load-balancing iterator:
3242 */
3243 p = iterator->start(iterator->arg);
3244next:
3245 if (!p || loops++ > sysctl_sched_nr_migrate)
3246 goto out;
3247
3248 if ((p->se.load.weight >> 1) > rem_load_move ||
3249 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
3250 p = iterator->next(iterator->arg);
3251 goto next;
3252 }
3253
3254 pull_task(busiest, p, this_rq, this_cpu);
3255 pulled++;
3256 rem_load_move -= p->se.load.weight;
3257
3258#ifdef CONFIG_PREEMPT
3259 /*
3260 * NEWIDLE balancing is a source of latency, so preemptible kernels
3261 * will stop after the first task is pulled to minimize the critical
3262 * section.
3263 */
3264 if (idle == CPU_NEWLY_IDLE)
3265 goto out;
3266#endif
3267
3268 /*
3269 * We only want to steal up to the prescribed amount of weighted load.
3270 */
3271 if (rem_load_move > 0) {
3272 if (p->prio < *this_best_prio)
3273 *this_best_prio = p->prio;
3274 p = iterator->next(iterator->arg);
3275 goto next;
3276 }
3277out:
3278 /*
3279 * Right now, this is one of only two places pull_task() is called,
3280 * so we can safely collect pull_task() stats here rather than
3281 * inside pull_task().
3282 */
3283 schedstat_add(sd, lb_gained[idle], pulled);
3284
3285 if (all_pinned)
3286 *all_pinned = pinned;
3287
3288 return max_load_move - rem_load_move;
3289}
3290
3291/*
3292 * move_tasks tries to move up to max_load_move weighted load from busiest to
3293 * this_rq, as part of a balancing operation within domain "sd".
3294 * Returns 1 if successful and 0 otherwise.
3295 *
3296 * Called with both runqueues locked.
3297 */
3298static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3299 unsigned long max_load_move,
3300 struct sched_domain *sd, enum cpu_idle_type idle,
3301 int *all_pinned)
3302{
3303 const struct sched_class *class = sched_class_highest;
3304 unsigned long total_load_moved = 0;
3305 int this_best_prio = this_rq->curr->prio;
3306
3307 do {
3308 total_load_moved +=
3309 class->load_balance(this_rq, this_cpu, busiest,
3310 max_load_move - total_load_moved,
3311 sd, idle, all_pinned, &this_best_prio);
3312 class = class->next;
3313
3314#ifdef CONFIG_PREEMPT
3315 /*
3316 * NEWIDLE balancing is a source of latency, so preemptible
3317 * kernels will stop after the first task is pulled to minimize
3318 * the critical section.
3319 */
3320 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3321 break;
3322#endif
3323 } while (class && max_load_move > total_load_moved);
3324
3325 return total_load_moved > 0;
3326}
3327
3328static int
3329iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3330 struct sched_domain *sd, enum cpu_idle_type idle,
3331 struct rq_iterator *iterator)
3332{
3333 struct task_struct *p = iterator->start(iterator->arg);
3334 int pinned = 0;
3335
3336 while (p) {
3337 if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
3338 pull_task(busiest, p, this_rq, this_cpu);
3339 /*
3340 * Right now, this is only the second place pull_task()
3341 * is called, so we can safely collect pull_task()
3342 * stats here rather than inside pull_task().
3343 */
3344 schedstat_inc(sd, lb_gained[idle]);
3345
3346 return 1;
3347 }
3348 p = iterator->next(iterator->arg);
3349 }
3350
3351 return 0;
3352}
3353
3354/*
3355 * move_one_task tries to move exactly one task from busiest to this_rq, as
3356 * part of active balancing operations within "domain".
3357 * Returns 1 if successful and 0 otherwise.
3358 *
3359 * Called with both runqueues locked.
3360 */
3361static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3362 struct sched_domain *sd, enum cpu_idle_type idle)
3363{
3364 const struct sched_class *class;
3365
3366 for_each_class(class) {
3367 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
3368 return 1;
3369 }
3370
3371 return 0;
3372}
3373/********** Helpers for find_busiest_group ************************/
3374/*
3375 * sd_lb_stats - Structure to store the statistics of a sched_domain
3376 * during load balancing.
3377 */
3378struct sd_lb_stats {
3379 struct sched_group *busiest; /* Busiest group in this sd */
3380 struct sched_group *this; /* Local group in this sd */
3381 unsigned long total_load; /* Total load of all groups in sd */
3382 unsigned long total_pwr; /* Total power of all groups in sd */
3383 unsigned long avg_load; /* Average load across all groups in sd */
3384
3385 /** Statistics of this group */
3386 unsigned long this_load;
3387 unsigned long this_load_per_task;
3388 unsigned long this_nr_running;
3389
3390 /* Statistics of the busiest group */
3391 unsigned long max_load;
3392 unsigned long busiest_load_per_task;
3393 unsigned long busiest_nr_running;
3394
3395 int group_imb; /* Is there imbalance in this sd */
3396#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3397 int power_savings_balance; /* Is powersave balance needed for this sd */
3398 struct sched_group *group_min; /* Least loaded group in sd */
3399 struct sched_group *group_leader; /* Group which relieves group_min */
3400 unsigned long min_load_per_task; /* load_per_task in group_min */
3401 unsigned long leader_nr_running; /* Nr running of group_leader */
3402 unsigned long min_nr_running; /* Nr running of group_min */
3403#endif
3404};
3405
3406/*
3407 * sg_lb_stats - stats of a sched_group required for load_balancing
3408 */
3409struct sg_lb_stats {
3410 unsigned long avg_load; /*Avg load across the CPUs of the group */
3411 unsigned long group_load; /* Total load over the CPUs of the group */
3412 unsigned long sum_nr_running; /* Nr tasks running in the group */
3413 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3414 unsigned long group_capacity;
3415 int group_imb; /* Is there an imbalance in the group ? */
3416};
3417
3418/**
3419 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3420 * @group: The group whose first cpu is to be returned.
3421 */
3422static inline unsigned int group_first_cpu(struct sched_group *group)
3423{
3424 return cpumask_first(sched_group_cpus(group));
3425}

3426
3427/**
3428 * get_sd_load_idx - Obtain the load index for a given sched domain.
3429 * @sd: The sched_domain whose load_idx is to be obtained.
3430 * @idle: The idle status of the CPU whose sd load_idx is obtained.
3431 */
3432static inline int get_sd_load_idx(struct sched_domain *sd,
3433 enum cpu_idle_type idle)
3434{
3435 int load_idx;
3436
3437 switch (idle) {
3438 case CPU_NOT_IDLE:
3439 load_idx = sd->busy_idx;
3440 break;
3441
3442 case CPU_NEWLY_IDLE:
3443 load_idx = sd->newidle_idx;
3444 break;
3445 default:
3446 load_idx = sd->idle_idx;
3447 break;
3448 }
3449
3450 return load_idx;
3451}
3452
3453
3454#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3455/**
3456 * init_sd_power_savings_stats - Initialize power savings statistics for
3457 * the given sched_domain, during load balancing.
3458 *
3459 * @sd: Sched domain whose power-savings statistics are to be initialized.
3460 * @sds: Variable containing the statistics for sd.
3461 * @idle: Idle status of the CPU at which we're performing load-balancing.
3462 */
3463static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3464 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3465{
3466 /*
3467 * Busy processors will not participate in power savings
3468 * balance.
3469 */
3470 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3471 sds->power_savings_balance = 0;
3472 else {
3473 sds->power_savings_balance = 1;
3474 sds->min_nr_running = ULONG_MAX;
3475 sds->leader_nr_running = 0;
3476 }
3477}
3478
3479/**
3480 * update_sd_power_savings_stats - Update the power saving stats for a
3481 * sched_domain while performing load balancing.
3482 *
3483 * @group: sched_group belonging to the sched_domain under consideration.
3484 * @sds: Variable containing the statistics of the sched_domain
3485 * @local_group: Does group contain the CPU for which we're performing
3486 * load balancing ?
3487 * @sgs: Variable containing the statistics of the group.
3488 */
3489static inline void update_sd_power_savings_stats(struct sched_group *group,
3490 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3491{
3492
3493 if (!sds->power_savings_balance)
3494 return;
3495
3496 /*
3497 * If the local group is idle or completely loaded, there is
3498 * no need to do power savings balance at this domain.
3499 */
3500 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3501 !sds->this_nr_running))
3502 sds->power_savings_balance = 0;
3503
3504 /*
3505 * If a group is already running at full capacity or idle,
3506 * don't include that group in power savings calculations
3507 */
3508 if (!sds->power_savings_balance ||
3509 sgs->sum_nr_running >= sgs->group_capacity ||
3510 !sgs->sum_nr_running)
3511 return;
3512
3513 /*
3514 * Calculate the group which has the least non-idle load.
3515 * This is the group from which we need to pick up load
3516 * in order to save power.
3517 */
3518 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3519 (sgs->sum_nr_running == sds->min_nr_running &&
3520 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3521 sds->group_min = group;
3522 sds->min_nr_running = sgs->sum_nr_running;
3523 sds->min_load_per_task = sgs->sum_weighted_load /
3524 sgs->sum_nr_running;
3525 }
3526
3527 /*
3528 * Calculate the group which is nearly at its
3529 * capacity but still has some space to pick up load
3530 * from other groups and save more power.
3531 */
3532 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
3533 return;
3534
3535 if (sgs->sum_nr_running > sds->leader_nr_running ||
3536 (sgs->sum_nr_running == sds->leader_nr_running &&
3537 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3538 sds->group_leader = group;
3539 sds->leader_nr_running = sgs->sum_nr_running;
3540 }
3541}
3542
3543/**
3544 * check_power_save_busiest_group - see if there is potential for some power-savings balance
3545 * @sds: Variable containing the statistics of the sched_domain
3546 * under consideration.
3547 * @this_cpu: Cpu at which we're currently performing load-balancing.
3548 * @imbalance: Variable to store the imbalance.
3549 *
3550 * Description:
3551 * Check if we have potential to perform some power-savings balance.
3552 * If yes, set the busiest group to be the least loaded group in the
3553 * sched_domain, so that its CPUs can be put to idle.
3554 *
3555 * Returns 1 if there is potential to perform power-savings balance.
3556 * Else returns 0.
3557 */
3558static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3559 int this_cpu, unsigned long *imbalance)
3560{
3561 if (!sds->power_savings_balance)
3562 return 0;
3563
3564 if (sds->this != sds->group_leader ||
3565 sds->group_leader == sds->group_min)
3566 return 0;
3567
3568 *imbalance = sds->min_load_per_task;
3569 sds->busiest = sds->group_min;
3570
3571 return 1;
3572
3573}
3574#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3575static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3576 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3577{
3578 return;
3579}
3580
3581static inline void update_sd_power_savings_stats(struct sched_group *group,
3582 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3583{
3584 return;
3585}
3586
3587static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3588 int this_cpu, unsigned long *imbalance)
3589{
3590 return 0;
3591}
3592#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3593
3594
3595unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3596{
3597 return SCHED_LOAD_SCALE;
3598}
3599
3600unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3601{
3602 return default_scale_freq_power(sd, cpu);
3603}
3604
3605unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
3606{
3607 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3608 unsigned long smt_gain = sd->smt_gain;
3609
3610 smt_gain /= weight;
3611
3612 return smt_gain;
3613}
3614
3615unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3616{
3617 return default_scale_smt_power(sd, cpu);
3618}
3619
3620unsigned long scale_rt_power(int cpu)
3621{
3622 struct rq *rq = cpu_rq(cpu);
3623 u64 total, available;
3624
3625 sched_avg_update(rq);
3626
3627 total = sched_avg_period() + (rq->clock - rq->age_stamp);
3628 available = total - rq->rt_avg;
3629
3630 if (unlikely((s64)total < SCHED_LOAD_SCALE))
3631 total = SCHED_LOAD_SCALE;
3632
3633 total >>= SCHED_LOAD_SHIFT;
3634
3635 return div_u64(available, total);
3636}
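/*
 * Illustrative sketch (not from the kernel source): scale_rt_power() returns
 * the share of time left over for regular tasks as a SCHED_LOAD_SCALE (1024)
 * based fixed-point fraction.  The toy_* helper and the 4ms/1ms figures below
 * are assumed values used only to show the arithmetic.
 */
static unsigned long toy_scale_rt_power(unsigned long total_ns, unsigned long rt_ns)
{
        unsigned long available = total_ns - rt_ns;

        total_ns >>= 10;                /* SCHED_LOAD_SHIFT */
        if (!total_ns)
                total_ns = 1;
        return available / total_ns;    /* ~768 for total=4000000, rt=1000000 */
}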
3637
3638static void update_cpu_power(struct sched_domain *sd, int cpu)
3639{
3640 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3641 unsigned long power = SCHED_LOAD_SCALE;
3642 struct sched_group *sdg = sd->groups;
3643
3644 if (sched_feat(ARCH_POWER))
3645 power *= arch_scale_freq_power(sd, cpu);
3646 else
3647 power *= default_scale_freq_power(sd, cpu);
3648
3649 power >>= SCHED_LOAD_SHIFT;
3650
3651 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3652 if (sched_feat(ARCH_POWER))
3653 power *= arch_scale_smt_power(sd, cpu);
3654 else
3655 power *= default_scale_smt_power(sd, cpu);
3656
3657 power >>= SCHED_LOAD_SHIFT;
3658 }
3659
3660 power *= scale_rt_power(cpu);
3661 power >>= SCHED_LOAD_SHIFT;
3662
3663 if (!power)
3664 power = 1;
3665
3666 sdg->cpu_power = power;
3667}
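/*
 * Illustrative sketch (not from the kernel source): update_cpu_power() chains
 * several 1024-based factors, shifting down by SCHED_LOAD_SHIFT after each
 * multiply.  The toy_* name and the sample factors are assumptions; 589 is
 * roughly what default_scale_smt_power() yields for a 2-sibling SMT domain
 * (smt_gain 1178 / 2), and 768 a scale_rt_power() result.
 */
static unsigned long toy_cpu_power(unsigned long freq_scale,    /* e.g. 1024 */
                                   unsigned long smt_scale,     /* e.g. 589  */
                                   unsigned long rt_scale)      /* e.g. 768  */
{
        unsigned long power = 1024;     /* SCHED_LOAD_SCALE */

        power = (power * freq_scale) >> 10;
        power = (power * smt_scale)  >> 10;     /* only with SD_SHARE_CPUPOWER */
        power = (power * rt_scale)   >> 10;     /* 441 for the sample values  */
        return power ? power : 1;               /* never report zero power    */
}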
3668
3669static void update_group_power(struct sched_domain *sd, int cpu)
3670{
3671 struct sched_domain *child = sd->child;
3672 struct sched_group *group, *sdg = sd->groups;
3673 unsigned long power;
3674
3675 if (!child) {
3676 update_cpu_power(sd, cpu);
3677 return;
3678 }
3679
3680 power = 0;
3681
3682 group = child->groups;
3683 do {
3684 power += group->cpu_power;
3685 group = group->next;
3686 } while (group != child->groups);
3687
3688 sdg->cpu_power = power;
3689}
3690
3691/**
3692 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3693 * @sd: The sched_domain whose statistics are to be updated.
3694 * @group: sched_group whose statistics are to be updated.
3695 * @this_cpu: Cpu for which load balance is currently performed.
3696 * @idle: Idle status of this_cpu
3697 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3698 * @sd_idle: Idle status of the sched_domain containing group.
3699 * @local_group: Does group contain this_cpu.
3700 * @cpus: Set of cpus considered for load balancing.
3701 * @balance: Should we balance.
3702 * @sgs: variable to hold the statistics for this group.
3703 */
3704static inline void update_sg_lb_stats(struct sched_domain *sd,
3705 struct sched_group *group, int this_cpu,
3706 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3707 int local_group, const struct cpumask *cpus,
3708 int *balance, struct sg_lb_stats *sgs)
3709{
3710 unsigned long load, max_cpu_load, min_cpu_load;
3711 int i;
3712 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3713 unsigned long sum_avg_load_per_task;
3714 unsigned long avg_load_per_task;
3715
3716 if (local_group) {
3717 balance_cpu = group_first_cpu(group);
3718 if (balance_cpu == this_cpu)
3719 update_group_power(sd, this_cpu);
3720 }
3721
3722 /* Tally up the load of all CPUs in the group */
3723 sum_avg_load_per_task = avg_load_per_task = 0;
3724 max_cpu_load = 0;
3725 min_cpu_load = ~0UL;
3726
3727 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3728 struct rq *rq = cpu_rq(i);
3729
3730 if (*sd_idle && rq->nr_running)
3731 *sd_idle = 0;
3732
3733 /* Bias balancing toward cpus of our domain */
3734 if (local_group) {
3735 if (idle_cpu(i) && !first_idle_cpu) {
3736 first_idle_cpu = 1;
3737 balance_cpu = i;
3738 }
3739
3740 load = target_load(i, load_idx);
3741 } else {
3742 load = source_load(i, load_idx);
3743 if (load > max_cpu_load)
3744 max_cpu_load = load;
3745 if (min_cpu_load > load)
3746 min_cpu_load = load;
3747 }
3748
3749 sgs->group_load += load;
3750 sgs->sum_nr_running += rq->nr_running;
3751 sgs->sum_weighted_load += weighted_cpuload(i);
3752
3753 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3754 }
3755
3756 /*
3757 * The first idle cpu or the first cpu (busiest) in this sched group
3758 * is eligible for doing load balancing at this and higher
3759 * domains. In the newly idle case, we allow all the cpus
3760 * to do the newly idle load balance.
3761 */
3762 if (idle != CPU_NEWLY_IDLE && local_group &&
3763 balance_cpu != this_cpu && balance) {
3764 *balance = 0;
3765 return;
3766 }
3767
3768 /* Adjust by relative CPU power of the group */
3769 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
3770
3771
3772 /*
3773 * Consider the group unbalanced when the imbalance is larger
3774 * than the average weight of two tasks.
3775 *
3776 * APZ: with cgroup the avg task weight can vary wildly and
3777 * might not be a suitable number - should we keep a
3778 * normalized nr_running number somewhere that negates
3779 * the hierarchy?
3780 */
3781 avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
3782 group->cpu_power;
3783
3784 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3785 sgs->group_imb = 1;
3786
3787 sgs->group_capacity =
3788 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
3789}
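/*
 * Illustrative sketch (not from the kernel source): the per-group load is
 * normalized by the group's cpu_power, the capacity is a rounded division by
 * SCHED_LOAD_SCALE, and group_imb flags a spread wider than two average
 * tasks.  All toy_* names and numbers are assumptions.
 */
static unsigned long toy_group_stats(int *group_imb, unsigned long *capacity)
{
        /* two cpus of power 1024 each => group cpu_power 2048 */
        unsigned long cpu_power = 2048, group_load = 3072;
        unsigned long max_cpu_load = 2048, min_cpu_load = 1024, avg_task = 512;

        *capacity  = (cpu_power + 512) / 1024;  /* 2, like DIV_ROUND_CLOSEST */
        /* group_imb: spread between the cpus exceeds two average tasks? */
        *group_imb = (max_cpu_load - min_cpu_load) > 2 * avg_task;      /* 0 */

        return group_load * 1024 / cpu_power;   /* avg_load = 1536 */
}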
3790
3791/**
3792 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3793 * @sd: sched_domain whose statistics are to be updated.
3794 * @this_cpu: Cpu for which load balance is currently performed.
3795 * @idle: Idle status of this_cpu
3796 * @sd_idle: Idle status of the sched_domain containing group.
3797 * @cpus: Set of cpus considered for load balancing.
3798 * @balance: Should we balance.
3799 * @sds: variable to hold the statistics for this sched_domain.
3800 */
3801static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3802 enum cpu_idle_type idle, int *sd_idle,
3803 const struct cpumask *cpus, int *balance,
3804 struct sd_lb_stats *sds)
3805{
3806 struct sched_domain *child = sd->child;
3807 struct sched_group *group = sd->groups;
3808 struct sg_lb_stats sgs;
3809 int load_idx, prefer_sibling = 0;
3810
3811 if (child && child->flags & SD_PREFER_SIBLING)
3812 prefer_sibling = 1;
3813
3814 init_sd_power_savings_stats(sd, sds, idle);
3815 load_idx = get_sd_load_idx(sd, idle);
3816
3817 do {
3818 int local_group;
3819
3820 local_group = cpumask_test_cpu(this_cpu,
3821 sched_group_cpus(group));
3822 memset(&sgs, 0, sizeof(sgs));
3823 update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
3824 local_group, cpus, balance, &sgs);
3825
3826 if (local_group && balance && !(*balance))
3827 return;
3828
3829 sds->total_load += sgs.group_load;
3830 sds->total_pwr += group->cpu_power;
3831
3832 /*
3833 * In case the child domain prefers tasks go to siblings
3834 * first, lower the group capacity to one so that we'll try
3835 * and move all the excess tasks away.
3836 */
3837 if (prefer_sibling)
3838 sgs.group_capacity = min(sgs.group_capacity, 1UL);
3839
3840 if (local_group) {
3841 sds->this_load = sgs.avg_load;
3842 sds->this = group;
3843 sds->this_nr_running = sgs.sum_nr_running;
3844 sds->this_load_per_task = sgs.sum_weighted_load;
3845 } else if (sgs.avg_load > sds->max_load &&
3846 (sgs.sum_nr_running > sgs.group_capacity ||
3847 sgs.group_imb)) {
3848 sds->max_load = sgs.avg_load;
3849 sds->busiest = group;
3850 sds->busiest_nr_running = sgs.sum_nr_running;
3851 sds->busiest_load_per_task = sgs.sum_weighted_load;
3852 sds->group_imb = sgs.group_imb;
3853 }
3854
3855 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3856 group = group->next;
3857 } while (group != sd->groups);
3858}
3859
3860/**
3861 * fix_small_imbalance - Calculate the minor imbalance that exists
3862 * amongst the groups of a sched_domain, during
3863 * load balancing.
3864 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3865 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3866 * @imbalance: Variable to store the imbalance.
3867 */
3868static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3869 int this_cpu, unsigned long *imbalance)
3870{
3871 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3872 unsigned int imbn = 2;
3873
3874 if (sds->this_nr_running) {
3875 sds->this_load_per_task /= sds->this_nr_running;
3876 if (sds->busiest_load_per_task >
3877 sds->this_load_per_task)
3878 imbn = 1;
3879 } else
3880 sds->this_load_per_task =
3881 cpu_avg_load_per_task(this_cpu);
3882
3883 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3884 sds->busiest_load_per_task * imbn) {
3885 *imbalance = sds->busiest_load_per_task;
3886 return;
3887 }
3888
3889 /*
3890 * OK, we don't have enough imbalance to justify moving tasks,
3891 * however we may be able to increase total CPU power used by
3892 * moving them.
3893 */
3894
3895 pwr_now += sds->busiest->cpu_power *
3896 min(sds->busiest_load_per_task, sds->max_load);
3897 pwr_now += sds->this->cpu_power *
3898 min(sds->this_load_per_task, sds->this_load);
3899 pwr_now /= SCHED_LOAD_SCALE;
3900
3901 /* Amount of load we'd subtract */
3902 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3903 sds->busiest->cpu_power;
3904 if (sds->max_load > tmp)
3905 pwr_move += sds->busiest->cpu_power *
3906 min(sds->busiest_load_per_task, sds->max_load - tmp);
3907
3908 /* Amount of load we'd add */
3909 if (sds->max_load * sds->busiest->cpu_power <
3910 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3911 tmp = (sds->max_load * sds->busiest->cpu_power) /
3912 sds->this->cpu_power;
3913 else
3914 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3915 sds->this->cpu_power;
3916 pwr_move += sds->this->cpu_power *
3917 min(sds->this_load_per_task, sds->this_load + tmp);
3918 pwr_move /= SCHED_LOAD_SCALE;
3919
3920 /* Move if we gain throughput */
3921 if (pwr_move > pwr_now)
3922 *imbalance = sds->busiest_load_per_task;
3923}
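/*
 * Worked example (assumed numbers, not from the kernel source): with the
 * local group idle (this_load = 0) and a single NICE-0 task of weight 1024 on
 * the busiest group (max_load = busiest_load_per_task = 1024), the early test
 * above fires and the whole task weight is reported, i.e. one task moves.
 */
static unsigned long toy_small_imbalance(void)
{
        unsigned long max_load = 1024, this_load = 0, blpt = 1024, imbn = 2;

        /* matches the early-exit test in fix_small_imbalance() */
        return (max_load - this_load + blpt >= blpt * imbn) ? blpt : 0; /* 1024 */
}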
3924
3925/**
3926 * calculate_imbalance - Calculate the amount of imbalance present within the
3927 * groups of a given sched_domain during load balance.
3928 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3929 * @this_cpu: Cpu for which currently load balance is being performed.
3930 * @imbalance: The variable to store the imbalance.
3931 */
3932static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3933 unsigned long *imbalance)
3934{
3935 unsigned long max_pull;
3936 /*
3937 * In the presence of smp nice balancing, certain scenarios can have
3938 * max load less than avg load (as we skip groups at or below
3939 * their cpu_power while calculating max_load).
3940 */
3941 if (sds->max_load < sds->avg_load) {
3942 *imbalance = 0;
3943 return fix_small_imbalance(sds, this_cpu, imbalance);
3944 }
3945
3946 /* Don't want to pull so many tasks that a group would go idle */
3947 max_pull = min(sds->max_load - sds->avg_load,
3948 sds->max_load - sds->busiest_load_per_task);
3949
3950 /* How much load to actually move to equalise the imbalance */
3951 *imbalance = min(max_pull * sds->busiest->cpu_power,
3952 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
3953 / SCHED_LOAD_SCALE;
3954
3955 /*
3956 * If *imbalance is less than the average load per runnable task,
3957 * there is no guarantee that any tasks will be moved, so we
3958 * consider bumping its value to force at least one task to
3959 * be moved.
3960 */
3961 if (*imbalance < sds->busiest_load_per_task)
3962 return fix_small_imbalance(sds, this_cpu, imbalance);
3963
3964}
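/*
 * Worked example (assumed numbers, not from the kernel source): the pull is
 * bounded both by the busiest group's excess over the average and by the
 * local group's deficit below it, scaled by the respective cpu_power.
 */
static unsigned long toy_imbalance(void)
{
        unsigned long max_load = 2048, avg_load = 1536, this_load = 1280;
        unsigned long blpt = 512;                       /* busiest_load_per_task */
        unsigned long busiest_power = 1024, this_power = 1024;
        unsigned long max_pull, by_busiest, by_this;

        /* don't pull more than the excess over the average, and never
         * so much that the busiest group would drop below one task */
        max_pull = max_load - avg_load < max_load - blpt ?
                   max_load - avg_load : max_load - blpt;       /* 512 */

        by_busiest = max_pull * busiest_power;                  /* 512 << 10 */
        by_this    = (avg_load - this_load) * this_power;       /* 256 << 10 */

        /* here the local group's deficit is the tighter limit */
        return (by_busiest < by_this ? by_busiest : by_this) / 1024;    /* 256 */
}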
3965/******* find_busiest_group() helpers end here *********************/
3966
3967/**
3968 * find_busiest_group - Returns the busiest group within the sched_domain
3969 * if there is an imbalance. If there isn't an imbalance, and
3970 * the user has opted for power-savings, it returns a group whose
3971 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3972 * such a group exists.
3973 *
3974 * Also calculates the amount of weighted load which should be moved
3975 * to restore balance.
3976 *
3977 * @sd: The sched_domain whose busiest group is to be returned.
3978 * @this_cpu: The cpu for which load balancing is currently being performed.
3979 * @imbalance: Variable which stores amount of weighted load which should
3980 * be moved to restore balance/put a group to idle.
3981 * @idle: The idle status of this_cpu.
3982 * @sd_idle: The idleness of sd
3983 * @cpus: The set of CPUs under consideration for load-balancing.
3984 * @balance: Pointer to a variable indicating if this_cpu
3985 * is the appropriate cpu to perform load balancing at this_level.
3986 *
3987 * Returns: - the busiest group if imbalance exists.
3988 * - If no imbalance and user has opted for power-savings balance,
3989 * return the least loaded group whose CPUs can be
3990 * put to idle by rebalancing its tasks onto our group.
3991 */
3992static struct sched_group *
3993find_busiest_group(struct sched_domain *sd, int this_cpu,
3994 unsigned long *imbalance, enum cpu_idle_type idle,
3995 int *sd_idle, const struct cpumask *cpus, int *balance)
3996{
3997 struct sd_lb_stats sds;
3998
3999 memset(&sds, 0, sizeof(sds));
4000
4001 /*
4002 * Compute the various statistics relevant to load balancing at
4003 * this level.
4004 */
4005 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
4006 balance, &sds);
4007
4008 /* Cases where imbalance does not exist from POV of this_cpu */
4009 /* 1) this_cpu is not the appropriate cpu to perform load balancing
4010 * at this level.
4011 * 2) There is no busy sibling group to pull from.
4012 * 3) This group is the busiest group.
4013 * 4) This group is busier than the average busyness of this
4014 * sched_domain.
4015 * 5) The imbalance is within the specified limit.
4016 * 6) Any rebalance would lead to ping-pong
4017 */
4018 if (balance && !(*balance))
4019 goto ret;
4020
4021 if (!sds.busiest || sds.busiest_nr_running == 0)
4022 goto out_balanced;
4023
4024 if (sds.this_load >= sds.max_load)
4025 goto out_balanced;
4026
4027 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
4028
4029 if (sds.this_load >= sds.avg_load)
4030 goto out_balanced;
4031
4032 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
4033 goto out_balanced;
4034
4035 sds.busiest_load_per_task /= sds.busiest_nr_running;
4036 if (sds.group_imb)
4037 sds.busiest_load_per_task =
4038 min(sds.busiest_load_per_task, sds.avg_load);
4039
4040 /*
4041 * We're trying to get all the cpus to the average_load, so we don't
4042 * want to push ourselves above the average load, nor do we wish to
4043 * reduce the max loaded cpu below the average load, as either of these
4044 * actions would just result in more rebalancing later, and ping-pong
4045 * tasks around. Thus we look for the minimum possible imbalance.
4046 * Negative imbalances (*we* are more loaded than anyone else) will
4047 * be counted as no imbalance for these purposes -- we can't fix that
4048 * by pulling tasks to us. Be careful of negative numbers as they'll
4049 * appear as very large values with unsigned longs.
4050 */
4051 if (sds.max_load <= sds.busiest_load_per_task)
4052 goto out_balanced;
4053
4054 /* Looks like there is an imbalance. Compute it */
4055 calculate_imbalance(&sds, this_cpu, imbalance);
4056 return sds.busiest;
4057
4058out_balanced:
4059 /*
4060 * There is no obvious imbalance. But check if we can do some balancing
4061 * to save power.
4062 */
4063 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
4064 return sds.busiest;
4065ret:
4066 *imbalance = 0;
4067 return NULL;
4068}
4069
4070/*
4071 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4072 */
4073static struct rq *
4074find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
4075 unsigned long imbalance, const struct cpumask *cpus)
4076{
4077 struct rq *busiest = NULL, *rq;
4078 unsigned long max_load = 0;
4079 int i;
4080
4081 for_each_cpu(i, sched_group_cpus(group)) {
4082 unsigned long power = power_of(i);
4083 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
4084 unsigned long wl;
4085
4086 if (!cpumask_test_cpu(i, cpus))
4087 continue;
4088
4089 rq = cpu_rq(i);
4090 wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
4091 wl /= power;
4092
4093 if (capacity && rq->nr_running == 1 && wl > imbalance)
4094 continue;
4095
4096 if (wl > max_load) {
4097 max_load = wl;
4098 busiest = rq;
4099 }
4100 }
4101
4102 return busiest;
4103}
4104
4105/*
4106 * Max backoff if we encounter pinned tasks. The value is fairly
4107 * arbitrary, as long as it is large enough.
4108 */
4109#define MAX_PINNED_INTERVAL 512
4110
4111/* Working cpumask for load_balance and load_balance_newidle. */
4112static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4113
4114/*
4115 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4116 * tasks if there is an imbalance.
4117 */
4118static int load_balance(int this_cpu, struct rq *this_rq,
4119 struct sched_domain *sd, enum cpu_idle_type idle,
4120 int *balance)
4121{
4122 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
4123 struct sched_group *group;
4124 unsigned long imbalance;
4125 struct rq *busiest;
4126 unsigned long flags;
4127 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4128
4129 cpumask_setall(cpus);
4130
4131 /*
4132 * When power savings policy is enabled for the parent domain, idle
4133 * sibling can pick up load irrespective of busy siblings. In this case,
4134 * let the state of idle sibling percolate up as CPU_IDLE, instead of
4135 * portraying it as CPU_NOT_IDLE.
4136 */
4137 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
4138 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4139 sd_idle = 1;
4140
4141 schedstat_inc(sd, lb_count[idle]);
4142
4143redo:
4144 update_shares(sd);
4145 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
4146 cpus, balance);
4147
4148 if (*balance == 0)
4149 goto out_balanced;
4150
4151 if (!group) {
4152 schedstat_inc(sd, lb_nobusyg[idle]);
4153 goto out_balanced;
4154 }
4155
4156 busiest = find_busiest_queue(group, idle, imbalance, cpus);
4157 if (!busiest) {
4158 schedstat_inc(sd, lb_nobusyq[idle]);
4159 goto out_balanced;
4160 }
4161
4162 BUG_ON(busiest == this_rq);
4163
4164 schedstat_add(sd, lb_imbalance[idle], imbalance);
4165
4166 ld_moved = 0;
4167 if (busiest->nr_running > 1) {
4168 /*
4169 * Attempt to move tasks. If find_busiest_group has found
4170 * an imbalance but busiest->nr_running <= 1, the group is
4171 * still unbalanced. ld_moved simply stays zero, so it is
4172 * correctly treated as an imbalance.
4173 */
4174 local_irq_save(flags);
4175 double_rq_lock(this_rq, busiest);
4176 ld_moved = move_tasks(this_rq, this_cpu, busiest,
4177 imbalance, sd, idle, &all_pinned);
4178 double_rq_unlock(this_rq, busiest);
4179 local_irq_restore(flags);
4180
4181 /*
4182 * some other cpu did the load balance for us.
4183 */
4184 if (ld_moved && this_cpu != smp_processor_id())
4185 resched_cpu(this_cpu);
4186
4187 /* All tasks on this runqueue were pinned by CPU affinity */
4188 if (unlikely(all_pinned)) {
4189 cpumask_clear_cpu(cpu_of(busiest), cpus);
4190 if (!cpumask_empty(cpus))
4191 goto redo;
4192 goto out_balanced;
4193 }
4194 }
4195
4196 if (!ld_moved) {
4197 schedstat_inc(sd, lb_failed[idle]);
4198 sd->nr_balance_failed++;
4199
4200 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
4201
4202 spin_lock_irqsave(&busiest->lock, flags);
4203
4204 /* don't kick the migration_thread, if the curr
4205 * task on busiest cpu can't be moved to this_cpu
4206 */
4207 if (!cpumask_test_cpu(this_cpu,
4208 &busiest->curr->cpus_allowed)) {
4209 spin_unlock_irqrestore(&busiest->lock, flags);
4210 all_pinned = 1;
4211 goto out_one_pinned;
4212 }
4213
4214 if (!busiest->active_balance) {
4215 busiest->active_balance = 1;
4216 busiest->push_cpu = this_cpu;
4217 active_balance = 1;
4218 }
4219 spin_unlock_irqrestore(&busiest->lock, flags);
4220 if (active_balance)
4221 wake_up_process(busiest->migration_thread);
4222
4223 /*
4224 * We've kicked active balancing, reset the failure
4225 * counter.
4226 */
4227 sd->nr_balance_failed = sd->cache_nice_tries+1;
4228 }
4229 } else
4230 sd->nr_balance_failed = 0;
4231
4232 if (likely(!active_balance)) {
4233 /* We were unbalanced, so reset the balancing interval */
4234 sd->balance_interval = sd->min_interval;
4235 } else {
4236 /*
4237 * If we've begun active balancing, start to back off. This
4238 * case may not be covered by the all_pinned logic if there
4239 * is only 1 task on the busy runqueue (because we don't call
4240 * move_tasks).
4241 */
4242 if (sd->balance_interval < sd->max_interval)
4243 sd->balance_interval *= 2;
4244 }
4245
4246 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4247 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4248 ld_moved = -1;
4249
4250 goto out;
4251
4252out_balanced:
4253 schedstat_inc(sd, lb_balanced[idle]);
4254
4255 sd->nr_balance_failed = 0;
4256
4257out_one_pinned:
4258 /* tune up the balancing interval */
4259 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
4260 (sd->balance_interval < sd->max_interval))
4261 sd->balance_interval *= 2;
4262
4263 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4264 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4265 ld_moved = -1;
4266 else
4267 ld_moved = 0;
4268out:
4269 if (ld_moved)
4270 update_shares(sd);
4271 return ld_moved;
4272}
4273
4274/*
4275 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4276 * tasks if there is an imbalance.
4277 *
4278 * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
4279 * this_rq is locked.
4280 */
4281static int
4282load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
4283{
4284 struct sched_group *group;
4285 struct rq *busiest = NULL;
4286 unsigned long imbalance;
4287 int ld_moved = 0;
4288 int sd_idle = 0;
4289 int all_pinned = 0;
4290 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4291
4292 cpumask_setall(cpus);
4293
4294 /*
4295 * When power savings policy is enabled for the parent domain, idle
4296 * sibling can pick up load irrespective of busy siblings. In this case,
4297 * let the state of the idle sibling percolate up as CPU_IDLE, instead of
4298 * portraying it as CPU_NOT_IDLE.
4299 */
4300 if (sd->flags & SD_SHARE_CPUPOWER &&
4301 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4302 sd_idle = 1;
4303
4304 schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
4305redo:
4306 update_shares_locked(this_rq, sd);
4307 group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
4308 &sd_idle, cpus, NULL);
4309 if (!group) {
4310 schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
4311 goto out_balanced;
4312 }
4313
4314 busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
4315 if (!busiest) {
4316 schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
4317 goto out_balanced;
4318 }
4319
4320 BUG_ON(busiest == this_rq);
4321
4322 schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
4323
4324 ld_moved = 0;
4325 if (busiest->nr_running > 1) {
4326 /* Attempt to move tasks */
4327 double_lock_balance(this_rq, busiest);
4328 /* this_rq->clock is already updated */
4329 update_rq_clock(busiest);
4330 ld_moved = move_tasks(this_rq, this_cpu, busiest,
4331 imbalance, sd, CPU_NEWLY_IDLE,
4332 &all_pinned);
4333 double_unlock_balance(this_rq, busiest);
4334
4335 if (unlikely(all_pinned)) {
4336 cpumask_clear_cpu(cpu_of(busiest), cpus);
4337 if (!cpumask_empty(cpus))
4338 goto redo;
4339 }
4340 }
4341
4342 if (!ld_moved) {
4343 int active_balance = 0;
4344
4345 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
4346 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4347 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4348 return -1;
4349
4350 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
4351 return -1;
4352
4353 if (sd->nr_balance_failed++ < 2)
4354 return -1;
4355
4356 /*
4357 * The only task running on a non-idle cpu can be moved to this
4358 * cpu in an attempt to completely free up the other CPU
4359 * package. The same method used to move a task in load_balance()
4360 * has been extended for load_balance_newidle() to speed up
4361 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2).
4362 *
4363 * The package power saving logic comes from
4364 * find_busiest_group(). If there is no imbalance, then
4365 * f_b_g() will return NULL. However, when sched_mc={1,2},
4366 * f_b_g() will select a group from which a running task may be
4367 * pulled to this cpu in order to make the other package idle.
4368 * If there is no opportunity to make a package idle and
4369 * there is no imbalance, then f_b_g() will return NULL and no
4370 * action will be taken in load_balance_newidle().
4371 *
4372 * Under normal task pull operation due to imbalance, there
4373 * will be more than one task in the source run queue and
4374 * move_tasks() will succeed. ld_moved will be true and this
4375 * active balance code will not be triggered.
4376 */
4377
4378 /* Lock busiest in correct order while this_rq is held */
4379 double_lock_balance(this_rq, busiest);
4380
4381 /*
4382 * don't kick the migration_thread, if the curr
4383 * task on busiest cpu can't be moved to this_cpu
4384 */
4385 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
4386 double_unlock_balance(this_rq, busiest);
4387 all_pinned = 1;
4388 return ld_moved;
4389 }
4390
4391 if (!busiest->active_balance) {
4392 busiest->active_balance = 1;
4393 busiest->push_cpu = this_cpu;
4394 active_balance = 1;
4395 }
4396
4397 double_unlock_balance(this_rq, busiest);
4398 /*
4399 * Should not call ttwu while holding a rq->lock
4400 */
4401 spin_unlock(&this_rq->lock);
4402 if (active_balance)
4403 wake_up_process(busiest->migration_thread);
4404 spin_lock(&this_rq->lock);
4405
4406 } else
4407 sd->nr_balance_failed = 0;
4408
4409 update_shares_locked(this_rq, sd);
4410 return ld_moved;
4411
4412out_balanced:
4413 schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
4414 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
4415 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
4416 return -1;
4417 sd->nr_balance_failed = 0;
4418
4419 return 0;
4420}
4421
4422/*
4423 * idle_balance is called by schedule() if this_cpu is about to become
4424 * idle. Attempts to pull tasks from other CPUs.
4425 */
4426static void idle_balance(int this_cpu, struct rq *this_rq)
4427{
4428 struct sched_domain *sd;
4429 int pulled_task = 0;
4430 unsigned long next_balance = jiffies + HZ;
4431
4432 for_each_domain(this_cpu, sd) {
4433 unsigned long interval;
4434
4435 if (!(sd->flags & SD_LOAD_BALANCE))
4436 continue;
4437
4438 if (sd->flags & SD_BALANCE_NEWIDLE)
4439 /* If we've pulled tasks over stop searching: */
4440 pulled_task = load_balance_newidle(this_cpu, this_rq,
4441 sd);
4442
4443 interval = msecs_to_jiffies(sd->balance_interval);
4444 if (time_after(next_balance, sd->last_balance + interval))
4445 next_balance = sd->last_balance + interval;
4446 if (pulled_task)
4447 break;
4448 }
4449 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4450 /*
4451 * We are going idle. next_balance may be set based on
4452 * a busy processor. So reset next_balance.
4453 */
4454 this_rq->next_balance = next_balance;
4455 }
4456}
4457
4458/*
4459 * active_load_balance is run by migration threads. It pushes running tasks
4460 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
4461 * running on each physical CPU where possible, and avoids physical /
4462 * logical imbalances.
4463 *
4464 * Called with busiest_rq locked.
4465 */
4466static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
4467{
4468 int target_cpu = busiest_rq->push_cpu;
4469 struct sched_domain *sd;
4470 struct rq *target_rq;
4471
4472 /* Is there any task to move? */
4473 if (busiest_rq->nr_running <= 1)
4474 return;
4475
4476 target_rq = cpu_rq(target_cpu);
4477
4478 /*
4479 * This condition is "impossible", if it occurs
4480 * we need to fix it. Originally reported by
4481 * Bjorn Helgaas on a 128-cpu setup.
4482 */
4483 BUG_ON(busiest_rq == target_rq);
4484
4485 /* move a task from busiest_rq to target_rq */
4486 double_lock_balance(busiest_rq, target_rq);
4487 update_rq_clock(busiest_rq);
4488 update_rq_clock(target_rq);
4489
4490 /* Search for an sd spanning us and the target CPU. */
4491 for_each_domain(target_cpu, sd) {
4492 if ((sd->flags & SD_LOAD_BALANCE) &&
4493 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
4494 break;
4495 }
4496
4497 if (likely(sd)) {
4498 schedstat_inc(sd, alb_count);
4499
4500 if (move_one_task(target_rq, target_cpu, busiest_rq,
4501 sd, CPU_IDLE))
4502 schedstat_inc(sd, alb_pushed);
4503 else
4504 schedstat_inc(sd, alb_failed);
4505 }
4506 double_unlock_balance(busiest_rq, target_rq);
4507}
4508
4509#ifdef CONFIG_NO_HZ
4510static struct {
4511 atomic_t load_balancer;
4512 cpumask_var_t cpu_mask;
4513 cpumask_var_t ilb_grp_nohz_mask;
4514} nohz ____cacheline_aligned = {
4515 .load_balancer = ATOMIC_INIT(-1),
4516};
4517
4518int get_nohz_load_balancer(void)
4519{
4520 return atomic_read(&nohz.load_balancer);
4521}
4522
4523#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4524/**
4525 * lowest_flag_domain - Return lowest sched_domain containing flag.
4526 * @cpu: The cpu whose lowest level of sched domain is to
4527 * be returned.
4528 * @flag: The flag to check for the lowest sched_domain
4529 * for the given cpu.
4530 *
4531 * Returns the lowest sched_domain of a cpu which contains the given flag.
4532 */
4533static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
4534{
4535 struct sched_domain *sd;
4536
4537 for_each_domain(cpu, sd)
4538 if (sd && (sd->flags & flag))
4539 break;
4540
4541 return sd;
4542}
4543
4544/**
4545 * for_each_flag_domain - Iterates over sched_domains containing the flag.
4546 * @cpu: The cpu whose domains we're iterating over.
4547 * @sd: variable holding the value of the power_savings_sd
4548 * for cpu.
4549 * @flag: The flag to filter the sched_domains to be iterated.
4550 *
4551 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
4552 * set, starting from the lowest sched_domain to the highest.
4553 */
4554#define for_each_flag_domain(cpu, sd, flag) \
4555 for (sd = lowest_flag_domain(cpu, flag); \
4556 (sd && (sd->flags & flag)); sd = sd->parent)
4557
4558/**
4559 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
4560 * @ilb_group: group to be checked for semi-idleness
4561 *
4562 * Returns: 1 if the group is semi-idle. 0 otherwise.
4563 *
4564 * We define a sched_group to be semi-idle if it has at least one idle CPU
4565 * and at least one non-idle CPU. This helper function checks if the given
4566 * sched_group is semi-idle or not.
4567 */
4568static inline int is_semi_idle_group(struct sched_group *ilb_group)
4569{
4570 cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
4571 sched_group_cpus(ilb_group));
4572
4573 /*
4574 * A sched_group is semi-idle when it has at least one busy cpu
4575 * and at least one idle cpu.
4576 */
4577 if (cpumask_empty(nohz.ilb_grp_nohz_mask))
4578 return 0;
4579
4580 if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
4581 return 0;
4582
4583 return 1;
4584}
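/*
 * Illustrative sketch (not from the kernel source): "semi-idle" just means
 * the intersection of the nohz mask with the group's mask is neither empty
 * nor the whole group.  With plain bitmasks instead of cpumasks (toy_* name
 * and masks are assumptions):
 */
static int toy_is_semi_idle(unsigned long group_mask, unsigned long nohz_mask)
{
        unsigned long idle_in_group = group_mask & nohz_mask;

        /* neither empty nor the whole group => semi-idle */
        return idle_in_group != 0 && idle_in_group != group_mask;
}
/* toy_is_semi_idle(0x0f, 0x06) == 1: cpus 1-2 idle, cpus 0 and 3 busy */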
4585/**
4586 * find_new_ilb - Finds the optimum idle load balancer for nomination.
4587 * @cpu: The cpu which is nominating a new idle_load_balancer.
4588 *
4589 * Returns: the id of the idle load balancer if it exists;
4590 * else, returns >= nr_cpu_ids.
4591 *
4592 * This algorithm picks the idle load balancer such that it belongs to a
4593 * semi-idle powersavings sched_domain. The idea is to try and avoid
4594 * completely idle packages/cores just for the purpose of idle load balancing
4595 * when there are other idle cpus which are better suited for that job.
4596 */
4597static int find_new_ilb(int cpu)
4598{
4599 struct sched_domain *sd;
4600 struct sched_group *ilb_group;
4601
4602 /*
4603 * Have idle load balancer selection from semi-idle packages only
4604 * when power-aware load balancing is enabled
4605 */
4606 if (!(sched_smt_power_savings || sched_mc_power_savings))
4607 goto out_done;
4608
4609 /*
4610 * Optimize for the case when we have no idle CPUs or only one
4611 * idle CPU. Don't walk the sched_domain hierarchy in such cases
4612 */
4613 if (cpumask_weight(nohz.cpu_mask) < 2)
4614 goto out_done;
4615
4616 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
4617 ilb_group = sd->groups;
4618
4619 do {
4620 if (is_semi_idle_group(ilb_group))
4621 return cpumask_first(nohz.ilb_grp_nohz_mask);
4622
4623 ilb_group = ilb_group->next;
4624
4625 } while (ilb_group != sd->groups);
4626 }
4627
4628out_done:
4629 return cpumask_first(nohz.cpu_mask);
4630}
4631#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
4632static inline int find_new_ilb(int call_cpu)
4633{
4634 return cpumask_first(nohz.cpu_mask);
4635}
4636#endif
4637
4638/*
4639 * This routine will try to nominate the ilb (idle load balancing)
4640 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
4641 * load balancing on behalf of all those cpus. If all the cpus in the system
4642 * go into this tickless mode, then there will be no ilb owner (as there is
4643 * no need for one) and all the cpus will sleep till the next wakeup event
4644 * arrives...
4645 *
4646 * For the ilb owner, the tick is not stopped, and this tick will be used
4647 * for idle load balancing. The ilb owner will still be part of
4648 * nohz.cpu_mask.
4649 *
4650 * While stopping the tick, this cpu will become the ilb owner if there
4651 * is no other owner, and will remain the owner until it becomes busy
4652 * or all cpus in the system stop their ticks, at which point
4653 * there is no need for an ilb owner.
4654 *
4655 * When the ilb owner becomes busy, it nominates another owner during the
4656 * next busy scheduler_tick().
4657 */
4658int select_nohz_load_balancer(int stop_tick)
4659{
4660 int cpu = smp_processor_id();
4661
4662 if (stop_tick) {
4663 cpu_rq(cpu)->in_nohz_recently = 1;
4664
4665 if (!cpu_active(cpu)) {
4666 if (atomic_read(&nohz.load_balancer) != cpu)
4667 return 0;
4668
4669 /*
4670 * If we are going offline and still the leader,
4671 * give up!
4672 */
4673 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4674 BUG();
4675
4676 return 0;
4677 }
4678
4679 cpumask_set_cpu(cpu, nohz.cpu_mask);
4680
4681 /* time for ilb owner also to sleep */
4682 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4683 if (atomic_read(&nohz.load_balancer) == cpu)
4684 atomic_set(&nohz.load_balancer, -1);
4685 return 0;
4686 }
4687
4688 if (atomic_read(&nohz.load_balancer) == -1) {
4689 /* make me the ilb owner */
4690 if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
4691 return 1;
4692 } else if (atomic_read(&nohz.load_balancer) == cpu) {
4693 int new_ilb;
4694
4695 if (!(sched_smt_power_savings ||
4696 sched_mc_power_savings))
4697 return 1;
4698 /*
4699 * Check to see if there is a more power-efficient
4700 * ilb.
4701 */
4702 new_ilb = find_new_ilb(cpu);
4703 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
4704 atomic_set(&nohz.load_balancer, -1);
4705 resched_cpu(new_ilb);
4706 return 0;
4707 }
4708 return 1;
4709 }
4710 } else {
4711 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
4712 return 0;
4713
4714 cpumask_clear_cpu(cpu, nohz.cpu_mask);
4715
4716 if (atomic_read(&nohz.load_balancer) == cpu)
4717 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4718 BUG();
4719 }
4720 return 0;
4721}
4722#endif
4723
4724static DEFINE_SPINLOCK(balancing);
4725
4726/*
4727 * It checks each scheduling domain to see if it is due to be balanced,
4728 * and initiates a balancing operation if so.
4729 *
4730 * Balancing parameters are set up in arch_init_sched_domains.
4731 */
4732static void rebalance_domains(int cpu, enum cpu_idle_type idle)
4733{
4734 int balance = 1;
4735 struct rq *rq = cpu_rq(cpu);
4736 unsigned long interval;
4737 struct sched_domain *sd;
4738 /* Earliest time when we have to do rebalance again */
4739 unsigned long next_balance = jiffies + 60*HZ;
4740 int update_next_balance = 0;
4741 int need_serialize;
4742
4743 for_each_domain(cpu, sd) {
4744 if (!(sd->flags & SD_LOAD_BALANCE))
4745 continue;
4746
4747 interval = sd->balance_interval;
4748 if (idle != CPU_IDLE)
4749 interval *= sd->busy_factor;
4750
4751 /* scale ms to jiffies */
4752 interval = msecs_to_jiffies(interval);
4753 if (unlikely(!interval))
4754 interval = 1;
4755 if (interval > HZ*NR_CPUS/10)
4756 interval = HZ*NR_CPUS/10;
4757
4758 need_serialize = sd->flags & SD_SERIALIZE;
4759
4760 if (need_serialize) {
4761 if (!spin_trylock(&balancing))
4762 goto out;
4763 }
4764
4765 if (time_after_eq(jiffies, sd->last_balance + interval)) {
4766 if (load_balance(cpu, rq, sd, idle, &balance)) {
4767 /*
4768 * We've pulled tasks over so either we're no
4769 * longer idle, or one of our SMT siblings is
4770 * not idle.
4771 */
4772 idle = CPU_NOT_IDLE;
4773 }
4774 sd->last_balance = jiffies;
4775 }
4776 if (need_serialize)
4777 spin_unlock(&balancing);
4778out:
4779 if (time_after(next_balance, sd->last_balance + interval)) {
4780 next_balance = sd->last_balance + interval;
4781 update_next_balance = 1;
4782 }
4783
4784 /*
4785 * Stop the load balance at this level. There is another
4786 * CPU in our sched group which is doing load balancing more
4787 * actively.
4788 */
4789 if (!balance)
4790 break;
4791 }
4792
4793 /*
4794 * next_balance will be updated only when there is a need.
4795 * When the cpu is attached to a null domain, for example, it will not be
4796 * updated.
4797 */
4798 if (likely(update_next_balance))
4799 rq->next_balance = next_balance;
4800}
4801
4802/*
4803 * run_rebalance_domains is triggered when needed from the scheduler tick.
4804 * In CONFIG_NO_HZ case, the idle load balance owner will do the
4805 * rebalancing for all the cpus for whom scheduler ticks are stopped.
4806 */
4807static void run_rebalance_domains(struct softirq_action *h)
4808{
4809 int this_cpu = smp_processor_id();
4810 struct rq *this_rq = cpu_rq(this_cpu);
4811 enum cpu_idle_type idle = this_rq->idle_at_tick ?
4812 CPU_IDLE : CPU_NOT_IDLE;
4813
4814 rebalance_domains(this_cpu, idle);
4815
4816#ifdef CONFIG_NO_HZ
4817 /*
4818 * If this cpu is the owner for idle load balancing, then do the
4819 * balancing on behalf of the other idle cpus whose ticks are
4820 * stopped.
4821 */
4822 if (this_rq->idle_at_tick &&
4823 atomic_read(&nohz.load_balancer) == this_cpu) {
4824 struct rq *rq;
4825 int balance_cpu;
4826
4827 for_each_cpu(balance_cpu, nohz.cpu_mask) {
4828 if (balance_cpu == this_cpu)
4829 continue;
4830
4831 /*
4832 * If this cpu gets work to do, stop the load balancing
4833 * work being done for other cpus. Next load
4834 * balancing owner will pick it up.
4835 */
4836 if (need_resched())
4837 break;
4838
4839 rebalance_domains(balance_cpu, CPU_IDLE);
4840
4841 rq = cpu_rq(balance_cpu);
4842 if (time_after(this_rq->next_balance, rq->next_balance))
4843 this_rq->next_balance = rq->next_balance;
4844 }
4845 }
4846#endif
4847}
4848
4849static inline int on_null_domain(int cpu)
4850{
4851 return !rcu_dereference(cpu_rq(cpu)->sd);
4852}
4853
4854/*
4855 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4856 *
4857 * In case of CONFIG_NO_HZ, this is the place where we nominate a new
4858 * idle load balancing owner or decide to stop the periodic load balancing,
4859 * if the whole system is idle.
4860 */
4861static inline void trigger_load_balance(struct rq *rq, int cpu)
4862{
4863#ifdef CONFIG_NO_HZ
4864 /*
4865 * If we were in the nohz mode recently and busy at the current
4866 * scheduler tick, then check if we need to nominate a new idle
4867 * load balancer.
4868 */
4869 if (rq->in_nohz_recently && !rq->idle_at_tick) {
4870 rq->in_nohz_recently = 0;
4871
4872 if (atomic_read(&nohz.load_balancer) == cpu) {
4873 cpumask_clear_cpu(cpu, nohz.cpu_mask);
4874 atomic_set(&nohz.load_balancer, -1);
4875 }
4876
4877 if (atomic_read(&nohz.load_balancer) == -1) {
4878 int ilb = find_new_ilb(cpu);
4879
4880 if (ilb < nr_cpu_ids)
4881 resched_cpu(ilb);
4882 }
4883 }
4884
4885 /*
4886 * If this cpu is idle and doing idle load balancing for all the
4887 * cpus with ticks stopped, is it time for that to stop?
4888 */
4889 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
4890 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4891 resched_cpu(cpu);
4892 return;
4893 }
4894
4895 /*
4896 * If this cpu is idle and the idle load balancing is done by
4897 * someone else, then there is no need to raise the SCHED_SOFTIRQ.
4898 */
4899 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
4900 cpumask_test_cpu(cpu, nohz.cpu_mask))
4901 return;
4902#endif
4903 /* Don't need to rebalance while attached to NULL domain */
4904 if (time_after_eq(jiffies, rq->next_balance) &&
4905 likely(!on_null_domain(cpu)))
4906 raise_softirq(SCHED_SOFTIRQ);
4907}
4908
4909#else /* CONFIG_SMP */
4910
4911/*
4912 * on UP we do not need to balance between CPUs:
4913 */
4914static inline void idle_balance(int cpu, struct rq *rq)
4915{
4916}
4917
4918#endif 3165#endif
4919 3166
4920DEFINE_PER_CPU(struct kernel_stat, kstat); 3167DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -5046,8 +3293,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
5046 p->gtime = cputime_add(p->gtime, cputime); 3293 p->gtime = cputime_add(p->gtime, cputime);
5047 3294
5048 /* Add guest time to cpustat. */ 3295 /* Add guest time to cpustat. */
5049 cpustat->user = cputime64_add(cpustat->user, tmp); 3296 if (TASK_NICE(p) > 0) {
5050 cpustat->guest = cputime64_add(cpustat->guest, tmp); 3297 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3298 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3299 } else {
3300 cpustat->user = cputime64_add(cpustat->user, tmp);
3301 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3302 }
5051} 3303}
5052 3304
5053/* 3305/*
@@ -5162,60 +3414,86 @@ void account_idle_ticks(unsigned long ticks)
5162 * Use precise platform statistics if available: 3414 * Use precise platform statistics if available:
5163 */ 3415 */
5164#ifdef CONFIG_VIRT_CPU_ACCOUNTING 3416#ifdef CONFIG_VIRT_CPU_ACCOUNTING
5165cputime_t task_utime(struct task_struct *p) 3417void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5166{ 3418{
5167 return p->utime; 3419 *ut = p->utime;
3420 *st = p->stime;
5168} 3421}
5169 3422
5170cputime_t task_stime(struct task_struct *p) 3423void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5171{ 3424{
5172 return p->stime; 3425 struct task_cputime cputime;
3426
3427 thread_group_cputime(p, &cputime);
3428
3429 *ut = cputime.utime;
3430 *st = cputime.stime;
5173} 3431}
5174#else 3432#else
5175cputime_t task_utime(struct task_struct *p) 3433
3434#ifndef nsecs_to_cputime
3435# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
3436#endif
3437
3438void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5176{ 3439{
5177 clock_t utime = cputime_to_clock_t(p->utime), 3440 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
5178 total = utime + cputime_to_clock_t(p->stime);
5179 u64 temp;
5180 3441
5181 /* 3442 /*
5182 * Use CFS's precise accounting: 3443 * Use CFS's precise accounting:
5183 */ 3444 */
5184 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); 3445 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
5185 3446
5186 if (total) { 3447 if (total) {
5187 temp *= utime; 3448 u64 temp;
3449
3450 temp = (u64)(rtime * utime);
5188 do_div(temp, total); 3451 do_div(temp, total);
5189 } 3452 utime = (cputime_t)temp;
5190 utime = (clock_t)temp; 3453 } else
3454 utime = rtime;
3455
3456 /*
3457 * Compare with previous values, to keep monotonicity:
3458 */
3459 p->prev_utime = max(p->prev_utime, utime);
3460 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
5191 3461
5192 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); 3462 *ut = p->prev_utime;
5193 return p->prev_utime; 3463 *st = p->prev_stime;
5194} 3464}
5195 3465
5196cputime_t task_stime(struct task_struct *p) 3466/*
3467 * Must be called with siglock held.
3468 */
3469void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5197{ 3470{
5198 clock_t stime; 3471 struct signal_struct *sig = p->signal;
3472 struct task_cputime cputime;
3473 cputime_t rtime, utime, total;
5199 3474
5200 /* 3475 thread_group_cputime(p, &cputime);
5201 * Use CFS's precise accounting. (we subtract utime from
5202 * the total, to make sure the total observed by userspace
5203 * grows monotonically - apps rely on that):
5204 */
5205 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
5206 cputime_to_clock_t(task_utime(p));
5207 3476
5208 if (stime >= 0) 3477 total = cputime_add(cputime.utime, cputime.stime);
5209 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); 3478 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
5210 3479
5211 return p->prev_stime; 3480 if (total) {
5212} 3481 u64 temp;
5213#endif
5214 3482
5215inline cputime_t task_gtime(struct task_struct *p) 3483 temp = (u64)(rtime * cputime.utime);
5216{ 3484 do_div(temp, total);
5217 return p->gtime; 3485 utime = (cputime_t)temp;
3486 } else
3487 utime = rtime;
3488
3489 sig->prev_utime = max(sig->prev_utime, utime);
3490 sig->prev_stime = max(sig->prev_stime,
3491 cputime_sub(rtime, sig->prev_utime));
3492
3493 *ut = sig->prev_utime;
3494 *st = sig->prev_stime;
5218} 3495}
3496#endif
5219 3497
5220/* 3498/*
5221 * This function gets called by the timer code, with HZ frequency. 3499 * This function gets called by the timer code, with HZ frequency.
@@ -5232,13 +3510,13 @@ void scheduler_tick(void)
5232 3510
5233 sched_clock_tick(); 3511 sched_clock_tick();
5234 3512
5235 spin_lock(&rq->lock); 3513 raw_spin_lock(&rq->lock);
5236 update_rq_clock(rq); 3514 update_rq_clock(rq);
5237 update_cpu_load(rq); 3515 update_cpu_load(rq);
5238 curr->sched_class->task_tick(rq, curr, 0); 3516 curr->sched_class->task_tick(rq, curr, 0);
5239 spin_unlock(&rq->lock); 3517 raw_spin_unlock(&rq->lock);
5240 3518
5241 perf_event_task_tick(curr, cpu); 3519 perf_event_task_tick(curr);
5242 3520
5243#ifdef CONFIG_SMP 3521#ifdef CONFIG_SMP
5244 rq->idle_at_tick = idle_cpu(cpu); 3522 rq->idle_at_tick = idle_cpu(cpu);
@@ -5350,13 +3628,14 @@ static inline void schedule_debug(struct task_struct *prev)
5350#endif 3628#endif
5351} 3629}
5352 3630
5353static void put_prev_task(struct rq *rq, struct task_struct *p) 3631static void put_prev_task(struct rq *rq, struct task_struct *prev)
5354{ 3632{
5355 u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime; 3633 if (prev->state == TASK_RUNNING) {
3634 u64 runtime = prev->se.sum_exec_runtime;
5356 3635
5357 update_avg(&p->se.avg_running, runtime); 3636 runtime -= prev->se.prev_sum_exec_runtime;
3637 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
5358 3638
5359 if (p->state == TASK_RUNNING) {
5360 /* 3639 /*
5361 * In order to avoid avg_overlap growing stale when we are 3640 * In order to avoid avg_overlap growing stale when we are
5362 * indeed overlapping and hence not getting put to sleep, grow 3641 * indeed overlapping and hence not getting put to sleep, grow
@@ -5366,12 +3645,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
5366 * correlates to the amount of cache footprint a task can 3645 * correlates to the amount of cache footprint a task can
5367 * build up. 3646 * build up.
5368 */ 3647 */
5369 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost); 3648 update_avg(&prev->se.avg_overlap, runtime);
5370 update_avg(&p->se.avg_overlap, runtime);
5371 } else {
5372 update_avg(&p->se.avg_running, 0);
5373 } 3649 }
5374 p->sched_class->put_prev_task(rq, p); 3650 prev->sched_class->put_prev_task(rq, prev);
5375} 3651}
5376 3652
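put_prev_task() now does the clamp inside the TASK_RUNNING branch: the slice just consumed is capped at twice sysctl_sched_migration_cost and fed into avg_overlap, the decaying average the comment above ties to cache footprint. A sketch of that averaging step, assuming the usual 1/8 weight of the scheduler's update_avg() (the exact shift is an assumption here, not taken from this hunk):

typedef long long s64;
typedef unsigned long long u64;

/* Exponentially decaying average: new = old + (sample - old) / 8. */
static void update_avg_sketch(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;

        *avg += diff >> 3;
}

In put_prev_task() the sample is min(runtime, 2 * sysctl_sched_migration_cost), so a single unusually long run cannot swamp the average.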
5377/* 3653/*
@@ -5432,7 +3708,7 @@ need_resched_nonpreemptible:
5432 if (sched_feat(HRTICK)) 3708 if (sched_feat(HRTICK))
5433 hrtick_clear(rq); 3709 hrtick_clear(rq);
5434 3710
5435 spin_lock_irq(&rq->lock); 3711 raw_spin_lock_irq(&rq->lock);
5436 update_rq_clock(rq); 3712 update_rq_clock(rq);
5437 clear_tsk_need_resched(prev); 3713 clear_tsk_need_resched(prev);
5438 3714
@@ -5454,7 +3730,7 @@ need_resched_nonpreemptible:
5454 3730
5455 if (likely(prev != next)) { 3731 if (likely(prev != next)) {
5456 sched_info_switch(prev, next); 3732 sched_info_switch(prev, next);
5457 perf_event_task_sched_out(prev, next, cpu); 3733 perf_event_task_sched_out(prev, next);
5458 3734
5459 rq->nr_switches++; 3735 rq->nr_switches++;
5460 rq->curr = next; 3736 rq->curr = next;
@@ -5468,12 +3744,15 @@ need_resched_nonpreemptible:
5468 cpu = smp_processor_id(); 3744 cpu = smp_processor_id();
5469 rq = cpu_rq(cpu); 3745 rq = cpu_rq(cpu);
5470 } else 3746 } else
5471 spin_unlock_irq(&rq->lock); 3747 raw_spin_unlock_irq(&rq->lock);
5472 3748
5473 post_schedule(rq); 3749 post_schedule(rq);
5474 3750
5475 if (unlikely(reacquire_kernel_lock(current) < 0)) 3751 if (unlikely(reacquire_kernel_lock(current) < 0)) {
3752 prev = rq->curr;
3753 switch_count = &prev->nivcsw;
5476 goto need_resched_nonpreemptible; 3754 goto need_resched_nonpreemptible;
3755 }
5477 3756
5478 preempt_enable_no_resched(); 3757 preempt_enable_no_resched();
5479 if (need_resched()) 3758 if (need_resched())
@@ -5481,7 +3760,7 @@ need_resched_nonpreemptible:
5481} 3760}
5482EXPORT_SYMBOL(schedule); 3761EXPORT_SYMBOL(schedule);
5483 3762
5484#ifdef CONFIG_SMP 3763#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
5485/* 3764/*
5486 * Look out! "owner" is an entirely speculative pointer 3765 * Look out! "owner" is an entirely speculative pointer
5487 * access and not reliable. 3766 * access and not reliable.
@@ -5885,14 +4164,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
5885 */ 4164 */
5886bool try_wait_for_completion(struct completion *x) 4165bool try_wait_for_completion(struct completion *x)
5887{ 4166{
4167 unsigned long flags;
5888 int ret = 1; 4168 int ret = 1;
5889 4169
5890 spin_lock_irq(&x->wait.lock); 4170 spin_lock_irqsave(&x->wait.lock, flags);
5891 if (!x->done) 4171 if (!x->done)
5892 ret = 0; 4172 ret = 0;
5893 else 4173 else
5894 x->done--; 4174 x->done--;
5895 spin_unlock_irq(&x->wait.lock); 4175 spin_unlock_irqrestore(&x->wait.lock, flags);
5896 return ret; 4176 return ret;
5897} 4177}
5898EXPORT_SYMBOL(try_wait_for_completion); 4178EXPORT_SYMBOL(try_wait_for_completion);
@@ -5907,12 +4187,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
5907 */ 4187 */
5908bool completion_done(struct completion *x) 4188bool completion_done(struct completion *x)
5909{ 4189{
4190 unsigned long flags;
5910 int ret = 1; 4191 int ret = 1;
5911 4192
5912 spin_lock_irq(&x->wait.lock); 4193 spin_lock_irqsave(&x->wait.lock, flags);
5913 if (!x->done) 4194 if (!x->done)
5914 ret = 0; 4195 ret = 0;
5915 spin_unlock_irq(&x->wait.lock); 4196 spin_unlock_irqrestore(&x->wait.lock, flags);
5916 return ret; 4197 return ret;
5917} 4198}
5918EXPORT_SYMBOL(completion_done); 4199EXPORT_SYMBOL(completion_done);
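The try_wait_for_completion()/completion_done() change swaps spin_lock_irq() for spin_lock_irqsave(): the old form unconditionally re-enabled interrupts on unlock, which is wrong for callers that already run with IRQs disabled. A hypothetical caller (names ours) showing the kind of context the irqsave variant now tolerates:

#include <linux/completion.h>
#include <linux/irqflags.h>

/* Illustrative only: polling a completion from IRQ-disabled context. */
static bool poll_done_while_irqs_off(struct completion *x)
{
        unsigned long flags;
        bool done;

        local_irq_save(flags);          /* caller's IRQ state, e.g. an atomic path */
        done = completion_done(x);      /* now safe: saves and restores flags itself */
        local_irq_restore(flags);

        return done;
}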
@@ -5980,7 +4261,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
5980 unsigned long flags; 4261 unsigned long flags;
5981 int oldprio, on_rq, running; 4262 int oldprio, on_rq, running;
5982 struct rq *rq; 4263 struct rq *rq;
5983 const struct sched_class *prev_class = p->sched_class; 4264 const struct sched_class *prev_class;
5984 4265
5985 BUG_ON(prio < 0 || prio > MAX_PRIO); 4266 BUG_ON(prio < 0 || prio > MAX_PRIO);
5986 4267
@@ -5988,6 +4269,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
5988 update_rq_clock(rq); 4269 update_rq_clock(rq);
5989 4270
5990 oldprio = p->prio; 4271 oldprio = p->prio;
4272 prev_class = p->sched_class;
5991 on_rq = p->se.on_rq; 4273 on_rq = p->se.on_rq;
5992 running = task_current(rq, p); 4274 running = task_current(rq, p);
5993 if (on_rq) 4275 if (on_rq)
@@ -6005,7 +4287,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
6005 if (running) 4287 if (running)
6006 p->sched_class->set_curr_task(rq); 4288 p->sched_class->set_curr_task(rq);
6007 if (on_rq) { 4289 if (on_rq) {
6008 enqueue_task(rq, p, 0); 4290 enqueue_task(rq, p, 0, oldprio < prio);
6009 4291
6010 check_class_changed(rq, p, prev_class, oldprio, running); 4292 check_class_changed(rq, p, prev_class, oldprio, running);
6011 } 4293 }
@@ -6049,7 +4331,7 @@ void set_user_nice(struct task_struct *p, long nice)
6049 delta = p->prio - old_prio; 4331 delta = p->prio - old_prio;
6050 4332
6051 if (on_rq) { 4333 if (on_rq) {
6052 enqueue_task(rq, p, 0); 4334 enqueue_task(rq, p, 0, false);
6053 /* 4335 /*
6054 * If the task increased its priority or is running and 4336 * If the task increased its priority or is running and
6055 * lowered its priority, then reschedule its CPU: 4337 * lowered its priority, then reschedule its CPU:
@@ -6072,7 +4354,7 @@ int can_nice(const struct task_struct *p, const int nice)
6072 /* convert nice value [19,-20] to rlimit style value [1,40] */ 4354 /* convert nice value [19,-20] to rlimit style value [1,40] */
6073 int nice_rlim = 20 - nice; 4355 int nice_rlim = 20 - nice;
6074 4356
6075 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || 4357 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
6076 capable(CAP_SYS_NICE)); 4358 capable(CAP_SYS_NICE));
6077} 4359}
6078 4360
@@ -6175,22 +4457,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
6175 BUG_ON(p->se.on_rq); 4457 BUG_ON(p->se.on_rq);
6176 4458
6177 p->policy = policy; 4459 p->policy = policy;
6178 switch (p->policy) {
6179 case SCHED_NORMAL:
6180 case SCHED_BATCH:
6181 case SCHED_IDLE:
6182 p->sched_class = &fair_sched_class;
6183 break;
6184 case SCHED_FIFO:
6185 case SCHED_RR:
6186 p->sched_class = &rt_sched_class;
6187 break;
6188 }
6189
6190 p->rt_priority = prio; 4460 p->rt_priority = prio;
6191 p->normal_prio = normal_prio(p); 4461 p->normal_prio = normal_prio(p);
6192 /* we are holding p->pi_lock already */ 4462 /* we are holding p->pi_lock already */
6193 p->prio = rt_mutex_getprio(p); 4463 p->prio = rt_mutex_getprio(p);
4464 if (rt_prio(p->prio))
4465 p->sched_class = &rt_sched_class;
4466 else
4467 p->sched_class = &fair_sched_class;
6194 set_load_weight(p); 4468 set_load_weight(p);
6195} 4469}
6196 4470
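In __setscheduler() the switch over policies is gone: once rt_mutex_getprio() has folded any priority-inheritance boost into p->prio, the scheduling class can be derived from the effective priority alone, so a task that is still boosted keeps the RT class even while its policy is being set back to SCHED_NORMAL. Reduced to its core, using the same calls as the hunk above:

        p->prio = rt_mutex_getprio(p);          /* effective, possibly boosted prio */
        if (rt_prio(p->prio))
                p->sched_class = &rt_sched_class;
        else
                p->sched_class = &fair_sched_class;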
@@ -6215,7 +4489,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
6215{ 4489{
6216 int retval, oldprio, oldpolicy = -1, on_rq, running; 4490 int retval, oldprio, oldpolicy = -1, on_rq, running;
6217 unsigned long flags; 4491 unsigned long flags;
6218 const struct sched_class *prev_class = p->sched_class; 4492 const struct sched_class *prev_class;
6219 struct rq *rq; 4493 struct rq *rq;
6220 int reset_on_fork; 4494 int reset_on_fork;
6221 4495
@@ -6257,7 +4531,7 @@ recheck:
6257 4531
6258 if (!lock_task_sighand(p, &flags)) 4532 if (!lock_task_sighand(p, &flags))
6259 return -ESRCH; 4533 return -ESRCH;
6260 rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; 4534 rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
6261 unlock_task_sighand(p, &flags); 4535 unlock_task_sighand(p, &flags);
6262 4536
6263 /* can't set/change the rt policy */ 4537 /* can't set/change the rt policy */
@@ -6305,7 +4579,7 @@ recheck:
6305 * make sure no PI-waiters arrive (or leave) while we are 4579 * make sure no PI-waiters arrive (or leave) while we are
6306 * changing the priority of the task: 4580 * changing the priority of the task:
6307 */ 4581 */
6308 spin_lock_irqsave(&p->pi_lock, flags); 4582 raw_spin_lock_irqsave(&p->pi_lock, flags);
6309 /* 4583 /*
6310 * To be able to change p->policy safely, the apropriate 4584 * To be able to change p->policy safely, the apropriate
6311 * runqueue lock must be held. 4585 * runqueue lock must be held.
@@ -6315,7 +4589,7 @@ recheck:
6315 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 4589 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
6316 policy = oldpolicy = -1; 4590 policy = oldpolicy = -1;
6317 __task_rq_unlock(rq); 4591 __task_rq_unlock(rq);
6318 spin_unlock_irqrestore(&p->pi_lock, flags); 4592 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6319 goto recheck; 4593 goto recheck;
6320 } 4594 }
6321 update_rq_clock(rq); 4595 update_rq_clock(rq);
@@ -6329,6 +4603,7 @@ recheck:
6329 p->sched_reset_on_fork = reset_on_fork; 4603 p->sched_reset_on_fork = reset_on_fork;
6330 4604
6331 oldprio = p->prio; 4605 oldprio = p->prio;
4606 prev_class = p->sched_class;
6332 __setscheduler(rq, p, policy, param->sched_priority); 4607 __setscheduler(rq, p, policy, param->sched_priority);
6333 4608
6334 if (running) 4609 if (running)
@@ -6339,7 +4614,7 @@ recheck:
6339 check_class_changed(rq, p, prev_class, oldprio, running); 4614 check_class_changed(rq, p, prev_class, oldprio, running);
6340 } 4615 }
6341 __task_rq_unlock(rq); 4616 __task_rq_unlock(rq);
6342 spin_unlock_irqrestore(&p->pi_lock, flags); 4617 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
6343 4618
6344 rt_mutex_adjust_pi(p); 4619 rt_mutex_adjust_pi(p);
6345 4620
@@ -6439,7 +4714,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6439 return -EINVAL; 4714 return -EINVAL;
6440 4715
6441 retval = -ESRCH; 4716 retval = -ESRCH;
6442 read_lock(&tasklist_lock); 4717 rcu_read_lock();
6443 p = find_process_by_pid(pid); 4718 p = find_process_by_pid(pid);
6444 if (p) { 4719 if (p) {
6445 retval = security_task_getscheduler(p); 4720 retval = security_task_getscheduler(p);
@@ -6447,7 +4722,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6447 retval = p->policy 4722 retval = p->policy
6448 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); 4723 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6449 } 4724 }
6450 read_unlock(&tasklist_lock); 4725 rcu_read_unlock();
6451 return retval; 4726 return retval;
6452} 4727}
6453 4728
@@ -6465,7 +4740,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6465 if (!param || pid < 0) 4740 if (!param || pid < 0)
6466 return -EINVAL; 4741 return -EINVAL;
6467 4742
6468 read_lock(&tasklist_lock); 4743 rcu_read_lock();
6469 p = find_process_by_pid(pid); 4744 p = find_process_by_pid(pid);
6470 retval = -ESRCH; 4745 retval = -ESRCH;
6471 if (!p) 4746 if (!p)
@@ -6476,7 +4751,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6476 goto out_unlock; 4751 goto out_unlock;
6477 4752
6478 lp.sched_priority = p->rt_priority; 4753 lp.sched_priority = p->rt_priority;
6479 read_unlock(&tasklist_lock); 4754 rcu_read_unlock();
6480 4755
6481 /* 4756 /*
6482 * This one might sleep, we cannot do it with a spinlock held ... 4757 * This one might sleep, we cannot do it with a spinlock held ...
@@ -6486,7 +4761,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
6486 return retval; 4761 return retval;
6487 4762
6488out_unlock: 4763out_unlock:
6489 read_unlock(&tasklist_lock); 4764 rcu_read_unlock();
6490 return retval; 4765 return retval;
6491} 4766}
6492 4767
@@ -6497,22 +4772,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
6497 int retval; 4772 int retval;
6498 4773
6499 get_online_cpus(); 4774 get_online_cpus();
6500 read_lock(&tasklist_lock); 4775 rcu_read_lock();
6501 4776
6502 p = find_process_by_pid(pid); 4777 p = find_process_by_pid(pid);
6503 if (!p) { 4778 if (!p) {
6504 read_unlock(&tasklist_lock); 4779 rcu_read_unlock();
6505 put_online_cpus(); 4780 put_online_cpus();
6506 return -ESRCH; 4781 return -ESRCH;
6507 } 4782 }
6508 4783
6509 /* 4784 /* Prevent p going away */
6510 * It is not safe to call set_cpus_allowed with the
6511 * tasklist_lock held. We will bump the task_struct's
6512 * usage count and then drop tasklist_lock.
6513 */
6514 get_task_struct(p); 4785 get_task_struct(p);
6515 read_unlock(&tasklist_lock); 4786 rcu_read_unlock();
6516 4787
6517 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { 4788 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
6518 retval = -ENOMEM; 4789 retval = -ENOMEM;
@@ -6593,10 +4864,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
6593long sched_getaffinity(pid_t pid, struct cpumask *mask) 4864long sched_getaffinity(pid_t pid, struct cpumask *mask)
6594{ 4865{
6595 struct task_struct *p; 4866 struct task_struct *p;
4867 unsigned long flags;
4868 struct rq *rq;
6596 int retval; 4869 int retval;
6597 4870
6598 get_online_cpus(); 4871 get_online_cpus();
6599 read_lock(&tasklist_lock); 4872 rcu_read_lock();
6600 4873
6601 retval = -ESRCH; 4874 retval = -ESRCH;
6602 p = find_process_by_pid(pid); 4875 p = find_process_by_pid(pid);
@@ -6607,10 +4880,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
6607 if (retval) 4880 if (retval)
6608 goto out_unlock; 4881 goto out_unlock;
6609 4882
4883 rq = task_rq_lock(p, &flags);
6610 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); 4884 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
4885 task_rq_unlock(rq, &flags);
6611 4886
6612out_unlock: 4887out_unlock:
6613 read_unlock(&tasklist_lock); 4888 rcu_read_unlock();
6614 put_online_cpus(); 4889 put_online_cpus();
6615 4890
6616 return retval; 4891 return retval;
@@ -6628,7 +4903,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6628 int ret; 4903 int ret;
6629 cpumask_var_t mask; 4904 cpumask_var_t mask;
6630 4905
6631 if (len < cpumask_size()) 4906 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4907 return -EINVAL;
4908 if (len & (sizeof(unsigned long)-1))
6632 return -EINVAL; 4909 return -EINVAL;
6633 4910
6634 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4911 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -6636,10 +4913,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
6636 4913
6637 ret = sched_getaffinity(pid, mask); 4914 ret = sched_getaffinity(pid, mask);
6638 if (ret == 0) { 4915 if (ret == 0) {
6639 if (copy_to_user(user_mask_ptr, mask, cpumask_size())) 4916 size_t retlen = min_t(size_t, len, cpumask_size());
4917
4918 if (copy_to_user(user_mask_ptr, mask, retlen))
6640 ret = -EFAULT; 4919 ret = -EFAULT;
6641 else 4920 else
6642 ret = cpumask_size(); 4921 ret = retlen;
6643 } 4922 }
6644 free_cpumask_var(mask); 4923 free_cpumask_var(mask);
6645 4924
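sys_sched_getaffinity() now rejects buffers that cannot hold nr_cpu_ids bits or whose length is not a multiple of sizeof(unsigned long), and copies back only min(len, cpumask_size()) bytes, returning that size. From user space the usual pattern is to size the buffer with the CPU_ALLOC macros; a small caller sketch (the glibc wrapper returns 0 on success rather than the copied size, and the 1024-CPU bound is just an example):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int ncpus = 1024;                       /* assumed upper bound */
        cpu_set_t *set = CPU_ALLOC(ncpus);
        size_t size = CPU_ALLOC_SIZE(ncpus);    /* multiple of sizeof(long) */

        if (!set || sched_getaffinity(0, size, set)) {
                perror("sched_getaffinity");
                return 1;
        }
        for (int cpu = 0; cpu < ncpus; cpu++)
                if (CPU_ISSET_S(cpu, size, set))
                        printf("cpu%d ", cpu);
        printf("\n");
        CPU_FREE(set);
        return 0;
}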
@@ -6665,7 +4944,7 @@ SYSCALL_DEFINE0(sched_yield)
6665 */ 4944 */
6666 __release(rq->lock); 4945 __release(rq->lock);
6667 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 4946 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
6668 _raw_spin_unlock(&rq->lock); 4947 do_raw_spin_unlock(&rq->lock);
6669 preempt_enable_no_resched(); 4948 preempt_enable_no_resched();
6670 4949
6671 schedule(); 4950 schedule();
@@ -6845,6 +5124,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6845{ 5124{
6846 struct task_struct *p; 5125 struct task_struct *p;
6847 unsigned int time_slice; 5126 unsigned int time_slice;
5127 unsigned long flags;
5128 struct rq *rq;
6848 int retval; 5129 int retval;
6849 struct timespec t; 5130 struct timespec t;
6850 5131
@@ -6852,7 +5133,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6852 return -EINVAL; 5133 return -EINVAL;
6853 5134
6854 retval = -ESRCH; 5135 retval = -ESRCH;
6855 read_lock(&tasklist_lock); 5136 rcu_read_lock();
6856 p = find_process_by_pid(pid); 5137 p = find_process_by_pid(pid);
6857 if (!p) 5138 if (!p)
6858 goto out_unlock; 5139 goto out_unlock;
@@ -6861,15 +5142,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
6861 if (retval) 5142 if (retval)
6862 goto out_unlock; 5143 goto out_unlock;
6863 5144
6864 time_slice = p->sched_class->get_rr_interval(p); 5145 rq = task_rq_lock(p, &flags);
5146 time_slice = p->sched_class->get_rr_interval(rq, p);
5147 task_rq_unlock(rq, &flags);
6865 5148
6866 read_unlock(&tasklist_lock); 5149 rcu_read_unlock();
6867 jiffies_to_timespec(time_slice, &t); 5150 jiffies_to_timespec(time_slice, &t);
6868 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; 5151 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
6869 return retval; 5152 return retval;
6870 5153
6871out_unlock: 5154out_unlock:
6872 read_unlock(&tasklist_lock); 5155 rcu_read_unlock();
6873 return retval; 5156 return retval;
6874} 5157}
6875 5158
@@ -6935,7 +5218,7 @@ void show_state_filter(unsigned long state_filter)
6935 /* 5218 /*
6936 * Only show locks if all tasks are dumped: 5219 * Only show locks if all tasks are dumped:
6937 */ 5220 */
6938 if (state_filter == -1) 5221 if (!state_filter)
6939 debug_show_all_locks(); 5222 debug_show_all_locks();
6940} 5223}
6941 5224
@@ -6957,12 +5240,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6957 struct rq *rq = cpu_rq(cpu); 5240 struct rq *rq = cpu_rq(cpu);
6958 unsigned long flags; 5241 unsigned long flags;
6959 5242
6960 spin_lock_irqsave(&rq->lock, flags); 5243 raw_spin_lock_irqsave(&rq->lock, flags);
6961 5244
6962 __sched_fork(idle); 5245 __sched_fork(idle);
5246 idle->state = TASK_RUNNING;
6963 idle->se.exec_start = sched_clock(); 5247 idle->se.exec_start = sched_clock();
6964 5248
6965 idle->prio = idle->normal_prio = MAX_PRIO;
6966 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); 5249 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
6967 __set_task_cpu(idle, cpu); 5250 __set_task_cpu(idle, cpu);
6968 5251
@@ -6970,7 +5253,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
6970#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 5253#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
6971 idle->oncpu = 1; 5254 idle->oncpu = 1;
6972#endif 5255#endif
6973 spin_unlock_irqrestore(&rq->lock, flags); 5256 raw_spin_unlock_irqrestore(&rq->lock, flags);
6974 5257
6975 /* Set the preempt count _outside_ the spinlocks! */ 5258 /* Set the preempt count _outside_ the spinlocks! */
6976#if defined(CONFIG_PREEMPT) 5259#if defined(CONFIG_PREEMPT)
@@ -7003,22 +5286,43 @@ cpumask_var_t nohz_cpu_mask;
7003 * 5286 *
7004 * This idea comes from the SD scheduler of Con Kolivas: 5287 * This idea comes from the SD scheduler of Con Kolivas:
7005 */ 5288 */
7006static inline void sched_init_granularity(void) 5289static int get_update_sysctl_factor(void)
7007{ 5290{
7008 unsigned int factor = 1 + ilog2(num_online_cpus()); 5291 unsigned int cpus = min_t(int, num_online_cpus(), 8);
7009 const unsigned long limit = 200000000; 5292 unsigned int factor;
7010 5293
7011 sysctl_sched_min_granularity *= factor; 5294 switch (sysctl_sched_tunable_scaling) {
7012 if (sysctl_sched_min_granularity > limit) 5295 case SCHED_TUNABLESCALING_NONE:
7013 sysctl_sched_min_granularity = limit; 5296 factor = 1;
5297 break;
5298 case SCHED_TUNABLESCALING_LINEAR:
5299 factor = cpus;
5300 break;
5301 case SCHED_TUNABLESCALING_LOG:
5302 default:
5303 factor = 1 + ilog2(cpus);
5304 break;
5305 }
7014 5306
7015 sysctl_sched_latency *= factor; 5307 return factor;
7016 if (sysctl_sched_latency > limit) 5308}
7017 sysctl_sched_latency = limit;
7018 5309
7019 sysctl_sched_wakeup_granularity *= factor; 5310static void update_sysctl(void)
5311{
5312 unsigned int factor = get_update_sysctl_factor();
7020 5313
7021 sysctl_sched_shares_ratelimit *= factor; 5314#define SET_SYSCTL(name) \
5315 (sysctl_##name = (factor) * normalized_sysctl_##name)
5316 SET_SYSCTL(sched_min_granularity);
5317 SET_SYSCTL(sched_latency);
5318 SET_SYSCTL(sched_wakeup_granularity);
5319 SET_SYSCTL(sched_shares_ratelimit);
5320#undef SET_SYSCTL
5321}
5322
5323static inline void sched_init_granularity(void)
5324{
5325 update_sysctl();
7022} 5326}
7023 5327
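sched_init_granularity() no longer multiplies the live sysctls in place; each tunable keeps a normalized_sysctl_* baseline and update_sysctl() recomputes the visible value from a CPU-count factor, selectable at runtime through sysctl_sched_tunable_scaling (none, linear, or the default logarithmic policy). A sketch of the arithmetic using the same helpers as the hunk above; the 6ms figure is only an example:

#include <linux/kernel.h>
#include <linux/log2.h>

/*
 * Illustrative: with 8 online CPUs and logarithmic scaling,
 * factor = 1 + ilog2(8) = 4, so a 6ms normalized latency becomes 24ms.
 */
static unsigned int scaled_tunable(unsigned int normalized, unsigned int online_cpus)
{
        unsigned int cpus = min_t(unsigned int, online_cpus, 8);
        unsigned int factor = 1 + ilog2(cpus);  /* SCHED_TUNABLESCALING_LOG */

        return factor * normalized;
}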
7024#ifdef CONFIG_SMP 5328#ifdef CONFIG_SMP
@@ -7055,7 +5359,8 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7055 int ret = 0; 5359 int ret = 0;
7056 5360
7057 rq = task_rq_lock(p, &flags); 5361 rq = task_rq_lock(p, &flags);
7058 if (!cpumask_intersects(new_mask, cpu_online_mask)) { 5362
5363 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
7059 ret = -EINVAL; 5364 ret = -EINVAL;
7060 goto out; 5365 goto out;
7061 } 5366 }
@@ -7077,13 +5382,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
7077 if (cpumask_test_cpu(task_cpu(p), new_mask)) 5382 if (cpumask_test_cpu(task_cpu(p), new_mask))
7078 goto out; 5383 goto out;
7079 5384
7080 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { 5385 if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
7081 /* Need help from migration thread: drop lock and wait. */ 5386 /* Need help from migration thread: drop lock and wait. */
7082 struct task_struct *mt = rq->migration_thread; 5387 struct task_struct *mt = rq->migration_thread;
7083 5388
7084 get_task_struct(mt); 5389 get_task_struct(mt);
7085 task_rq_unlock(rq, &flags); 5390 task_rq_unlock(rq, &flags);
7086 wake_up_process(rq->migration_thread); 5391 wake_up_process(mt);
7087 put_task_struct(mt); 5392 put_task_struct(mt);
7088 wait_for_completion(&req.done); 5393 wait_for_completion(&req.done);
7089 tlb_migrate_finish(p->mm); 5394 tlb_migrate_finish(p->mm);
@@ -7110,7 +5415,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
7110static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) 5415static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7111{ 5416{
7112 struct rq *rq_dest, *rq_src; 5417 struct rq *rq_dest, *rq_src;
7113 int ret = 0, on_rq; 5418 int ret = 0;
7114 5419
7115 if (unlikely(!cpu_active(dest_cpu))) 5420 if (unlikely(!cpu_active(dest_cpu)))
7116 return ret; 5421 return ret;
@@ -7126,12 +5431,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
7126 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 5431 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7127 goto fail; 5432 goto fail;
7128 5433
7129 on_rq = p->se.on_rq; 5434 /*
7130 if (on_rq) 5435 * If we're not on a rq, the next wake-up will ensure we're
5436 * placed properly.
5437 */
5438 if (p->se.on_rq) {
7131 deactivate_task(rq_src, p, 0); 5439 deactivate_task(rq_src, p, 0);
7132 5440 set_task_cpu(p, dest_cpu);
7133 set_task_cpu(p, dest_cpu);
7134 if (on_rq) {
7135 activate_task(rq_dest, p, 0); 5441 activate_task(rq_dest, p, 0);
7136 check_preempt_curr(rq_dest, p, 0); 5442 check_preempt_curr(rq_dest, p, 0);
7137 } 5443 }
@@ -7166,10 +5472,10 @@ static int migration_thread(void *data)
7166 struct migration_req *req; 5472 struct migration_req *req;
7167 struct list_head *head; 5473 struct list_head *head;
7168 5474
7169 spin_lock_irq(&rq->lock); 5475 raw_spin_lock_irq(&rq->lock);
7170 5476
7171 if (cpu_is_offline(cpu)) { 5477 if (cpu_is_offline(cpu)) {
7172 spin_unlock_irq(&rq->lock); 5478 raw_spin_unlock_irq(&rq->lock);
7173 break; 5479 break;
7174 } 5480 }
7175 5481
@@ -7181,7 +5487,7 @@ static int migration_thread(void *data)
7181 head = &rq->migration_queue; 5487 head = &rq->migration_queue;
7182 5488
7183 if (list_empty(head)) { 5489 if (list_empty(head)) {
7184 spin_unlock_irq(&rq->lock); 5490 raw_spin_unlock_irq(&rq->lock);
7185 schedule(); 5491 schedule();
7186 set_current_state(TASK_INTERRUPTIBLE); 5492 set_current_state(TASK_INTERRUPTIBLE);
7187 continue; 5493 continue;
@@ -7190,14 +5496,14 @@ static int migration_thread(void *data)
7190 list_del_init(head->next); 5496 list_del_init(head->next);
7191 5497
7192 if (req->task != NULL) { 5498 if (req->task != NULL) {
7193 spin_unlock(&rq->lock); 5499 raw_spin_unlock(&rq->lock);
7194 __migrate_task(req->task, cpu, req->dest_cpu); 5500 __migrate_task(req->task, cpu, req->dest_cpu);
7195 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 5501 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7196 req->dest_cpu = RCU_MIGRATION_GOT_QS; 5502 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7197 spin_unlock(&rq->lock); 5503 raw_spin_unlock(&rq->lock);
7198 } else { 5504 } else {
7199 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 5505 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7200 spin_unlock(&rq->lock); 5506 raw_spin_unlock(&rq->lock);
7201 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 5507 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7202 } 5508 }
7203 local_irq_enable(); 5509 local_irq_enable();
@@ -7227,37 +5533,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
7227static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 5533static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
7228{ 5534{
7229 int dest_cpu; 5535 int dest_cpu;
7230 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
7231 5536
7232again: 5537again:
7233 /* Look for allowed, online CPU in same node. */ 5538 dest_cpu = select_fallback_rq(dead_cpu, p);
7234 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
7235 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
7236 goto move;
7237
7238 /* Any allowed, online CPU? */
7239 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
7240 if (dest_cpu < nr_cpu_ids)
7241 goto move;
7242
7243 /* No more Mr. Nice Guy. */
7244 if (dest_cpu >= nr_cpu_ids) {
7245 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
7246 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
7247
7248 /*
7249 * Don't tell them about moving exiting tasks or
7250 * kernel threads (both mm NULL), since they never
7251 * leave kernel.
7252 */
7253 if (p->mm && printk_ratelimit()) {
7254 printk(KERN_INFO "process %d (%s) no "
7255 "longer affine to cpu%d\n",
7256 task_pid_nr(p), p->comm, dead_cpu);
7257 }
7258 }
7259 5539
7260move:
7261 /* It can have affinity changed while we were choosing. */ 5540 /* It can have affinity changed while we were choosing. */
7262 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 5541 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
7263 goto again; 5542 goto again;
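The open-coded CPU hunt deleted above is now centralized in select_fallback_rq(), but the policy it encodes is the same three-step search. A sketch reconstructed from the removed lines (the helper name is ours):

static int pick_fallback_cpu(int dead_cpu, struct task_struct *p)
{
        const struct cpumask *node_mask = cpumask_of_node(cpu_to_node(dead_cpu));
        int cpu;

        /* 1) Prefer an allowed, online CPU on the same node as the dead CPU. */
        for_each_cpu_and(cpu, node_mask, cpu_online_mask)
                if (cpumask_test_cpu(cpu, &p->cpus_allowed))
                        return cpu;

        /* 2) Otherwise any allowed, online CPU. */
        cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
        if (cpu < nr_cpu_ids)
                return cpu;

        /*
         * 3) Last resort: let the cpuset widen the affinity and pick again
         *    (the original code also printed a ratelimited notice for user
         *    tasks losing their affinity).
         */
        cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
        return cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
}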
@@ -7272,7 +5551,7 @@ move:
7272 */ 5551 */
7273static void migrate_nr_uninterruptible(struct rq *rq_src) 5552static void migrate_nr_uninterruptible(struct rq *rq_src)
7274{ 5553{
7275 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); 5554 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
7276 unsigned long flags; 5555 unsigned long flags;
7277 5556
7278 local_irq_save(flags); 5557 local_irq_save(flags);
@@ -7320,14 +5599,14 @@ void sched_idle_next(void)
7320 * Strictly not necessary since rest of the CPUs are stopped by now 5599 * Strictly not necessary since rest of the CPUs are stopped by now
7321 * and interrupts disabled on the current cpu. 5600 * and interrupts disabled on the current cpu.
7322 */ 5601 */
7323 spin_lock_irqsave(&rq->lock, flags); 5602 raw_spin_lock_irqsave(&rq->lock, flags);
7324 5603
7325 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 5604 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
7326 5605
7327 update_rq_clock(rq); 5606 update_rq_clock(rq);
7328 activate_task(rq, p, 0); 5607 activate_task(rq, p, 0);
7329 5608
7330 spin_unlock_irqrestore(&rq->lock, flags); 5609 raw_spin_unlock_irqrestore(&rq->lock, flags);
7331} 5610}
7332 5611
7333/* 5612/*
@@ -7363,9 +5642,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
7363 * that's OK. No task can be added to this CPU, so iteration is 5642 * that's OK. No task can be added to this CPU, so iteration is
7364 * fine. 5643 * fine.
7365 */ 5644 */
7366 spin_unlock_irq(&rq->lock); 5645 raw_spin_unlock_irq(&rq->lock);
7367 move_task_off_dead_cpu(dead_cpu, p); 5646 move_task_off_dead_cpu(dead_cpu, p);
7368 spin_lock_irq(&rq->lock); 5647 raw_spin_lock_irq(&rq->lock);
7369 5648
7370 put_task_struct(p); 5649 put_task_struct(p);
7371} 5650}
@@ -7406,17 +5685,16 @@ static struct ctl_table sd_ctl_dir[] = {
7406 .procname = "sched_domain", 5685 .procname = "sched_domain",
7407 .mode = 0555, 5686 .mode = 0555,
7408 }, 5687 },
7409 {0, }, 5688 {}
7410}; 5689};
7411 5690
7412static struct ctl_table sd_ctl_root[] = { 5691static struct ctl_table sd_ctl_root[] = {
7413 { 5692 {
7414 .ctl_name = CTL_KERN,
7415 .procname = "kernel", 5693 .procname = "kernel",
7416 .mode = 0555, 5694 .mode = 0555,
7417 .child = sd_ctl_dir, 5695 .child = sd_ctl_dir,
7418 }, 5696 },
7419 {0, }, 5697 {}
7420}; 5698};
7421 5699
7422static struct ctl_table *sd_alloc_ctl_entry(int n) 5700static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7526,7 +5804,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
7526static struct ctl_table_header *sd_sysctl_header; 5804static struct ctl_table_header *sd_sysctl_header;
7527static void register_sched_domain_sysctl(void) 5805static void register_sched_domain_sysctl(void)
7528{ 5806{
7529 int i, cpu_num = num_online_cpus(); 5807 int i, cpu_num = num_possible_cpus();
7530 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); 5808 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
7531 char buf[32]; 5809 char buf[32];
7532 5810
@@ -7536,7 +5814,7 @@ static void register_sched_domain_sysctl(void)
7536 if (entry == NULL) 5814 if (entry == NULL)
7537 return; 5815 return;
7538 5816
7539 for_each_online_cpu(i) { 5817 for_each_possible_cpu(i) {
7540 snprintf(buf, 32, "cpu%d", i); 5818 snprintf(buf, 32, "cpu%d", i);
7541 entry->procname = kstrdup(buf, GFP_KERNEL); 5819 entry->procname = kstrdup(buf, GFP_KERNEL);
7542 entry->mode = 0555; 5820 entry->mode = 0555;
@@ -7632,13 +5910,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7632 5910
7633 /* Update our root-domain */ 5911 /* Update our root-domain */
7634 rq = cpu_rq(cpu); 5912 rq = cpu_rq(cpu);
7635 spin_lock_irqsave(&rq->lock, flags); 5913 raw_spin_lock_irqsave(&rq->lock, flags);
7636 if (rq->rd) { 5914 if (rq->rd) {
7637 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5915 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7638 5916
7639 set_rq_online(rq); 5917 set_rq_online(rq);
7640 } 5918 }
7641 spin_unlock_irqrestore(&rq->lock, flags); 5919 raw_spin_unlock_irqrestore(&rq->lock, flags);
7642 break; 5920 break;
7643 5921
7644#ifdef CONFIG_HOTPLUG_CPU 5922#ifdef CONFIG_HOTPLUG_CPU
@@ -7663,14 +5941,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7663 put_task_struct(rq->migration_thread); 5941 put_task_struct(rq->migration_thread);
7664 rq->migration_thread = NULL; 5942 rq->migration_thread = NULL;
7665 /* Idle task back to normal (off runqueue, low prio) */ 5943 /* Idle task back to normal (off runqueue, low prio) */
7666 spin_lock_irq(&rq->lock); 5944 raw_spin_lock_irq(&rq->lock);
7667 update_rq_clock(rq); 5945 update_rq_clock(rq);
7668 deactivate_task(rq, rq->idle, 0); 5946 deactivate_task(rq, rq->idle, 0);
7669 rq->idle->static_prio = MAX_PRIO;
7670 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 5947 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
7671 rq->idle->sched_class = &idle_sched_class; 5948 rq->idle->sched_class = &idle_sched_class;
7672 migrate_dead_tasks(cpu); 5949 migrate_dead_tasks(cpu);
7673 spin_unlock_irq(&rq->lock); 5950 raw_spin_unlock_irq(&rq->lock);
7674 cpuset_unlock(); 5951 cpuset_unlock();
7675 migrate_nr_uninterruptible(rq); 5952 migrate_nr_uninterruptible(rq);
7676 BUG_ON(rq->nr_running != 0); 5953 BUG_ON(rq->nr_running != 0);
@@ -7680,30 +5957,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7680 * they didn't take sched_hotcpu_mutex. Just wake up 5957 * they didn't take sched_hotcpu_mutex. Just wake up
7681 * the requestors. 5958 * the requestors.
7682 */ 5959 */
7683 spin_lock_irq(&rq->lock); 5960 raw_spin_lock_irq(&rq->lock);
7684 while (!list_empty(&rq->migration_queue)) { 5961 while (!list_empty(&rq->migration_queue)) {
7685 struct migration_req *req; 5962 struct migration_req *req;
7686 5963
7687 req = list_entry(rq->migration_queue.next, 5964 req = list_entry(rq->migration_queue.next,
7688 struct migration_req, list); 5965 struct migration_req, list);
7689 list_del_init(&req->list); 5966 list_del_init(&req->list);
7690 spin_unlock_irq(&rq->lock); 5967 raw_spin_unlock_irq(&rq->lock);
7691 complete(&req->done); 5968 complete(&req->done);
7692 spin_lock_irq(&rq->lock); 5969 raw_spin_lock_irq(&rq->lock);
7693 } 5970 }
7694 spin_unlock_irq(&rq->lock); 5971 raw_spin_unlock_irq(&rq->lock);
7695 break; 5972 break;
7696 5973
7697 case CPU_DYING: 5974 case CPU_DYING:
7698 case CPU_DYING_FROZEN: 5975 case CPU_DYING_FROZEN:
7699 /* Update our root-domain */ 5976 /* Update our root-domain */
7700 rq = cpu_rq(cpu); 5977 rq = cpu_rq(cpu);
7701 spin_lock_irqsave(&rq->lock, flags); 5978 raw_spin_lock_irqsave(&rq->lock, flags);
7702 if (rq->rd) { 5979 if (rq->rd) {
7703 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5980 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7704 set_rq_offline(rq); 5981 set_rq_offline(rq);
7705 } 5982 }
7706 spin_unlock_irqrestore(&rq->lock, flags); 5983 raw_spin_unlock_irqrestore(&rq->lock, flags);
7707 break; 5984 break;
7708#endif 5985#endif
7709 } 5986 }
@@ -7740,6 +6017,16 @@ early_initcall(migration_init);
7740 6017
7741#ifdef CONFIG_SCHED_DEBUG 6018#ifdef CONFIG_SCHED_DEBUG
7742 6019
6020static __read_mostly int sched_domain_debug_enabled;
6021
6022static int __init sched_domain_debug_setup(char *str)
6023{
6024 sched_domain_debug_enabled = 1;
6025
6026 return 0;
6027}
6028early_param("sched_debug", sched_domain_debug_setup);
6029
7743static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 6030static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7744 struct cpumask *groupmask) 6031 struct cpumask *groupmask)
7745{ 6032{
@@ -7826,6 +6113,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
7826 cpumask_var_t groupmask; 6113 cpumask_var_t groupmask;
7827 int level = 0; 6114 int level = 0;
7828 6115
6116 if (!sched_domain_debug_enabled)
6117 return;
6118
7829 if (!sd) { 6119 if (!sd) {
7830 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 6120 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
7831 return; 6121 return;
@@ -7905,6 +6195,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
7905 6195
7906static void free_rootdomain(struct root_domain *rd) 6196static void free_rootdomain(struct root_domain *rd)
7907{ 6197{
6198 synchronize_sched();
6199
7908 cpupri_cleanup(&rd->cpupri); 6200 cpupri_cleanup(&rd->cpupri);
7909 6201
7910 free_cpumask_var(rd->rto_mask); 6202 free_cpumask_var(rd->rto_mask);
@@ -7918,7 +6210,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7918 struct root_domain *old_rd = NULL; 6210 struct root_domain *old_rd = NULL;
7919 unsigned long flags; 6211 unsigned long flags;
7920 6212
7921 spin_lock_irqsave(&rq->lock, flags); 6213 raw_spin_lock_irqsave(&rq->lock, flags);
7922 6214
7923 if (rq->rd) { 6215 if (rq->rd) {
7924 old_rd = rq->rd; 6216 old_rd = rq->rd;
@@ -7944,7 +6236,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7944 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 6236 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
7945 set_rq_online(rq); 6237 set_rq_online(rq);
7946 6238
7947 spin_unlock_irqrestore(&rq->lock, flags); 6239 raw_spin_unlock_irqrestore(&rq->lock, flags);
7948 6240
7949 if (old_rd) 6241 if (old_rd)
7950 free_rootdomain(old_rd); 6242 free_rootdomain(old_rd);
@@ -8045,6 +6337,7 @@ static cpumask_var_t cpu_isolated_map;
8045/* Setup the mask of cpus configured for isolated domains */ 6337/* Setup the mask of cpus configured for isolated domains */
8046static int __init isolated_cpu_setup(char *str) 6338static int __init isolated_cpu_setup(char *str)
8047{ 6339{
6340 alloc_bootmem_cpumask_var(&cpu_isolated_map);
8048 cpulist_parse(str, cpu_isolated_map); 6341 cpulist_parse(str, cpu_isolated_map);
8049 return 1; 6342 return 1;
8050} 6343}
@@ -8229,14 +6522,14 @@ enum s_alloc {
8229 */ 6522 */
8230#ifdef CONFIG_SCHED_SMT 6523#ifdef CONFIG_SCHED_SMT
8231static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); 6524static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
8232static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); 6525static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
8233 6526
8234static int 6527static int
8235cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, 6528cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
8236 struct sched_group **sg, struct cpumask *unused) 6529 struct sched_group **sg, struct cpumask *unused)
8237{ 6530{
8238 if (sg) 6531 if (sg)
8239 *sg = &per_cpu(sched_group_cpus, cpu).sg; 6532 *sg = &per_cpu(sched_groups, cpu).sg;
8240 return cpu; 6533 return cpu;
8241} 6534}
8242#endif /* CONFIG_SCHED_SMT */ 6535#endif /* CONFIG_SCHED_SMT */
@@ -8881,7 +7174,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
8881 return __build_sched_domains(cpu_map, NULL); 7174 return __build_sched_domains(cpu_map, NULL);
8882} 7175}
8883 7176
8884static struct cpumask *doms_cur; /* current sched domains */ 7177static cpumask_var_t *doms_cur; /* current sched domains */
8885static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7178static int ndoms_cur; /* number of sched domains in 'doms_cur' */
8886static struct sched_domain_attr *dattr_cur; 7179static struct sched_domain_attr *dattr_cur;
8887 /* attributes of custom domains in 'doms_cur' */ 7180 /* attributes of custom domains in 'doms_cur' */
@@ -8903,6 +7196,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
8903 return 0; 7196 return 0;
8904} 7197}
8905 7198
7199cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7200{
7201 int i;
7202 cpumask_var_t *doms;
7203
7204 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7205 if (!doms)
7206 return NULL;
7207 for (i = 0; i < ndoms; i++) {
7208 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7209 free_sched_domains(doms, i);
7210 return NULL;
7211 }
7212 }
7213 return doms;
7214}
7215
7216void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7217{
7218 unsigned int i;
7219 for (i = 0; i < ndoms; i++)
7220 free_cpumask_var(doms[i]);
7221 kfree(doms);
7222}
7223
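The new alloc_sched_domains()/free_sched_domains() pair replaces the raw kmalloc of a cpumask array, so each domain mask can be an off-stack cpumask_var_t. A minimal, hypothetical caller (per the comment further down, partition_sched_domains() must run under the hotplug lock and takes ownership of the array):

static int install_single_domain(const struct cpumask *span)
{
        cpumask_var_t *doms = alloc_sched_domains(1);

        if (!doms)
                return -ENOMEM;

        cpumask_copy(doms[0], span);

        get_online_cpus();
        partition_sched_domains(1, doms, NULL); /* scheduler now owns doms */
        put_online_cpus();

        return 0;
}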
8906/* 7224/*
8907 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7225 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
8908 * For now this just excludes isolated cpus, but could be used to 7226 * For now this just excludes isolated cpus, but could be used to
@@ -8914,12 +7232,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
8914 7232
8915 arch_update_cpu_topology(); 7233 arch_update_cpu_topology();
8916 ndoms_cur = 1; 7234 ndoms_cur = 1;
8917 doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); 7235 doms_cur = alloc_sched_domains(ndoms_cur);
8918 if (!doms_cur) 7236 if (!doms_cur)
8919 doms_cur = fallback_doms; 7237 doms_cur = &fallback_doms;
8920 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); 7238 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
8921 dattr_cur = NULL; 7239 dattr_cur = NULL;
8922 err = build_sched_domains(doms_cur); 7240 err = build_sched_domains(doms_cur[0]);
8923 register_sched_domain_sysctl(); 7241 register_sched_domain_sysctl();
8924 7242
8925 return err; 7243 return err;
@@ -8969,19 +7287,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8969 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7287 * doms_new[] to the current sched domain partitioning, doms_cur[].
8970 * It destroys each deleted domain and builds each new domain. 7288 * It destroys each deleted domain and builds each new domain.
8971 * 7289 *
8972 * 'doms_new' is an array of cpumask's of length 'ndoms_new'. 7290 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
8973 * The masks don't intersect (don't overlap.) We should setup one 7291 * The masks don't intersect (don't overlap.) We should setup one
8974 * sched domain for each mask. CPUs not in any of the cpumasks will 7292 * sched domain for each mask. CPUs not in any of the cpumasks will
8975 * not be load balanced. If the same cpumask appears both in the 7293 * not be load balanced. If the same cpumask appears both in the
8976 * current 'doms_cur' domains and in the new 'doms_new', we can leave 7294 * current 'doms_cur' domains and in the new 'doms_new', we can leave
8977 * it as it is. 7295 * it as it is.
8978 * 7296 *
8979 * The passed in 'doms_new' should be kmalloc'd. This routine takes 7297 * The passed in 'doms_new' should be allocated using
8980 * ownership of it and will kfree it when done with it. If the caller 7298 * alloc_sched_domains. This routine takes ownership of it and will
8981 * failed the kmalloc call, then it can pass in doms_new == NULL && 7299 * free_sched_domains it when done with it. If the caller failed the
8982 * ndoms_new == 1, and partition_sched_domains() will fallback to 7300 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
8983 * the single partition 'fallback_doms', it also forces the domains 7301 * and partition_sched_domains() will fallback to the single partition
8984 * to be rebuilt. 7302 * 'fallback_doms', it also forces the domains to be rebuilt.
8985 * 7303 *
8986 * If doms_new == NULL it will be replaced with cpu_online_mask. 7304 * If doms_new == NULL it will be replaced with cpu_online_mask.
8987 * ndoms_new == 0 is a special case for destroying existing domains, 7305 * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8989,8 +7307,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8989 * 7307 *
8990 * Call with hotplug lock held 7308 * Call with hotplug lock held
8991 */ 7309 */
8992/* FIXME: Change to struct cpumask *doms_new[] */ 7310void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
8993void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
8994 struct sched_domain_attr *dattr_new) 7311 struct sched_domain_attr *dattr_new)
8995{ 7312{
8996 int i, j, n; 7313 int i, j, n;
@@ -9009,40 +7326,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
9009 /* Destroy deleted domains */ 7326 /* Destroy deleted domains */
9010 for (i = 0; i < ndoms_cur; i++) { 7327 for (i = 0; i < ndoms_cur; i++) {
9011 for (j = 0; j < n && !new_topology; j++) { 7328 for (j = 0; j < n && !new_topology; j++) {
9012 if (cpumask_equal(&doms_cur[i], &doms_new[j]) 7329 if (cpumask_equal(doms_cur[i], doms_new[j])
9013 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7330 && dattrs_equal(dattr_cur, i, dattr_new, j))
9014 goto match1; 7331 goto match1;
9015 } 7332 }
9016 /* no match - a current sched domain not in new doms_new[] */ 7333 /* no match - a current sched domain not in new doms_new[] */
9017 detach_destroy_domains(doms_cur + i); 7334 detach_destroy_domains(doms_cur[i]);
9018match1: 7335match1:
9019 ; 7336 ;
9020 } 7337 }
9021 7338
9022 if (doms_new == NULL) { 7339 if (doms_new == NULL) {
9023 ndoms_cur = 0; 7340 ndoms_cur = 0;
9024 doms_new = fallback_doms; 7341 doms_new = &fallback_doms;
9025 cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); 7342 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
9026 WARN_ON_ONCE(dattr_new); 7343 WARN_ON_ONCE(dattr_new);
9027 } 7344 }
9028 7345
9029 /* Build new domains */ 7346 /* Build new domains */
9030 for (i = 0; i < ndoms_new; i++) { 7347 for (i = 0; i < ndoms_new; i++) {
9031 for (j = 0; j < ndoms_cur && !new_topology; j++) { 7348 for (j = 0; j < ndoms_cur && !new_topology; j++) {
9032 if (cpumask_equal(&doms_new[i], &doms_cur[j]) 7349 if (cpumask_equal(doms_new[i], doms_cur[j])
9033 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7350 && dattrs_equal(dattr_new, i, dattr_cur, j))
9034 goto match2; 7351 goto match2;
9035 } 7352 }
9036 /* no match - add a new doms_new */ 7353 /* no match - add a new doms_new */
9037 __build_sched_domains(doms_new + i, 7354 __build_sched_domains(doms_new[i],
9038 dattr_new ? dattr_new + i : NULL); 7355 dattr_new ? dattr_new + i : NULL);
9039match2: 7356match2:
9040 ; 7357 ;
9041 } 7358 }
9042 7359
9043 /* Remember the new sched domains */ 7360 /* Remember the new sched domains */
9044 if (doms_cur != fallback_doms) 7361 if (doms_cur != &fallback_doms)
9045 kfree(doms_cur); 7362 free_sched_domains(doms_cur, ndoms_cur);
9046 kfree(dattr_cur); /* kfree(NULL) is safe */ 7363 kfree(dattr_cur); /* kfree(NULL) is safe */
9047 doms_cur = doms_new; 7364 doms_cur = doms_new;
9048 dattr_cur = dattr_new; 7365 dattr_cur = dattr_new;
@@ -9094,11 +7411,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
9094 7411
9095#ifdef CONFIG_SCHED_MC 7412#ifdef CONFIG_SCHED_MC
9096static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, 7413static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
7414 struct sysdev_class_attribute *attr,
9097 char *page) 7415 char *page)
9098{ 7416{
9099 return sprintf(page, "%u\n", sched_mc_power_savings); 7417 return sprintf(page, "%u\n", sched_mc_power_savings);
9100} 7418}
9101static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, 7419static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
7420 struct sysdev_class_attribute *attr,
9102 const char *buf, size_t count) 7421 const char *buf, size_t count)
9103{ 7422{
9104 return sched_power_savings_store(buf, count, 0); 7423 return sched_power_savings_store(buf, count, 0);
@@ -9110,11 +7429,13 @@ static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
9110 7429
9111#ifdef CONFIG_SCHED_SMT 7430#ifdef CONFIG_SCHED_SMT
9112static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, 7431static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
7432 struct sysdev_class_attribute *attr,
9113 char *page) 7433 char *page)
9114{ 7434{
9115 return sprintf(page, "%u\n", sched_smt_power_savings); 7435 return sprintf(page, "%u\n", sched_smt_power_savings);
9116} 7436}
9117static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, 7437static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
7438 struct sysdev_class_attribute *attr,
9118 const char *buf, size_t count) 7439 const char *buf, size_t count)
9119{ 7440{
9120 return sched_power_savings_store(buf, count, 1); 7441 return sched_power_savings_store(buf, count, 1);
@@ -9153,8 +7474,10 @@ static int update_sched_domains(struct notifier_block *nfb,
9153 switch (action) { 7474 switch (action) {
9154 case CPU_ONLINE: 7475 case CPU_ONLINE:
9155 case CPU_ONLINE_FROZEN: 7476 case CPU_ONLINE_FROZEN:
9156 case CPU_DEAD: 7477 case CPU_DOWN_PREPARE:
9157 case CPU_DEAD_FROZEN: 7478 case CPU_DOWN_PREPARE_FROZEN:
7479 case CPU_DOWN_FAILED:
7480 case CPU_DOWN_FAILED_FROZEN:
9158 partition_sched_domains(1, NULL, NULL); 7481 partition_sched_domains(1, NULL, NULL);
9159 return NOTIFY_OK; 7482 return NOTIFY_OK;
9160 7483
@@ -9201,7 +7524,7 @@ void __init sched_init_smp(void)
9201#endif 7524#endif
9202 get_online_cpus(); 7525 get_online_cpus();
9203 mutex_lock(&sched_domains_mutex); 7526 mutex_lock(&sched_domains_mutex);
9204 arch_init_sched_domains(cpu_online_mask); 7527 arch_init_sched_domains(cpu_active_mask);
9205 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); 7528 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
9206 if (cpumask_empty(non_isolated_cpus)) 7529 if (cpumask_empty(non_isolated_cpus))
9207 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); 7530 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9274,13 +7597,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
9274#ifdef CONFIG_SMP 7597#ifdef CONFIG_SMP
9275 rt_rq->rt_nr_migratory = 0; 7598 rt_rq->rt_nr_migratory = 0;
9276 rt_rq->overloaded = 0; 7599 rt_rq->overloaded = 0;
9277 plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 7600 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
9278#endif 7601#endif
9279 7602
9280 rt_rq->rt_time = 0; 7603 rt_rq->rt_time = 0;
9281 rt_rq->rt_throttled = 0; 7604 rt_rq->rt_throttled = 0;
9282 rt_rq->rt_runtime = 0; 7605 rt_rq->rt_runtime = 0;
9283 spin_lock_init(&rt_rq->rt_runtime_lock); 7606 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
9284 7607
9285#ifdef CONFIG_RT_GROUP_SCHED 7608#ifdef CONFIG_RT_GROUP_SCHED
9286 rt_rq->rt_nr_boosted = 0; 7609 rt_rq->rt_nr_boosted = 0;
@@ -9327,7 +7650,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
9327 tg->rt_rq[cpu] = rt_rq; 7650 tg->rt_rq[cpu] = rt_rq;
9328 init_rt_rq(rt_rq, rq); 7651 init_rt_rq(rt_rq, rq);
9329 rt_rq->tg = tg; 7652 rt_rq->tg = tg;
9330 rt_rq->rt_se = rt_se;
9331 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; 7653 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
9332 if (add) 7654 if (add)
9333 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); 7655 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
@@ -9358,16 +7680,9 @@ void __init sched_init(void)
9358#ifdef CONFIG_RT_GROUP_SCHED 7680#ifdef CONFIG_RT_GROUP_SCHED
9359 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7681 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
9360#endif 7682#endif
9361#ifdef CONFIG_USER_SCHED
9362 alloc_size *= 2;
9363#endif
9364#ifdef CONFIG_CPUMASK_OFFSTACK 7683#ifdef CONFIG_CPUMASK_OFFSTACK
9365 alloc_size += num_possible_cpus() * cpumask_size(); 7684 alloc_size += num_possible_cpus() * cpumask_size();
9366#endif 7685#endif
9367 /*
9368 * As sched_init() is called before page_alloc is setup,
9369 * we use alloc_bootmem().
9370 */
9371 if (alloc_size) { 7686 if (alloc_size) {
9372 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7687 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
9373 7688
@@ -9378,13 +7693,6 @@ void __init sched_init(void)
9378 init_task_group.cfs_rq = (struct cfs_rq **)ptr; 7693 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
9379 ptr += nr_cpu_ids * sizeof(void **); 7694 ptr += nr_cpu_ids * sizeof(void **);
9380 7695
9381#ifdef CONFIG_USER_SCHED
9382 root_task_group.se = (struct sched_entity **)ptr;
9383 ptr += nr_cpu_ids * sizeof(void **);
9384
9385 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
9386 ptr += nr_cpu_ids * sizeof(void **);
9387#endif /* CONFIG_USER_SCHED */
9388#endif /* CONFIG_FAIR_GROUP_SCHED */ 7696#endif /* CONFIG_FAIR_GROUP_SCHED */
9389#ifdef CONFIG_RT_GROUP_SCHED 7697#ifdef CONFIG_RT_GROUP_SCHED
9390 init_task_group.rt_se = (struct sched_rt_entity **)ptr; 7698 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -9393,13 +7701,6 @@ void __init sched_init(void)
9393 init_task_group.rt_rq = (struct rt_rq **)ptr; 7701 init_task_group.rt_rq = (struct rt_rq **)ptr;
9394 ptr += nr_cpu_ids * sizeof(void **); 7702 ptr += nr_cpu_ids * sizeof(void **);
9395 7703
9396#ifdef CONFIG_USER_SCHED
9397 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
9398 ptr += nr_cpu_ids * sizeof(void **);
9399
9400 root_task_group.rt_rq = (struct rt_rq **)ptr;
9401 ptr += nr_cpu_ids * sizeof(void **);
9402#endif /* CONFIG_USER_SCHED */
9403#endif /* CONFIG_RT_GROUP_SCHED */ 7704#endif /* CONFIG_RT_GROUP_SCHED */
9404#ifdef CONFIG_CPUMASK_OFFSTACK 7705#ifdef CONFIG_CPUMASK_OFFSTACK
9405 for_each_possible_cpu(i) { 7706 for_each_possible_cpu(i) {
@@ -9419,22 +7720,13 @@ void __init sched_init(void)
9419#ifdef CONFIG_RT_GROUP_SCHED 7720#ifdef CONFIG_RT_GROUP_SCHED
9420 init_rt_bandwidth(&init_task_group.rt_bandwidth, 7721 init_rt_bandwidth(&init_task_group.rt_bandwidth,
9421 global_rt_period(), global_rt_runtime()); 7722 global_rt_period(), global_rt_runtime());
9422#ifdef CONFIG_USER_SCHED
9423 init_rt_bandwidth(&root_task_group.rt_bandwidth,
9424 global_rt_period(), RUNTIME_INF);
9425#endif /* CONFIG_USER_SCHED */
9426#endif /* CONFIG_RT_GROUP_SCHED */ 7723#endif /* CONFIG_RT_GROUP_SCHED */
9427 7724
9428#ifdef CONFIG_GROUP_SCHED 7725#ifdef CONFIG_CGROUP_SCHED
9429 list_add(&init_task_group.list, &task_groups); 7726 list_add(&init_task_group.list, &task_groups);
9430 INIT_LIST_HEAD(&init_task_group.children); 7727 INIT_LIST_HEAD(&init_task_group.children);
9431 7728
9432#ifdef CONFIG_USER_SCHED 7729#endif /* CONFIG_CGROUP_SCHED */
9433 INIT_LIST_HEAD(&root_task_group.children);
9434 init_task_group.parent = &root_task_group;
9435 list_add(&init_task_group.siblings, &root_task_group.children);
9436#endif /* CONFIG_USER_SCHED */
9437#endif /* CONFIG_GROUP_SCHED */
9438 7730
9439#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP 7731#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
9440 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), 7732 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
@@ -9444,7 +7736,7 @@ void __init sched_init(void)
9444 struct rq *rq; 7736 struct rq *rq;
9445 7737
9446 rq = cpu_rq(i); 7738 rq = cpu_rq(i);
9447 spin_lock_init(&rq->lock); 7739 raw_spin_lock_init(&rq->lock);
9448 rq->nr_running = 0; 7740 rq->nr_running = 0;
9449 rq->calc_load_active = 0; 7741 rq->calc_load_active = 0;
9450 rq->calc_load_update = jiffies + LOAD_FREQ; 7742 rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9474,25 +7766,6 @@ void __init sched_init(void)
9474 * directly in rq->cfs (i.e init_task_group->se[] = NULL). 7766 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
9475 */ 7767 */
9476 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL); 7768 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
9477#elif defined CONFIG_USER_SCHED
9478 root_task_group.shares = NICE_0_LOAD;
9479 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
9480 /*
9481 * In case of task-groups formed thr' the user id of tasks,
9482 * init_task_group represents tasks belonging to root user.
9483 * Hence it forms a sibling of all subsequent groups formed.
9484 * In this case, init_task_group gets only a fraction of overall
9485 * system cpu resource, based on the weight assigned to root
9486 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
9487 * by letting tasks of init_task_group sit in a separate cfs_rq
9488 * (init_tg_cfs_rq) and having one entity represent this group of
9489 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
9490 */
9491 init_tg_cfs_entry(&init_task_group,
9492 &per_cpu(init_tg_cfs_rq, i),
9493 &per_cpu(init_sched_entity, i), i, 1,
9494 root_task_group.se[i]);
9495
9496#endif 7769#endif
9497#endif /* CONFIG_FAIR_GROUP_SCHED */ 7770#endif /* CONFIG_FAIR_GROUP_SCHED */
9498 7771
@@ -9501,12 +7774,6 @@ void __init sched_init(void)
9501 INIT_LIST_HEAD(&rq->leaf_rt_rq_list); 7774 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
9502#ifdef CONFIG_CGROUP_SCHED 7775#ifdef CONFIG_CGROUP_SCHED
9503 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL); 7776 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
9504#elif defined CONFIG_USER_SCHED
9505 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
9506 init_tg_rt_entry(&init_task_group,
9507 &per_cpu(init_rt_rq, i),
9508 &per_cpu(init_sched_rt_entity, i), i, 1,
9509 root_task_group.rt_se[i]);
9510#endif 7777#endif
9511#endif 7778#endif
9512 7779
@@ -9522,6 +7789,8 @@ void __init sched_init(void)
9522 rq->cpu = i; 7789 rq->cpu = i;
9523 rq->online = 0; 7790 rq->online = 0;
9524 rq->migration_thread = NULL; 7791 rq->migration_thread = NULL;
7792 rq->idle_stamp = 0;
7793 rq->avg_idle = 2*sysctl_sched_migration_cost;
9525 INIT_LIST_HEAD(&rq->migration_queue); 7794 INIT_LIST_HEAD(&rq->migration_queue);
9526 rq_attach_root(rq, &def_root_domain); 7795 rq_attach_root(rq, &def_root_domain);
9527#endif 7796#endif
@@ -9540,7 +7809,7 @@ void __init sched_init(void)
9540#endif 7809#endif
9541 7810
9542#ifdef CONFIG_RT_MUTEXES 7811#ifdef CONFIG_RT_MUTEXES
9543 plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 7812 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
9544#endif 7813#endif
9545 7814
9546 /* 7815 /*
@@ -9571,7 +7840,9 @@ void __init sched_init(void)
9571 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); 7840 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
9572 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); 7841 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
9573#endif 7842#endif
9574 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 7843 /* May be allocated at isolcpus cmdline parse time */
7844 if (cpu_isolated_map == NULL)
7845 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9575#endif /* SMP */ 7846#endif /* SMP */
9576 7847
9577 perf_event_init(); 7848 perf_event_init();
@@ -9582,12 +7853,12 @@ void __init sched_init(void)
9582#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 7853#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9583static inline int preempt_count_equals(int preempt_offset) 7854static inline int preempt_count_equals(int preempt_offset)
9584{ 7855{
9585 int nested = preempt_count() & ~PREEMPT_ACTIVE; 7856 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
9586 7857
9587 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset); 7858 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9588} 7859}
9589 7860
9590void __might_sleep(char *file, int line, int preempt_offset) 7861void __might_sleep(const char *file, int line, int preempt_offset)
9591{ 7862{
9592#ifdef in_atomic 7863#ifdef in_atomic
9593 static unsigned long prev_jiffy; /* ratelimiting */ 7864 static unsigned long prev_jiffy; /* ratelimiting */
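
A brief aside on the preempt_count_equals() hunk above: with preemptible RCU, rcu_read_lock() no longer raises preempt_count, so the old check could not catch a sleep inside an RCU read-side critical section. Folding rcu_preempt_depth() into the nesting count restores that debug warning. A hedged sketch of the situation it now catches (the allocation call site is purely illustrative, not from this patch):

	/*
	 * Sketch: under CONFIG_PREEMPT_RCU, rcu_read_lock() does not touch
	 * preempt_count, so this sequence used to slip past __might_sleep().
	 * Counting rcu_preempt_depth() as nesting makes the check fire again.
	 */
	rcu_read_lock();
	p = kmalloc(sizeof(*p), GFP_KERNEL);	/* may sleep -> __might_sleep() now warns */
	rcu_read_unlock();
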
@@ -9663,13 +7934,13 @@ void normalize_rt_tasks(void)
9663 continue; 7934 continue;
9664 } 7935 }
9665 7936
9666 spin_lock(&p->pi_lock); 7937 raw_spin_lock(&p->pi_lock);
9667 rq = __task_rq_lock(p); 7938 rq = __task_rq_lock(p);
9668 7939
9669 normalize_task(rq, p); 7940 normalize_task(rq, p);
9670 7941
9671 __task_rq_unlock(rq); 7942 __task_rq_unlock(rq);
9672 spin_unlock(&p->pi_lock); 7943 raw_spin_unlock(&p->pi_lock);
9673 } while_each_thread(g, p); 7944 } while_each_thread(g, p);
9674 7945
9675 read_unlock_irqrestore(&tasklist_lock, flags); 7946 read_unlock_irqrestore(&tasklist_lock, flags);
@@ -9765,13 +8036,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
9765 se = kzalloc_node(sizeof(struct sched_entity), 8036 se = kzalloc_node(sizeof(struct sched_entity),
9766 GFP_KERNEL, cpu_to_node(i)); 8037 GFP_KERNEL, cpu_to_node(i));
9767 if (!se) 8038 if (!se)
9768 goto err; 8039 goto err_free_rq;
9769 8040
9770 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); 8041 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
9771 } 8042 }
9772 8043
9773 return 1; 8044 return 1;
9774 8045
8046 err_free_rq:
8047 kfree(cfs_rq);
9775 err: 8048 err:
9776 return 0; 8049 return 0;
9777} 8050}
@@ -9853,13 +8126,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
9853 rt_se = kzalloc_node(sizeof(struct sched_rt_entity), 8126 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
9854 GFP_KERNEL, cpu_to_node(i)); 8127 GFP_KERNEL, cpu_to_node(i));
9855 if (!rt_se) 8128 if (!rt_se)
9856 goto err; 8129 goto err_free_rq;
9857 8130
9858 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); 8131 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
9859 } 8132 }
9860 8133
9861 return 1; 8134 return 1;
9862 8135
8136 err_free_rq:
8137 kfree(rt_rq);
9863 err: 8138 err:
9864 return 0; 8139 return 0;
9865} 8140}
@@ -9894,7 +8169,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
9894} 8169}
9895#endif /* CONFIG_RT_GROUP_SCHED */ 8170#endif /* CONFIG_RT_GROUP_SCHED */
9896 8171
9897#ifdef CONFIG_GROUP_SCHED 8172#ifdef CONFIG_CGROUP_SCHED
9898static void free_sched_group(struct task_group *tg) 8173static void free_sched_group(struct task_group *tg)
9899{ 8174{
9900 free_fair_sched_group(tg); 8175 free_fair_sched_group(tg);
@@ -9993,17 +8268,17 @@ void sched_move_task(struct task_struct *tsk)
9993 8268
9994#ifdef CONFIG_FAIR_GROUP_SCHED 8269#ifdef CONFIG_FAIR_GROUP_SCHED
9995 if (tsk->sched_class->moved_group) 8270 if (tsk->sched_class->moved_group)
9996 tsk->sched_class->moved_group(tsk); 8271 tsk->sched_class->moved_group(tsk, on_rq);
9997#endif 8272#endif
9998 8273
9999 if (unlikely(running)) 8274 if (unlikely(running))
10000 tsk->sched_class->set_curr_task(rq); 8275 tsk->sched_class->set_curr_task(rq);
10001 if (on_rq) 8276 if (on_rq)
10002 enqueue_task(rq, tsk, 0); 8277 enqueue_task(rq, tsk, 0, false);
10003 8278
10004 task_rq_unlock(rq, &flags); 8279 task_rq_unlock(rq, &flags);
10005} 8280}
10006#endif /* CONFIG_GROUP_SCHED */ 8281#endif /* CONFIG_CGROUP_SCHED */
10007 8282
10008#ifdef CONFIG_FAIR_GROUP_SCHED 8283#ifdef CONFIG_FAIR_GROUP_SCHED
10009static void __set_se_shares(struct sched_entity *se, unsigned long shares) 8284static void __set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -10028,9 +8303,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
10028 struct rq *rq = cfs_rq->rq; 8303 struct rq *rq = cfs_rq->rq;
10029 unsigned long flags; 8304 unsigned long flags;
10030 8305
10031 spin_lock_irqsave(&rq->lock, flags); 8306 raw_spin_lock_irqsave(&rq->lock, flags);
10032 __set_se_shares(se, shares); 8307 __set_se_shares(se, shares);
10033 spin_unlock_irqrestore(&rq->lock, flags); 8308 raw_spin_unlock_irqrestore(&rq->lock, flags);
10034} 8309}
10035 8310
10036static DEFINE_MUTEX(shares_mutex); 8311static DEFINE_MUTEX(shares_mutex);
@@ -10145,13 +8420,6 @@ static int tg_schedulable(struct task_group *tg, void *data)
10145 runtime = d->rt_runtime; 8420 runtime = d->rt_runtime;
10146 } 8421 }
10147 8422
10148#ifdef CONFIG_USER_SCHED
10149 if (tg == &root_task_group) {
10150 period = global_rt_period();
10151 runtime = global_rt_runtime();
10152 }
10153#endif
10154
10155 /* 8423 /*
10156 * Cannot have more runtime than the period. 8424 * Cannot have more runtime than the period.
10157 */ 8425 */
@@ -10215,18 +8483,18 @@ static int tg_set_bandwidth(struct task_group *tg,
10215 if (err) 8483 if (err)
10216 goto unlock; 8484 goto unlock;
10217 8485
10218 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 8486 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10219 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 8487 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
10220 tg->rt_bandwidth.rt_runtime = rt_runtime; 8488 tg->rt_bandwidth.rt_runtime = rt_runtime;
10221 8489
10222 for_each_possible_cpu(i) { 8490 for_each_possible_cpu(i) {
10223 struct rt_rq *rt_rq = tg->rt_rq[i]; 8491 struct rt_rq *rt_rq = tg->rt_rq[i];
10224 8492
10225 spin_lock(&rt_rq->rt_runtime_lock); 8493 raw_spin_lock(&rt_rq->rt_runtime_lock);
10226 rt_rq->rt_runtime = rt_runtime; 8494 rt_rq->rt_runtime = rt_runtime;
10227 spin_unlock(&rt_rq->rt_runtime_lock); 8495 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10228 } 8496 }
10229 spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 8497 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
10230 unlock: 8498 unlock:
10231 read_unlock(&tasklist_lock); 8499 read_unlock(&tasklist_lock);
10232 mutex_unlock(&rt_constraints_mutex); 8500 mutex_unlock(&rt_constraints_mutex);
@@ -10331,15 +8599,15 @@ static int sched_rt_global_constraints(void)
10331 if (sysctl_sched_rt_runtime == 0) 8599 if (sysctl_sched_rt_runtime == 0)
10332 return -EBUSY; 8600 return -EBUSY;
10333 8601
10334 spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 8602 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
10335 for_each_possible_cpu(i) { 8603 for_each_possible_cpu(i) {
10336 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 8604 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
10337 8605
10338 spin_lock(&rt_rq->rt_runtime_lock); 8606 raw_spin_lock(&rt_rq->rt_runtime_lock);
10339 rt_rq->rt_runtime = global_rt_runtime(); 8607 rt_rq->rt_runtime = global_rt_runtime();
10340 spin_unlock(&rt_rq->rt_runtime_lock); 8608 raw_spin_unlock(&rt_rq->rt_runtime_lock);
10341 } 8609 }
10342 spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 8610 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
10343 8611
10344 return 0; 8612 return 0;
10345} 8613}
@@ -10554,7 +8822,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
10554struct cpuacct { 8822struct cpuacct {
10555 struct cgroup_subsys_state css; 8823 struct cgroup_subsys_state css;
10556 /* cpuusage holds pointer to a u64-type object on every cpu */ 8824 /* cpuusage holds pointer to a u64-type object on every cpu */
10557 u64 *cpuusage; 8825 u64 __percpu *cpuusage;
10558 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; 8826 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
10559 struct cpuacct *parent; 8827 struct cpuacct *parent;
10560}; 8828};
@@ -10630,9 +8898,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
10630 /* 8898 /*
10631 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 8899 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
10632 */ 8900 */
10633 spin_lock_irq(&cpu_rq(cpu)->lock); 8901 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10634 data = *cpuusage; 8902 data = *cpuusage;
10635 spin_unlock_irq(&cpu_rq(cpu)->lock); 8903 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10636#else 8904#else
10637 data = *cpuusage; 8905 data = *cpuusage;
10638#endif 8906#endif
@@ -10648,9 +8916,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
10648 /* 8916 /*
10649 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 8917 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
10650 */ 8918 */
10651 spin_lock_irq(&cpu_rq(cpu)->lock); 8919 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
10652 *cpuusage = val; 8920 *cpuusage = val;
10653 spin_unlock_irq(&cpu_rq(cpu)->lock); 8921 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
10654#else 8922#else
10655 *cpuusage = val; 8923 *cpuusage = val;
10656#endif 8924#endif
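
For context on the two cpuusage hunks above: on 32-bit platforms a u64 load or store is not a single atomic access, so a concurrent writer could let the reader observe a torn value (one half old, one half new). Taking the owning CPU's runqueue lock serializes reader and writer; on 64-bit the access is naturally atomic and the lock is skipped. A minimal sketch of the pattern, with illustrative names that are not from sched.c:

	/* Sketch: guard a 64-bit per-cpu counter with that CPU's rq lock on 32-bit. */
	static u64 read_u64_safely(struct rq *rq, u64 *counter)
	{
		u64 data;
	#ifndef CONFIG_64BIT
		raw_spin_lock_irq(&rq->lock);	/* prevents a torn read of the two 32-bit halves */
		data = *counter;
		raw_spin_unlock_irq(&rq->lock);
	#else
		data = *counter;		/* a single 64-bit load is atomic here */
	#endif
		return data;
	}
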
@@ -10771,12 +9039,30 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
10771} 9039}
10772 9040
10773/* 9041/*
9042 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
9043 * in cputime_t units. As a result, cpuacct_update_stats calls
9044 * percpu_counter_add with values large enough to always overflow the
9045 * per cpu batch limit causing bad SMP scalability.
9046 *
9047 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9048 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9049 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
9050 */
9051#ifdef CONFIG_SMP
9052#define CPUACCT_BATCH \
9053 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9054#else
9055#define CPUACCT_BATCH 0
9056#endif
9057
9058/*
10774 * Charge the system/user time to the task's accounting group. 9059 * Charge the system/user time to the task's accounting group.
10775 */ 9060 */
10776static void cpuacct_update_stats(struct task_struct *tsk, 9061static void cpuacct_update_stats(struct task_struct *tsk,
10777 enum cpuacct_stat_index idx, cputime_t val) 9062 enum cpuacct_stat_index idx, cputime_t val)
10778{ 9063{
10779 struct cpuacct *ca; 9064 struct cpuacct *ca;
9065 int batch = CPUACCT_BATCH;
10780 9066
10781 if (unlikely(!cpuacct_subsys.active)) 9067 if (unlikely(!cpuacct_subsys.active))
10782 return; 9068 return;
@@ -10785,7 +9071,7 @@ static void cpuacct_update_stats(struct task_struct *tsk,
10785 ca = task_ca(tsk); 9071 ca = task_ca(tsk);
10786 9072
10787 do { 9073 do {
10788 percpu_counter_add(&ca->cpustat[idx], val); 9074 __percpu_counter_add(&ca->cpustat[idx], val, batch);
10789 ca = ca->parent; 9075 ca = ca->parent;
10790 } while (ca); 9076 } while (ca);
10791 rcu_read_unlock(); 9077 rcu_read_unlock();
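
To make the CPUACCT_BATCH reasoning above concrete: a percpu_counter only folds its local delta into the shared count once the delta exceeds the batch, and with CONFIG_VIRT_CPU_ACCOUNTING each charge is on the order of cputime_one_jiffy, which dwarfs the default percpu_counter_batch, so every call would touch the shared counter. Scaling the batch by cputime_one_jiffy (capped at INT_MAX) keeps the rate of global updates roughly the same whether virtual CPU accounting is enabled or not. A back-of-the-envelope sketch with assumed example values:

	#include <limits.h>
	#include <stdio.h>

	/* Illustrative numbers only; not taken from any particular machine. */
	#define PERCPU_COUNTER_BATCH	32		/* typical default batch */
	#define CPUTIME_ONE_JIFFY	1000000L	/* one jiffy in cputime_t units */

	int main(void)
	{
		long batch = PERCPU_COUNTER_BATCH * CPUTIME_ONE_JIFFY;

		if (batch > INT_MAX)
			batch = INT_MAX;	/* the CPUACCT_BATCH cap */

		/*
		 * Without scaling, every charge of ~CPUTIME_ONE_JIFFY exceeds a
		 * batch of 32 and forces a shared-counter update; with the scaled
		 * batch it still takes ~32 charges before the global count is hit.
		 */
		printf("scaled batch = %ld\n", batch);
		return 0;
	}
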
@@ -10884,9 +9170,9 @@ void synchronize_sched_expedited(void)
10884 init_completion(&req->done); 9170 init_completion(&req->done);
10885 req->task = NULL; 9171 req->task = NULL;
10886 req->dest_cpu = RCU_MIGRATION_NEED_QS; 9172 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10887 spin_lock_irqsave(&rq->lock, flags); 9173 raw_spin_lock_irqsave(&rq->lock, flags);
10888 list_add(&req->list, &rq->migration_queue); 9174 list_add(&req->list, &rq->migration_queue);
10889 spin_unlock_irqrestore(&rq->lock, flags); 9175 raw_spin_unlock_irqrestore(&rq->lock, flags);
10890 wake_up_process(rq->migration_thread); 9176 wake_up_process(rq->migration_thread);
10891 } 9177 }
10892 for_each_online_cpu(cpu) { 9178 for_each_online_cpu(cpu) {
@@ -10894,13 +9180,14 @@ void synchronize_sched_expedited(void)
10894 req = &per_cpu(rcu_migration_req, cpu); 9180 req = &per_cpu(rcu_migration_req, cpu);
10895 rq = cpu_rq(cpu); 9181 rq = cpu_rq(cpu);
10896 wait_for_completion(&req->done); 9182 wait_for_completion(&req->done);
10897 spin_lock_irqsave(&rq->lock, flags); 9183 raw_spin_lock_irqsave(&rq->lock, flags);
10898 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 9184 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
10899 need_full_sync = 1; 9185 need_full_sync = 1;
10900 req->dest_cpu = RCU_MIGRATION_IDLE; 9186 req->dest_cpu = RCU_MIGRATION_IDLE;
10901 spin_unlock_irqrestore(&rq->lock, flags); 9187 raw_spin_unlock_irqrestore(&rq->lock, flags);
10902 } 9188 }
10903 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 9189 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9190 synchronize_sched_expedited_count++;
10904 mutex_unlock(&rcu_sched_expedited_mutex); 9191 mutex_unlock(&rcu_sched_expedited_mutex);
10905 put_online_cpus(); 9192 put_online_cpus();
10906 if (need_full_sync) 9193 if (need_full_sync)