Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  381
1 file changed, 293 insertions(+), 88 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f52a8801b7a2..16f3f77f71be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -456,9 +457,10 @@ struct rq {
         unsigned long nr_running;
         #define CPU_LOAD_IDX_MAX 5
         unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+        unsigned long last_load_update_tick;
 #ifdef CONFIG_NO_HZ
         u64 nohz_stamp;
-        unsigned char in_nohz_recently;
+        unsigned char nohz_balance_kick;
 #endif
         unsigned int skip_clock_update;
 
@@ -1193,6 +1195,27 @@ static void resched_cpu(int cpu)
 
 #ifdef CONFIG_NO_HZ
 /*
+ * In the semi idle case, use the nearest busy cpu for migrating timers
+ * from an idle cpu. This is good for power-savings.
+ *
+ * We don't do similar optimization for completely idle system, as
+ * selecting an idle cpu will add more delays to the timers than intended
+ * (as that cpu's timer base may not be uptodate wrt jiffies etc).
+ */
+int get_nohz_timer_target(void)
+{
+        int cpu = smp_processor_id();
+        int i;
+        struct sched_domain *sd;
+
+        for_each_domain(cpu, sd) {
+                for_each_cpu(i, sched_domain_span(sd))
+                        if (!idle_cpu(i))
+                                return i;
+        }
+        return cpu;
+}
+/*
  * When add_timer_on() enqueues a timer into the timer wheel of an
  * idle CPU then this timer might expire before the next timer event
  * which is scheduled to wake up that CPU. In case of a completely
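Illustrative sketch (not part of the patch): the hunk above picks a timer target by walking the current CPU's sched domains outward and returning the first non-idle CPU, falling back to the current CPU on a fully idle system. The standalone C model below mimics that walk with made-up domain spans, an idle-state array and a pick_timer_target() helper; those names and the domain layout are assumptions for illustration only, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-ins for the kernel's sched-domain spans and idle_cpu(). */
static const int level0[] = {1, 0, -1};          /* narrowest span around CPU 1 */
static const int level1[] = {0, 1, 2, 3, -1};    /* whole package               */
static const int *domain_span[] = {level0, level1};
static bool cpu_is_idle[NR_CPUS] = {true, true, false, true};

static int pick_timer_target(int this_cpu)
{
        /* Walk domains from the narrowest span outward, like for_each_domain(). */
        for (unsigned long d = 0; d < sizeof(domain_span) / sizeof(domain_span[0]); d++) {
                for (const int *cpu = domain_span[d]; *cpu >= 0; cpu++) {
                        if (!cpu_is_idle[*cpu])
                                return *cpu;    /* nearest busy CPU hosts the migrated timer */
                }
        }
        return this_cpu;                        /* completely idle system: keep the timer local */
}

int main(void)
{
        printf("timer target for CPU 1: %d\n", pick_timer_target(1));  /* prints 2 */
        return 0;
}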
@@ -1652,7 +1675,7 @@ static void update_shares(struct sched_domain *sd)
         if (root_task_group_empty())
                 return;
 
-        now = cpu_clock(raw_smp_processor_id());
+        now = local_clock();
         elapsed = now - sd->last_update;
 
         if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
@@ -1805,6 +1828,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
+static void update_cpu_load(struct rq *this_rq);
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
@@ -2267,11 +2291,55 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
-/***
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+                                 bool is_sync, bool is_migrate, bool is_local,
+                                 unsigned long en_flags)
+{
+        schedstat_inc(p, se.statistics.nr_wakeups);
+        if (is_sync)
+                schedstat_inc(p, se.statistics.nr_wakeups_sync);
+        if (is_migrate)
+                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+        if (is_local)
+                schedstat_inc(p, se.statistics.nr_wakeups_local);
+        else
+                schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+        activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+                                        int wake_flags, bool success)
+{
+        trace_sched_wakeup(p, success);
+        check_preempt_curr(rq, p, wake_flags);
+
+        p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+        if (p->sched_class->task_woken)
+                p->sched_class->task_woken(rq, p);
+
+        if (unlikely(rq->idle_stamp)) {
+                u64 delta = rq->clock - rq->idle_stamp;
+                u64 max = 2*sysctl_sched_migration_cost;
+
+                if (delta > max)
+                        rq->avg_idle = max;
+                else
+                        update_avg(&rq->avg_idle, delta);
+                rq->idle_stamp = 0;
+        }
+#endif
+        /* if a worker is waking up, notify workqueue */
+        if ((p->flags & PF_WQ_WORKER) && success)
+                wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2347,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
                           int wake_flags)
@@ -2359,38 +2428,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-        schedstat_inc(p, se.statistics.nr_wakeups);
-        if (wake_flags & WF_SYNC)
-                schedstat_inc(p, se.statistics.nr_wakeups_sync);
-        if (orig_cpu != cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-        if (cpu == this_cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_local);
-        else
-                schedstat_inc(p, se.statistics.nr_wakeups_remote);
-        activate_task(rq, p, en_flags);
+        ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+                      cpu == this_cpu, en_flags);
         success = 1;
-
 out_running:
-        trace_sched_wakeup(p, success);
-        check_preempt_curr(rq, p, wake_flags);
-
-        p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-        if (p->sched_class->task_woken)
-                p->sched_class->task_woken(rq, p);
-
-        if (unlikely(rq->idle_stamp)) {
-                u64 delta = rq->clock - rq->idle_stamp;
-                u64 max = 2*sysctl_sched_migration_cost;
-
-                if (delta > max)
-                        rq->avg_idle = max;
-                else
-                        update_avg(&rq->avg_idle, delta);
-                rq->idle_stamp = 0;
-        }
-#endif
+        ttwu_post_activation(p, rq, wake_flags, success);
 out:
         task_rq_unlock(rq, &flags);
         put_cpu();
@@ -2399,6 +2441,37 @@ out:
 }
 
 /**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task. this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+        struct rq *rq = task_rq(p);
+        bool success = false;
+
+        BUG_ON(rq != this_rq());
+        BUG_ON(p == current);
+        lockdep_assert_held(&rq->lock);
+
+        if (!(p->state & TASK_NORMAL))
+                return;
+
+        if (!p->se.on_rq) {
+                if (likely(!task_running(rq, p))) {
+                        schedstat_inc(rq, ttwu_count);
+                        schedstat_inc(rq, ttwu_local);
+                }
+                ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+                success = true;
+        }
+        ttwu_post_activation(p, rq, 0, success);
+}
+
+/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
@@ -3012,23 +3085,102 @@ static void calc_load_account_active(struct rq *this_rq)
 }
 
 /*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+ * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
+ * on the nth tick when cpu may be busy, then we have:
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
+ *
+ * decay_load_missed() below does efficient calculation of
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
+ *
+ * The calculation is approximated on a 128 point scale.
+ * degrade_zero_ticks is the number of ticks after which load at any
+ * particular idx is approximated to be zero.
+ * degrade_factor is a precomputed table, a row for each load idx.
+ * Each column corresponds to degradation factor for a power of two ticks,
+ * based on 128 point scale.
+ * Example:
+ * row 2, col 3 (=12) says that the degradation at load idx 2 after
+ * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
+ *
+ * With these power of 2 load factors, we can degrade the load n times
+ * by looking at 1 bits in n and doing as many mult/shift instead of
+ * n mult/shifts needed by the exact degradation.
+ */
+#define DEGRADE_SHIFT        7
+static const unsigned char
+        degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
+static const unsigned char
+        degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
+                {0, 0, 0, 0, 0, 0, 0, 0},
+                {64, 32, 8, 0, 0, 0, 0, 0},
+                {96, 72, 40, 12, 1, 0, 0},
+                {112, 98, 75, 43, 15, 1, 0},
+                {120, 112, 98, 76, 45, 16, 2} };
+
+/*
+ * Update cpu_load for any missed ticks, due to tickless idle. The backlog
+ * would be when CPU is idle and so we just decay the old load without
+ * adding any new load.
+ */
+static unsigned long
+decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+{
+        int j = 0;
+
+        if (!missed_updates)
+                return load;
+
+        if (missed_updates >= degrade_zero_ticks[idx])
+                return 0;
+
+        if (idx == 1)
+                return load >> missed_updates;
+
+        while (missed_updates) {
+                if (missed_updates % 2)
+                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
+
+                missed_updates >>= 1;
+                j++;
+        }
+        return load;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC).
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
  */
 static void update_cpu_load(struct rq *this_rq)
 {
         unsigned long this_load = this_rq->load.weight;
+        unsigned long curr_jiffies = jiffies;
+        unsigned long pending_updates;
         int i, scale;
 
         this_rq->nr_load_updates++;
 
+        /* Avoid repeated calls on same jiffy, when moving in and out of idle */
+        if (curr_jiffies == this_rq->last_load_update_tick)
+                return;
+
+        pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+        this_rq->last_load_update_tick = curr_jiffies;
+
         /* Update our load: */
-        for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
                 unsigned long old_load, new_load;
 
                 /* scale is effectively 1 << i now, and >> i divides by scale */
 
                 old_load = this_rq->cpu_load[i];
+                old_load = decay_load_missed(old_load, pending_updates - 1, i);
                 new_load = this_load;
                 /*
                  * Round up the averaging division if load is increasing. This
@@ -3036,9 +3188,15 @@ static void update_cpu_load(struct rq *this_rq)
                  * example.
                  */
                 if (new_load > old_load)
-                        new_load += scale-1;
-                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
+                        new_load += scale - 1;
+
+                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
         }
+}
+
+static void update_cpu_load_active(struct rq *this_rq)
+{
+        update_cpu_load(this_rq);
 
         calc_load_account_active(this_rq);
 }
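Illustrative sketch (not part of the patch): the hunks above decay a stale cpu_load[] entry for n missed idle ticks by multiplying with precomputed per-power-of-two factors on a 128-point scale, one multiply/shift per set bit of n instead of n multiplies. The self-contained userspace program below reuses the degrade_zero_ticks/degrade_factor tables and the decay_load_missed() logic verbatim and compares the result against a naive per-tick multiply; decay_load_naive() and main() are helpers invented for this demo.

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5
#define DEGRADE_SHIFT    7

static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
        {0, 0, 0, 0, 0, 0, 0, 0},
        {64, 32, 8, 0, 0, 0, 0, 0},
        {96, 72, 40, 12, 1, 0, 0},
        {112, 98, 75, 43, 15, 1, 0},
        {120, 112, 98, 76, 45, 16, 2},
};

/* Same logic as decay_load_missed() above: decay 'load' for 'missed' idle ticks. */
static unsigned long decay_load_missed(unsigned long load, unsigned long missed, int idx)
{
        int j = 0;

        if (!missed)
                return load;
        if (missed >= degrade_zero_ticks[idx])
                return 0;
        if (idx == 1)
                return load >> missed;

        while (missed) {
                if (missed % 2)
                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
                missed >>= 1;
                j++;
        }
        return load;
}

/* Reference: apply the one-tick factor (column 0) 'missed' times. */
static unsigned long decay_load_naive(unsigned long load, unsigned long missed, int idx)
{
        while (missed--)
                load = (load * degrade_factor[idx][0]) >> DEGRADE_SHIFT;
        return load;
}

int main(void)
{
        unsigned long load = 1024;

        for (int idx = 1; idx < CPU_LOAD_IDX_MAX; idx++)
                printf("idx=%d missed=8: table=%lu naive=%lu\n", idx,
                       decay_load_missed(load, 8, idx),
                       decay_load_naive(load, 8, idx));
        return 0;
}

For idx 2 and 8 missed ticks this prints 96 from the table path (12/128 of 1024) versus 102 from the per-tick loop, showing how close the single mult/shift approximation stays to the exact (3/4)^8 decay while doing far less work.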
@@ -3426,7 +3584,7 @@ void scheduler_tick(void)
 
         raw_spin_lock(&rq->lock);
         update_rq_clock(rq);
-        update_cpu_load(rq);
+        update_cpu_load_active(rq);
         curr->sched_class->task_tick(rq, curr, 0);
         raw_spin_unlock(&rq->lock);
 
@@ -3598,7 +3756,6 @@ need_resched:
         rq = cpu_rq(cpu);
         rcu_note_context_switch(cpu);
         prev = rq->curr;
-        switch_count = &prev->nivcsw;
 
         release_kernel_lock(prev);
 need_resched_nonpreemptible:
@@ -3611,11 +3768,26 @@ need_resched_nonpreemptible:
         raw_spin_lock_irq(&rq->lock);
         clear_tsk_need_resched(prev);
 
+        switch_count = &prev->nivcsw;
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-                if (unlikely(signal_pending_state(prev->state, prev)))
+                if (unlikely(signal_pending_state(prev->state, prev))) {
                         prev->state = TASK_RUNNING;
-                else
+                } else {
+                        /*
+                         * If a worker is going to sleep, notify and
+                         * ask workqueue whether it wants to wake up a
+                         * task to maintain concurrency. If so, wake
+                         * up the task.
+                         */
+                        if (prev->flags & PF_WQ_WORKER) {
+                                struct task_struct *to_wakeup;
+
+                                to_wakeup = wq_worker_sleeping(prev, cpu);
+                                if (to_wakeup)
+                                        try_to_wake_up_local(to_wakeup);
+                        }
                         deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                }
                 switch_count = &prev->nvcsw;
         }
 
@@ -3637,8 +3809,10 @@ need_resched_nonpreemptible:
 
         context_switch(rq, prev, next); /* unlocks the rq */
         /*
-         * the context switch might have flipped the stack from under
-         * us, hence refresh the local variables.
+         * The context switch has flipped the stack from under us
+         * and restored the local variables which were saved when
+         * this task called schedule() in the past. prev == current
+         * is still correct, but it can be moved to another cpu/rq.
          */
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
@@ -3647,11 +3821,8 @@ need_resched_nonpreemptible:
 
         post_schedule(rq);
 
-        if (unlikely(reacquire_kernel_lock(current) < 0)) {
-                prev = rq->curr;
-                switch_count = &prev->nivcsw;
+        if (unlikely(reacquire_kernel_lock(prev)))
                 goto need_resched_nonpreemptible;
-        }
 
         preempt_enable_no_resched();
         if (need_resched())
@@ -4441,12 +4612,8 @@ recheck:
          */
         if (user && !capable(CAP_SYS_NICE)) {
                 if (rt_policy(policy)) {
-                        unsigned long rlim_rtprio;
-
-                        if (!lock_task_sighand(p, &flags))
-                                return -ESRCH;
-                        rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-                        unlock_task_sighand(p, &flags);
+                        unsigned long rlim_rtprio =
+                                        task_rlimit(p, RLIMIT_RTPRIO);
 
                         /* can't set/change the rt policy */
                         if (policy != p->policy && !rlim_rtprio)
@@ -5816,20 +5983,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
         .notifier_call = migration_call,
-        .priority = 10
+        .priority = CPU_PRI_MIGRATION,
 };
 
+static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+                                      unsigned long action, void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_ONLINE:
+        case CPU_DOWN_FAILED:
+                set_cpu_active((long)hcpu, true);
+                return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
+
+static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+                                        unsigned long action, void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_DOWN_PREPARE:
+                set_cpu_active((long)hcpu, false);
+                return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
+
 static int __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
         int err;
 
-        /* Start one for the boot CPU: */
+        /* Initialize migration for the boot CPU */
         err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
         BUG_ON(err == NOTIFY_BAD);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
 
+        /* Register cpu active notifiers */
+        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
         return 0;
 }
 early_initcall(migration_init);
@@ -6064,23 +6260,18 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                 free_rootdomain(old_rd);
 }
 
-static int init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd)
 {
-        gfp_t gfp = GFP_KERNEL;
-
         memset(rd, 0, sizeof(*rd));
 
-        if (bootmem)
-                gfp = GFP_NOWAIT;
-
-        if (!alloc_cpumask_var(&rd->span, gfp))
+        if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
                 goto out;
-        if (!alloc_cpumask_var(&rd->online, gfp))
+        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
                 goto free_span;
-        if (!alloc_cpumask_var(&rd->rto_mask, gfp))
+        if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                 goto free_online;
 
-        if (cpupri_init(&rd->cpupri, bootmem) != 0)
+        if (cpupri_init(&rd->cpupri) != 0)
                 goto free_rto_mask;
         return 0;
 
@@ -6096,7 +6287,7 @@ out:
 
 static void init_defrootdomain(void)
 {
-        init_rootdomain(&def_root_domain, true);
+        init_rootdomain(&def_root_domain);
 
         atomic_set(&def_root_domain.refcount, 1);
 }
@@ -6109,7 +6300,7 @@ static struct root_domain *alloc_rootdomain(void)
         if (!rd)
                 return NULL;
 
-        if (init_rootdomain(rd, false) != 0) {
+        if (init_rootdomain(rd) != 0) {
                 kfree(rd);
                 return NULL;
         }
@@ -7288,29 +7479,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-#ifndef CONFIG_CPUSETS
 /*
- * Add online and remove offline CPUs from the scheduler domains.
- * When cpusets are enabled they take over this function.
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
  */
-static int update_sched_domains(struct notifier_block *nfb,
-                                unsigned long action, void *hcpu)
+static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+                             void *hcpu)
 {
-        switch (action) {
+        switch (action & ~CPU_TASKS_FROZEN) {
         case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-        case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
         case CPU_DOWN_FAILED:
-        case CPU_DOWN_FAILED_FROZEN:
-                partition_sched_domains(1, NULL, NULL);
+                cpuset_update_active_cpus();
                 return NOTIFY_OK;
+        default:
+                return NOTIFY_DONE;
+        }
+}
 
+static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
+                               void *hcpu)
+{
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_DOWN_PREPARE:
+                cpuset_update_active_cpus();
+                return NOTIFY_OK;
         default:
                 return NOTIFY_DONE;
         }
 }
-#endif
 
 static int update_runtime(struct notifier_block *nfb,
                           unsigned long action, void *hcpu)
@@ -7356,10 +7553,8 @@ void __init sched_init_smp(void)
         mutex_unlock(&sched_domains_mutex);
         put_online_cpus();
 
-#ifndef CONFIG_CPUSETS
-        /* XXX: Theoretical race here - CPU may be hotplugged now */
-        hotcpu_notifier(update_sched_domains, 0);
-#endif
+        hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+        hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
         /* RT runtime code needs to handle some hotplug events */
         hotcpu_notifier(update_runtime, 0);
@@ -7604,6 +7799,9 @@ void __init sched_init(void)
 
                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                         rq->cpu_load[j] = 0;
+
+                rq->last_load_update_tick = jiffies;
+
 #ifdef CONFIG_SMP
                 rq->sd = NULL;
                 rq->rd = NULL;
@@ -7617,6 +7815,10 @@ void __init sched_init(void)
                 rq->idle_stamp = 0;
                 rq->avg_idle = 2*sysctl_sched_migration_cost;
                 rq_attach_root(rq, &def_root_domain);
+#ifdef CONFIG_NO_HZ
+                rq->nohz_balance_kick = 0;
+                init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
+#endif
 #endif
                 init_rq_hrtick(rq);
                 atomic_set(&rq->nr_iowait, 0);
@@ -7661,8 +7863,11 @@ void __init sched_init(void)
         zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-        zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
-        alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+        alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
+        atomic_set(&nohz.load_balancer, nr_cpu_ids);
+        atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
+        atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
 #endif
         /* May be allocated at isolcpus cmdline parse time */
         if (cpu_isolated_map == NULL)