Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  534
1 file changed, 372 insertions, 162 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228dd..41541d79e3c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,7 @@ | |||
77 | #include <asm/irq_regs.h> | 77 | #include <asm/irq_regs.h> |
78 | 78 | ||
79 | #include "sched_cpupri.h" | 79 | #include "sched_cpupri.h" |
80 | #include "workqueue_sched.h" | ||
80 | 81 | ||
81 | #define CREATE_TRACE_POINTS | 82 | #define CREATE_TRACE_POINTS |
82 | #include <trace/events/sched.h> | 83 | #include <trace/events/sched.h> |
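The new include pulls in the scheduler-side hooks of the concurrency-managed workqueue (cmwq) work. Roughly, kernel/workqueue_sched.h declares the two callbacks used further down in this patch (sketched from the cmwq series; the header itself is not part of this diff):

    void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
    struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                           unsigned int cpu);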
@@ -306,52 +307,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD; | |||
306 | */ | 307 | */ |
307 | struct task_group init_task_group; | 308 | struct task_group init_task_group; |
308 | 309 | ||
309 | /* return group to which a task belongs */ | ||
310 | static inline struct task_group *task_group(struct task_struct *p) | ||
311 | { | ||
312 | struct task_group *tg; | ||
313 | |||
314 | #ifdef CONFIG_CGROUP_SCHED | ||
315 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | ||
316 | struct task_group, css); | ||
317 | #else | ||
318 | tg = &init_task_group; | ||
319 | #endif | ||
320 | return tg; | ||
321 | } | ||
322 | |||
323 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ | ||
324 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | ||
325 | { | ||
326 | /* | ||
327 | * Strictly speaking this rcu_read_lock() is not needed since the | ||
328 | * task_group is tied to the cgroup, which in turn can never go away | ||
329 | * as long as there are tasks attached to it. | ||
330 | * | ||
331 | * However since task_group() uses task_subsys_state() which is an | ||
332 | * rcu_dereference() user, this quiets CONFIG_PROVE_RCU. | ||
333 | */ | ||
334 | rcu_read_lock(); | ||
335 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
336 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; | ||
337 | p->se.parent = task_group(p)->se[cpu]; | ||
338 | #endif | ||
339 | |||
340 | #ifdef CONFIG_RT_GROUP_SCHED | ||
341 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; | ||
342 | p->rt.parent = task_group(p)->rt_se[cpu]; | ||
343 | #endif | ||
344 | rcu_read_unlock(); | ||
345 | } | ||
346 | |||
347 | #else | ||
348 | |||
349 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } | ||
350 | static inline struct task_group *task_group(struct task_struct *p) | ||
351 | { | ||
352 | return NULL; | ||
353 | } | ||
354 | |||
355 | #endif /* CONFIG_CGROUP_SCHED */ | 310 | #endif /* CONFIG_CGROUP_SCHED */ |
356 | 311 | ||
357 | /* CFS-related fields in a runqueue */ | 312 | /* CFS-related fields in a runqueue */ |
@@ -502,9 +457,10 @@ struct rq { | |||
502 | unsigned long nr_running; | 457 | unsigned long nr_running; |
503 | #define CPU_LOAD_IDX_MAX 5 | 458 | #define CPU_LOAD_IDX_MAX 5 |
504 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; | 459 | unsigned long cpu_load[CPU_LOAD_IDX_MAX]; |
460 | unsigned long last_load_update_tick; | ||
505 | #ifdef CONFIG_NO_HZ | 461 | #ifdef CONFIG_NO_HZ |
506 | u64 nohz_stamp; | 462 | u64 nohz_stamp; |
507 | unsigned char in_nohz_recently; | 463 | unsigned char nohz_balance_kick; |
508 | #endif | 464 | #endif |
509 | unsigned int skip_clock_update; | 465 | unsigned int skip_clock_update; |
510 | 466 | ||
@@ -644,6 +600,49 @@ static inline int cpu_of(struct rq *rq) | |||
644 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) | 600 | #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
645 | #define raw_rq() (&__raw_get_cpu_var(runqueues)) | 601 | #define raw_rq() (&__raw_get_cpu_var(runqueues)) |
646 | 602 | ||
603 | #ifdef CONFIG_CGROUP_SCHED | ||
604 | |||
605 | /* | ||
606 | * Return the group to which this task belongs. | ||
607 | * | ||
608 | * We use task_subsys_state_check() and extend the RCU verification | ||
609 | * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach() | ||
610 | * holds that lock for each task it moves into the cgroup. Therefore | ||
611 | * by holding that lock, we pin the task to the current cgroup. | ||
612 | */ | ||
613 | static inline struct task_group *task_group(struct task_struct *p) | ||
614 | { | ||
615 | struct cgroup_subsys_state *css; | ||
616 | |||
617 | css = task_subsys_state_check(p, cpu_cgroup_subsys_id, | ||
618 | lockdep_is_held(&task_rq(p)->lock)); | ||
619 | return container_of(css, struct task_group, css); | ||
620 | } | ||
621 | |||
622 | /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ | ||
623 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) | ||
624 | { | ||
625 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
626 | p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; | ||
627 | p->se.parent = task_group(p)->se[cpu]; | ||
628 | #endif | ||
629 | |||
630 | #ifdef CONFIG_RT_GROUP_SCHED | ||
631 | p->rt.rt_rq = task_group(p)->rt_rq[cpu]; | ||
632 | p->rt.parent = task_group(p)->rt_se[cpu]; | ||
633 | #endif | ||
634 | } | ||
635 | |||
636 | #else /* CONFIG_CGROUP_SCHED */ | ||
637 | |||
638 | static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } | ||
639 | static inline struct task_group *task_group(struct task_struct *p) | ||
640 | { | ||
641 | return NULL; | ||
642 | } | ||
643 | |||
644 | #endif /* CONFIG_CGROUP_SCHED */ | ||
645 | |||
647 | inline void update_rq_clock(struct rq *rq) | 646 | inline void update_rq_clock(struct rq *rq) |
648 | { | 647 | { |
649 | if (!rq->skip_clock_update) | 648 | if (!rq->skip_clock_update) |
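The relocated task_group() relies on task_subsys_state_check(), which passes the extra lockdep condition straight through to rcu_dereference_check(). A minimal sketch of that generic pattern, with illustrative names (my_ptr and my_lock are not from this patch):

    /* Holding either rcu_read_lock() or my_lock satisfies PROVE_RCU. */
    struct foo *get_foo(void)
    {
            return rcu_dereference_check(my_ptr,
                                         lockdep_is_held(&my_lock));
    }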
@@ -1196,6 +1195,27 @@ static void resched_cpu(int cpu) | |||
1196 | 1195 | ||
1197 | #ifdef CONFIG_NO_HZ | 1196 | #ifdef CONFIG_NO_HZ |
1198 | /* | 1197 | /* |
1198 | * In the semi idle case, use the nearest busy cpu for migrating timers | ||
1199 | * from an idle cpu. This is good for power-savings. | ||
1200 | * | ||
1201 | * We don't do a similar optimization for a completely idle system, as | ||
1202 | * selecting an idle cpu will add more delays to the timers than intended | ||
1203 | * (as that cpu's timer base may not be up to date wrt jiffies etc). | ||
1204 | */ | ||
1205 | int get_nohz_timer_target(void) | ||
1206 | { | ||
1207 | int cpu = smp_processor_id(); | ||
1208 | int i; | ||
1209 | struct sched_domain *sd; | ||
1210 | |||
1211 | for_each_domain(cpu, sd) { | ||
1212 | for_each_cpu(i, sched_domain_span(sd)) | ||
1213 | if (!idle_cpu(i)) | ||
1214 | return i; | ||
1215 | } | ||
1216 | return cpu; | ||
1217 | } | ||
1218 | /* | ||
1199 | * When add_timer_on() enqueues a timer into the timer wheel of an | 1219 | * When add_timer_on() enqueues a timer into the timer wheel of an |
1200 | * idle CPU then this timer might expire before the next timer event | 1220 | * idle CPU then this timer might expire before the next timer event |
1201 | * which is scheduled to wake up that CPU. In case of a completely | 1221 | * which is scheduled to wake up that CPU. In case of a completely |
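A hedged example of the intended consumer of get_nohz_timer_target(): timer code that wants to keep timers off an idle CPU in a semi-idle system can ask for a busy target before queueing (my_timer is an illustrative name, not from this patch):

    int cpu = get_nohz_timer_target();

    /* Queue the timer on a CPU whose timer base is being kept up to date. */
    add_timer_on(&my_timer, cpu);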
@@ -1235,16 +1255,6 @@ void wake_up_idle_cpu(int cpu) | |||
1235 | smp_send_reschedule(cpu); | 1255 | smp_send_reschedule(cpu); |
1236 | } | 1256 | } |
1237 | 1257 | ||
1238 | int nohz_ratelimit(int cpu) | ||
1239 | { | ||
1240 | struct rq *rq = cpu_rq(cpu); | ||
1241 | u64 diff = rq->clock - rq->nohz_stamp; | ||
1242 | |||
1243 | rq->nohz_stamp = rq->clock; | ||
1244 | |||
1245 | return diff < (NSEC_PER_SEC / HZ) >> 1; | ||
1246 | } | ||
1247 | |||
1248 | #endif /* CONFIG_NO_HZ */ | 1258 | #endif /* CONFIG_NO_HZ */ |
1249 | 1259 | ||
1250 | static u64 sched_avg_period(void) | 1260 | static u64 sched_avg_period(void) |
@@ -1257,6 +1267,12 @@ static void sched_avg_update(struct rq *rq) | |||
1257 | s64 period = sched_avg_period(); | 1267 | s64 period = sched_avg_period(); |
1258 | 1268 | ||
1259 | while ((s64)(rq->clock - rq->age_stamp) > period) { | 1269 | while ((s64)(rq->clock - rq->age_stamp) > period) { |
1270 | /* | ||
1271 | * Inline assembly required to prevent the compiler | ||
1272 | * optimising this loop into a divmod call. | ||
1273 | * See __iter_div_u64_rem() for another example of this. | ||
1274 | */ | ||
1275 | asm("" : "+rm" (rq->age_stamp)); | ||
1260 | rq->age_stamp += period; | 1276 | rq->age_stamp += period; |
1261 | rq->rt_avg /= 2; | 1277 | rq->rt_avg /= 2; |
1262 | } | 1278 | } |
@@ -1649,7 +1665,7 @@ static void update_shares(struct sched_domain *sd) | |||
1649 | if (root_task_group_empty()) | 1665 | if (root_task_group_empty()) |
1650 | return; | 1666 | return; |
1651 | 1667 | ||
1652 | now = cpu_clock(raw_smp_processor_id()); | 1668 | now = local_clock(); |
1653 | elapsed = now - sd->last_update; | 1669 | elapsed = now - sd->last_update; |
1654 | 1670 | ||
1655 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { | 1671 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { |
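local_clock() is the then-new helper from kernel/sched_clock.c that wraps sched_clock_cpu() for the local CPU with interrupts disabled, avoiding the explicit raw_smp_processor_id() dance. From memory, it is roughly:

    u64 local_clock(void)
    {
            u64 clock;
            unsigned long flags;

            local_irq_save(flags);
            clock = sched_clock_cpu(smp_processor_id());
            local_irq_restore(flags);

            return clock;
    }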
@@ -1660,9 +1676,6 @@ static void update_shares(struct sched_domain *sd) | |||
1660 | 1676 | ||
1661 | static void update_h_load(long cpu) | 1677 | static void update_h_load(long cpu) |
1662 | { | 1678 | { |
1663 | if (root_task_group_empty()) | ||
1664 | return; | ||
1665 | |||
1666 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); | 1679 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
1667 | } | 1680 | } |
1668 | 1681 | ||
@@ -1805,6 +1818,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | |||
1805 | static void calc_load_account_idle(struct rq *this_rq); | 1818 | static void calc_load_account_idle(struct rq *this_rq); |
1806 | static void update_sysctl(void); | 1819 | static void update_sysctl(void); |
1807 | static int get_update_sysctl_factor(void); | 1820 | static int get_update_sysctl_factor(void); |
1821 | static void update_cpu_load(struct rq *this_rq); | ||
1808 | 1822 | ||
1809 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | 1823 | static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
1810 | { | 1824 | { |
@@ -2267,11 +2281,55 @@ static void update_avg(u64 *avg, u64 sample) | |||
2267 | } | 2281 | } |
2268 | #endif | 2282 | #endif |
2269 | 2283 | ||
2270 | /*** | 2284 | static inline void ttwu_activate(struct task_struct *p, struct rq *rq, |
2285 | bool is_sync, bool is_migrate, bool is_local, | ||
2286 | unsigned long en_flags) | ||
2287 | { | ||
2288 | schedstat_inc(p, se.statistics.nr_wakeups); | ||
2289 | if (is_sync) | ||
2290 | schedstat_inc(p, se.statistics.nr_wakeups_sync); | ||
2291 | if (is_migrate) | ||
2292 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); | ||
2293 | if (is_local) | ||
2294 | schedstat_inc(p, se.statistics.nr_wakeups_local); | ||
2295 | else | ||
2296 | schedstat_inc(p, se.statistics.nr_wakeups_remote); | ||
2297 | |||
2298 | activate_task(rq, p, en_flags); | ||
2299 | } | ||
2300 | |||
2301 | static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, | ||
2302 | int wake_flags, bool success) | ||
2303 | { | ||
2304 | trace_sched_wakeup(p, success); | ||
2305 | check_preempt_curr(rq, p, wake_flags); | ||
2306 | |||
2307 | p->state = TASK_RUNNING; | ||
2308 | #ifdef CONFIG_SMP | ||
2309 | if (p->sched_class->task_woken) | ||
2310 | p->sched_class->task_woken(rq, p); | ||
2311 | |||
2312 | if (unlikely(rq->idle_stamp)) { | ||
2313 | u64 delta = rq->clock - rq->idle_stamp; | ||
2314 | u64 max = 2*sysctl_sched_migration_cost; | ||
2315 | |||
2316 | if (delta > max) | ||
2317 | rq->avg_idle = max; | ||
2318 | else | ||
2319 | update_avg(&rq->avg_idle, delta); | ||
2320 | rq->idle_stamp = 0; | ||
2321 | } | ||
2322 | #endif | ||
2323 | /* if a worker is waking up, notify workqueue */ | ||
2324 | if ((p->flags & PF_WQ_WORKER) && success) | ||
2325 | wq_worker_waking_up(p, cpu_of(rq)); | ||
2326 | } | ||
2327 | |||
2328 | /** | ||
2271 | * try_to_wake_up - wake up a thread | 2329 | * try_to_wake_up - wake up a thread |
2272 | * @p: the to-be-woken-up thread | 2330 | * @p: the thread to be awakened |
2273 | * @state: the mask of task states that can be woken | 2331 | * @state: the mask of task states that can be woken |
2274 | * @sync: do a synchronous wakeup? | 2332 | * @wake_flags: wake modifier flags (WF_*) |
2275 | * | 2333 | * |
2276 | * Put it on the run-queue if it's not already there. The "current" | 2334 | * Put it on the run-queue if it's not already there. The "current" |
2277 | * thread is always on the run-queue (except when the actual | 2335 | * thread is always on the run-queue (except when the actual |
@@ -2279,7 +2337,8 @@ static void update_avg(u64 *avg, u64 sample) | |||
2279 | * the simpler "current->state = TASK_RUNNING" to mark yourself | 2337 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
2280 | * runnable without the overhead of this. | 2338 | * runnable without the overhead of this. |
2281 | * | 2339 | * |
2282 | * returns failure only if the task is already active. | 2340 | * Returns %true if @p was woken up, %false if it was already running |
2341 | * or @state didn't match @p's state. | ||
2283 | */ | 2342 | */ |
2284 | static int try_to_wake_up(struct task_struct *p, unsigned int state, | 2343 | static int try_to_wake_up(struct task_struct *p, unsigned int state, |
2285 | int wake_flags) | 2344 | int wake_flags) |
@@ -2359,38 +2418,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | |||
2359 | 2418 | ||
2360 | out_activate: | 2419 | out_activate: |
2361 | #endif /* CONFIG_SMP */ | 2420 | #endif /* CONFIG_SMP */ |
2362 | schedstat_inc(p, se.statistics.nr_wakeups); | 2421 | ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, |
2363 | if (wake_flags & WF_SYNC) | 2422 | cpu == this_cpu, en_flags); |
2364 | schedstat_inc(p, se.statistics.nr_wakeups_sync); | ||
2365 | if (orig_cpu != cpu) | ||
2366 | schedstat_inc(p, se.statistics.nr_wakeups_migrate); | ||
2367 | if (cpu == this_cpu) | ||
2368 | schedstat_inc(p, se.statistics.nr_wakeups_local); | ||
2369 | else | ||
2370 | schedstat_inc(p, se.statistics.nr_wakeups_remote); | ||
2371 | activate_task(rq, p, en_flags); | ||
2372 | success = 1; | 2423 | success = 1; |
2373 | |||
2374 | out_running: | 2424 | out_running: |
2375 | trace_sched_wakeup(p, success); | 2425 | ttwu_post_activation(p, rq, wake_flags, success); |
2376 | check_preempt_curr(rq, p, wake_flags); | ||
2377 | |||
2378 | p->state = TASK_RUNNING; | ||
2379 | #ifdef CONFIG_SMP | ||
2380 | if (p->sched_class->task_woken) | ||
2381 | p->sched_class->task_woken(rq, p); | ||
2382 | |||
2383 | if (unlikely(rq->idle_stamp)) { | ||
2384 | u64 delta = rq->clock - rq->idle_stamp; | ||
2385 | u64 max = 2*sysctl_sched_migration_cost; | ||
2386 | |||
2387 | if (delta > max) | ||
2388 | rq->avg_idle = max; | ||
2389 | else | ||
2390 | update_avg(&rq->avg_idle, delta); | ||
2391 | rq->idle_stamp = 0; | ||
2392 | } | ||
2393 | #endif | ||
2394 | out: | 2426 | out: |
2395 | task_rq_unlock(rq, &flags); | 2427 | task_rq_unlock(rq, &flags); |
2396 | put_cpu(); | 2428 | put_cpu(); |
@@ -2399,6 +2431,37 @@ out: | |||
2399 | } | 2431 | } |
2400 | 2432 | ||
2401 | /** | 2433 | /** |
2434 | * try_to_wake_up_local - try to wake up a local task with rq lock held | ||
2435 | * @p: the thread to be awakened | ||
2436 | * | ||
2437 | * Put @p on the run-queue if it's not already there. The caller must | ||
2438 | * ensure that this_rq() is locked, @p is bound to this_rq() and not | ||
2439 | * the current task. this_rq() stays locked over invocation. | ||
2440 | */ | ||
2441 | static void try_to_wake_up_local(struct task_struct *p) | ||
2442 | { | ||
2443 | struct rq *rq = task_rq(p); | ||
2444 | bool success = false; | ||
2445 | |||
2446 | BUG_ON(rq != this_rq()); | ||
2447 | BUG_ON(p == current); | ||
2448 | lockdep_assert_held(&rq->lock); | ||
2449 | |||
2450 | if (!(p->state & TASK_NORMAL)) | ||
2451 | return; | ||
2452 | |||
2453 | if (!p->se.on_rq) { | ||
2454 | if (likely(!task_running(rq, p))) { | ||
2455 | schedstat_inc(rq, ttwu_count); | ||
2456 | schedstat_inc(rq, ttwu_local); | ||
2457 | } | ||
2458 | ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP); | ||
2459 | success = true; | ||
2460 | } | ||
2461 | ttwu_post_activation(p, rq, 0, success); | ||
2462 | } | ||
2463 | |||
2464 | /** | ||
2402 | * wake_up_process - Wake up a specific process | 2465 | * wake_up_process - Wake up a specific process |
2403 | * @p: The process to be woken up. | 2466 | * @p: The process to be woken up. |
2404 | * | 2467 | * |
@@ -2494,7 +2557,16 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2494 | if (p->sched_class->task_fork) | 2557 | if (p->sched_class->task_fork) |
2495 | p->sched_class->task_fork(p); | 2558 | p->sched_class->task_fork(p); |
2496 | 2559 | ||
2560 | /* | ||
2561 | * The child is not yet in the pid-hash so no cgroup attach races, | ||
2562 | * and the cgroup is pinned to this child because cgroup_fork() | ||
2563 | * is run before sched_fork(). | ||
2564 | * | ||
2565 | * Silence PROVE_RCU. | ||
2566 | */ | ||
2567 | rcu_read_lock(); | ||
2497 | set_task_cpu(p, cpu); | 2568 | set_task_cpu(p, cpu); |
2569 | rcu_read_unlock(); | ||
2498 | 2570 | ||
2499 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 2571 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
2500 | if (likely(sched_info_on())) | 2572 | if (likely(sched_info_on())) |
@@ -2864,9 +2936,9 @@ unsigned long nr_iowait(void) | |||
2864 | return sum; | 2936 | return sum; |
2865 | } | 2937 | } |
2866 | 2938 | ||
2867 | unsigned long nr_iowait_cpu(void) | 2939 | unsigned long nr_iowait_cpu(int cpu) |
2868 | { | 2940 | { |
2869 | struct rq *this = this_rq(); | 2941 | struct rq *this = cpu_rq(cpu); |
2870 | return atomic_read(&this->nr_iowait); | 2942 | return atomic_read(&this->nr_iowait); |
2871 | } | 2943 | } |
2872 | 2944 | ||
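Taking an explicit cpu argument lets per-CPU idle/iowait bookkeeping query any CPU instead of only the local one. A hedged sketch of such a caller (the function and parameter names are illustrative, not from this patch):

    void account_idle_period(int cpu, ktime_t delta,
                             ktime_t *idle, ktime_t *iowait)
    {
            /* Was the idle period on 'cpu' spent waiting on I/O? */
            if (nr_iowait_cpu(cpu))
                    *iowait = ktime_add(*iowait, delta);
            else
                    *idle = ktime_add(*idle, delta);
    }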
@@ -3003,23 +3075,102 @@ static void calc_load_account_active(struct rq *this_rq) | |||
3003 | } | 3075 | } |
3004 | 3076 | ||
3005 | /* | 3077 | /* |
3078 | * The exact cpuload at various idx values, calculated at every tick would be | ||
3079 | * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load | ||
3080 | * | ||
3081 | * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called | ||
3082 | * on nth tick when cpu may be busy, then we have: | ||
3083 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load | ||
3084 | * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load | ||
3085 | * | ||
3086 | * decay_load_missed() below does efficient calculation of | ||
3087 | * load = ((2^idx - 1) / 2^idx)^(n-1) * load | ||
3088 | * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load | ||
3089 | * | ||
3090 | * The calculation is approximated on a 128 point scale. | ||
3091 | * degrade_zero_ticks is the number of ticks after which load at any | ||
3092 | * particular idx is approximated to be zero. | ||
3093 | * degrade_factor is a precomputed table, a row for each load idx. | ||
3094 | * Each column corresponds to degradation factor for a power of two ticks, | ||
3095 | * based on 128 point scale. | ||
3096 | * Example: | ||
3097 | * row 2, col 3 (=12) says that the degradation at load idx 2 after | ||
3098 | * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8). | ||
3099 | * | ||
3100 | * With this power of 2 load factors, we can degrade the load n times | ||
3101 | * by looking at 1 bits in n and doing as many mult/shift instead of | ||
3102 | * n mult/shifts needed by the exact degradation. | ||
3103 | */ | ||
3104 | #define DEGRADE_SHIFT 7 | ||
3105 | static const unsigned char | ||
3106 | degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128}; | ||
3107 | static const unsigned char | ||
3108 | degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = { | ||
3109 | {0, 0, 0, 0, 0, 0, 0, 0}, | ||
3110 | {64, 32, 8, 0, 0, 0, 0, 0}, | ||
3111 | {96, 72, 40, 12, 1, 0, 0}, | ||
3112 | {112, 98, 75, 43, 15, 1, 0}, | ||
3113 | {120, 112, 98, 76, 45, 16, 2} }; | ||
3114 | |||
3115 | /* | ||
3116 | * Update cpu_load for any missed ticks due to tickless idle. The backlog | ||
3117 | * accumulates while the CPU is idle, so we just decay the old load without | ||
3118 | * adding any new load. | ||
3119 | */ | ||
3120 | static unsigned long | ||
3121 | decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) | ||
3122 | { | ||
3123 | int j = 0; | ||
3124 | |||
3125 | if (!missed_updates) | ||
3126 | return load; | ||
3127 | |||
3128 | if (missed_updates >= degrade_zero_ticks[idx]) | ||
3129 | return 0; | ||
3130 | |||
3131 | if (idx == 1) | ||
3132 | return load >> missed_updates; | ||
3133 | |||
3134 | while (missed_updates) { | ||
3135 | if (missed_updates % 2) | ||
3136 | load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; | ||
3137 | |||
3138 | missed_updates >>= 1; | ||
3139 | j++; | ||
3140 | } | ||
3141 | return load; | ||
3142 | } | ||
3143 | |||
3144 | /* | ||
3006 | * Update rq->cpu_load[] statistics. This function is usually called every | 3145 | * Update rq->cpu_load[] statistics. This function is usually called every |
3007 | * scheduler tick (TICK_NSEC). | 3146 | * scheduler tick (TICK_NSEC). With tickless idle this will not be called |
3147 | * every tick. We fix it up based on jiffies. | ||
3008 | */ | 3148 | */ |
3009 | static void update_cpu_load(struct rq *this_rq) | 3149 | static void update_cpu_load(struct rq *this_rq) |
3010 | { | 3150 | { |
3011 | unsigned long this_load = this_rq->load.weight; | 3151 | unsigned long this_load = this_rq->load.weight; |
3152 | unsigned long curr_jiffies = jiffies; | ||
3153 | unsigned long pending_updates; | ||
3012 | int i, scale; | 3154 | int i, scale; |
3013 | 3155 | ||
3014 | this_rq->nr_load_updates++; | 3156 | this_rq->nr_load_updates++; |
3015 | 3157 | ||
3158 | /* Avoid repeated calls on same jiffy, when moving in and out of idle */ | ||
3159 | if (curr_jiffies == this_rq->last_load_update_tick) | ||
3160 | return; | ||
3161 | |||
3162 | pending_updates = curr_jiffies - this_rq->last_load_update_tick; | ||
3163 | this_rq->last_load_update_tick = curr_jiffies; | ||
3164 | |||
3016 | /* Update our load: */ | 3165 | /* Update our load: */ |
3017 | for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { | 3166 | this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ |
3167 | for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { | ||
3018 | unsigned long old_load, new_load; | 3168 | unsigned long old_load, new_load; |
3019 | 3169 | ||
3020 | /* scale is effectively 1 << i now, and >> i divides by scale */ | 3170 | /* scale is effectively 1 << i now, and >> i divides by scale */ |
3021 | 3171 | ||
3022 | old_load = this_rq->cpu_load[i]; | 3172 | old_load = this_rq->cpu_load[i]; |
3173 | old_load = decay_load_missed(old_load, pending_updates - 1, i); | ||
3023 | new_load = this_load; | 3174 | new_load = this_load; |
3024 | /* | 3175 | /* |
3025 | * Round up the averaging division if load is increasing. This | 3176 | * Round up the averaging division if load is increasing. This |
@@ -3027,9 +3178,15 @@ static void update_cpu_load(struct rq *this_rq) | |||
3027 | * example. | 3178 | * example. |
3028 | */ | 3179 | */ |
3029 | if (new_load > old_load) | 3180 | if (new_load > old_load) |
3030 | new_load += scale-1; | 3181 | new_load += scale - 1; |
3031 | this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; | 3182 | |
3183 | this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; | ||
3032 | } | 3184 | } |
3185 | } | ||
3186 | |||
3187 | static void update_cpu_load_active(struct rq *this_rq) | ||
3188 | { | ||
3189 | update_cpu_load(this_rq); | ||
3033 | 3190 | ||
3034 | calc_load_account_active(this_rq); | 3191 | calc_load_account_active(this_rq); |
3035 | } | 3192 | } |
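The table-driven decay is easy to check outside the kernel. Below is a small, userspace-only demo of decay_load_missed() with the constants copied from the hunk above: for 1024 units of load at load index 2 and 8 missed ticks, the exact factor is (3/4)^8 ≈ 0.100 while the table gives 12/128 ≈ 0.094.

    #include <stdio.h>

    #define DEGRADE_SHIFT           7
    #define CPU_LOAD_IDX_MAX        5

    static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] =
            { 0, 8, 32, 64, 128 };
    static const unsigned char
    degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
            {   0,   0,  0,  0,  0,  0, 0, 0 },
            {  64,  32,  8,  0,  0,  0, 0, 0 },
            {  96,  72, 40, 12,  1,  0, 0 },
            { 112,  98, 75, 43, 15,  1, 0 },
            { 120, 112, 98, 76, 45, 16, 2 } };

    static unsigned long decay_load_missed(unsigned long load,
                                           unsigned long missed_updates, int idx)
    {
            int j = 0;

            if (!missed_updates)
                    return load;
            if (missed_updates >= degrade_zero_ticks[idx])
                    return 0;
            if (idx == 1)
                    return load >> missed_updates;

            while (missed_updates) {
                    if (missed_updates % 2)
                            load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
                    missed_updates >>= 1;
                    j++;
            }
            return load;
    }

    int main(void)
    {
            /* 8 missed ticks at idx 2: 1024 * 12 / 128 == 96 */
            printf("%lu\n", decay_load_missed(1024, 8, 2));
            return 0;
    }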
@@ -3417,7 +3574,7 @@ void scheduler_tick(void) | |||
3417 | 3574 | ||
3418 | raw_spin_lock(&rq->lock); | 3575 | raw_spin_lock(&rq->lock); |
3419 | update_rq_clock(rq); | 3576 | update_rq_clock(rq); |
3420 | update_cpu_load(rq); | 3577 | update_cpu_load_active(rq); |
3421 | curr->sched_class->task_tick(rq, curr, 0); | 3578 | curr->sched_class->task_tick(rq, curr, 0); |
3422 | raw_spin_unlock(&rq->lock); | 3579 | raw_spin_unlock(&rq->lock); |
3423 | 3580 | ||
@@ -3589,7 +3746,6 @@ need_resched: | |||
3589 | rq = cpu_rq(cpu); | 3746 | rq = cpu_rq(cpu); |
3590 | rcu_note_context_switch(cpu); | 3747 | rcu_note_context_switch(cpu); |
3591 | prev = rq->curr; | 3748 | prev = rq->curr; |
3592 | switch_count = &prev->nivcsw; | ||
3593 | 3749 | ||
3594 | release_kernel_lock(prev); | 3750 | release_kernel_lock(prev); |
3595 | need_resched_nonpreemptible: | 3751 | need_resched_nonpreemptible: |
@@ -3602,11 +3758,26 @@ need_resched_nonpreemptible: | |||
3602 | raw_spin_lock_irq(&rq->lock); | 3758 | raw_spin_lock_irq(&rq->lock); |
3603 | clear_tsk_need_resched(prev); | 3759 | clear_tsk_need_resched(prev); |
3604 | 3760 | ||
3761 | switch_count = &prev->nivcsw; | ||
3605 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { | 3762 | if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { |
3606 | if (unlikely(signal_pending_state(prev->state, prev))) | 3763 | if (unlikely(signal_pending_state(prev->state, prev))) { |
3607 | prev->state = TASK_RUNNING; | 3764 | prev->state = TASK_RUNNING; |
3608 | else | 3765 | } else { |
3766 | /* | ||
3767 | * If a worker is going to sleep, notify and | ||
3768 | * ask workqueue whether it wants to wake up a | ||
3769 | * task to maintain concurrency. If so, wake | ||
3770 | * up the task. | ||
3771 | */ | ||
3772 | if (prev->flags & PF_WQ_WORKER) { | ||
3773 | struct task_struct *to_wakeup; | ||
3774 | |||
3775 | to_wakeup = wq_worker_sleeping(prev, cpu); | ||
3776 | if (to_wakeup) | ||
3777 | try_to_wake_up_local(to_wakeup); | ||
3778 | } | ||
3609 | deactivate_task(rq, prev, DEQUEUE_SLEEP); | 3779 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
3780 | } | ||
3610 | switch_count = &prev->nvcsw; | 3781 | switch_count = &prev->nvcsw; |
3611 | } | 3782 | } |
3612 | 3783 | ||
@@ -3628,8 +3799,10 @@ need_resched_nonpreemptible: | |||
3628 | 3799 | ||
3629 | context_switch(rq, prev, next); /* unlocks the rq */ | 3800 | context_switch(rq, prev, next); /* unlocks the rq */ |
3630 | /* | 3801 | /* |
3631 | * the context switch might have flipped the stack from under | 3802 | * The context switch has flipped the stack from under us |
3632 | * us, hence refresh the local variables. | 3803 | * and restored the local variables which were saved when |
3804 | * this task called schedule() in the past. prev == current | ||
3805 | * is still correct, but it can be moved to another cpu/rq. | ||
3633 | */ | 3806 | */ |
3634 | cpu = smp_processor_id(); | 3807 | cpu = smp_processor_id(); |
3635 | rq = cpu_rq(cpu); | 3808 | rq = cpu_rq(cpu); |
@@ -3638,11 +3811,8 @@ need_resched_nonpreemptible: | |||
3638 | 3811 | ||
3639 | post_schedule(rq); | 3812 | post_schedule(rq); |
3640 | 3813 | ||
3641 | if (unlikely(reacquire_kernel_lock(current) < 0)) { | 3814 | if (unlikely(reacquire_kernel_lock(prev))) |
3642 | prev = rq->curr; | ||
3643 | switch_count = &prev->nivcsw; | ||
3644 | goto need_resched_nonpreemptible; | 3815 | goto need_resched_nonpreemptible; |
3645 | } | ||
3646 | 3816 | ||
3647 | preempt_enable_no_resched(); | 3817 | preempt_enable_no_resched(); |
3648 | if (need_resched()) | 3818 | if (need_resched()) |
@@ -3717,7 +3887,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
3717 | * off of preempt_enable. Kernel preemptions off return from interrupt | 3887 | * off of preempt_enable. Kernel preemptions off return from interrupt |
3718 | * occur there and call schedule directly. | 3888 | * occur there and call schedule directly. |
3719 | */ | 3889 | */ |
3720 | asmlinkage void __sched preempt_schedule(void) | 3890 | asmlinkage void __sched notrace preempt_schedule(void) |
3721 | { | 3891 | { |
3722 | struct thread_info *ti = current_thread_info(); | 3892 | struct thread_info *ti = current_thread_info(); |
3723 | 3893 | ||
@@ -3729,9 +3899,9 @@ asmlinkage void __sched preempt_schedule(void) | |||
3729 | return; | 3899 | return; |
3730 | 3900 | ||
3731 | do { | 3901 | do { |
3732 | add_preempt_count(PREEMPT_ACTIVE); | 3902 | add_preempt_count_notrace(PREEMPT_ACTIVE); |
3733 | schedule(); | 3903 | schedule(); |
3734 | sub_preempt_count(PREEMPT_ACTIVE); | 3904 | sub_preempt_count_notrace(PREEMPT_ACTIVE); |
3735 | 3905 | ||
3736 | /* | 3906 | /* |
3737 | * Check again in case we missed a preemption opportunity | 3907 | * Check again in case we missed a preemption opportunity |
@@ -4432,12 +4602,8 @@ recheck: | |||
4432 | */ | 4602 | */ |
4433 | if (user && !capable(CAP_SYS_NICE)) { | 4603 | if (user && !capable(CAP_SYS_NICE)) { |
4434 | if (rt_policy(policy)) { | 4604 | if (rt_policy(policy)) { |
4435 | unsigned long rlim_rtprio; | 4605 | unsigned long rlim_rtprio = |
4436 | 4606 | task_rlimit(p, RLIMIT_RTPRIO); | |
4437 | if (!lock_task_sighand(p, &flags)) | ||
4438 | return -ESRCH; | ||
4439 | rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); | ||
4440 | unlock_task_sighand(p, &flags); | ||
4441 | 4607 | ||
4442 | /* can't set/change the rt policy */ | 4608 | /* can't set/change the rt policy */ |
4443 | if (policy != p->policy && !rlim_rtprio) | 4609 | if (policy != p->policy && !rlim_rtprio) |
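The lock_task_sighand() dance can go because task_rlimit() is a lockless read of the current soft limit. As far as I recall, the helper in include/linux/sched.h is simply:

    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                            unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }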
@@ -4465,16 +4631,6 @@ recheck: | |||
4465 | } | 4631 | } |
4466 | 4632 | ||
4467 | if (user) { | 4633 | if (user) { |
4468 | #ifdef CONFIG_RT_GROUP_SCHED | ||
4469 | /* | ||
4470 | * Do not allow realtime tasks into groups that have no runtime | ||
4471 | * assigned. | ||
4472 | */ | ||
4473 | if (rt_bandwidth_enabled() && rt_policy(policy) && | ||
4474 | task_group(p)->rt_bandwidth.rt_runtime == 0) | ||
4475 | return -EPERM; | ||
4476 | #endif | ||
4477 | |||
4478 | retval = security_task_setscheduler(p, policy, param); | 4634 | retval = security_task_setscheduler(p, policy, param); |
4479 | if (retval) | 4635 | if (retval) |
4480 | return retval; | 4636 | return retval; |
@@ -4490,6 +4646,22 @@ recheck: | |||
4490 | * runqueue lock must be held. | 4646 | * runqueue lock must be held. |
4491 | */ | 4647 | */ |
4492 | rq = __task_rq_lock(p); | 4648 | rq = __task_rq_lock(p); |
4649 | |||
4650 | #ifdef CONFIG_RT_GROUP_SCHED | ||
4651 | if (user) { | ||
4652 | /* | ||
4653 | * Do not allow realtime tasks into groups that have no runtime | ||
4654 | * assigned. | ||
4655 | */ | ||
4656 | if (rt_bandwidth_enabled() && rt_policy(policy) && | ||
4657 | task_group(p)->rt_bandwidth.rt_runtime == 0) { | ||
4658 | __task_rq_unlock(rq); | ||
4659 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | ||
4660 | return -EPERM; | ||
4661 | } | ||
4662 | } | ||
4663 | #endif | ||
4664 | |||
4493 | /* recheck policy now with rq lock held */ | 4665 | /* recheck policy now with rq lock held */ |
4494 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { | 4666 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
4495 | policy = oldpolicy = -1; | 4667 | policy = oldpolicy = -1; |
@@ -5801,20 +5973,49 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
5801 | */ | 5973 | */ |
5802 | static struct notifier_block __cpuinitdata migration_notifier = { | 5974 | static struct notifier_block __cpuinitdata migration_notifier = { |
5803 | .notifier_call = migration_call, | 5975 | .notifier_call = migration_call, |
5804 | .priority = 10 | 5976 | .priority = CPU_PRI_MIGRATION, |
5805 | }; | 5977 | }; |
5806 | 5978 | ||
5979 | static int __cpuinit sched_cpu_active(struct notifier_block *nfb, | ||
5980 | unsigned long action, void *hcpu) | ||
5981 | { | ||
5982 | switch (action & ~CPU_TASKS_FROZEN) { | ||
5983 | case CPU_ONLINE: | ||
5984 | case CPU_DOWN_FAILED: | ||
5985 | set_cpu_active((long)hcpu, true); | ||
5986 | return NOTIFY_OK; | ||
5987 | default: | ||
5988 | return NOTIFY_DONE; | ||
5989 | } | ||
5990 | } | ||
5991 | |||
5992 | static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, | ||
5993 | unsigned long action, void *hcpu) | ||
5994 | { | ||
5995 | switch (action & ~CPU_TASKS_FROZEN) { | ||
5996 | case CPU_DOWN_PREPARE: | ||
5997 | set_cpu_active((long)hcpu, false); | ||
5998 | return NOTIFY_OK; | ||
5999 | default: | ||
6000 | return NOTIFY_DONE; | ||
6001 | } | ||
6002 | } | ||
6003 | |||
5807 | static int __init migration_init(void) | 6004 | static int __init migration_init(void) |
5808 | { | 6005 | { |
5809 | void *cpu = (void *)(long)smp_processor_id(); | 6006 | void *cpu = (void *)(long)smp_processor_id(); |
5810 | int err; | 6007 | int err; |
5811 | 6008 | ||
5812 | /* Start one for the boot CPU: */ | 6009 | /* Initialize migration for the boot CPU */ |
5813 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); | 6010 | err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); |
5814 | BUG_ON(err == NOTIFY_BAD); | 6011 | BUG_ON(err == NOTIFY_BAD); |
5815 | migration_call(&migration_notifier, CPU_ONLINE, cpu); | 6012 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
5816 | register_cpu_notifier(&migration_notifier); | 6013 | register_cpu_notifier(&migration_notifier); |
5817 | 6014 | ||
6015 | /* Register cpu active notifiers */ | ||
6016 | cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); | ||
6017 | cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); | ||
6018 | |||
5818 | return 0; | 6019 | return 0; |
5819 | } | 6020 | } |
5820 | early_initcall(migration_init); | 6021 | early_initcall(migration_init); |
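CPU_PRI_MIGRATION and the new active/inactive notifiers depend on the notifier priorities added to include/linux/cpu.h in the same series, which order the callbacks around the cpuset and perf hotplug handlers. Roughly (quoted from memory, not part of this hunk):

    enum {
            /* active/inactive ordering around the cpuset notifiers */
            CPU_PRI_SCHED_ACTIVE    = INT_MAX,
            CPU_PRI_CPUSET_ACTIVE   = INT_MAX - 1,
            CPU_PRI_SCHED_INACTIVE  = INT_MIN + 1,
            CPU_PRI_CPUSET_INACTIVE = INT_MIN,

            /* migration should happen before other stuff but after perf */
            CPU_PRI_PERF            = 20,
            CPU_PRI_MIGRATION       = 10,
    };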
@@ -6049,23 +6250,18 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6049 | free_rootdomain(old_rd); | 6250 | free_rootdomain(old_rd); |
6050 | } | 6251 | } |
6051 | 6252 | ||
6052 | static int init_rootdomain(struct root_domain *rd, bool bootmem) | 6253 | static int init_rootdomain(struct root_domain *rd) |
6053 | { | 6254 | { |
6054 | gfp_t gfp = GFP_KERNEL; | ||
6055 | |||
6056 | memset(rd, 0, sizeof(*rd)); | 6255 | memset(rd, 0, sizeof(*rd)); |
6057 | 6256 | ||
6058 | if (bootmem) | 6257 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) |
6059 | gfp = GFP_NOWAIT; | ||
6060 | |||
6061 | if (!alloc_cpumask_var(&rd->span, gfp)) | ||
6062 | goto out; | 6258 | goto out; |
6063 | if (!alloc_cpumask_var(&rd->online, gfp)) | 6259 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) |
6064 | goto free_span; | 6260 | goto free_span; |
6065 | if (!alloc_cpumask_var(&rd->rto_mask, gfp)) | 6261 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
6066 | goto free_online; | 6262 | goto free_online; |
6067 | 6263 | ||
6068 | if (cpupri_init(&rd->cpupri, bootmem) != 0) | 6264 | if (cpupri_init(&rd->cpupri) != 0) |
6069 | goto free_rto_mask; | 6265 | goto free_rto_mask; |
6070 | return 0; | 6266 | return 0; |
6071 | 6267 | ||
@@ -6081,7 +6277,7 @@ out: | |||
6081 | 6277 | ||
6082 | static void init_defrootdomain(void) | 6278 | static void init_defrootdomain(void) |
6083 | { | 6279 | { |
6084 | init_rootdomain(&def_root_domain, true); | 6280 | init_rootdomain(&def_root_domain); |
6085 | 6281 | ||
6086 | atomic_set(&def_root_domain.refcount, 1); | 6282 | atomic_set(&def_root_domain.refcount, 1); |
6087 | } | 6283 | } |
@@ -6094,7 +6290,7 @@ static struct root_domain *alloc_rootdomain(void) | |||
6094 | if (!rd) | 6290 | if (!rd) |
6095 | return NULL; | 6291 | return NULL; |
6096 | 6292 | ||
6097 | if (init_rootdomain(rd, false) != 0) { | 6293 | if (init_rootdomain(rd) != 0) { |
6098 | kfree(rd); | 6294 | kfree(rd); |
6099 | return NULL; | 6295 | return NULL; |
6100 | } | 6296 | } |
@@ -7273,29 +7469,35 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | |||
7273 | } | 7469 | } |
7274 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 7470 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
7275 | 7471 | ||
7276 | #ifndef CONFIG_CPUSETS | ||
7277 | /* | 7472 | /* |
7278 | * Add online and remove offline CPUs from the scheduler domains. | 7473 | * Update cpusets according to cpu_active mask. If cpusets are |
7279 | * When cpusets are enabled they take over this function. | 7474 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
7475 | * around partition_sched_domains(). | ||
7280 | */ | 7476 | */ |
7281 | static int update_sched_domains(struct notifier_block *nfb, | 7477 | static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, |
7282 | unsigned long action, void *hcpu) | 7478 | void *hcpu) |
7283 | { | 7479 | { |
7284 | switch (action) { | 7480 | switch (action & ~CPU_TASKS_FROZEN) { |
7285 | case CPU_ONLINE: | 7481 | case CPU_ONLINE: |
7286 | case CPU_ONLINE_FROZEN: | ||
7287 | case CPU_DOWN_PREPARE: | ||
7288 | case CPU_DOWN_PREPARE_FROZEN: | ||
7289 | case CPU_DOWN_FAILED: | 7482 | case CPU_DOWN_FAILED: |
7290 | case CPU_DOWN_FAILED_FROZEN: | 7483 | cpuset_update_active_cpus(); |
7291 | partition_sched_domains(1, NULL, NULL); | ||
7292 | return NOTIFY_OK; | 7484 | return NOTIFY_OK; |
7485 | default: | ||
7486 | return NOTIFY_DONE; | ||
7487 | } | ||
7488 | } | ||
7293 | 7489 | ||
7490 | static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, | ||
7491 | void *hcpu) | ||
7492 | { | ||
7493 | switch (action & ~CPU_TASKS_FROZEN) { | ||
7494 | case CPU_DOWN_PREPARE: | ||
7495 | cpuset_update_active_cpus(); | ||
7496 | return NOTIFY_OK; | ||
7294 | default: | 7497 | default: |
7295 | return NOTIFY_DONE; | 7498 | return NOTIFY_DONE; |
7296 | } | 7499 | } |
7297 | } | 7500 | } |
7298 | #endif | ||
7299 | 7501 | ||
7300 | static int update_runtime(struct notifier_block *nfb, | 7502 | static int update_runtime(struct notifier_block *nfb, |
7301 | unsigned long action, void *hcpu) | 7503 | unsigned long action, void *hcpu) |
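When CONFIG_CPUSETS is disabled, cpuset_update_active_cpus() degenerates to the old behaviour of rebuilding the single default sched domain; the include/linux/cpuset.h stub is, roughly:

    static inline void cpuset_update_active_cpus(void)
    {
            partition_sched_domains(1, NULL, NULL);
    }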
@@ -7341,10 +7543,8 @@ void __init sched_init_smp(void) | |||
7341 | mutex_unlock(&sched_domains_mutex); | 7543 | mutex_unlock(&sched_domains_mutex); |
7342 | put_online_cpus(); | 7544 | put_online_cpus(); |
7343 | 7545 | ||
7344 | #ifndef CONFIG_CPUSETS | 7546 | hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); |
7345 | /* XXX: Theoretical race here - CPU may be hotplugged now */ | 7547 | hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); |
7346 | hotcpu_notifier(update_sched_domains, 0); | ||
7347 | #endif | ||
7348 | 7548 | ||
7349 | /* RT runtime code needs to handle some hotplug events */ | 7549 | /* RT runtime code needs to handle some hotplug events */ |
7350 | hotcpu_notifier(update_runtime, 0); | 7550 | hotcpu_notifier(update_runtime, 0); |
@@ -7589,6 +7789,9 @@ void __init sched_init(void) | |||
7589 | 7789 | ||
7590 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) | 7790 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
7591 | rq->cpu_load[j] = 0; | 7791 | rq->cpu_load[j] = 0; |
7792 | |||
7793 | rq->last_load_update_tick = jiffies; | ||
7794 | |||
7592 | #ifdef CONFIG_SMP | 7795 | #ifdef CONFIG_SMP |
7593 | rq->sd = NULL; | 7796 | rq->sd = NULL; |
7594 | rq->rd = NULL; | 7797 | rq->rd = NULL; |
@@ -7602,6 +7805,10 @@ void __init sched_init(void) | |||
7602 | rq->idle_stamp = 0; | 7805 | rq->idle_stamp = 0; |
7603 | rq->avg_idle = 2*sysctl_sched_migration_cost; | 7806 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
7604 | rq_attach_root(rq, &def_root_domain); | 7807 | rq_attach_root(rq, &def_root_domain); |
7808 | #ifdef CONFIG_NO_HZ | ||
7809 | rq->nohz_balance_kick = 0; | ||
7810 | init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); | ||
7811 | #endif | ||
7605 | #endif | 7812 | #endif |
7606 | init_rq_hrtick(rq); | 7813 | init_rq_hrtick(rq); |
7607 | atomic_set(&rq->nr_iowait, 0); | 7814 | atomic_set(&rq->nr_iowait, 0); |
@@ -7646,8 +7853,11 @@ void __init sched_init(void) | |||
7646 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 7853 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
7647 | #ifdef CONFIG_SMP | 7854 | #ifdef CONFIG_SMP |
7648 | #ifdef CONFIG_NO_HZ | 7855 | #ifdef CONFIG_NO_HZ |
7649 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | 7856 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
7650 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | 7857 | alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT); |
7858 | atomic_set(&nohz.load_balancer, nr_cpu_ids); | ||
7859 | atomic_set(&nohz.first_pick_cpu, nr_cpu_ids); | ||
7860 | atomic_set(&nohz.second_pick_cpu, nr_cpu_ids); | ||
7651 | #endif | 7861 | #endif |
7652 | /* May be allocated at isolcpus cmdline parse time */ | 7862 | /* May be allocated at isolcpus cmdline parse time */ |
7653 | if (cpu_isolated_map == NULL) | 7863 | if (cpu_isolated_map == NULL) |