Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  242
 1 file changed, 156 insertions(+), 86 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 33a0676ea744..4603b9d8f30a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -71,7 +71,9 @@
 #include <linux/ftrace.h>
 #include <linux/slab.h>
 #include <linux/init_task.h>
+#include <linux/binfmts.h>
 
+#include <asm/switch_to.h>
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 #include <asm/mutex.h>
@@ -162,13 +164,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define jump_label_key__true  jump_label_key_enabled
-#define jump_label_key__false jump_label_key_disabled
+#define jump_label_key__true  STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
 
 #define SCHED_FEAT(name, enabled)	\
 	jump_label_key__##enabled ,
 
-struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
 
@@ -176,14 +178,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	if (jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_dec(&sched_feat_keys[i]);
+	if (static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_dec(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_inc(&sched_feat_keys[i]);
+	if (!static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_inc(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
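The two hunks above are part of the tree-wide rename of the jump_label API to static_key. For reference, a minimal sketch of the resulting pattern, using the <linux/jump_label.h> calls the scheduler features now rely on; the key name and the slow-path helper are made up for illustration:

	#include <linux/jump_label.h>

	static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

	void my_hot_path(void)
	{
		/* Compiles to a patched no-op/jump, so the check is
		 * essentially free while the key stays disabled. */
		if (static_key_false(&my_feature))
			do_slow_feature_work();	/* hypothetical slow path */
	}

	void my_feature_set(bool on)
	{
		/* Runtime code patching; may sleep, so call from process context. */
		if (on)
			static_key_slow_inc(&my_feature);
		else
			static_key_slow_dec(&my_feature);
	}

sched_feat_enable()/sched_feat_disable() above are exactly this inc/dec pairing, keyed off the features.h table.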
@@ -894,7 +896,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	delta -= irq_delta;
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (static_branch((&paravirt_steal_rq_enabled))) {
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
 		u64 st;
 
 		steal = paravirt_steal_clock(cpu_of(rq));
@@ -1263,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
-	int dest_cpu;
 	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+	enum { cpuset, possible, fail } state = cpuset;
+	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+	for_each_cpu(dest_cpu, nodemask) {
+		if (!cpu_online(dest_cpu))
+			continue;
+		if (!cpu_active(dest_cpu))
+			continue;
 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
 			return dest_cpu;
+	}
 
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		return dest_cpu;
+	for (;;) {
+		/* Any allowed, online CPU? */
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
+			if (!cpu_online(dest_cpu))
+				continue;
+			if (!cpu_active(dest_cpu))
+				continue;
+			goto out;
+		}
 
-	/* No more Mr. Nice Guy. */
-	dest_cpu = cpuset_cpus_allowed_fallback(p);
-	/*
-	 * Don't tell them about moving exiting tasks or
-	 * kernel threads (both mm NULL), since they never
-	 * leave kernel.
-	 */
-	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, cpu);
+		switch (state) {
+		case cpuset:
+			/* No more Mr. Nice Guy. */
+			cpuset_cpus_allowed_fallback(p);
+			state = possible;
+			break;
+
+		case possible:
+			do_set_cpus_allowed(p, cpu_possible_mask);
+			state = fail;
+			break;
+
+		case fail:
+			BUG();
+			break;
+		}
+	}
+
+out:
+	if (state != cpuset) {
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk_sched("process %d (%s) no longer affine to cpu%d\n",
+					task_pid_nr(p), p->comm, cpu);
+		}
 	}
 
 	return dest_cpu;
@@ -1507,7 +1539,7 @@ static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
 }
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 
-static inline int ttwu_share_cache(int this_cpu, int that_cpu)
+bool cpus_share_cache(int this_cpu, int that_cpu)
 {
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
@@ -1518,7 +1550,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 #if defined(CONFIG_SMP)
-	if (sched_feat(TTWU_QUEUE) && !ttwu_share_cache(smp_processor_id(), cpu)) {
+	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
@@ -1932,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
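finish_arch_post_lock_switch() is a new hook that runs once the runqueue lock has been released in finish_task_switch(). Architectures that do not need it are expected to get an empty default; a sketch of the usual override pattern, assuming the no-op default lives in a shared scheduler header as in this series:

	/* Shared header: fall back to a no-op unless the arch overrides it. */
	#ifndef finish_arch_post_lock_switch
	# define finish_arch_post_lock_switch()	do { } while (0)
	#endif

	/* An architecture that needs the callback provides both the macro
	 * marker and the function, e.g. in its switch_to/mmu_context header: */
	#define finish_arch_post_lock_switch finish_arch_post_lock_switch
	static inline void finish_arch_post_lock_switch(void)
	{
		/* arch work that must wait until the rq lock is dropped */
	}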
@@ -2266,13 +2299,10 @@ calc_load_n(unsigned long load, unsigned long exp,
  * Once we've updated the global active value, we need to apply the exponential
  * weights adjusted to the number of cycles missed.
  */
-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
 	long delta, active, n;
 
-	if (time_before(jiffies, calc_load_update))
-		return;
-
 	/*
 	 * If we crossed a calc_load_update boundary, make sure to fold
 	 * any pending idle changes, the respective CPUs might have
@@ -2284,31 +2314,25 @@ static void calc_global_nohz(unsigned long ticks)
 		atomic_long_add(delta, &calc_load_tasks);
 
 	/*
-	 * If we were idle for multiple load cycles, apply them.
+	 * It could be the one fold was all it took, we done!
 	 */
-	if (ticks >= LOAD_FREQ) {
-		n = ticks / LOAD_FREQ;
+	if (time_before(jiffies, calc_load_update + 10))
+		return;
 
-		active = atomic_long_read(&calc_load_tasks);
-		active = active > 0 ? active * FIXED_1 : 0;
+	/*
+	 * Catch-up, fold however many we are behind still
+	 */
+	delta = jiffies - calc_load_update - 10;
+	n = 1 + (delta / LOAD_FREQ);
 
-		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+	active = atomic_long_read(&calc_load_tasks);
+	active = active > 0 ? active * FIXED_1 : 0;
 
-		calc_load_update += n * LOAD_FREQ;
-	}
+	avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+	avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+	avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-	/*
-	 * Its possible the remainder of the above division also crosses
-	 * a LOAD_FREQ period, the regular check in calc_global_load()
-	 * which comes after this will take care of that.
-	 *
-	 * Consider us being 11 ticks before a cycle completion, and us
-	 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
-	 * age us 4 cycles, and the test in calc_global_load() will
-	 * pick up the final one.
-	 */
+	calc_load_update += n * LOAD_FREQ;
 }
 #else
 void calc_load_account_idle(struct rq *this_rq)
@@ -2320,7 +2344,7 @@ static inline long calc_load_fold_idle(void)
 	return 0;
 }
 
-static void calc_global_nohz(unsigned long ticks)
+static void calc_global_nohz(void)
 {
 }
 #endif
@@ -2348,8 +2372,6 @@ void calc_global_load(unsigned long ticks)
 {
 	long active;
 
-	calc_global_nohz(ticks);
-
 	if (time_before(jiffies, calc_load_update + 10))
 		return;
 
@@ -2361,6 +2383,16 @@ void calc_global_load(unsigned long ticks)
 	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
 
 	calc_load_update += LOAD_FREQ;
+
+	/*
+	 * Account one period with whatever state we found before
+	 * folding in the nohz state and ageing the entire idle period.
+	 *
+	 * This avoids loosing a sample when we go idle between
+	 * calc_load_account_active() (10 ticks ago) and now and thus
+	 * under-accounting.
+	 */
+	calc_global_nohz();
 }
 
 /*
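The calc_global_nohz() hunks above change how idle periods are caught up: instead of being driven by the tick count passed in, the code now derives n from how far jiffies has run past calc_load_update and ages the averages n periods at once. As a reminder of the arithmetic being aged, a sketch of the single-period fixed-point update (constants as in <linux/sched.h>; calc_load_n() is this step applied n times via fixed-point exponentiation of exp):

	#define FSHIFT		11			/* bits of fixed-point precision */
	#define FIXED_1		(1 << FSHIFT)		/* 1.0 in fixed point */
	#define EXP_1		1884			/* FIXED_1/exp(5sec/1min) */

	/* One LOAD_FREQ step of a load average (sketch of calc_load()). */
	static unsigned long calc_load(unsigned long load, unsigned long exp,
				       unsigned long active)
	{
		load *= exp;				/* decay the old value */
		load += active * (FIXED_1 - exp);	/* blend in the new sample */
		return load >> FSHIFT;
	}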
@@ -2755,7 +2787,7 @@ void account_idle_time(cputime_t cputime)
 static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
-	if (static_branch(&paravirt_steal_enabled)) {
+	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal, st = 0;
 
 		steal = paravirt_steal_clock(smp_processor_id());
@@ -3070,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
 
@@ -3082,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
@@ -3220,14 +3246,14 @@ need_resched:
 
 	post_schedule(rq);
 
-	preempt_enable_no_resched();
+	sched_preempt_enable_no_resched();
 	if (need_resched())
 		goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
-	if (!tsk->state)
+	if (!tsk->state || tsk_is_pi_blocked(tsk))
 		return;
 	/*
 	 * If we are going to sleep and we have plugged IO queued,
@@ -3246,6 +3272,18 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
+/**
+ * schedule_preempt_disabled - called with preemption disabled
+ *
+ * Returns with preemption disabled. Note: preempt_count must be 1
+ */
+void __sched schedule_preempt_disabled(void)
+{
+	sched_preempt_enable_no_resched();
+	schedule();
+	preempt_disable();
+}
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
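schedule_preempt_disabled() packages a sequence that several callers (idle loops, mutex slowpaths) otherwise open-code. A sketch of what a converted call site looks like; block_here() is a made-up caller, entered and left with preempt_count == 1 as the kernel-doc above requires:

	static void block_here(void)
	{
		/* was open-coded as:
		 *	sched_preempt_enable_no_resched();
		 *	schedule();
		 *	preempt_disable();
		 */
		schedule_preempt_disabled();
	}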
@@ -3406,9 +3444,9 @@ EXPORT_SYMBOL(__wake_up);
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
 {
-	__wake_up_common(q, mode, 1, 0, NULL);
+	__wake_up_common(q, mode, nr, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
@@ -3767,6 +3805,24 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	rq = __task_rq_lock(p);
 
+	/*
+	 * Idle task boosting is a nono in general. There is one
+	 * exception, when PREEMPT_RT and NOHZ is active:
+	 *
+	 * The idle task calls get_next_timer_interrupt() and holds
+	 * the timer wheel base->lock on the CPU and another CPU wants
+	 * to access the timer (probably to cancel it). We can safely
+	 * ignore the boosting request, as the idle CPU runs this code
+	 * with interrupts disabled and will complete the lock
+	 * protected section without being interrupted. So there is no
+	 * real need to boost.
+	 */
+	if (unlikely(p == rq->idle)) {
+		WARN_ON(p != rq->curr);
+		WARN_ON(p->pi_blocked_on);
+		goto out_unlock;
+	}
+
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
@@ -3790,11 +3846,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
+out_unlock:
 	__task_rq_unlock(rq);
 }
-
 #endif
-
 void set_user_nice(struct task_struct *p, long nice)
 {
 	int old_prio, delta, on_rq;
@@ -4474,7 +4529,7 @@ SYSCALL_DEFINE0(sched_yield)
 	__release(rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 	do_raw_spin_unlock(&rq->lock);
-	preempt_enable_no_resched();
+	sched_preempt_enable_no_resched();
 
 	schedule();
 
@@ -4548,8 +4603,24 @@ EXPORT_SYMBOL(__cond_resched_softirq);
 /**
  * yield - yield the current processor to other threads.
  *
- * This is a shortcut for kernel-space yielding - it marks the
- * thread runnable and calls sys_sched_yield().
+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
+ *
+ * The scheduler is at all times free to pick the calling task as the most
+ * eligible task to run, if removing the yield() call from your code breaks
+ * it, its already broken.
+ *
+ * Typical broken usage is:
+ *
+ * while (!event)
+ * 	yield();
+ *
+ * where one assumes that yield() will let 'the other' process run that will
+ * make event true. If the current task is a SCHED_FIFO task that will never
+ * happen. Never use yield() as a progress guarantee!!
+ *
+ * If you want to use yield() to wait for something, use wait_event().
+ * If you want to use yield() to be 'nice' for others, use cond_resched().
+ * If you still want to use yield(), do not!
  */
 void __sched yield(void)
 {
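To make the recommendation above concrete, a sketch of the wait_event()/wake_up() pairing the comment suggests in place of a while (!event) yield() spin; the wait queue, the flag and both functions are made-up names:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(event_wq);
	static bool event_flag;

	static void consumer(void)
	{
		/* Sleeps until the condition holds instead of yield()-spinning. */
		wait_event(event_wq, event_flag);
	}

	static void producer(void)
	{
		event_flag = true;
		wake_up(&event_wq);	/* wakes waiters, which re-check the condition */
	}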
@@ -5381,7 +5452,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
+	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
@@ -5753,7 +5824,7 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
- * two cpus are in the same cache domain, see ttwu_share_cache().
+ * two cpus are in the same cache domain, see cpus_share_cache().
  */
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_id);
@@ -6728,7 +6799,7 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus();
@@ -6741,7 +6812,7 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
 		cpuset_update_active_cpus();
 		return NOTIFY_OK;
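Both cpuset notifiers now mask out CPU_TASKS_FROZEN, so the same case labels also catch the *_FROZEN variants generated during suspend/resume. The general shape of a hotplug notifier using this mask, as a sketch with a made-up callback name:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:		/* also matches CPU_ONLINE_FROZEN */
		case CPU_DOWN_FAILED:
			/* cpu is coming (back) up */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}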
@@ -6930,6 +7001,9 @@ void __init sched_init(void)
 		rq->online = 0;
 		rq->idle_stamp = 0;
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
+
+		INIT_LIST_HEAD(&rq->cfs_tasks);
+
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ
 		rq->nohz_flags = 0;
@@ -7524,8 +7598,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 			    struct task_group, css);
 }
 
-static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
 {
 	struct task_group *tg, *parent;
 
@@ -7542,15 +7615,14 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void
-cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpu_cgroup_destroy(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7568,7 +7640,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup *cgrp,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
@@ -7578,8 +7650,8 @@ static void cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		struct cgroup *old_cgrp, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+		struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
@@ -7929,8 +8001,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
  */
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(
-	struct cgroup_subsys *ss, struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
 {
 	struct cpuacct *ca;
 
@@ -7960,8 +8031,7 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void
-cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+static void cpuacct_destroy(struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
 