Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 201 ++++++++++++++++++++++++++++++++------------------
 1 file changed, 121 insertions(+), 80 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc440453..38933cafea8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,7 +52,6 @@
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
-#include <linux/cpu_acct.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
@@ -75,7 +74,7 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-        return (unsigned long long)jiffies * (1000000000 / HZ);
+        return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }

 /*
@@ -99,8 +98,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * Some helpers for converting nanosecond timing to jiffy resolution
  */
-#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
+#define JIFFIES_TO_NS(TIME)     ((TIME) * (NSEC_PER_SEC / HZ))

 #define NICE_0_LOAD             SCHED_LOAD_SCALE
 #define NICE_0_SHIFT            SCHED_LOAD_SHIFT
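
NSEC_PER_SEC is 1,000,000,000, so the substitution above is purely cosmetic. A minimal stand-alone sketch of the conversion, assuming HZ=1000 for the sake of the example (in the kernel both constants come from the headers):

/* Stand-alone illustration of the NSEC_PER_SEC / HZ substitution above.
 * HZ is assumed to be 1000 here purely for the example. */
#include <stdio.h>

#define HZ              1000
#define NSEC_PER_SEC    1000000000L

#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#define JIFFIES_TO_NS(TIME)     ((TIME) * (NSEC_PER_SEC / HZ))

int main(void)
{
        /* One jiffy is NSEC_PER_SEC / HZ nanoseconds: 1,000,000 ns at HZ=1000. */
        printf("ns per jiffy: %ld\n", NSEC_PER_SEC / HZ);
        /* 5 ms worth of nanoseconds maps to 5 jiffies and back. */
        printf("NS_TO_JIFFIES(5000000) = %lu\n", NS_TO_JIFFIES(5000000L));
        printf("JIFFIES_TO_NS(5)       = %ld\n", JIFFIES_TO_NS(5L));
        return 0;
}
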
@@ -172,6 +171,7 @@ struct task_group {
         unsigned long shares;
         /* spinlock to serialize modification to shares */
         spinlock_t lock;
+        struct rcu_head rcu;
 };

 /* Default task group's sched entity on each cpu */
@@ -216,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 }

 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 {
-        p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
-        p->se.parent = task_group(p)->se[task_cpu(p)];
+        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+        p->se.parent = task_group(p)->se[cpu];
 }

 #else

-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }

 #endif  /* CONFIG_FAIR_GROUP_SCHED */

@@ -258,7 +258,6 @@ struct cfs_rq {
          */
         struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
         struct task_group *tg;  /* group that "owns" this runqueue */
-        struct rcu_head rcu;
 #endif
 };

@@ -456,24 +455,28 @@ static void update_rq_clock(struct rq *rq)
  */
 enum {
         SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
-        SCHED_FEAT_START_DEBIT          = 2,
-        SCHED_FEAT_TREE_AVG             = 4,
-        SCHED_FEAT_APPROX_AVG           = 8,
-        SCHED_FEAT_WAKEUP_PREEMPT       = 16,
-        SCHED_FEAT_PREEMPT_RESTRICT     = 32,
+        SCHED_FEAT_WAKEUP_PREEMPT       = 2,
+        SCHED_FEAT_START_DEBIT          = 4,
+        SCHED_FEAT_TREE_AVG             = 8,
+        SCHED_FEAT_APPROX_AVG           = 16,
 };

 const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
+                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                 SCHED_FEAT_START_DEBIT          * 1 |
                 SCHED_FEAT_TREE_AVG             * 0 |
-                SCHED_FEAT_APPROX_AVG           * 0 |
-                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
-                SCHED_FEAT_PREEMPT_RESTRICT     * 1;
+                SCHED_FEAT_APPROX_AVG           * 0;

 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

 /*
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
+const_debug unsigned int sysctl_sched_nr_migrate = 32;
+
+/*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
  */
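
sched_feat() is a plain bitmask test against sysctl_sched_features, so the renumbering above only changes which bit each feature occupies. A small illustrative userspace sketch; the enum values mirror the hunk above, everything else is made up for the example:

#include <stdio.h>

enum {
        SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
        SCHED_FEAT_WAKEUP_PREEMPT       = 2,
        SCHED_FEAT_START_DEBIT          = 4,
        SCHED_FEAT_TREE_AVG             = 8,
        SCHED_FEAT_APPROX_AVG           = 16,
};

static unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                SCHED_FEAT_START_DEBIT          * 1 |
                SCHED_FEAT_TREE_AVG             * 0 |
                SCHED_FEAT_APPROX_AVG           * 0;

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
        /* 1 | 2 | 4 = 7: the default mask after the reordering above. */
        printf("features = %u\n", sysctl_sched_features);
        printf("WAKEUP_PREEMPT enabled: %d\n", sched_feat(WAKEUP_PREEMPT) != 0);
        printf("TREE_AVG enabled:       %d\n", sched_feat(TREE_AVG) != 0);
        return 0;
}
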
@@ -1019,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)

 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
+        set_task_cfs_rq(p, cpu);
 #ifdef CONFIG_SMP
+        /*
+         * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+         * successfuly executed on another CPU. We must ensure that updates of
+         * per-task data have been completed by this moment.
+         */
+        smp_wmb();
         task_thread_info(p)->cpu = cpu;
 #endif
-        set_task_cfs_rq(p);
 }

 #ifdef CONFIG_SMP
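
The barrier added above orders the per-task updates against publishing the new ->cpu value. A rough userspace analogue of that publish/observe ordering, expressed with C11 release/acquire atomics rather than the kernel's smp_wmb() and locking (illustrative only, all names invented):

#include <stdatomic.h>
#include <stdio.h>

static int task_data;                   /* stands in for p->se.cfs_rq etc. */
static atomic_int task_cpu_field;       /* stands in for task_thread_info(p)->cpu */

static void set_task_cpu_sketch(int cpu)
{
        task_data = cpu * 100;          /* update per-task data first */
        /* the release store plays the role of smp_wmb() + plain store here */
        atomic_store_explicit(&task_cpu_field, cpu, memory_order_release);
}

static void reader_sketch(void)
{
        /* acquire pairs with the release above: once the new cpu value is
         * seen, the data written before it is guaranteed to be visible too */
        int cpu = atomic_load_explicit(&task_cpu_field, memory_order_acquire);

        printf("cpu=%d data=%d\n", cpu, task_data);
}

int main(void)
{
        set_task_cpu_sketch(2);
        reader_sketch();
        return 0;
}
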
@@ -2237,7 +2246,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                       enum cpu_idle_type idle, int *all_pinned,
                       int *this_best_prio, struct rq_iterator *iterator)
 {
-        int pulled = 0, pinned = 0, skip_for_load;
+        int loops = 0, pulled = 0, pinned = 0, skip_for_load;
         struct task_struct *p;
         long rem_load_move = max_load_move;

@@ -2251,10 +2260,10 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
          */
         p = iterator->start(iterator->arg);
 next:
-        if (!p)
+        if (!p || loops++ > sysctl_sched_nr_migrate)
                 goto out;
         /*
-         * To help distribute high priority tasks accross CPUs we don't
+         * To help distribute high priority tasks across CPUs we don't
          * skip a task if it will be the highest priority task (i.e. smallest
          * prio value) on its new queue regardless of its load weight
          */
@@ -2271,8 +2280,7 @@ next:
         rem_load_move -= p->se.load.weight;

         /*
-         * We only want to steal up to the prescribed number of tasks
-         * and the prescribed amount of weighted load.
+         * We only want to steal up to the prescribed amount of weighted load.
          */
         if (rem_load_move > 0) {
                 if (p->prio < *this_best_prio)
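
sysctl_sched_nr_migrate caps how many tasks a single balance pass inspects, since the walk runs with IRQs disabled. A hedged sketch of the same bounded-iteration pattern in plain userspace C (names and numbers are illustrative, not the kernel's):

#include <stdio.h>

static const unsigned int sysctl_sched_nr_migrate = 32;

struct item { int weight; struct item *next; };

static long pull_some(struct item *p, long rem_load_move)
{
        unsigned int loops = 0;

        /* stop on end of list, on the iteration cap, or when enough load moved */
        while (p && loops++ <= sysctl_sched_nr_migrate && rem_load_move > 0) {
                rem_load_move -= p->weight;     /* "pull" this task's load */
                p = p->next;
        }
        return rem_load_move;
}

int main(void)
{
        struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        printf("remaining load: %ld\n", pull_some(&a, 10));
        return 0;
}
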
@@ -3335,13 +3343,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
         cputime64_t tmp;
-        struct rq *rq = this_rq();

         p->utime = cputime_add(p->utime, cputime);

-        if (p != rq->idle)
-                cpuacct_charge(p, cputime);
-
         /* Add user time to cpustat. */
         tmp = cputime_to_cputime64(cputime);
         if (TASK_NICE(p) > 0)
@@ -3355,7 +3359,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
  */
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
 {
         cputime64_t tmp;
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -3392,10 +3396,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
         struct rq *rq = this_rq();
         cputime64_t tmp;

-        if (p->flags & PF_VCPU) {
-                account_guest_time(p, cputime);
-                return;
-        }
+        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+                return account_guest_time(p, cputime);

         p->stime = cputime_add(p->stime, cputime);

@@ -3405,10 +3407,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
                 cpustat->irq = cputime64_add(cpustat->irq, tmp);
         else if (softirq_count())
                 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-        else if (p != rq->idle) {
+        else if (p != rq->idle)
                 cpustat->system = cputime64_add(cpustat->system, tmp);
-                cpuacct_charge(p, cputime);
-        } else if (atomic_read(&rq->nr_iowait) > 0)
+        else if (atomic_read(&rq->nr_iowait) > 0)
                 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
         else
                 cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3444,10 +3445,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
                 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
         else
                 cpustat->idle = cputime64_add(cpustat->idle, tmp);
-        } else {
+        } else
                 cpustat->steal = cputime64_add(cpustat->steal, tmp);
-                cpuacct_charge(p, -tmp);
-        }
 }

 /*
@@ -4992,6 +4991,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  */
 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;

+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+        unsigned int factor = 1 + ilog2(num_online_cpus());
+        const unsigned long limit = 200000000;
+
+        sysctl_sched_min_granularity *= factor;
+        if (sysctl_sched_min_granularity > limit)
+                sysctl_sched_min_granularity = limit;
+
+        sysctl_sched_latency *= factor;
+        if (sysctl_sched_latency > limit)
+                sysctl_sched_latency = limit;
+
+        sysctl_sched_wakeup_granularity *= factor;
+        sysctl_sched_batch_wakeup_granularity *= factor;
+}
+
 #ifdef CONFIG_SMP
 /*
  * This is how migration works:
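
With the factor being 1 + ilog2(num_online_cpus()), an 8-CPU machine scales the tunables by 4 and a 64-CPU machine by 7, with everything clamped at 200 ms. A stand-alone sketch of that scaling; the base values below are assumed for the example only, the real defaults live elsewhere in the scheduler:

#include <stdio.h>

/* userspace stand-in for the kernel's ilog2() */
static unsigned int ilog2_u(unsigned int n)
{
        unsigned int r = 0;

        while (n >>= 1)
                r++;
        return r;
}

int main(void)
{
        const unsigned long limit = 200000000;          /* 200 ms cap, in ns */
        unsigned long min_granularity = 4000000;        /* assumed 4 ms base */
        unsigned long latency = 20000000;               /* assumed 20 ms base */
        unsigned int cpus;

        for (cpus = 1; cpus <= 64; cpus *= 4) {
                unsigned int factor = 1 + ilog2_u(cpus);
                unsigned long g = min_granularity * factor;
                unsigned long l = latency * factor;

                if (g > limit)
                        g = limit;
                if (l > limit)
                        l = limit;
                printf("%2u cpus: factor=%u granularity=%lu ns latency=%lu ns\n",
                       cpus, factor, g, l);
        }
        return 0;
}
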
@@ -5257,23 +5282,9 @@ static void migrate_live_tasks(int src_cpu)
 }

 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-        update_rq_clock(rq);
-
-        if (p->state == TASK_UNINTERRUPTIBLE)
-                rq->nr_uninterruptible--;
-
-        enqueue_task(rq, p, 0);
-        inc_nr_running(p, rq);
-}
-
-/*
  * Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
  */
 void sched_idle_next(void)
 {
@@ -5293,8 +5304,8 @@ void sched_idle_next(void)

         __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);

-        /* Add idle task to the _front_ of its priority queue: */
-        activate_idle_task(p, rq);
+        update_rq_clock(rq);
+        activate_task(rq, p, 0);

         spin_unlock_irqrestore(&rq->lock, flags);
 }
@@ -5365,7 +5376,7 @@ static struct ctl_table sd_ctl_dir[] = {
                 .procname       = "sched_domain",
                 .mode           = 0555,
         },
-        {0,},
+        {0, },
 };

 static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5386,7 @@ static struct ctl_table sd_ctl_root[] = {
                 .mode           = 0555,
                 .child          = sd_ctl_dir,
         },
-        {0,},
+        {0, },
 };

 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -5621,7 +5632,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
         .priority = 10
 };

-int __init migration_init(void)
+void __init migration_init(void)
 {
         void *cpu = (void *)(long)smp_processor_id();
         int err;
@@ -5631,8 +5642,6 @@ int __init migration_init(void)
         BUG_ON(err == NOTIFY_BAD);
         migration_call(&migration_notifier, CPU_ONLINE, cpu);
         register_cpu_notifier(&migration_notifier);
-
-        return 0;
 }
 #endif

@@ -6688,10 +6697,12 @@ void __init sched_init_smp(void)
         /* Move init over to a non-isolated CPU */
         if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                 BUG();
+        sched_init_granularity();
 }
 #else
 void __init sched_init_smp(void)
 {
+        sched_init_granularity();
 }
 #endif /* CONFIG_SMP */

@@ -7019,8 +7030,8 @@ err:
 /* rcu callback to free various structures associated with a task group */
 static void free_sched_group(struct rcu_head *rhp)
 {
-        struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-        struct task_group *tg = cfs_rq->tg;
+        struct task_group *tg = container_of(rhp, struct task_group, rcu);
+        struct cfs_rq *cfs_rq;
         struct sched_entity *se;
         int i;

@@ -7041,7 +7052,7 @@ static void free_sched_group(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
-        struct cfs_rq *cfs_rq;
+        struct cfs_rq *cfs_rq = NULL;
         int i;

         for_each_possible_cpu(i) {
@@ -7049,10 +7060,10 @@ void sched_destroy_group(struct task_group *tg)
                 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
         }

-        cfs_rq = tg->cfs_rq[0];
+        BUG_ON(!cfs_rq);

         /* wait for possible concurrent references to cfs_rqs complete */
-        call_rcu(&cfs_rq->rcu, free_sched_group);
+        call_rcu(&tg->rcu, free_sched_group);
 }

 /* change task's runqueue when it moves between groups.
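
The rcu_head now lives in struct task_group itself, so the RCU callback recovers the group directly with container_of() instead of going through one of its cfs_rqs. A minimal userspace sketch of that deferred-free pattern; call_rcu() is simulated by an immediate call and all names here are illustrative:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *rhp); };

struct task_group_sketch {
        unsigned long shares;
        struct rcu_head rcu;    /* callback head owned by the group itself */
};

static void free_sched_group_sketch(struct rcu_head *rhp)
{
        /* recover the enclosing object from the embedded rcu_head */
        struct task_group_sketch *tg =
                container_of(rhp, struct task_group_sketch, rcu);

        printf("freeing group with shares=%lu\n", tg->shares);
        free(tg);
}

/* stand-in for call_rcu(): a real implementation defers this past a grace period */
static void call_rcu_sketch(struct rcu_head *rhp,
                            void (*func)(struct rcu_head *rhp))
{
        rhp->func = func;
        rhp->func(rhp);
}

int main(void)
{
        struct task_group_sketch *tg = malloc(sizeof(*tg));

        tg->shares = 1024;
        call_rcu_sketch(&tg->rcu, free_sched_group_sketch);
        return 0;
}
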
@@ -7068,8 +7079,10 @@ void sched_move_task(struct task_struct *tsk)

         rq = task_rq_lock(tsk, &flags);

-        if (tsk->sched_class != &fair_sched_class)
+        if (tsk->sched_class != &fair_sched_class) {
+                set_task_cfs_rq(tsk, task_cpu(tsk));
                 goto done;
+        }

         update_rq_clock(rq);

@@ -7082,7 +7095,7 @@ void sched_move_task(struct task_struct *tsk)
                 tsk->sched_class->put_prev_task(rq, tsk);
         }

-        set_task_cfs_rq(tsk);
+        set_task_cfs_rq(tsk, task_cpu(tsk));

         if (on_rq) {
                 if (unlikely(running))
@@ -7211,25 +7224,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
         return (u64) tg->shares;
 }

-static struct cftype cpu_shares = {
-        .name = "shares",
-        .read_uint = cpu_shares_read_uint,
-        .write_uint = cpu_shares_write_uint,
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+        struct task_group *tg = cgroup_tg(cgrp);
+        unsigned long flags;
+        u64 res = 0;
+        int i;
+
+        for_each_possible_cpu(i) {
+                /*
+                 * Lock to prevent races with updating 64-bit counters
+                 * on 32-bit arches.
+                 */
+                spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+                res += tg->se[i]->sum_exec_runtime;
+                spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+        }
+        /* Convert from ns to ms */
+        do_div(res, NSEC_PER_MSEC);
+
+        return res;
+}
+
+static struct cftype cpu_files[] = {
+        {
+                .name = "shares",
+                .read_uint = cpu_shares_read_uint,
+                .write_uint = cpu_shares_write_uint,
+        },
+        {
+                .name = "usage",
+                .read_uint = cpu_usage_read,
+        },
 };

 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-        return cgroup_add_file(cont, ss, &cpu_shares);
+        return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
 }

 struct cgroup_subsys cpu_cgroup_subsys = {
         .name           = "cpu",
         .create         = cpu_cgroup_create,
         .destroy        = cpu_cgroup_destroy,
         .can_attach     = cpu_cgroup_can_attach,
         .attach         = cpu_cgroup_attach,
         .populate       = cpu_cgroup_populate,
         .subsys_id      = cpu_cgroup_subsys_id,
         .early_init     = 1,
 };

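The new "usage" control file reports the group's accumulated runtime summed across CPUs, converted from nanoseconds to milliseconds with do_div(). A stand-alone sketch of that arithmetic; the sample runtimes are invented and plain 64-bit division stands in for do_div():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC   1000000ULL

int main(void)
{
        /* assumed per-CPU sum_exec_runtime values, in nanoseconds */
        uint64_t sum_exec_runtime[] = { 1500000000ULL, 2250000000ULL,
                                         750000000ULL,  500000000ULL };
        uint64_t res = 0;
        unsigned int i;

        for (i = 0; i < sizeof(sum_exec_runtime) / sizeof(sum_exec_runtime[0]); i++)
                res += sum_exec_runtime[i];

        res /= NSEC_PER_MSEC;   /* the kernel uses do_div(res, NSEC_PER_MSEC) */
        printf("cpu.usage would read: %llu ms\n", (unsigned long long)res);
        return 0;
}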