author	Dhaval Giani <dhaval.giani@gmail.com>	2010-01-20 07:26:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-01-21 07:40:18 -0500
commit	7c9414385ebfdd87cc542d4e7e3bb0dbb2d3ce25 (patch)
tree	beb51d7d0d543d72e2754cff807df5c399f2d376 /kernel/sched.c
parent	871e35bc9733f273eaf5ceb69bbd0423b58e5285 (diff)
sched: Remove USER_SCHED
Remove the USER_SCHED feature. It has been scheduled to be removed in
2.6.34 as per http://marc.info/?l=linux-kernel&m=125728479022976&w=2

Signed-off-by: Dhaval Giani <dhaval.giani@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1263990378.24844.3.camel@localhost>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	114
1 file changed, 7 insertions, 107 deletions
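With USER_SCHED gone, the only per-task grouping path left in task_group() is the cgroup one: the patch keeps the container_of(task_subsys_state(p, cpu_cgroup_subsys_id), struct task_group, css) lookup, which maps the embedded css member back to its enclosing task_group. Below is a minimal, compilable userspace sketch of that container_of() pattern; the structure names mirror the kernel's, but the program itself is illustrative and not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel structures involved; only the
 * members needed to show the lookup are present. */
struct cgroup_subsys_state {
	int refcnt;
};

struct task_group {
	struct cgroup_subsys_state css;	/* embedded, as in kernel/sched.c */
	unsigned long shares;
};

/* Same idea as the kernel's container_of(): recover a pointer to the
 * enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct task_group tg = { .css = { .refcnt = 1 }, .shares = 1024 };

	/* The cgroup core would hand the scheduler &tg.css; mapping it
	 * back to the owning task_group is what the remaining
	 * CONFIG_CGROUP_SCHED branch of task_group() does. */
	struct cgroup_subsys_state *css = &tg.css;
	struct task_group *owner = container_of(css, struct task_group, css);

	printf("shares = %lu\n", owner->shares);	/* prints 1024 */
	return 0;
}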
diff --git a/kernel/sched.c b/kernel/sched.c
index c0be07932a8d..41e76d325648 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -233,7 +233,7 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
  */
 static DEFINE_MUTEX(sched_domains_mutex);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 #include <linux/cgroup.h>
 
@@ -243,13 +243,7 @@ static LIST_HEAD(task_groups);
 
 /* task group related information */
 struct task_group {
-#ifdef CONFIG_CGROUP_SCHED
 	struct cgroup_subsys_state css;
-#endif
-
-#ifdef CONFIG_USER_SCHED
-	uid_t uid;
-#endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
@@ -274,35 +268,7 @@ struct task_group {
 	struct list_head children;
 };
 
-#ifdef CONFIG_USER_SCHED
-
-/* Helper function to pass uid information to create_sched_user() */
-void set_tg_uid(struct user_struct *user)
-{
-	user->tg->uid = user->uid;
-}
-
-/*
- * Root task group.
- * Every UID task group (including init_task_group aka UID-0) will
- * be a child to this group.
- */
-struct task_group root_task_group;
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Default task group's sched entity on each cpu */
-static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
-/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
-#endif /* CONFIG_FAIR_GROUP_SCHED */
-
-#ifdef CONFIG_RT_GROUP_SCHED
-static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
-#endif /* CONFIG_RT_GROUP_SCHED */
-#else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
-#endif /* CONFIG_USER_SCHED */
 
 /* task_group_lock serializes add/remove of task groups and also changes to
  * a task group's cpu shares.
@@ -318,11 +284,7 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_USER_SCHED
-# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
-#else /* !CONFIG_USER_SCHED */
 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
-#endif /* CONFIG_USER_SCHED */
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -348,11 +310,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 {
 	struct task_group *tg;
 
-#ifdef CONFIG_USER_SCHED
-	rcu_read_lock();
-	tg = __task_cred(p)->user->tg;
-	rcu_read_unlock();
-#elif defined(CONFIG_CGROUP_SCHED)
+#ifdef CONFIG_CGROUP_SCHED
 	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
 			  struct task_group, css);
 #else
@@ -383,7 +341,7 @@ static inline struct task_group *task_group(struct task_struct *p)
 	return NULL;
 }
 
-#endif /* CONFIG_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
 
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
@@ -7678,9 +7636,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_USER_SCHED
-	alloc_size *= 2;
-#endif
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	alloc_size += num_possible_cpus() * cpumask_size();
 #endif
@@ -7694,13 +7649,6 @@ void __init sched_init(void)
 		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-#ifdef CONFIG_USER_SCHED
-		root_task_group.se = (struct sched_entity **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
-
-		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
-#endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -7709,13 +7657,6 @@ void __init sched_init(void)
 		init_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-#ifdef CONFIG_USER_SCHED
-		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
-
-		root_task_group.rt_rq = (struct rt_rq **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
-#endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 		for_each_possible_cpu(i) {
@@ -7735,22 +7676,13 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	init_rt_bandwidth(&init_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
-#ifdef CONFIG_USER_SCHED
-	init_rt_bandwidth(&root_task_group.rt_bandwidth,
-			global_rt_period(), RUNTIME_INF);
-#endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&init_task_group.children);
 
-#ifdef CONFIG_USER_SCHED
-	INIT_LIST_HEAD(&root_task_group.children);
-	init_task_group.parent = &root_task_group;
-	list_add(&init_task_group.siblings, &root_task_group.children);
-#endif /* CONFIG_USER_SCHED */
-#endif /* CONFIG_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
 
 #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
 	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
@@ -7790,25 +7722,6 @@ void __init sched_init(void)
 		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
 		 */
 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
-#elif defined CONFIG_USER_SCHED
-		root_task_group.shares = NICE_0_LOAD;
-		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
-		/*
-		 * In case of task-groups formed thr' the user id of tasks,
-		 * init_task_group represents tasks belonging to root user.
-		 * Hence it forms a sibling of all subsequent groups formed.
-		 * In this case, init_task_group gets only a fraction of overall
-		 * system cpu resource, based on the weight assigned to root
-		 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
-		 * by letting tasks of init_task_group sit in a separate cfs_rq
-		 * (init_tg_cfs_rq) and having one entity represent this group of
-		 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
-		 */
-		init_tg_cfs_entry(&init_task_group,
-				&per_cpu(init_tg_cfs_rq, i),
-				&per_cpu(init_sched_entity, i), i, 1,
-				root_task_group.se[i]);
-
 #endif
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -7817,12 +7730,6 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
 #ifdef CONFIG_CGROUP_SCHED
 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
-#elif defined CONFIG_USER_SCHED
-		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
-		init_tg_rt_entry(&init_task_group,
-				&per_cpu(init_rt_rq_var, i),
-				&per_cpu(init_sched_rt_entity, i), i, 1,
-				root_task_group.rt_se[i]);
-#endif
 #endif
 
@@ -8218,7 +8125,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
@@ -8327,7 +8234,7 @@ void sched_move_task(struct task_struct *tsk)
 
 	task_rq_unlock(rq, &flags);
 }
-#endif /* CONFIG_GROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void __set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -8469,13 +8376,6 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
-#ifdef CONFIG_USER_SCHED
-	if (tg == &root_task_group) {
-		period = global_rt_period();
-		runtime = global_rt_runtime();
-	}
-#endif
-
 	/*
 	 * Cannot have more runtime than the period.
 	 */
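The sched_init() hunks above all rely on the same allocation pattern: one zeroed block is sized up front and a cursor ("ptr") is walked through it to carve out the per-cpu pointer arrays. With USER_SCHED removed, root_task_group no longer needs its own slices, which is why the "alloc_size *= 2" and the root_task_group assignments disappear. A small, compilable userspace sketch of that carve-one-allocation pattern follows; NR_CPU_IDS and the array names are illustrative stand-ins, not kernel symbols.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 4	/* illustrative stand-in for the kernel's nr_cpu_ids */

int main(void)
{
	/* Size one block for all per-cpu pointer arrays of the default
	 * task group (se + cfs_rq, rt_se + rt_rq), as sched_init() does
	 * once the root_task_group doubling is gone. */
	size_t alloc_size = 0;
	alloc_size += 2 * NR_CPU_IDS * sizeof(void **);	/* se + cfs_rq */
	alloc_size += 2 * NR_CPU_IDS * sizeof(void **);	/* rt_se + rt_rq */

	char *base = calloc(1, alloc_size);
	if (!base)
		return 1;

	/* Walk a cursor through the block, handing out consecutive slices,
	 * the same way sched_init() advances "ptr". */
	char *ptr = base;
	void **se     = (void **)ptr; ptr += NR_CPU_IDS * sizeof(void **);
	void **cfs_rq = (void **)ptr; ptr += NR_CPU_IDS * sizeof(void **);
	void **rt_se  = (void **)ptr; ptr += NR_CPU_IDS * sizeof(void **);
	void **rt_rq  = (void **)ptr; ptr += NR_CPU_IDS * sizeof(void **);

	printf("carved %d-slot arrays at %p %p %p %p from one %zu-byte block\n",
	       NR_CPU_IDS, (void *)se, (void *)cfs_rq, (void *)rt_se,
	       (void *)rt_rq, alloc_size);

	free(base);
	return 0;
}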