 include/linux/sched.h         |  2 +-
 kernel/fork.c                 |  7 +++----
 kernel/irq/manage.c           |  2 +-
 kernel/kthread.c              |  2 +-
 kernel/sched.c                | 45 ++++++++++++++++++++-----------------------
 kernel/sched_autogroup.c      |  8 ++++----
 kernel/softirq.c              |  2 +-
 kernel/trace/trace_selftest.c |  2 +-
 8 files changed, 34 insertions(+), 36 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777cd01e240e..341acbbc434a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2511,7 +2511,7 @@ extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_CGROUP_SCHED
 
-extern struct task_group init_task_group;
+extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d164e25b0f0..dc1a8bbcea7b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -169,15 +169,14 @@ EXPORT_SYMBOL(free_task);
 static inline void free_signal_struct(struct signal_struct *sig)
 {
         taskstats_tgid_free(sig);
+        sched_autogroup_exit(sig);
         kmem_cache_free(signal_cachep, sig);
 }
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-        if (atomic_dec_and_test(&sig->sigcnt)) {
-                sched_autogroup_exit(sig);
+        if (atomic_dec_and_test(&sig->sigcnt))
                 free_signal_struct(sig);
-        }
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -1318,7 +1317,7 @@ bad_fork_cleanup_mm:
         }
 bad_fork_cleanup_signal:
         if (!(clone_flags & CLONE_THREAD))
-                put_signal_struct(p->signal);
+                free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
         __cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
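
The fork.c hunks consolidate teardown: sched_autogroup_exit() now lives in free_signal_struct(), so put_signal_struct() reduces to a bare dec-and-test, and the error path in copy_process() — where the new signal_struct has never been shared — can free it directly. A minimal userspace sketch of the resulting ownership pattern; the types and names here are invented, not the kernel's:

    /*
     * Userspace sketch (invented toy types, not kernel code) of the
     * pattern the hunks above converge on: all teardown lives in the
     * free path, and put() is a bare dec-and-test.
     */
    #include <stdatomic.h>
    #include <stdlib.h>

    struct sig {
            atomic_int cnt;         /* starts at 1 for the initial owner */
    };

    static void sig_free(struct sig *s)
    {
            /* every exit hook belongs here, so no caller can skip one */
            free(s);
    }

    static void sig_put(struct sig *s)
    {
            if (atomic_fetch_sub(&s->cnt, 1) == 1) /* dropped last ref? */
                    sig_free(s);
    }

An owner that never shared the object may call sig_free() directly — which is exactly what the copy_process() error path does after this change.
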
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 91a5fa25054e..0caa59f747dd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -577,7 +577,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  */
 static int irq_thread(void *data)
 {
-        static struct sched_param param = {
+        static const struct sched_param param = {
                 .sched_priority = MAX_USER_RT_PRIO/2,
         };
         struct irqaction *action = data;
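
The const conversions in this series (here, and below in kthread.c, softirq.c, and trace_selftest.c) are safe because sched_setscheduler() takes a const struct sched_param *; declaring the static parameter block const lets the compiler place it in read-only data. A sketch of the idiom in a self-contained kthread function — my_thread_fn is an invented name, the API calls are real:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_thread_fn(void *data)
    {
            /*
             * const: the scheduler API only reads the parameter block
             * (it takes a const struct sched_param *), so this object
             * can live in .rodata.
             */
            static const struct sched_param param = {
                    .sched_priority = MAX_RT_PRIO - 1,
            };

            sched_setscheduler(current, SCHED_FIFO, &param);

            /* standard kthread idle loop: sleep until asked to stop */
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }
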
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5355cfd44a3f..c55afba990a3 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
         wait_for_completion(&create.done);
 
         if (!IS_ERR(create.result)) {
-                static struct sched_param param = { .sched_priority = 0 };
+                static const struct sched_param param = { .sched_priority = 0 };
                 va_list args;
 
                 va_start(args, namefmt);
diff --git a/kernel/sched.c b/kernel/sched.c
index 04949089e760..a0eb0941fa84 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES 2
 #define MAX_SHARES (1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  * Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
         buf[cnt] = 0;
         cmp = strstrip(buf);
 
-        if (strncmp(buf, "NO_", 3) == 0) {
+        if (strncmp(cmp, "NO_", 3) == 0) {
                 neg = 1;
                 cmp += 3;
         }
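
This hunk is a real bug fix, not part of the rename: strstrip() returns a pointer past any leading whitespace, so testing buf for the "NO_" prefix while advancing cmp by 3 operated on two different strings — writing " NO_FEATURE" with a leading space would silently fail to negate the feature. A self-contained userspace sketch of the corrected parse, with a stand-in strstrip() and an invented feature name:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* minimal stand-in for the kernel's strstrip() */
    static char *strstrip(char *s)
    {
            size_t len;

            while (isspace((unsigned char)*s))
                    s++;
            len = strlen(s);
            while (len && isspace((unsigned char)s[len - 1]))
                    s[--len] = '\0';
            return s;
    }

    int main(void)
    {
            char buf[] = "  NO_FEATURE_X\n";
            char *cmp = strstrip(buf);      /* cmp may not equal buf */
            int neg = 0;

            if (strncmp(cmp, "NO_", 3) == 0) {  /* test cmp, not buf */
                    neg = 1;
                    cmp += 3;
            }
            printf("feature=%s neg=%d\n", cmp, neg);  /* FEATURE_X, 1 */
            return 0;
    }
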
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
         cfs_rq->tg = tg;
 
         tg->se[cpu] = se;
-        /* se could be NULL for init_task_group */
+        /* se could be NULL for root_task_group */
         if (!se)
                 return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
                 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-                init_task_group.se = (struct sched_entity **)ptr;
+                root_task_group.se = (struct sched_entity **)ptr;
                 ptr += nr_cpu_ids * sizeof(void **);
 
-                init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+                root_task_group.cfs_rq = (struct cfs_rq **)ptr;
                 ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-                init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+                root_task_group.rt_se = (struct sched_rt_entity **)ptr;
                 ptr += nr_cpu_ids * sizeof(void **);
 
-                init_task_group.rt_rq = (struct rt_rq **)ptr;
+                root_task_group.rt_rq = (struct rt_rq **)ptr;
                 ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
                         global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-        init_rt_bandwidth(&init_task_group.rt_bandwidth,
+        init_rt_bandwidth(&root_task_group.rt_bandwidth,
                         global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-        list_add(&init_task_group.list, &task_groups);
-        INIT_LIST_HEAD(&init_task_group.children);
+        list_add(&root_task_group.list, &task_groups);
+        INIT_LIST_HEAD(&root_task_group.children);
         autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
                 init_cfs_rq(&rq->cfs, rq);
                 init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-                init_task_group.shares = init_task_group_load;
+                root_task_group.shares = root_task_group_load;
                 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                 /*
-                 * How much cpu bandwidth does init_task_group get?
+                 * How much cpu bandwidth does root_task_group get?
                  *
                  * In case of task-groups formed thr' the cgroup filesystem, it
                  * gets 100% of the cpu resources in the system. This overall
                  * system cpu resource is divided among the tasks of
-                 * init_task_group and its child task-groups in a fair manner,
+                 * root_task_group and its child task-groups in a fair manner,
                  * based on each entity's (task or task-group's) weight
                  * (se->load.weight).
                  *
-                 * In other words, if init_task_group has 10 tasks of weight
+                 * In other words, if root_task_group has 10 tasks of weight
                  * 1024) and two child groups A0 and A1 (of weight 1024 each),
                  * then A0's share of the cpu resource is:
                  *
                  *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                  *
-                 * We achieve this by letting init_task_group's tasks sit
-                 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+                 * We achieve this by letting root_task_group's tasks sit
+                 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
                  */
-                init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+                init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
                 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
                 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-                init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
                 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
 {
         free_fair_sched_group(tg);
         free_rt_sched_group(tg);
+        autogroup_free(tg);
         kfree(tg);
 }
 
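
The one-line addition above is a leak fix rather than a rename: with autogroups, extra state can hang off a task_group, and the group destructor must release it alongside everything else sched_create_group() set up. Shown below is the post-patch function with explanatory comments added for this writeup (the comments are ours, not the kernel's):

    static void free_sched_group(struct task_group *tg)
    {
            free_fair_sched_group(tg);  /* per-cpu cfs_rq/se arrays */
            free_rt_sched_group(tg);    /* per-cpu rt_rq/rt_se arrays */
            autogroup_free(tg);         /* new: drop any autogroup state */
            kfree(tg);                  /* the group itself, freed last */
    }
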
@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
         if (!cgrp->parent) {
                 /* This is early initialization for the top cgroup */
-                return &init_task_group.css;
+                return &root_task_group.css;
         }
 
         parent = cgroup_tg(cgrp->parent);
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index c80fedcd476b..32a723b8f84c 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -9,10 +9,10 @@ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void autogroup_init(struct task_struct *init_task)
+static void __init autogroup_init(struct task_struct *init_task)
 {
-        autogroup_default.tg = &init_task_group;
-        init_task_group.autogroup = &autogroup_default;
+        autogroup_default.tg = &root_task_group;
+        root_task_group.autogroup = &autogroup_default;
         kref_init(&autogroup_default.kref);
         init_rwsem(&autogroup_default.lock);
         init_task->signal->autogroup = &autogroup_default;
@@ -63,7 +63,7 @@ static inline struct autogroup *autogroup_create(void)
         if (!ag)
                 goto out_fail;
 
-        tg = sched_create_group(&init_task_group);
+        tg = sched_create_group(&root_task_group);
 
         if (IS_ERR(tg))
                 goto out_free;
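
Two things change in this file besides the rename: autogroup_init() gains an __init annotation, which is valid because its only caller visible in this series is sched_init() (itself __init, per the sched.c hunk above), so the function's text can be reclaimed once boot completes. The post-patch function, with our comments added for this writeup:

    static void __init autogroup_init(struct task_struct *init_task)
    {
            /* the default autogroup is backed by the root task group */
            autogroup_default.tg = &root_task_group;
            root_task_group.autogroup = &autogroup_default;
            kref_init(&autogroup_default.kref);
            init_rwsem(&autogroup_default.lock);
            /* the boot task starts out in the default autogroup */
            init_task->signal->autogroup = &autogroup_default;
    }
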
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4d918a91881..c10150cb456b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -853,7 +853,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                   cpumask_any(cpu_online_mask));
         case CPU_DEAD:
         case CPU_DEAD_FROZEN: {
-                static struct sched_param param = {
+                static const struct sched_param param = {
                         .sched_priority = MAX_RT_PRIO-1
                 };
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 562c56e048fd..659732eba07c 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 static int trace_wakeup_test_thread(void *data)
 {
         /* Make this a RT thread, doesn't need to be too high */
-        static struct sched_param param = { .sched_priority = 5 };
+        static const struct sched_param param = { .sched_priority = 5 };
         struct completion *x = data;
 
         sched_setscheduler(current, SCHED_FIFO, &param);