author		Mike Galbraith <efault@gmx.de>	2012-10-28 15:19:23 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-10-30 05:26:04 -0400
commit		5258f386ea4e8454bc801fb443e8a4217da1947c (patch)
tree		c97487f040b95f83a2c9d31d51cbfe57f35e59e3 /kernel/sched/auto_group.c
parent		8ed92e51f99c2199c64cb33b4ba95ab12940a94c (diff)
sched/autogroup: Fix crash on reboot when autogroup is disabled
Due to these two commits:

  8323f26ce342 sched: Fix race in task_group()
  800d4d30c8f2 sched, autogroup: Stop going ahead if autogroup is disabled

... autogroup scheduling's dynamic knobs are wrecked. With both patches
applied, all you have to do to crash a box is disable autogroup during
boot up, then reboot.. boom, NULL pointer dereference due to 800d4d30
not allowing autogroup to move things, and 8323f26ce making that the
only way to switch runqueues.

Remove most of the (dysfunctional) knobs and turn the remaining
sched_autogroup_enabled knob readonly.

If the user fiddles with cgroups hereafter, once tasks are moved,
autogroup won't mess with them again unless they call setsid().

No knobs, no glitz, nada, just a cute little thing folks can turn on if
they don't want to muck about with cgroups and/or systemd.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: Xiaotian Feng <xtfeng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xiaotian Feng <dannyfeng@tencent.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: <stable@vger.kernel.org> # v3.6
Link: http://lkml.kernel.org/r/1351451963.4999.8.camel@maggy.simpson.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
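The shape of the fix is visible in the diff below: the sched_autogroup_enabled
knob becomes effectively read-only, and each autogroup entry point bails out
early when it is off, rather than autogroup_move_group() refusing to move tasks
later on. A minimal sketch of that guard pattern, simplified from the hunks
that follow (not the complete file):

/*
 * Sketch of the pattern this patch applies: the enabled/disabled decision
 * is made once, at the autogroup entry points, via an early return.
 */
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (!sysctl_sched_autogroup_enabled)
		return false;
	/* ... existing root_task_group / PF_EXITING checks follow unchanged ... */
	return true;
}

void sched_autogroup_fork(struct signal_struct *sig)
{
	if (!sysctl_sched_autogroup_enabled)
		return;
	sig->autogroup = autogroup_task_get(current);
}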
Diffstat (limited to 'kernel/sched/auto_group.c')
-rw-r--r--	kernel/sched/auto_group.c	68
1 file changed, 11 insertions(+), 57 deletions(-)
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21076a3..0f1bacb005a4 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -110,6 +110,9 @@ out_fail:
 
 bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
+	if (!sysctl_sched_autogroup_enabled)
+		return false;
+
 	if (tg != &root_task_group)
 		return false;
 
@@ -143,15 +146,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
 	p->signal->autogroup = autogroup_kref_get(ag);
 
-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
 	t = p;
 	do {
 		sched_move_task(t);
 	} while_each_thread(p, t);
 
-out:
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
@@ -159,8 +158,11 @@ out:
 /* Allocates GFP_KERNEL, cannot be called under any spinlock */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
-	struct autogroup *ag = autogroup_create();
+	struct autogroup *ag;
 
+	if (!sysctl_sched_autogroup_enabled)
+		return;
+	ag = autogroup_create();
 	autogroup_move_group(p, ag);
 	/* drop extra reference added by autogroup_create() */
 	autogroup_kref_put(ag);
@@ -176,11 +178,15 @@ EXPORT_SYMBOL(sched_autogroup_detach);
 
 void sched_autogroup_fork(struct signal_struct *sig)
 {
+	if (!sysctl_sched_autogroup_enabled)
+		return;
 	sig->autogroup = autogroup_task_get(current);
 }
 
 void sched_autogroup_exit(struct signal_struct *sig)
 {
+	if (!sysctl_sched_autogroup_enabled)
+		return;
 	autogroup_kref_put(sig->autogroup);
 }
 
@@ -193,58 +199,6 @@ static int __init setup_autogroup(char *str)
 
 __setup("noautogroup", setup_autogroup);
 
-#ifdef CONFIG_PROC_FS
-
-int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
-{
-	static unsigned long next = INITIAL_JIFFIES;
-	struct autogroup *ag;
-	int err;
-
-	if (nice < -20 || nice > 19)
-		return -EINVAL;
-
-	err = security_task_setnice(current, nice);
-	if (err)
-		return err;
-
-	if (nice < 0 && !can_nice(current, nice))
-		return -EPERM;
-
-	/* this is a heavy operation taking global locks.. */
-	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
-		return -EAGAIN;
-
-	next = HZ / 10 + jiffies;
-	ag = autogroup_task_get(p);
-
-	down_write(&ag->lock);
-	err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
-	if (!err)
-		ag->nice = nice;
-	up_write(&ag->lock);
-
-	autogroup_kref_put(ag);
-
-	return err;
-}
-
-void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
-{
-	struct autogroup *ag = autogroup_task_get(p);
-
-	if (!task_group_is_autogroup(ag->tg))
-		goto out;
-
-	down_read(&ag->lock);
-	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
-	up_read(&ag->lock);
-
-out:
-	autogroup_kref_put(ag);
-}
-#endif /* CONFIG_PROC_FS */
-
 #ifdef CONFIG_SCHED_DEBUG
 int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {