diff options
author | Ingo Molnar <mingo@kernel.org> | 2012-12-11 04:23:45 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-12-11 04:23:45 -0500 |
commit | c1ad41f1f7270c1956da13fa8fd59d8d5929d56e (patch) | |
tree | 2a9812fe4a5bee0f354273c34411e00e94746b64 /kernel/sched/auto_group.c | |
parent | 38130ec08716ae2ece8060eca01607b58da7258c (diff) |
Revert "sched/autogroup: Fix crash on reboot when autogroup is disabled"
This reverts commit 5258f386ea4e8454bc801fb443e8a4217da1947c,
because the underlying autogroups bug got fixed upstream in
a better way, via:
fd8ef11730f1 Revert "sched, autogroup: Stop going ahead if autogroup is disabled"
Cc: Mike Galbraith <efault@gmx.de>
Cc: Yong Zhang <yong.zhang0@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/auto_group.c')
-rw-r--r-- | kernel/sched/auto_group.c | 68 |
1 file changed, 57 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 0f1bacb005a4..0984a21076a3 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c | |||
@@ -110,9 +110,6 @@ out_fail: | |||
110 | 110 | ||
111 | bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) | 111 | bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) |
112 | { | 112 | { |
113 | if (!sysctl_sched_autogroup_enabled) | ||
114 | return false; | ||
115 | |||
116 | if (tg != &root_task_group) | 113 | if (tg != &root_task_group) |
117 | return false; | 114 | return false; |
118 | 115 | ||
@@ -146,11 +143,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) | |||
146 | 143 | ||
147 | p->signal->autogroup = autogroup_kref_get(ag); | 144 | p->signal->autogroup = autogroup_kref_get(ag); |
148 | 145 | ||
146 | if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) | ||
147 | goto out; | ||
148 | |||
149 | t = p; | 149 | t = p; |
150 | do { | 150 | do { |
151 | sched_move_task(t); | 151 | sched_move_task(t); |
152 | } while_each_thread(p, t); | 152 | } while_each_thread(p, t); |
153 | 153 | ||
154 | out: | ||
154 | unlock_task_sighand(p, &flags); | 155 | unlock_task_sighand(p, &flags); |
155 | autogroup_kref_put(prev); | 156 | autogroup_kref_put(prev); |
156 | } | 157 | } |
@@ -158,11 +159,8 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) | |||
158 | /* Allocates GFP_KERNEL, cannot be called under any spinlock */ | 159 | /* Allocates GFP_KERNEL, cannot be called under any spinlock */ |
159 | void sched_autogroup_create_attach(struct task_struct *p) | 160 | void sched_autogroup_create_attach(struct task_struct *p) |
160 | { | 161 | { |
161 | struct autogroup *ag; | 162 | struct autogroup *ag = autogroup_create(); |
162 | 163 | ||
163 | if (!sysctl_sched_autogroup_enabled) | ||
164 | return; | ||
165 | ag = autogroup_create(); | ||
166 | autogroup_move_group(p, ag); | 164 | autogroup_move_group(p, ag); |
167 | /* drop extra reference added by autogroup_create() */ | 165 | /* drop extra reference added by autogroup_create() */ |
168 | autogroup_kref_put(ag); | 166 | autogroup_kref_put(ag); |
@@ -178,15 +176,11 @@ EXPORT_SYMBOL(sched_autogroup_detach); | |||
178 | 176 | ||
179 | void sched_autogroup_fork(struct signal_struct *sig) | 177 | void sched_autogroup_fork(struct signal_struct *sig) |
180 | { | 178 | { |
181 | if (!sysctl_sched_autogroup_enabled) | ||
182 | return; | ||
183 | sig->autogroup = autogroup_task_get(current); | 179 | sig->autogroup = autogroup_task_get(current); |
184 | } | 180 | } |
185 | 181 | ||
186 | void sched_autogroup_exit(struct signal_struct *sig) | 182 | void sched_autogroup_exit(struct signal_struct *sig) |
187 | { | 183 | { |
188 | if (!sysctl_sched_autogroup_enabled) | ||
189 | return; | ||
190 | autogroup_kref_put(sig->autogroup); | 184 | autogroup_kref_put(sig->autogroup); |
191 | } | 185 | } |
192 | 186 | ||
@@ -199,6 +193,58 @@ static int __init setup_autogroup(char *str) | |||
199 | 193 | ||
200 | __setup("noautogroup", setup_autogroup); | 194 | __setup("noautogroup", setup_autogroup); |
201 | 195 | ||
196 | #ifdef CONFIG_PROC_FS | ||
197 | |||
198 | int proc_sched_autogroup_set_nice(struct task_struct *p, int nice) | ||
199 | { | ||
200 | static unsigned long next = INITIAL_JIFFIES; | ||
201 | struct autogroup *ag; | ||
202 | int err; | ||
203 | |||
204 | if (nice < -20 || nice > 19) | ||
205 | return -EINVAL; | ||
206 | |||
207 | err = security_task_setnice(current, nice); | ||
208 | if (err) | ||
209 | return err; | ||
210 | |||
211 | if (nice < 0 && !can_nice(current, nice)) | ||
212 | return -EPERM; | ||
213 | |||
214 | /* this is a heavy operation taking global locks.. */ | ||
215 | if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next)) | ||
216 | return -EAGAIN; | ||
217 | |||
218 | next = HZ / 10 + jiffies; | ||
219 | ag = autogroup_task_get(p); | ||
220 | |||
221 | down_write(&ag->lock); | ||
222 | err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]); | ||
223 | if (!err) | ||
224 | ag->nice = nice; | ||
225 | up_write(&ag->lock); | ||
226 | |||
227 | autogroup_kref_put(ag); | ||
228 | |||
229 | return err; | ||
230 | } | ||
231 | |||
232 | void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m) | ||
233 | { | ||
234 | struct autogroup *ag = autogroup_task_get(p); | ||
235 | |||
236 | if (!task_group_is_autogroup(ag->tg)) | ||
237 | goto out; | ||
238 | |||
239 | down_read(&ag->lock); | ||
240 | seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice); | ||
241 | up_read(&ag->lock); | ||
242 | |||
243 | out: | ||
244 | autogroup_kref_put(ag); | ||
245 | } | ||
246 | #endif /* CONFIG_PROC_FS */ | ||
247 | |||
202 | #ifdef CONFIG_SCHED_DEBUG | 248 | #ifdef CONFIG_SCHED_DEBUG |
203 | int autogroup_path(struct task_group *tg, char *buf, int buflen) | 249 | int autogroup_path(struct task_group *tg, char *buf, int buflen) |
204 | { | 250 | { |