author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-09-22 07:53:15 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2010-10-18 12:41:58 -0400
commit | 34f971f6f7988be4d014eec3e3526bee6d007ffa (patch)
tree | a662e0701f81f019418b55aa1354f833277b9c40 /kernel/stop_machine.c
parent | 4924627423d5e286136ad2520f5be536345ae590 (diff)
sched: Create special class for stop/migrate work
In order to separate the stop/migrate work thread from the SCHED_FIFO
implementation, create a special class for it that is of higher priority than
SCHED_FIFO itself.
This currently solves a problem where cpu-hotplug consumes so much cpu-time
that the SCHED_FIFO class gets throttled, but has the bandwidth replenishment
timer pending on the now dead cpu.
It is also required for when we add the planned deadline scheduling class above
SCHED_FIFO, as the stop/migrate thread still needs to transcend those tasks.
Tested-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1285165776.2275.1022.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
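For context: pick_next_task() consults the scheduling classes highest first, and this commit links the new class above rt_sched_class (SCHED_FIFO/RR), so a runnable stop task is selected before any FIFO task and never passes through the RT bandwidth throttle. The class itself lands in kernel/sched_stoptask.c in the same commit; the fragment below is an abbreviated sketch of that idea, not the full class.

/* Abbreviated sketch: return the per-cpu stop task whenever it is
 * runnable; nearly every other hook of this class is a no-op. */
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	if (stop && stop->state == TASK_RUNNING)
		return stop;

	return NULL;
}

static const struct sched_class stop_sched_class = {
	.next		= &rt_sched_class,	/* ranked above SCHED_FIFO */
	.pick_next_task	= pick_next_task_stop,
	/* remaining hooks omitted */
};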
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r-- | kernel/stop_machine.c | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4372ccb25127..090c28812ce1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -287,11 +287,12 @@ repeat:
 		goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 					   unsigned long action, void *hcpu)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct task_struct *p;
@@ -304,13 +305,13 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 				   cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		get_task_struct(p);
+		kthread_bind(p, cpu);
+		sched_set_stop_task(cpu, p);
 		stopper->thread = p;
 		break;
 
 	case CPU_ONLINE:
-		kthread_bind(stopper->thread, cpu);
 		/* strictly unnecessary, as first user will wake it */
 		wake_up_process(stopper->thread);
 		/* mark enabled */
@@ -325,6 +326,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 	{
 		struct cpu_stop_work *work;
 
+		sched_set_stop_task(cpu, NULL);
 		/* kill the stopper */
 		kthread_stop(stopper->thread);
 		/* drain remaining works */
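The extern declaration above refers to the scheduler-side helper added to kernel/sched.c by the same commit; a condensed sketch of its shape follows (comments abridged, reconstructed from the sched.c half of the patch):

/* Condensed sketch: registering a stop task lifts it into the stop
 * class; passing NULL (as in CPU_POST_DEAD above) is the unregister
 * path, dropping the old task back to the RT class so kthread_stop()
 * can terminate it normally. */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/* keep a SCHED_FIFO identity visible to userspace */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop)
		old_stop->sched_class = &rt_sched_class;
}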