|           |                                                                 |                           |
|-----------|-----------------------------------------------------------------|---------------------------|
| author    | Linus Torvalds <torvalds@linux-foundation.org>                  | 2010-10-21 15:55:43 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org>                  | 2010-10-21 15:55:43 -0400 |
| commit    | bc4016f48161454a9a8e5eb209b0693c6cde9f62 (patch)                |                           |
| tree      | f470f5d711e975b152eec90282f5dd30a1d5dba5 /kernel/stop_machine.c |                           |
| parent    | 5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 (diff)                 |                           |
| parent    | b7dadc38797584f6203386da1947ed5edf516646 (diff)                 |                           |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (29 commits)
sched: Export account_system_vtime()
sched: Call tick_check_idle before __irq_enter
sched: Remove irq time from available CPU power
sched: Do not account irq time to current task
x86: Add IRQ_TIME_ACCOUNTING
sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
sched: Add a PF flag for ksoftirqd identification
sched: Consolidate account_system_vtime extern declaration
sched: Fix softirq time accounting
sched: Drop group_capacity to 1 only if local group has extra capacity
sched: Force balancing on newidle balance if local group has capacity
sched: Set group_imb only if a task can be pulled from the busiest cpu
sched: Do not consider SCHED_IDLE tasks to be cache hot
sched: Drop all load weight manipulation for RT tasks
sched: Create special class for stop/migrate work
sched: Unindent labels
sched: Comment updates: fix default latency and granularity numbers
tracing/sched: Add sched_pi_setprio tracepoint
sched: Give CPU bound RT tasks preference
sched: Try not to migrate higher priority RT tasks
...
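Of the commits above, "sched: Create special class for stop/migrate work" is the one that drives the kernel/stop_machine.c change shown below: it introduces a stop scheduling class that outranks every other class, so the per-cpu stopper thread no longer has to pose as a MAX_RT_PRIO-1 SCHED_FIFO task. A minimal sketch of the core idea, assuming the 2.6.36-era sched_class hooks (illustrative, not the verbatim upstream code):

```c
/*
 * Sketch of the "stop" class's pick-next hook: if this runqueue has a
 * stop task and it is runnable, it is chosen ahead of every RT and
 * fair task.  rq->stop is installed by sched_set_stop_task() (see the
 * diff below); field and hook names follow the sched_class convention
 * of that era.
 */
static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;	/* set by sched_set_stop_task() */

	if (stop && stop->se.on_rq)
		return stop;		/* stop work preempts everything else */

	return NULL;			/* fall through to the RT/fair classes */
}
```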
Diffstat (limited to 'kernel/stop_machine.c')
| -rw-r--r-- | kernel/stop_machine.c | 8 |
1 file changed, 5 insertions, 3 deletions
```diff
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4372ccb25127..090c28812ce1 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -287,11 +287,12 @@ repeat:
 	goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 					   unsigned long action, void *hcpu)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct task_struct *p;
@@ -304,13 +305,13 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 				   cpu);
 		if (IS_ERR(p))
 			return NOTIFY_BAD;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 		get_task_struct(p);
+		kthread_bind(p, cpu);
+		sched_set_stop_task(cpu, p);
 		stopper->thread = p;
 		break;
 
 	case CPU_ONLINE:
-		kthread_bind(stopper->thread, cpu);
 		/* strictly unnecessary, as first user will wake it */
 		wake_up_process(stopper->thread);
 		/* mark enabled */
@@ -325,6 +326,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 	{
 		struct cpu_stop_work *work;
 
+		sched_set_stop_task(cpu, NULL);
 		/* kill the stopper */
 		kthread_stop(stopper->thread);
 		/* drain remaining works */
```
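The sched_set_stop_task() declared extern and called above is defined in kernel/sched.c by the same series. A simplified sketch of its shape, assuming the upstream helper (not verbatim): it installs the task as the runqueue's stop task and switches it into the stop class, while demoting any previous stop task back to a normal class so it can exit; passing NULL, as in the CPU_UP_CANCELED/CPU_POST_DEAD path, performs only the demotion.

```c
/*
 * Simplified sketch of the kernel/sched.c counterpart added by the
 * same series (illustrative, not the verbatim upstream function).
 */
void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Keep the userspace-visible policy as SCHED_FIFO, then
		 * switch the task to the stop class, which outranks even
		 * the RT class.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/* demote the old stopper so it can exit normally */
		old_stop->sched_class = &rt_sched_class;
	}
}
```

This also explains the reordering in CPU_UP_PREPARE: kthread_bind(p, cpu) now runs before sched_set_stop_task(cpu, p), so the thread is already bound to its CPU when it is installed as that runqueue's stop task, and the old kthread_bind() in CPU_ONLINE becomes redundant.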
