path: root/kernel/stop_machine.c
author	Andrea Bastoni <bastoni@cs.unc.edu>	2011-08-27 09:43:54 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2011-08-27 10:06:11 -0400
commit	7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree	5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /kernel/stop_machine.c
parent	7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent	02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

  * Litmus^RT scheduling class is the topmost scheduling class
    (above stop_sched_class).
  * scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
    may increase IPI latencies.
  * Added path into schedule() to quickly re-evaluate scheduling
    decision without becoming preemptive again. This used to be a
    standard path before the removal of BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
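For orientation before the diff: in this file the merge picks up the upstream change that stops hand-tuning the stopper kthread as a SCHED_FIFO task and instead binds it to its CPU and registers it as the per-CPU stop task via sched_set_stop_task(); that stop class (stop_sched_class) is the one the first note above places the Litmus^RT class directly above. A condensed, non-diff sketch of the resulting CPU_UP_PREPARE path, using only the calls visible in the hunks below (the surrounding switch statement and the other hotplug cases are omitted), looks roughly like this:

	/* Fragment sketching the bring-up path after the merge; not the full
	 * notifier callback -- see the hunks below for the complete context. */
	case CPU_UP_PREPARE:
		/* create the stopper kthread on the memory node backing its CPU */
		p = kthread_create_on_node(cpu_stopper_thread, stopper,
					   cpu_to_node(cpu), "migration/%d", cpu);
		if (IS_ERR(p))
			/* report the real errno to the hotplug core, not a bare NOTIFY_BAD */
			return notifier_from_errno(PTR_ERR(p));
		get_task_struct(p);
		kthread_bind(p, cpu);		/* pin the thread to its CPU up front */
		sched_set_stop_task(cpu, p);	/* register it as this CPU's stop task */
		stopper->thread = p;
		break;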
Diffstat (limited to 'kernel/stop_machine.c')
-rw-r--r--	kernel/stop_machine.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 4372ccb25127..e3516b29076c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -262,7 +262,7 @@ repeat:
 	cpu_stop_fn_t fn = work->fn;
 	void *arg = work->arg;
 	struct cpu_stop_done *done = work->done;
-	char ksym_buf[KSYM_NAME_LEN];
+	char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
 	__set_current_state(TASK_RUNNING);
 
@@ -287,11 +287,12 @@ repeat:
 	goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 					   unsigned long action, void *hcpu)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 	unsigned int cpu = (unsigned long)hcpu;
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 	struct task_struct *p;
@@ -300,17 +301,19 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 		BUG_ON(stopper->thread || stopper->enabled ||
 		       !list_empty(&stopper->works));
-		p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
-				   cpu);
+		p = kthread_create_on_node(cpu_stopper_thread,
+					   stopper,
+					   cpu_to_node(cpu),
+					   "migration/%d", cpu);
 		if (IS_ERR(p))
-			return NOTIFY_BAD;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+			return notifier_from_errno(PTR_ERR(p));
 		get_task_struct(p);
+		kthread_bind(p, cpu);
+		sched_set_stop_task(cpu, p);
 		stopper->thread = p;
 		break;
 
 	case CPU_ONLINE:
-		kthread_bind(stopper->thread, cpu);
 		/* strictly unnecessary, as first user will wake it */
 		wake_up_process(stopper->thread);
 		/* mark enabled */
@@ -325,6 +328,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 	{
 		struct cpu_stop_work *work;
 
+		sched_set_stop_task(cpu, NULL);
 		/* kill the stopper */
 		kthread_stop(stopper->thread);
 		/* drain remaining works */
@@ -370,7 +374,7 @@ static int __init cpu_stop_init(void)
 	/* start one for the boot cpu */
 	err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
 				    bcpu);
-	BUG_ON(err == NOTIFY_BAD);
+	BUG_ON(err != NOTIFY_OK);
 	cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
 	register_cpu_notifier(&cpu_stop_cpu_notifier);
 
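One behavioral detail worth spelling out from the last two hunks: the callback used to return the fixed value NOTIFY_BAD when kthread creation failed, so cpu_stop_init() could check err == NOTIFY_BAD; once the callback returns notifier_from_errno(PTR_ERR(p)), the failure value encodes the errno and is generally not the literal NOTIFY_BAD, so the init-time check is inverted to err != NOTIFY_OK. Below is a minimal userspace sketch of that check. The NOTIFY_* values match include/linux/notifier.h of this kernel generation, but the encode helper is a simplified, hypothetical stand-in for notifier_from_errno(), not the kernel's exact formula.

	#include <assert.h>
	#include <stdio.h>

	/* Values as in include/linux/notifier.h of this era. */
	#define NOTIFY_OK		0x0001
	#define NOTIFY_STOP_MASK	0x8000
	#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)

	/* Simplified stand-in for notifier_from_errno(): success maps to
	 * NOTIFY_OK, failure to a stop-flagged value carrying the errno. */
	static int fake_notifier_from_errno(int err)
	{
		return err ? (NOTIFY_STOP_MASK | -err) : NOTIFY_OK;
	}

	/* Hypothetical callback mimicking the new error path in the diff. */
	static int callback(int fail_with_errno)
	{
		if (fail_with_errno)
			return fake_notifier_from_errno(-fail_with_errno);
		return NOTIFY_OK;
	}

	int main(void)
	{
		int err = callback(12 /* pretend kthread creation hit ENOMEM */);

		/* Old-style check: only catches the one literal NOTIFY_BAD value. */
		printf("err == NOTIFY_BAD? %d\n", err == NOTIFY_BAD);	/* 0: missed */

		/* New-style check from the last hunk: anything but NOTIFY_OK trips. */
		printf("err != NOTIFY_OK? %d\n", err != NOTIFY_OK);	/* 1: caught */

		assert(callback(0) == NOTIFY_OK);
		return 0;
	}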