author     Andrea Bastoni <bastoni@cs.unc.edu>   2011-08-27 09:43:54 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2011-08-27 10:06:11 -0400
commit     7b1bb388bc879ffcc6c69b567816d5c354afe42b (patch)
tree       5a217fdfb0b5e5a327bdcd624506337c1ae1fe32 /arch/arm/kernel/smp.c
parent     7d754596756240fa918b94cd0c3011c77a638987 (diff)
parent     02f8c6aee8df3cdc935e9bdd4f2d020306035dbe (diff)
Merge 'Linux v3.0' into Litmus
Some notes:

* The LITMUS^RT scheduling class is the topmost scheduling class
  (above stop_sched_class).
* The scheduler_ipi() function (e.g., in smp_reschedule_interrupt())
  may increase IPI latencies.
* Added a path into schedule() to quickly re-evaluate the scheduling
  decision without becoming preemptive again. This used to be a
  standard path before the removal of the BKL.

Conflicts:
	Makefile
	arch/arm/kernel/calls.S
	arch/arm/kernel/smp.c
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/smp.c
	arch/x86/kernel/syscall_table_32.S
	include/linux/hrtimer.h
	kernel/printk.c
	kernel/sched.c
	kernel/sched_fair.c
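For context on the scheduler_ipi() note above: as of v3.0, a reschedule IPI does real work inside the interrupt handler instead of deferring everything to the interrupt-return path, which is where the extra IPI latency can come from. A minimal sketch of the merged dispatch, distilled from the do_IPI() hunk below (handle_reschedule_ipi() is a hypothetical wrapper name for illustration; the two callees are real):

	/*
	 * Sketch only: what the IPI_RESCHEDULE case in do_IPI() below
	 * boils down to. Not a drop-in replacement for the handler.
	 */
	static void handle_reschedule_ipi(void)
	{
		/* LITMUS^RT hook: act on the local scheduler state first */
		sched_state_ipi();
		/* mainline v3.0: may perform wakeup/rescheduling work in IRQ context */
		scheduler_ipi();
	}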
Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--	arch/arm/kernel/smp.c	469
1 file changed, 197 insertions(+), 272 deletions(-)
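The bulk of the diff below replaces the old per-CPU bitmask IPI mechanism (a spinlock-guarded struct ipi_data drained in a loop by do_IPI()) with a platform-registered cross-call hook that passes the IPI number directly. The pattern, condensed from the hunks below:

	/* Condensed from the diff below: platform code registers its
	 * IPI-trigger function once, and senders hand the IPI number
	 * straight through; no per-CPU bitmask or lock is needed. */
	static void (*smp_cross_call)(const struct cpumask *, unsigned int);

	void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
	{
		smp_cross_call = fn;
	}

	void smp_send_reschedule(int cpu)
	{
		smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
	}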
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b72fbf3d043c..5a574296ace0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -24,6 +25,7 @@
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
+#include <linux/completion.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -33,10 +35,10 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
-#include <asm/smp_plat.h>
 
 #include <litmus/preempt.h>
 
@@ -47,22 +49,8 @@
  */
 struct secondary_data secondary_data;
 
-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
-	spinlock_t lock;
-	unsigned long ipi_count;
-	unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
-	.lock = SPIN_LOCK_UNLOCKED,
-};
-
 enum ipi_msg_type {
-	IPI_TIMER,
+	IPI_TIMER = 2,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -74,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
-	pmd_t *pmd;
 	int ret;
 
 	/*
@@ -103,11 +90,16 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * a 1:1 mapping for the physical address of the kernel.
 	 */
 	pgd = pgd_alloc(&init_mm);
-	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
-	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
-		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
-	flush_pmd_entry(pmd);
-	outer_clean_range(__pa(pmd), __pa(pmd + 1));
+	if (!pgd)
+		return -ENOMEM;
+
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
+	}
 
 	/*
 	 * We need to tell the secondary core where to find
@@ -115,6 +107,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 	secondary_data.pgdir = virt_to_phys(pgd);
+	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
 	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
@@ -138,29 +131,33 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		barrier();
 	}
 
-	if (!cpu_online(cpu))
+	if (!cpu_online(cpu)) {
+		pr_crit("CPU%u: failed to come online\n", cpu);
 		ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
 	secondary_data.stack = NULL;
 	secondary_data.pgdir = 0;
 
-	*pmd = __pmd(0);
-	clean_pmd_entry(pmd);
-	pgd_free(&init_mm, pgd);
-
-	if (ret) {
-		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
-		/*
-		 * FIXME: We need to clean up the new idle thread. --rmk
-		 */
+	if (PHYS_OFFSET != PAGE_OFFSET) {
+#ifndef CONFIG_HOTPLUG_CPU
+		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
+#endif
+		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
+		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
 	}
 
+	pgd_free(&init_mm, pgd);
+
 	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -188,7 +185,7 @@ int __cpu_disable(void)
 	/*
 	 * Stop the local timer for this CPU.
 	 */
-	local_timer_stop();
+	percpu_timer_stop();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -207,12 +204,20 @@ int __cpu_disable(void)
 	return 0;
 }
 
+static DECLARE_COMPLETION(cpu_died);
+
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
 }
@@ -229,12 +234,17 @@ void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	local_irq_disable();
 	idle_task_exit();
 
+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific
+	 * CPU) specific.
 	 */
 	platform_cpu_die(cpu);
 
@@ -244,6 +254,7 @@ void __ref cpu_die(void)
 	 * to be repeated to undo the effects of taking the CPU offline.
 	 */
 	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
 	"	b	secondary_start_kernel"
 		:
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
@@ -251,6 +262,17 @@
 #endif /* CONFIG_HOTPLUG_CPU */
 
 /*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
@@ -265,7 +287,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -275,6 +296,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 
 	cpu_init();
 	preempt_disable();
+	trace_hardirqs_off();
 
 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -298,9 +320,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	smp_store_cpu_info(cpu);
 
 	/*
-	 * OK, now it's safe to let the boot CPU continue
+	 * OK, now it's safe to let the boot CPU continue. Wait for
+	 * the CPU migration code to notice that the CPU is online
+	 * before we continue.
 	 */
 	set_cpu_online(cpu, true);
+	while (!cpu_active(cpu))
+		cpu_relax();
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -308,17 +334,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_idle();
 }
 
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
-	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
-	cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	int cpu;
@@ -341,61 +356,87 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long flags;
-	unsigned int cpu;
+	unsigned int ncores = num_possible_cpus();
 
-	local_irq_save(flags);
+	smp_store_cpu_info(smp_processor_id());
 
-	for_each_cpu(cpu, mask) {
-		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+	/*
+	 * are we trying to boot more cores than exist?
+	 */
+	if (max_cpus > ncores)
+		max_cpus = ncores;
 
-		spin_lock(&ipi->lock);
-		ipi->bits |= 1 << msg;
-		spin_unlock(&ipi->lock);
+	if (max_cpus > 1) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();
+
+		/*
+		 * Initialise the SCU if there are more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
 	}
+}
 
-	/*
-	 * Call the platform specific cross-CPU call function.
-	 */
-	smp_cross_call(mask);
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
-	local_irq_restore(flags);
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
+{
+	smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	smp_cross_call(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
 {
-	unsigned int cpu;
+	unsigned int cpu, i;
 
-	seq_puts(p, "IPI:");
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+		for_each_present_cpu(cpu)
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));
 
-	seq_putc(p, '\n');
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
 }
 
-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
 {
-	unsigned int cpu;
+	u64 sum = 0;
+	int i;
 
-	seq_printf(p, "LOC: ");
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+	sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif
 
-	seq_putc(p, '\n');
+	return sum;
 }
 
 /*
@@ -412,36 +453,47 @@ static void ipi_timer(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
 
 	if (local_timer_ack()) {
-		irq_stat[cpu].local_timer_irqs++;
+		__inc_irq_stat(cpu, local_timer_irqs);
 		ipi_timer();
 	}
 
 	set_irq_regs(old_regs);
 }
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+	seq_printf(p, " Local timer interrupts\n");
+}
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_TIMER);
 }
 #else
 #define smp_timer_broadcast	NULL
 #endif
 
-#ifndef CONFIG_LOCAL_TIMERS
 static void broadcast_timer_set_mode(enum clock_event_mode mode,
 	struct clock_event_device *evt)
 {
 }
 
-static void local_timer_setup(struct clock_event_device *evt)
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 {
 	evt->name = "dummy_timer";
 	evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -453,7 +505,6 @@ static void local_timer_setup(struct clock_event_device *evt)
 
 	clockevents_register_device(evt);
 }
-#endif
 
 void __cpuinit percpu_timer_setup(void)
 {
@@ -463,8 +514,24 @@ void __cpuinit percpu_timer_setup(void)
 	evt->cpumask = cpumask_of(cpu);
 	evt->broadcast = smp_timer_broadcast;
 
-	local_timer_setup(evt);
+	if (local_timer_setup(evt))
+		broadcast_timer_setup(evt);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
 }
+#endif
 
 static DEFINE_SPINLOCK(stop_lock);
 
@@ -492,217 +559,75 @@ static void ipi_cpu_stop(unsigned int cpu)
 
 /*
  * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
-	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	ipi->ipi_count++;
-
-	for (;;) {
-		unsigned long msgs;
-
-		spin_lock(&ipi->lock);
-		msgs = ipi->bits;
-		ipi->bits = 0;
-		spin_unlock(&ipi->lock);
-
-		if (!msgs)
-			break;
-
-		do {
-			unsigned nextmsg;
-
-			nextmsg = msgs & -msgs;
-			msgs &= ~nextmsg;
-			nextmsg = ffz(~nextmsg);
-
-			switch (nextmsg) {
-			case IPI_TIMER:
-				ipi_timer();
-				break;
-
-			case IPI_RESCHEDULE:
-				/*
-				 * nothing more to do - eveything is
-				 * done on the interrupt return path
-				 */
-				/* LITMUS^RT: take action based on scheduler state */
-				sched_state_ipi();
-				break;
-
-			case IPI_CALL_FUNC:
-				generic_smp_call_function_interrupt();
-				break;
-
-			case IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
 
-			case IPI_CPU_STOP:
-				ipi_cpu_stop(cpu);
-				break;
-
-			default:
-				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-				       cpu, nextmsg);
-				break;
-			}
-		} while (msgs);
+	switch (ipinr) {
+	case IPI_TIMER:
+		ipi_timer();
+		break;
+
+	case IPI_RESCHEDULE:
+		/* LITMUS^RT: take action based on scheduler state */
+		sched_state_ipi();
+		scheduler_ipi();
+		break;
+
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
+
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
+
+	case IPI_CPU_STOP:
+		ipi_cpu_stop(cpu);
+		break;
+
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
 	}
-
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(&mask, IPI_CPU_STOP);
-}
+	unsigned long timeout;
 
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
+	if (num_online_cpus() > 1) {
+		cpumask_t mask = cpu_online_map;
+		cpu_clear(smp_processor_id(), mask);
 
-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-		const struct cpumask *mask)
-{
-	preempt_disable();
+		smp_cross_call(&mask, IPI_CPU_STOP);
+	}
 
-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(smp_processor_id(), mask))
-		func(info);
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
 
-	preempt_enable();
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
 }
 
-/**********************************************************************/
-
 /*
- * TLB operations
+ * not supported here
  */
-struct tlb_args {
-	struct vm_area_struct *ta_vma;
-	unsigned long ta_start;
-	unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
-	local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
-	struct mm_struct *mm = (struct mm_struct *)arg;
-
-	local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
-	else
-		local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
-	else
-		local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = kaddr;
-		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
-	} else
-		local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
-		     unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+int setup_profiling_timer(unsigned int multiplier)
 {
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
-	} else
-		local_flush_tlb_kernel_range(start, end);
+	return -EINVAL;
 }
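Taken together, the hotplug hunks above replace the old fire-and-forget teardown with a completion-based handshake between the dying CPU and the CPU requesting the shutdown. Condensed from the hunks above (error paths and the shutdown notice trimmed; a sketch, not the verbatim kernel code):

	static DECLARE_COMPLETION(cpu_died);

	/* On the CPU going down (cpu_die()): publish that this CPU is
	 * now safe to dispose of, then enter the platform's low-level
	 * death routine. */
	local_irq_disable();
	mb();
	complete(&cpu_died);
	platform_cpu_die(cpu);

	/* On the requesting CPU (__cpu_die()): a bounded wait replaces
	 * unsynchronized teardown; the CPU is only killed once it has
	 * signalled the completion. */
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000)))
		pr_err("CPU%u: cpu didn't die\n", cpu);
	else if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);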