author	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:22:30 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 05:22:30 -0400
commit	f27b433ef32a77c8cb76f018507453df7c03e552
tree	928f03e2325a9b7f1d9543603bfd1a7133b2fdfa
parent	b7d7a2404f80386307ccc0cde63d8d2a5e3bc85c
ia64: convert to generic helpers for IPI function calls
This converts ia64 to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().

Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
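For context (this sketch is not part of the patch): after the conversion, the architecture only provides the two arch_send_*() hooks and dispatches the two IPIs to the generic handlers, while callers keep using the common API. A minimal, hypothetical caller is shown below, assuming the 2008-era five-argument signature of smp_call_function_single() (the same signature the removed ia64 copy had); the callback name and target CPU number are made up for illustration.

#include <linux/smp.h>

/* hypothetical callback: runs on the target CPU, in interrupt context */
static void example_bump_counter(void *info)
{
	int *counter = info;

	(*counter)++;
}

static int example_caller(void)
{
	int counter = 0;

	/*
	 * Queue the call for CPU 1: the generic code sends the IPI via
	 * arch_send_call_function_single_ipi(), and handle_IPI() on CPU 1
	 * dispatches it to generic_smp_call_function_single_interrupt().
	 * wait=1, so 'counter' on the stack stays valid until completion.
	 */
	return smp_call_function_single(1, example_bump_counter, &counter, 0, 1);
}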
-rw-r--r--	arch/ia64/Kconfig		1
-rw-r--r--	arch/ia64/kernel/smp.c		250
-rw-r--r--	arch/ia64/kernel/smpboot.c	4
-rw-r--r--	include/asm-ia64/smp.h		8
4 files changed, 19 insertions(+), 244 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5b..18bcc10903b4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..19152dcbf6e4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true. */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done. */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -360,190 +311,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
-{
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on.  Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
-{
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
-}
-EXPORT_SYMBOL(smp_call_function_mask);
-
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- *  [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
-{
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function);
-
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	send_IPI_mask(mask, IPI_CALL_FUNC);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d7ad42b77d41..eaa1b6795a13 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -395,14 +395,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
 
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index ec5f355fb7e3..27731e032ee9 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,9 +38,6 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
-
 #define hard_smp_processor_id()	ia64_get_lid()
 
 #ifdef CONFIG_SMP
@@ -124,11 +121,12 @@ extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
 extern void smp_send_reschedule (int cpu);
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #else /* CONFIG_SMP */
 
 #define cpu_logical_id(i)		0