author     Jens Axboe <jens.axboe@oracle.com>  2008-06-26 05:22:30 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-06-26 05:22:30 -0400
commit     f27b433ef32a77c8cb76f018507453df7c03e552 (patch)
tree       928f03e2325a9b7f1d9543603bfd1a7133b2fdfa /arch/ia64/kernel/smp.c
parent     b7d7a2404f80386307ccc0cde63d8d2a5e3bc85c (diff)
ia64: convert to generic helpers for IPI function calls
This converts ia64 to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single().
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
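
The conversion below leaves the architecture with a deliberately small job: it provides the two arch_send_call_function*() hooks that the generic kernel/smp.c helpers call once they have queued work for remote CPUs, and it forwards the matching IPI vectors from its interrupt handler to the generic dispatchers. All of the queueing, locking and completion bookkeeping that the removed call_data_struct code used to do now lives in the generic helpers. The following is a minimal sketch of that arch-side contract, not code from the tree; it assumes the 2.6.26-era generic helper API, reuses the send_IPI_single()/send_IPI_mask() primitives that the patch itself calls, and keeps the patch's IPI numbering.

/*
 * Sketch of the arch-side hooks expected by the generic
 * smp_call_function helpers (assumption: 2.6.26-era API).
 * send_IPI_single()/send_IPI_mask() are the existing ia64
 * primitives used by the patch below.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>

#define IPI_CALL_FUNC		0
#define IPI_CALL_FUNC_SINGLE	2

extern void send_IPI_single(int cpu, int op);		/* ia64 IPI primitives, */
extern void send_IPI_mask(cpumask_t mask, int op);	/* static in smp.c itself */

/* Called by kernel/smp.c after it has queued work for the target CPU(s). */
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

/*
 * The arch IPI handler then only has to forward the two vectors:
 *
 *	case IPI_CALL_FUNC:
 *		generic_smp_call_function_interrupt();
 *		break;
 *	case IPI_CALL_FUNC_SINGLE:
 *		generic_smp_call_function_single_interrupt();
 *		break;
 */

Compared with the removed code, the architecture no longer spins on data.started/data.finished or serializes callers through call_lock; that state is owned by the generic code.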
Diffstat (limited to 'arch/ia64/kernel/smp.c')
 arch/ia64/kernel/smp.c | 250
 1 file changed, 13 insertions(+), 237 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..19152dcbf6e4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true. */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done. */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -360,190 +311,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
-{
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on. Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- *  Returns 0 on success, else a negative status code.
- *
- *  If @wait is true, then returns once @func has returned; otherwise
- *  it returns just before the target cpu calls @func.
- *
- *  You must not call this function with disabled interrupts or from a
- *  hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_mask);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- *  [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.