author     David S. Miller <davem@davemloft.net>   2008-08-04 02:56:28 -0400
committer  David S. Miller <davem@davemloft.net>   2008-08-04 16:51:39 -0400
commit     199266305311d060b6e057fa5c7de01f218bb911
tree       062c97729ec6c89eab3b4b2c8ff173df7b0e3031 /arch
parent     cd5bc89debb4045d55eeffe325b97f2dfba4ddea
sparc64: Call xcall_deliver() directly in some cases.
For these cases the callers make sure:
1) The cpus indicated are online.
2) The current cpu is not in the list of indicated cpus.
Therefore we can pass a pointer to the mask directly.
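For reference, the shape of the helper being called (its signature is
not shown in this patch, so the following is inferred from the calls
in the diff below, which pass "&cpumask_of_cpu(cpu)", a pointer to
constant data, as the final argument):

    static void xcall_deliver(u64 data0, u64 data1, u64 data2,
                              const cpumask_t *mask);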
One of the motivations for this transformation is to make use of
"&cpumask_of_cpu(cpu)", which evaluates to a pointer to constant
data in the kernel and thus takes up no stack space.
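As a minimal userspace sketch of that point (not the kernel's actual
definition of cpumask_of_cpu()): if the macro indexes a const table
holding one single-bit mask per cpu, taking its address yields a
pointer into read-only data, so no cpumask_t copy is built on the
caller's stack. The table, the deliver() helper, and the NR_CPUS
value below are all illustrative:

    #include <stdio.h>

    #define NR_CPUS 4

    typedef struct { unsigned long bits[1]; } cpumask_t;

    /* Hypothetical const table: entry i has only bit i set. */
    static const cpumask_t cpu_masks[NR_CPUS] = {
        { { 1UL << 0 } }, { { 1UL << 1 } },
        { { 1UL << 2 } }, { { 1UL << 3 } },
    };

    #define cpumask_of_cpu(cpu)  (cpu_masks[(cpu)])

    static void deliver(const cpumask_t *mask)
    {
        printf("mask bits: %#lx\n", mask->bits[0]);
    }

    int main(void)
    {
        /* The argument points straight into the const table; no
         * cpumask_t temporary appears in this stack frame. */
        deliver(&cpumask_of_cpu(2));
        return 0;
    }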
Hopefully someone in the future will change the interface of
arch_send_call_function_ipi() such that it passes a const cpumask_t
pointer, so that this will optimize even further.
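What that hoped-for interface might look like (a sketch, not code
from any tree): with a const pointer passed all the way through, the
caller no longer copies a cpumask_t by value at all:

    void arch_send_call_function_ipi(const cpumask_t *mask)
    {
        xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
    }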
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
 arch/sparc64/kernel/smp.c | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 868625e3b661..47b0738ea4be 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -792,16 +792,15 @@ extern unsigned long xcall_call_function;
 
 void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
 }
 
 extern unsigned long xcall_call_function_single;
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
+		      &cpumask_of_cpu(cpu));
 }
 
 /* Send cross call to all processors except self. */
@@ -959,24 +958,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
-static void __smp_receive_signal_mask(cpumask_t mask)
-{
-	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
-}
-
-void smp_receive_signal(int cpu)
-{
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	if (cpu_online(cpu))
-		__smp_receive_signal_mask(mask);
-}
-
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-}
-
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
@@ -1374,7 +1355,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void smp_send_reschedule(int cpu)
 {
-	smp_receive_signal(cpu);
+	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
+		      &cpumask_of_cpu(cpu));
+}
+
+void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << irq);
 }
 
 /* This is a nop because we capture all other cpus