author	Paul Mackerras <paulus@samba.org>	2011-05-25 19:34:12 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-06-19 21:21:32 -0400
commit	9ca980dce523760ce04a798470d36fd5aa596b78 (patch)
tree	bef37cf5113f56a8e77d6bebf2f57a9df9ae8c21 /arch/powerpc/kernel/smp.c
parent	7ac87abb8166b99584149fcfb2efef5773a078e9 (diff)
powerpc: Avoid extra indirect function call in sending IPIs
On many platforms (including pSeries), smp_ops->message_pass is always smp_muxed_ipi_message_pass. This changes arch/powerpc/kernel/smp.c so that if smp_ops->message_pass is NULL, it calls smp_muxed_ipi_message_pass directly. This means that a platform doesn't need to set both .message_pass and .cause_ipi, only one of them.

It is a slight performance improvement in that it gets rid of an indirect function call at the expense of a predictable conditional branch.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
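To illustrate the last point, here is a minimal sketch (not part of the patch) of a platform smp_ops definition under the new scheme: only .cause_ipi is provided and .message_pass is left NULL, so the generic code falls back to smp_muxed_ipi_message_pass(). The .message_pass and .cause_ipi field names come from the commit message; example_cause_ipi and the exact callback signature are hypothetical placeholders here, not code from this patch.

static void example_cause_ipi(int cpu, unsigned long data)
{
	/* hypothetical: ask the platform interrupt controller to raise
	 * the single muxed IPI on "cpu" */
}

static struct smp_ops_t example_smp_ops = {
	/* .message_pass deliberately left NULL: the core now calls
	 * smp_muxed_ipi_message_pass() itself, saving an indirect call */
	.cause_ipi	= example_cause_ipi,
};

Before this change, such a platform would also have had to set .message_pass = smp_muxed_ipi_message_pass explicitly.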
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--	arch/powerpc/kernel/smp.c	18
1 file changed, 14 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..2975f64cf310 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -238,15 +238,25 @@ irqreturn_t smp_ipi_demux(void)
 }
 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
 
+static inline void do_message_pass(int cpu, int msg)
+{
+	if (smp_ops->message_pass)
+		smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+	else
+		smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
-		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +264,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	unsigned int cpu;
 
 	for_each_cpu(cpu, mask)
-		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +278,7 @@ void smp_send_debugger_break(void)
 
 	for_each_online_cpu(cpu)
 		if (cpu != me)
-			smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 