author	Milton Miller <miltonm@bga.com>	2011-05-10 15:29:39 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-05-19 01:31:03 -0400
commit	23d72bfd8f9f24aa9efafed3586a99f5669c23d7 (patch)
tree	f8fbd87c64de43c1d85a830f1f3342818414764a /arch/powerpc
parent	17f9c8a73bac2c7dfe28a520516ea6b8bbbe977e (diff)
powerpc: Consolidate ipi message mux and demux
Consolidate the mux and demux of ipi messages into smp.c and call a new smp_ops callback to actually trigger the ipi. The powerpc architecture code is optimised for having 4 distinct ipi triggers, which are mapped to 4 distinct messages (ipi many, ipi single, scheduler ipi, and enter debugger). However, several interrupt controllers only provide a single software triggered interrupt that can be delivered to each cpu. To resolve this limitation, each smp_ops implementation created a per-cpu variable that is manipulated with atomic bitops. Since these lines will be contended they are optimally marked as shared_aligned and take a full cache line for each cpu. Distro kernels may have 2 or 3 of these in their config, each taking per-cpu space even though at most one will be in use.

This consolidation removes smp_message_recv and replaces the single-call action cases with direct calls from the common message recognition loop. The complicated debugger ipi case with its muxed crash handling code is moved to debug_ipi_action, which is now called from the demux code (instead of the multi-message action calling smp_message_recv). I put in a call to reschedule_action to increase the likelihood of correctly merging the anticipated scheduler_ipi() hook coming from the scheduler tree; that single required call can be inlined later.

The actual message decode is a copy of the old pseries xics code with its memory barriers and cache line spacing, augmented with a per-cpu unsigned long based on the book-e doorbell code. The optional data is set via a callback from the implementation and is passed to the new cause-ipi hook along with the logical cpu number. While currently only the doorbell implementation uses this data, it should be almost zero cost to retrieve and pass it -- it adds a single register load for the argument from the same cache line to which we just completed a store, and the register is dead on return from the call. I extended the data element from unsigned int to unsigned long in case some other code wanted to associate a pointer.

The doorbell check_self is replaced by a call to smp_muxed_ipi_resend, conditioned on the CPU_FTR_DBELL feature. The ifdef guard could be relaxed to CONFIG_SMP but I left it with BOOKE for now.

Also, the doorbell interrupt vector for book-e was not calling irq_enter and irq_exit, which throws off cpu accounting and causes code to not realize it is running in interrupt context. Add the missing calls.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
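As an illustration only (not part of this commit), here is a minimal sketch of how a platform whose interrupt controller has a single software-triggered IPI per cpu would wire into the new hooks. Only smp_muxed_ipi_set_data(), smp_muxed_ipi_message_pass(), smp_ipi_demux() and the cause_ipi member come from this patch; every my_* name is a hypothetical placeholder for platform hardware code.

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
#include <asm/smp.h>		/* struct smp_ops_t, smp_muxed_ipi_* */

/* assumed platform hardware primitives (not in the tree) */
extern void my_trigger_hw_ipi(int cpu, unsigned long tag);
extern void my_clear_hw_ipi(int cpu);
extern unsigned long my_hw_ipi_tag(int cpu);

/* Hypothetical platform glue, modeled on the doorbell and iSeries changes below. */
static void my_cause_ipi(int cpu, unsigned long data)
{
	/* "data" is whatever the target cpu stored via smp_muxed_ipi_set_data() */
	my_trigger_hw_ipi(cpu, data);
}

static void my_setup_cpu(int nr)
{
	/* optional: stash a per-cpu tag for the cause_ipi hook to use */
	smp_muxed_ipi_set_data(nr, my_hw_ipi_tag(nr));
}

static irqreturn_t my_ipi_action(int irq, void *dev_id)
{
	my_clear_hw_ipi(smp_processor_id());	/* ack the single hardware IPI */
	return smp_ipi_demux();			/* common demux handles all pending messages */
}

static struct smp_ops_t my_smp_ops = {
	.message_pass	= smp_muxed_ipi_message_pass,	/* common mux side */
	.cause_ipi	= my_cause_ipi,			/* single hardware trigger */
	.setup_cpu	= my_setup_cpu,
	/* .probe, .kick_cpu, ... as before */
};

my_ipi_action would be requested on the platform's single IPI virq and my_smp_ops registered as usual; the demux loop added to kernel/smp.c below then dispatches the individual messages.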
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/dbell.h  3
-rw-r--r--  arch/powerpc/include/asm/smp.h  16
-rw-r--r--  arch/powerpc/include/asm/xics.h  2
-rw-r--r--  arch/powerpc/kernel/dbell.c  46
-rw-r--r--  arch/powerpc/kernel/irq.c  4
-rw-r--r--  arch/powerpc/kernel/smp.c  94
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c  6
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c  16
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c  3
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c  23
-rw-r--r--  arch/powerpc/platforms/iseries/smp.h  6
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c  27
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c  3
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c  3
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c  10
-rw-r--r--  arch/powerpc/sysdev/xics/icp-native.c  10
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c  30
17 files changed, 126 insertions, 176 deletions
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 3269eb49640a..9c70d0ca96d4 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -27,9 +27,8 @@ enum ppc_dbell {
 	PPC_G_DBELL_MC = 4,	/* guest mcheck doorbell */
 };
 
-extern void doorbell_message_pass(int cpu, int msg);
+extern void doorbell_cause_ipi(int cpu, unsigned long data);
 extern void doorbell_exception(struct pt_regs *regs);
-extern void doorbell_check_self(void);
 extern void doorbell_setup_this_cpu(void);
 
 static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 6f7c95c0027a..26f861560c51 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -20,6 +20,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/kernel.h>
+#include <linux/irqreturn.h>
 
 #ifndef __ASSEMBLY__
 
@@ -37,6 +38,7 @@ extern void cpu_die(void);
 
 struct smp_ops_t {
 	void  (*message_pass)(int cpu, int msg);
+	void  (*cause_ipi)(int cpu, unsigned long data);
 	int   (*probe)(void);
 	int   (*kick_cpu)(int nr);
 	void  (*setup_cpu)(int nr);
@@ -49,7 +51,6 @@ struct smp_ops_t {
 };
 
 extern void smp_send_debugger_break(void);
-extern void smp_message_recv(int);
 extern void start_secondary_resume(void);
 extern void __devinit smp_generic_give_timebase(void);
 extern void __devinit smp_generic_take_timebase(void);
@@ -109,13 +110,16 @@ extern int cpu_to_core_id(int cpu);
 #define PPC_MSG_CALL_FUNC_SINGLE	2
 #define PPC_MSG_DEBUGGER_BREAK		3
 
-/*
- * irq controllers that have dedicated ipis per message and don't
- * need additional code in the action handler may use this
- */
+/* for irq controllers that have dedicated ipis per message (4) */
 extern int smp_request_message_ipi(int virq, int message);
 extern const char *smp_ipi_name[];
 
+/* for irq controllers with only a single ipi */
+extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
+extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_resend(void);
+extern irqreturn_t smp_ipi_demux(void);
+
 void smp_init_iSeries(void);
 void smp_init_pSeries(void);
 void smp_init_cell(void);
@@ -185,6 +189,8 @@ extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
 
+extern irqreturn_t debug_ipi_action(int irq, void *data);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 1750c8dae1fa..b183a4062011 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -40,7 +40,7 @@ struct icp_ops {
 	void (*teardown_cpu)(void);
 	void (*flush_ipi)(void);
 #ifdef CONFIG_SMP
-	void (*message_pass)(int cpu, int msg);
+	void (*cause_ipi)(int cpu, unsigned long data);
 	irq_handler_t ipi_action;
 #endif
 };
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index e49b24c84133..2cc451aaaca7 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -13,65 +13,35 @@
 #include <linux/kernel.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
 
 #ifdef CONFIG_SMP
-struct doorbell_cpu_info {
-	unsigned long	messages;	/* current messages bits */
-	unsigned int	tag;		/* tag value */
-};
-
-static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info);
-
 void doorbell_setup_this_cpu(void)
 {
-	struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
+	unsigned long tag = mfspr(SPRN_PIR) & 0x3fff;
 
-	info->messages = 0;
-	info->tag = mfspr(SPRN_PIR) & 0x3fff;
+	smp_muxed_ipi_set_data(smp_processor_id(), tag);
 }
 
-void doorbell_message_pass(int cpu, int msg)
+void doorbell_cause_ipi(int cpu, unsigned long data)
 {
-	struct doorbell_cpu_info *info;
-
-	info = &per_cpu(doorbell_cpu_info, cpu);
-	set_bit(msg, &info->messages);
-	ppc_msgsnd(PPC_DBELL, 0, info->tag);
+	ppc_msgsnd(PPC_DBELL, 0, data);
 }
 
 void doorbell_exception(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
-	int msg;
 
-	/* Warning: regs can be NULL when called from irq enable */
+	irq_enter();
 
-	if (!info->messages || (num_online_cpus() < 2))
-		goto out;
+	smp_ipi_demux();
 
-	for (msg = 0; msg < 4; msg++)
-		if (test_and_clear_bit(msg, &info->messages))
-			smp_message_recv(msg);
-
-out:
+	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-void doorbell_check_self(void)
-{
-	struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info);
-
-	if (!info->messages)
-		return;
-
-	ppc_msgsnd(PPC_DBELL, 0, info->tag);
-}
-
 #else /* CONFIG_SMP */
 void doorbell_exception(struct pt_regs *regs)
 {
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a81dd74414bf..826552cecebd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,7 +66,6 @@
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-#include <asm/dbell.h>
 #include <asm/smp.h>
 
 #ifdef CONFIG_PPC64
@@ -160,7 +159,8 @@ notrace void arch_local_irq_restore(unsigned long en)
 
 #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
 	/* Check for pending doorbell interrupts and resend to ourself */
-	doorbell_check_self();
+	if (cpu_has_feature(CPU_FTR_DBELL))
+		smp_muxed_ipi_resend();
 #endif
 
 	/*
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b74411446922..fa8e8700064b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -111,35 +111,6 @@ int __devinit smp_generic_kick_cpu(int nr)
 }
 #endif
 
-void smp_message_recv(int msg)
-{
-	switch(msg) {
-	case PPC_MSG_CALL_FUNCTION:
-		generic_smp_call_function_interrupt();
-		break;
-	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
-		break;
-	case PPC_MSG_CALL_FUNC_SINGLE:
-		generic_smp_call_function_single_interrupt();
-		break;
-	case PPC_MSG_DEBUGGER_BREAK:
-		if (crash_ipi_function_ptr) {
-			crash_ipi_function_ptr(get_irq_regs());
-			break;
-		}
-#ifdef CONFIG_DEBUGGER
-		debugger_ipi(get_irq_regs());
-		break;
-#endif /* CONFIG_DEBUGGER */
-		/* FALLTHROUGH */
-	default:
-		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
-		       smp_processor_id(), msg);
-		break;
-	}
-}
-
 static irqreturn_t call_function_action(int irq, void *data)
 {
 	generic_smp_call_function_interrupt();
@@ -158,9 +129,17 @@ static irqreturn_t call_function_single_action(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t debug_ipi_action(int irq, void *data)
+irqreturn_t debug_ipi_action(int irq, void *data)
 {
-	smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+	if (crash_ipi_function_ptr) {
+		crash_ipi_function_ptr(get_irq_regs());
+		return IRQ_HANDLED;
+	}
+
+#ifdef CONFIG_DEBUGGER
+	debugger_ipi(get_irq_regs());
+#endif /* CONFIG_DEBUGGER */
+
 	return IRQ_HANDLED;
 }
 
@@ -199,6 +178,59 @@ int smp_request_message_ipi(int virq, int msg)
 	return err;
 }
 
+struct cpu_messages {
+	unsigned long messages;	/* current messages bits */
+	unsigned long data;	/* data for cause ipi */
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
+
+void smp_muxed_ipi_set_data(int cpu, unsigned long data)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+	info->data = data;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+	unsigned long *tgt = &info->messages;
+
+	set_bit(msg, tgt);
+	mb();
+	smp_ops->cause_ipi(cpu, info->data);
+}
+
+void smp_muxed_ipi_resend(void)
+{
+	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	unsigned long *tgt = &info->messages;
+
+	if (*tgt)
+		smp_ops->cause_ipi(smp_processor_id(), info->data);
+}
+
+irqreturn_t smp_ipi_demux(void)
+{
+	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	unsigned long *tgt = &info->messages;
+
+	mb();	/* order any irq clear */
+	while (*tgt) {
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt))
+			generic_smp_call_function_interrupt();
+		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt))
+			reschedule_action(0, NULL); /* upcoming sched hook */
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt))
+			generic_smp_call_function_single_interrupt();
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt))
+			debug_ipi_action(0, NULL);
+#endif
+	}
+	return IRQ_HANDLED;
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fe3f6a3a5307..d6a93a10c0f5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -235,8 +235,10 @@ void __init mpc85xx_smp_init(void)
 		smp_85xx_ops.message_pass = smp_mpic_message_pass;
 	}
 
-	if (cpu_has_feature(CPU_FTR_DBELL))
-		smp_85xx_ops.message_pass = doorbell_message_pass;
+	if (cpu_has_feature(CPU_FTR_DBELL)) {
+		smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass;
+		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+	}
 
 	BUG_ON(!smp_85xx_ops.message_pass);
 
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 44cfd1bef89b..6a58744d66c3 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -196,8 +196,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id)
 {
 	int ipi = (int)(long)dev_id;
 
-	smp_message_recv(ipi);
-
+	switch(ipi) {
+	case PPC_MSG_CALL_FUNCTION:
+		generic_smp_call_function_interrupt();
+		break;
+	case PPC_MSG_RESCHEDULE:
+		/* Upcoming sched hook */
+		break;
+	case PPC_MSG_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
+	case PPC_MSG_DEBUGGER_BREAK:
+		debug_ipi_action(0, NULL);
+		break;
+	}
 	return IRQ_HANDLED;
 }
 static void iic_request_ipi(int ipi, const char *name)
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 375c21ca6602..b2103453eb01 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -42,7 +42,6 @@
 #include "irq.h"
 #include "pci.h"
 #include "call_pci.h"
-#include "smp.h"
 
 #ifdef CONFIG_PCI
 
@@ -316,7 +315,7 @@ unsigned int iSeries_get_irq(void)
 #ifdef CONFIG_SMP
 	if (get_lppaca()->int_dword.fields.ipi_cnt) {
 		get_lppaca()->int_dword.fields.ipi_cnt = 0;
-		iSeries_smp_message_recv();
+		smp_ipi_demux();
 	}
 #endif /* CONFIG_SMP */
 	if (hvlpevent_is_pending())
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index dcdbc5dc5aad..e3265adde5d3 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -42,26 +42,8 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 
-#include "smp.h"
-
-static unsigned long iSeries_smp_message[NR_CPUS];
-
-void iSeries_smp_message_recv(void)
-{
-	int cpu = smp_processor_id();
-	int msg;
-
-	if (num_online_cpus() < 2)
-		return;
-
-	for (msg = 0; msg < 4; msg++)
-		if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
-			smp_message_recv(msg);
-}
-
-static void smp_iSeries_message_pass(int cpu, int msg)
+static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
 {
-	set_bit(msg, &iSeries_smp_message[cpu]);
 	HvCall_sendIPI(&(paca[cpu]));
 }
 
@@ -93,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr)
 }
 
 static struct smp_ops_t iSeries_smp_ops = {
-	.message_pass = smp_iSeries_message_pass,
+	.message_pass = smp_muxed_ipi_message_pass,
+	.cause_ipi = smp_iSeries_cause_ipi,
 	.probe = smp_iSeries_probe,
 	.kick_cpu = smp_iSeries_kick_cpu,
 	.setup_cpu = smp_iSeries_setup_cpu,
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
deleted file mode 100644
index d501f7de01e7..000000000000
--- a/arch/powerpc/platforms/iseries/smp.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _PLATFORMS_ISERIES_SMP_H
-#define _PLATFORMS_ISERIES_SMP_H
-
-extern void iSeries_smp_message_recv(void);
-
-#endif	/* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index c49e71926a54..a3401071abfb 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -156,28 +156,13 @@ static inline void psurge_clr_ipi(int cpu)
 /*
  * On powersurge (old SMP powermac architecture) we don't have
  * separate IPIs for separate messages like openpic does.  Instead
- * we have a bitmap for each processor, where a 1 bit means that
- * the corresponding message is pending for that processor.
- * Ideally each cpu's entry would be in a different cache line.
+ * use the generic demux helpers
  * -- paulus.
  */
-static unsigned long psurge_smp_message[NR_CPUS];
-
 void psurge_smp_message_recv(void)
 {
-	int cpu = smp_processor_id();
-	int msg;
-
-	/* clear interrupt */
-	psurge_clr_ipi(cpu);
-
-	if (num_online_cpus() < 2)
-		return;
-
-	/* make sure there is a message there */
-	for (msg = 0; msg < 4; msg++)
-		if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
-			smp_message_recv(msg);
+	psurge_clr_ipi(smp_processor_id());
+	smp_ipi_demux();
 }
 
 irqreturn_t psurge_primary_intr(int irq, void *d)
@@ -186,9 +171,8 @@ irqreturn_t psurge_primary_intr(int irq, void *d)
 	return IRQ_HANDLED;
 }
 
-static void smp_psurge_message_pass(int cpu, int msg)
+static void smp_psurge_cause_ipi(int cpu, unsigned long data)
 {
-	set_bit(msg, &psurge_smp_message[cpu]);
 	psurge_set_ipi(cpu);
 }
 
@@ -428,7 +412,8 @@ void __init smp_psurge_give_timebase(void)
 
 /* PowerSurge-style Macs */
 struct smp_ops_t psurge_smp_ops = {
-	.message_pass	= smp_psurge_message_pass,
+	.message_pass	= smp_muxed_ipi_message_pass,
+	.cause_ipi	= smp_psurge_cause_ipi,
 	.probe		= smp_psurge_probe,
 	.kick_cpu	= smp_psurge_kick_cpu,
 	.setup_cpu	= smp_psurge_setup_cpu,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 95f578158ff0..fbffd7e47ab8 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -207,7 +207,8 @@ static struct smp_ops_t pSeries_mpic_smp_ops = {
 };
 
 static struct smp_ops_t pSeries_xics_smp_ops = {
-	.message_pass	= NULL,	/* Filled at runtime by xics_smp_probe() */
+	.message_pass	= smp_muxed_ipi_message_pass,
+	.cause_ipi	= NULL,	/* Filled at runtime by xics_smp_probe() */
 	.probe		= xics_smp_probe,
 	.kick_cpu	= smp_pSeries_kick_cpu,
 	.setup_cpu	= smp_xics_setup_cpu,
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index c7b8db9ed9b3..9d20fa9d3710 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -75,7 +75,8 @@ static int __init smp_a2_probe(void)
 }
 
 static struct smp_ops_t a2_smp_ops = {
-	.message_pass	= doorbell_message_pass,
+	.message_pass	= smp_muxed_ipi_message_pass,
+	.cause_ipi	= doorbell_cause_ipi,
 	.probe		= smp_a2_probe,
 	.kick_cpu	= smp_a2_kick_cpu,
 	.setup_cpu	= smp_a2_setup_cpu,
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 234764c189a4..9518d367a64f 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -118,12 +118,8 @@ static void icp_hv_set_cpu_priority(unsigned char cppr)
 
 #ifdef CONFIG_SMP
 
-static void icp_hv_message_pass(int cpu, int msg)
+static void icp_hv_cause_ipi(int cpu, unsigned long data)
 {
-	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
-	set_bit(msg, tgt);
-	mb();
 	icp_hv_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -133,7 +129,7 @@ static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
 
 	icp_hv_set_qirr(cpu, 0xff);
 
-	return xics_ipi_dispatch(cpu);
+	return smp_ipi_demux();
 }
 
 #endif /* CONFIG_SMP */
@@ -146,7 +142,7 @@ static const struct icp_ops icp_hv_ops = {
 	.flush_ipi	= icp_hv_flush_ipi,
 #ifdef CONFIG_SMP
 	.ipi_action	= icp_hv_ipi_action,
-	.message_pass	= icp_hv_message_pass,
+	.cause_ipi	= icp_hv_cause_ipi,
 #endif
 };
 
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 246500eefbfd..1f15ad436140 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -134,12 +134,8 @@ static unsigned int icp_native_get_irq(void)
 
 #ifdef CONFIG_SMP
 
-static void icp_native_message_pass(int cpu, int msg)
+static void icp_native_cause_ipi(int cpu, unsigned long data)
 {
-	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
-	set_bit(msg, tgt);
-	mb();
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -149,7 +145,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 
 	icp_native_set_qirr(cpu, 0xff);
 
-	return xics_ipi_dispatch(cpu);
+	return smp_ipi_demux();
 }
 
 #endif /* CONFIG_SMP */
@@ -267,7 +263,7 @@ static const struct icp_ops icp_native_ops = {
 	.flush_ipi	= icp_native_flush_ipi,
 #ifdef CONFIG_SMP
 	.ipi_action	= icp_native_ipi_action,
-	.message_pass	= icp_native_message_pass,
+	.cause_ipi	= icp_native_cause_ipi,
 #endif
 };
 
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index a0576b705ddd..a31a7103218f 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -126,32 +126,6 @@ void xics_mask_unknown_vec(unsigned int vec)
 
 #ifdef CONFIG_SMP
 
-DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
-
-irqreturn_t xics_ipi_dispatch(int cpu)
-{
-	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
-
-	mb();	/* order mmio clearing qirr */
-	while (*tgt) {
-		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
-			smp_message_recv(PPC_MSG_CALL_FUNCTION);
-		}
-		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
-			smp_message_recv(PPC_MSG_RESCHEDULE);
-		}
-		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
-			smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
-		}
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
-			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
-		}
-#endif
-	}
-	return IRQ_HANDLED;
-}
-
 static void xics_request_ipi(void)
 {
 	unsigned int ipi;
@@ -170,8 +144,8 @@ static void xics_request_ipi(void)
 
 int __init xics_smp_probe(void)
 {
-	/* Setup message_pass callback based on which ICP is used */
-	smp_ops->message_pass = icp_ops->message_pass;
+	/* Setup cause_ipi callback based on which ICP is used */
+	smp_ops->cause_ipi = icp_ops->cause_ipi;
 
 	/* Register all the IPIs */
 	xics_request_ipi();