author     Jens Axboe <jens.axboe@oracle.com>  2008-06-17 04:45:23 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-06-26 05:22:57 -0400
commit     2f304c0a0a55072b80957580f1b66256a615d8da (patch)
tree       19f2dbd55f94b34ab9de5e9f66068641ddc0b536
parent     7b7426c8a615cf61df9a77b9df7d5b75d91e3fa0 (diff)
mips: convert to generic helpers for IPI function calls
This converts mips to use the new helpers for smp_call_function() and
friends, and adds support for smp_call_function_single(). Not tested,
but it compiles. mips shares the same IPI for smp_call_function() and
smp_call_function_single(), since not all mips platforms have enough
available IPIs to support separate setups.

Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
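[Editorial note, not part of the commit: the sketch below shows how callers
exercise the converted paths once the generic helpers are in place. The
signatures shown are the post-conversion ones (the unused "retry" argument
was dropped in the same patch series); count_ipi() and run_example() are
hypothetical names for illustration.]

/*
 * Illustrative sketch, not part of the patch: driving the cross-call
 * paths this commit rewires onto the generic helpers. count_ipi() and
 * run_example() are hypothetical.
 */
#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t hits = ATOMIC_INIT(0);

/*
 * Runs on the target CPU in IPI (hardirq) context: must be fast,
 * must not block or take sleeping locks.
 */
static void count_ipi(void *info)
{
        atomic_inc((atomic_t *)info);
}

static void run_example(void)
{
        /* Queue count_ipi on CPU 1; wait=1 spins until it has run. */
        smp_call_function_single(1, count_ipi, &hits, 1);

        /* Run it on all other online CPUs via the mask IPI path. */
        smp_call_function(count_ipi, &hits, 1);
}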
-rw-r--r--  arch/mips/Kconfig          1
-rw-r--r--  arch/mips/kernel/smp.c   141
-rw-r--r--  arch/mips/kernel/smtc.c    1
-rw-r--r--  include/asm-mips/smp.h    13
4 files changed, 15 insertions(+), 141 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e5a7c5d96364..ea70d5a225ca 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1763,6 +1763,7 @@ config SMP
 	bool "Multi-Processing support"
 	depends on SYS_SUPPORTS_SMP
 	select IRQ_PER_CPU
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cdf87a9dd4ba..c75b26cb61df 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A                               CPU B
- * Disable interrupts
- *                                     smp_call_function()
- *                                     Take call_lock
- *                                     Send IPIs
- *                                     Wait for all cpus to acknowledge IPI
- *                                     CPU A has not responded, spin waiting
- *                                     for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock                            Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
 }
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
-EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-		wait);
-
-	put_cpu();
-	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
 static void stop_this_cpu(void *dummy)
 {
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3e863186cd22..a516286532ab 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void)
 	/* Return from interrupt should be enough to cause scheduler check */
 }
 
-
 static void ipi_call_interrupt(void)
 {
 	/* Invoke generic function invocation code in smp.c */
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 84fef1aeec0c..0ff5b523ea77 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS];
 
 #define NO_PROC_ID	(-1)
 
-struct call_data_struct {
-	void (*func)(void *);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-};
-
-extern struct call_data_struct *call_data;
-
 #define SMP_RESCHEDULE_YOURSELF	0x1	/* XXX braindead */
 #define SMP_CALL_FUNCTION	0x2
 
@@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu)
 
 extern asmlinkage void smp_call_function_interrupt(void);
 
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif /* __ASM_SMP_H */
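[Editorial note, not part of the patch: sharing one vector is safe because
each generic handler simply drains its own per-CPU queue and returns
immediately when that queue is empty, so the "wrong" half of the shared IPI
costs only a queue check. A simplified sketch of that tolerant-dispatch
shape, with hypothetical names throughout:]

/*
 * Simplified, hypothetical sketch of why one IPI can serve both call
 * types: an empty queue makes the handler a cheap no-op, so invoking
 * both handlers from a shared vector is harmless.
 */
#include <linux/list.h>

struct call_entry {
        struct list_head node;
        void (*func)(void *);
        void *info;
};

static void drain_queue(struct list_head *queue)
{
        struct call_entry *e, *tmp;

        /* Empty list: this half of the shared IPI returns immediately. */
        list_for_each_entry_safe(e, tmp, queue, node) {
                list_del(&e->node);
                e->func(e->info);
        }
}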