diff options
Diffstat (limited to 'arch/arc/kernel/smp.c')
-rw-r--r-- | arch/arc/kernel/smp.c | 124 |
1 files changed, 70 insertions, 54 deletions
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index c2f9ebbc38f6..40859e5619f9 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -197,51 +197,65 @@ int __init setup_profiling_timer(unsigned int multiplier) | |||
197 | /* Inter Processor Interrupt Handling */ | 197 | /* Inter Processor Interrupt Handling */ |
198 | /*****************************************************************************/ | 198 | /*****************************************************************************/ |
199 | 199 | ||
200 | /* | ||
201 | * structures for inter-processor calls | ||
202 | * A Collection of single bit ipi messages | ||
203 | * | ||
204 | */ | ||
205 | |||
206 | /* | ||
207 | * TODO_rajesh investigate tlb message types. | ||
208 | * IPI Timer not needed because each ARC has an individual Interrupting Timer | ||
209 | */ | ||
210 | enum ipi_msg_type { | 200 | enum ipi_msg_type { |
211 | IPI_NOP = 0, | 201 | IPI_EMPTY = 0, |
212 | IPI_RESCHEDULE = 1, | 202 | IPI_RESCHEDULE = 1, |
213 | IPI_CALL_FUNC, | 203 | IPI_CALL_FUNC, |
214 | IPI_CPU_STOP | 204 | IPI_CPU_STOP, |
215 | }; | 205 | }; |
216 | 206 | ||
217 | struct ipi_data { | 207 | /* |
218 | unsigned long bits; | 208 | * In arches with IRQ for each msg type (above), receiver can use IRQ-id to |
219 | }; | 209 | * figure out what msg was sent. For those which don't (ARC has dedicated IPI |
210 | * IRQ), the msg-type needs to be conveyed via per-cpu data | ||
211 | */ | ||
220 | 212 | ||
221 | static DEFINE_PER_CPU(struct ipi_data, ipi_data); | 213 | static DEFINE_PER_CPU(unsigned long, ipi_data); |
222 | 214 | ||
223 | static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) | 215 | static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg) |
224 | { | 216 | { |
217 | unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu); | ||
218 | unsigned long old, new; | ||
225 | unsigned long flags; | 219 | unsigned long flags; |
226 | unsigned int cpu; | 220 | |
221 | pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu); | ||
227 | 222 | ||
228 | local_irq_save(flags); | 223 | local_irq_save(flags); |
229 | 224 | ||
230 | for_each_cpu(cpu, callmap) { | 225 | /* |
231 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | 226 | * Atomically write new msg bit (in case others are writing too), |
232 | set_bit(msg, &ipi->bits); | 227 | * and read back old value |
233 | } | 228 | */ |
229 | do { | ||
230 | new = old = *ipi_data_ptr; | ||
231 | new |= 1U << msg; | ||
232 | } while (cmpxchg(ipi_data_ptr, old, new) != old); | ||
234 | 233 | ||
235 | /* Call the platform specific cross-CPU call function */ | 234 | /* |
236 | if (plat_smp_ops.ipi_send) | 235 | * Call the platform specific IPI kick function, but avoid if possible: |
237 | plat_smp_ops.ipi_send((void *)callmap); | 236 | * Only do so if there's no pending msg from other concurrent sender(s). |
237 | * Otherwise, receiver will see this msg as well when it takes the | ||
238 | * IPI corresponding to that msg. This is true, even if it is already in | ||
239 | * IPI handler, because !@old means it has not yet dequeued the msg(s) | ||
240 | * so @new msg can be a free-loader | ||
241 | */ | ||
242 | if (plat_smp_ops.ipi_send && !old) | ||
243 | plat_smp_ops.ipi_send(cpu); | ||
238 | 244 | ||
239 | local_irq_restore(flags); | 245 | local_irq_restore(flags); |
240 | } | 246 | } |
241 | 247 | ||
248 | static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg) | ||
249 | { | ||
250 | unsigned int cpu; | ||
251 | |||
252 | for_each_cpu(cpu, callmap) | ||
253 | ipi_send_msg_one(cpu, msg); | ||
254 | } | ||
255 | |||
242 | void smp_send_reschedule(int cpu) | 256 | void smp_send_reschedule(int cpu) |
243 | { | 257 | { |
244 | ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE); | 258 | ipi_send_msg_one(cpu, IPI_RESCHEDULE); |
245 | } | 259 | } |
246 | 260 | ||
247 | void smp_send_stop(void) | 261 | void smp_send_stop(void) |
@@ -254,7 +268,7 @@ void smp_send_stop(void) | |||
254 | 268 | ||
255 | void arch_send_call_function_single_ipi(int cpu) | 269 | void arch_send_call_function_single_ipi(int cpu) |
256 | { | 270 | { |
257 | ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC); | 271 | ipi_send_msg_one(cpu, IPI_CALL_FUNC); |
258 | } | 272 | } |
259 | 273 | ||
260 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 274 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
@@ -265,33 +279,29 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
265 | /* | 279 | /* |
266 | * ipi_cpu_stop - handle IPI from smp_send_stop() | 280 | * ipi_cpu_stop - handle IPI from smp_send_stop() |
267 | */ | 281 | */ |
268 | static void ipi_cpu_stop(unsigned int cpu) | 282 | static void ipi_cpu_stop(void) |
269 | { | 283 | { |
270 | machine_halt(); | 284 | machine_halt(); |
271 | } | 285 | } |
272 | 286 | ||
273 | static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu) | 287 | static inline void __do_IPI(unsigned long msg) |
274 | { | 288 | { |
275 | unsigned long msg = 0; | 289 | switch (msg) { |
290 | case IPI_RESCHEDULE: | ||
291 | scheduler_ipi(); | ||
292 | break; | ||
276 | 293 | ||
277 | do { | 294 | case IPI_CALL_FUNC: |
278 | msg = find_next_bit(ops, BITS_PER_LONG, msg+1); | 295 | generic_smp_call_function_interrupt(); |
296 | break; | ||
279 | 297 | ||
280 | switch (msg) { | 298 | case IPI_CPU_STOP: |
281 | case IPI_RESCHEDULE: | 299 | ipi_cpu_stop(); |
282 | scheduler_ipi(); | 300 | break; |
283 | break; | ||
284 | |||
285 | case IPI_CALL_FUNC: | ||
286 | generic_smp_call_function_interrupt(); | ||
287 | break; | ||
288 | |||
289 | case IPI_CPU_STOP: | ||
290 | ipi_cpu_stop(cpu); | ||
291 | break; | ||
292 | } | ||
293 | } while (msg < BITS_PER_LONG); | ||
294 | 301 | ||
302 | default: | ||
303 | pr_warn("IPI with unexpected msg %ld\n", msg); | ||
304 | } | ||
295 | } | 305 | } |
296 | 306 | ||
297 | /* | 307 | /* |
@@ -300,19 +310,25 @@ static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu) | |||
300 | */ | 310 | */ |
301 | irqreturn_t do_IPI(int irq, void *dev_id) | 311 | irqreturn_t do_IPI(int irq, void *dev_id) |
302 | { | 312 | { |
303 | int cpu = smp_processor_id(); | 313 | unsigned long pending; |
304 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | 314 | |
305 | unsigned long ops; | 315 | pr_debug("IPI [%ld] received on cpu %d\n", |
316 | *this_cpu_ptr(&ipi_data), smp_processor_id()); | ||
306 | 317 | ||
307 | if (plat_smp_ops.ipi_clear) | 318 | if (plat_smp_ops.ipi_clear) |
308 | plat_smp_ops.ipi_clear(cpu, irq); | 319 | plat_smp_ops.ipi_clear(irq); |
309 | 320 | ||
310 | /* | 321 | /* |
311 | * XXX: is this loop really needed | 322 | * "dequeue" the msg corresponding to this IPI (and possibly other |
312 | * And do we need to move ipi_clean inside | 323 | * piggybacked msg from elided IPIs: see ipi_send_msg_one() above) |
313 | */ | 324 | */ |
314 | while ((ops = xchg(&ipi->bits, 0)) != 0) | 325 | pending = xchg(this_cpu_ptr(&ipi_data), 0); |
315 | __do_IPI(&ops, ipi, cpu); | 326 | |
327 | do { | ||
328 | unsigned long msg = __ffs(pending); | ||
329 | __do_IPI(msg); | ||
330 | pending &= ~(1U << msg); | ||
331 | } while (pending); | ||
316 | 332 | ||
317 | return IRQ_HANDLED; | 333 | return IRQ_HANDLED; |
318 | } | 334 | } |