path: root/arch/arc/include/asm/smp.h
author		Vineet Gupta <vgupta@synopsys.com>	2013-01-18 04:42:23 -0500
committer	Vineet Gupta <vgupta@synopsys.com>	2013-02-15 12:46:02 -0500
commit		41195d236e84458bebd4fdc218610a92231ac791 (patch)
tree		c0049630c1a21a071c9c942086041029ebdf2866 /arch/arc/include/asm/smp.h
parent		0ef88a54aa341f754707414500158addbf35c780 (diff)
ARC: SMP support
ARC common code to enable a SMP system + ISS provided SMP extensions.

ARC700 natively lacks SMP support, hence some of the core features are
only enabled if SoCs have the necessary h/w pixie-dust. This includes:
-Inter Processor Interrupts (IPI)
-Cache coherency
-load-locked/store-conditional
...

The low level exception handling would be completely broken in SMP
because we don't have hardware assisted stack switching. Thus a fair bit
of this code is repurposing the MMU_SCRATCH reg for event handler
prologues to keep them re-entrant.

Many thanks to Rajeshwar Ranga for his initial "major" contributions to
SMP Port (back in 2008), and to Noam Camus and Gilad Ben-Yossef for help
with resurrecting that in 3.2 kernel (2012).

Note that this platform code is again singleton design pattern - so
multiple SMP platforms won't build at the moment - this deficiency is
addressed in subsequent patches within this series.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rajeshwar Ranga <rajeshwar.ranga@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Diffstat (limited to 'arch/arc/include/asm/smp.h')
-rw-r--r--	arch/arc/include/asm/smp.h	107
1 file changed, 107 insertions, 0 deletions
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 4341f3ba7d92..f91f1946272f 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -9,6 +9,69 @@
 #ifndef __ASM_ARC_SMP_H
 #define __ASM_ARC_SMP_H
 
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ *	Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * APIs expected FROM platform smp code
+ *
+ * arc_platform_smp_cpuinfo:
+ *	returns a string containing info for /proc/cpuinfo
+ *
+ * arc_platform_smp_init_cpu:
+ *	Called from start_kernel_secondary to do any CPU local setup
+ *	such as starting a timer, setting up IPI etc
+ *
+ * arc_platform_smp_wait_to_boot:
+ *	Called from early bootup code for non-Master CPUs to "park" them
+ *
+ * arc_platform_smp_wakeup_cpu:
+ *	Called from __cpu_up (Master CPU) to kick start another one
+ *
+ * arc_platform_ipi_send:
+ *	Takes @cpumask to which IPI(s) would be sent.
+ *	The actual msg-id/buffer is managed in arch-common code
+ *
+ * arc_platform_ipi_clear:
+ *	Takes @cpu which got IPI at @irq to do any IPI clearing
+ */
+extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_init_cpu(void);
+extern void arc_platform_smp_wait_to_boot(int cpu);
+extern void arc_platform_smp_wakeup_cpu(int cpu, unsigned long pc);
+extern void arc_platform_ipi_send(const struct cpumask *callmap);
+extern void arc_platform_ipi_clear(int cpu, int irq);
+
+#endif /* CONFIG_SMP */
+
 /*
  * ARC700 doesn't support atomic Read-Modify-Write ops.
  * Originally Interrupts had to be disabled around code to guarantee atomicity.
@@ -18,10 +81,52 @@
  *
  * (1) These insn were introduced only in 4.10 release. So for older releases
  *	support is needed.
+ *
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which core handles).
+ *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ *     disabling for atomicity.
+ *
+ *     However exported spinlock API is not usable due to cyclic hdr deps
+ *     (even after system.h disintegration upstream)
+ *	asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ *		-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ *     So the workaround is to use the lowest level arch spinlock API.
+ *     The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ *     but same is not true for ARCH backend, hence the need for 2 variants
  */
 #ifndef CONFIG_ARC_HAS_LLSC
 
 #include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_atomic_ops_lock);	\
+} while (0)
+
+#define atomic_ops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_atomic_ops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#define bitops_lock(flags)	do {		\
+	local_irq_save(flags);			\
+	arch_spin_lock(&smp_bitops_lock);	\
+} while (0)
+
+#define bitops_unlock(flags) do {		\
+	arch_spin_unlock(&smp_bitops_lock);	\
+	local_irq_restore(flags);		\
+} while (0)
+
+#else /* !CONFIG_SMP */
 
 #define atomic_ops_lock(flags)		local_irq_save(flags)
 #define atomic_ops_unlock(flags)	local_irq_restore(flags)
@@ -29,6 +134,8 @@
 #define bitops_lock(flags)	local_irq_save(flags)
 #define bitops_unlock(flags)	local_irq_restore(flags)
 
+#endif /* !CONFIG_SMP */
+
 #endif	/* !CONFIG_ARC_HAS_LLSC */
 
 #endif
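
A minimal usage sketch (not part of this patch): with !CONFIG_ARC_HAS_LLSC, an atomic read-modify-write has to be built on the atomic_ops_lock()/atomic_ops_unlock() pair declared above, which on SMP expands to local_irq_save() + arch_spin_lock(&smp_atomic_ops_lock) and on UP collapses to plain IRQ disabling. my_atomic_add() is a made-up name for illustration; the real ops live in arch/arc/include/asm/atomic.h.

#include <linux/types.h>
#include <asm/smp.h>

/* illustrative only -- not the kernel's atomic_add() */
static inline void my_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	atomic_ops_lock(flags);		/* IRQs off (+ arch spinlock on SMP) */
	v->counter += i;		/* plain RMW, now race free */
	atomic_ops_unlock(flags);	/* unlock + restore IRQ state */
}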
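
Similarly, a hypothetical platform back-end for the hooks declared in this header (illustrative only; the irq number and the MY_IPI_* mailbox registers are invented, not from this patch): arc_platform_smp_init_cpu() runs on each CPU and hands the platform's IPI irq to the arch-common layer via smp_ipi_irq_setup(), while arc_platform_ipi_send()/arc_platform_ipi_clear() poke a per-CPU mailbox to raise/ack the interrupt.

#include <linux/smp.h>
#include <linux/io.h>
#include <asm/smp.h>

#define MY_IPI_IRQ	19			/* invented IPI irq line */

static void __iomem *my_ipi_set_reg;		/* invented mailbox registers */
static void __iomem *my_ipi_clr_reg;

void arc_platform_smp_init_cpu(void)
{
	/* per-CPU setup: tell arch-common code which irq carries IPIs */
	smp_ipi_irq_setup(smp_processor_id(), MY_IPI_IRQ);
}

void arc_platform_ipi_send(const struct cpumask *callmap)
{
	int cpu;

	/* set the mailbox bit of every target CPU to assert its IPI irq */
	for_each_cpu(cpu, callmap)
		writel(1 << cpu, my_ipi_set_reg);
}

void arc_platform_ipi_clear(int cpu, int irq)
{
	/* ack the mailbox bit so the irq line de-asserts */
	writel(1 << cpu, my_ipi_clr_reg);
}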