author    Daniel Hellstrom <daniel@gaisler.com>  2011-05-01 20:08:51 -0400
committer David S. Miller <davem@davemloft.net>  2011-05-16 16:07:43 -0400
commit    d6d048192b1d22cb8f09da0cc936095ec2cb969c (patch)
tree      1fcb2aac7a706074a59c329a2e25cac5cc171255 /arch/sparc
parent    2645e7219e88d1e2ab8b2939537bce36e6db9e8c (diff)
sparc32: implement SMP IPIs using the generic functions
The current sparc32 SMP IPI generation is implemented using the cross-call function. The cross-call function uses IRQ15, the NMI, which has the effect that IPIs will interrupt IRQ-critical areas and hang the system; typically, code at or just after a spin_lock_irqsave() call can be aborted. The cross-call functionality must still exist to flush caches/TLBs.

This patch provides CPU models a custom way to implement generation of IPIs on the generic code's request. The typical approach is to generate an IRQ for each IPI case. After this patch each sparc32 SMP CPU model needs to implement IPIs in order to function properly.

Signed-off-by: Daniel Hellstrom <daniel@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
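For context beyond the patch text: a minimal sketch of what the per-CPU-model side of the new interface could look like. The three smp_ipi_* hook names and the BTFIXUPSET_CALL()/BTFIXUPCALL_NORM runtime-patching macros come from the patch below; the example_* functions, the EXAMPLE_IPI_* levels and the set_cpu_int() helper are hypothetical stand-ins for model-specific interrupt-controller pokes.

/* Hypothetical soft-IRQ levels; a real CPU model picks levels its
 * interrupt controller can raise on a chosen CPU. */
#define EXAMPLE_IPI_RESCHED	13
#define EXAMPLE_IPI_SINGLE	12
#define EXAMPLE_IPI_MASK	11

static void example_ipi_resched(int cpu)
{
	/* Raise a soft interrupt on the target CPU; plain trap
	 * entry/return is enough to trigger a reschedule there. */
	set_cpu_int(cpu, EXAMPLE_IPI_RESCHED);	/* hypothetical helper */
}

static void example_ipi_single(int cpu)
{
	set_cpu_int(cpu, EXAMPLE_IPI_SINGLE);
}

static void example_ipi_mask_one(int cpu)
{
	set_cpu_int(cpu, EXAMPLE_IPI_MASK);
}

void __init example_smp_setup(void)
{
	/* Register the generators with the hooks this patch adds. */
	BTFIXUPSET_CALL(smp_ipi_resched, example_ipi_resched, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_single, example_ipi_single, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_ipi_mask_one, example_ipi_mask_one, BTFIXUPCALL_NORM);
}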
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                    |  3
-rw-r--r--  arch/sparc/include/asm/cpudata_32.h   |  5
-rw-r--r--  arch/sparc/include/asm/smp_32.h       | 22
-rw-r--r--  arch/sparc/kernel/irq_32.c            | 10
-rw-r--r--  arch/sparc/kernel/smp_32.c            | 47
5 files changed, 71 insertions, 16 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a56630d4f14b..63a027c9ada5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,7 +28,7 @@ config SPARC
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IRQ_SHOW
-
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config SPARC32
 	def_bool !64BIT
@@ -47,7 +47,6 @@ config SPARC64
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_SYSCALL_TRACEPOINTS
-	select USE_GENERIC_SMP_HELPERS if SMP
 	select RTC_DRV_CMOS
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index 31d48a0e32c7..a4c5a938b936 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -16,6 +16,10 @@ typedef struct {
 	unsigned long clock_tick;
 	unsigned int multiplier;
 	unsigned int counter;
+#ifdef CONFIG_SMP
+	unsigned int irq_resched_count;
+	unsigned int irq_call_count;
+#endif
 	int prom_node;
 	int mid;
 	int next;
@@ -23,5 +27,6 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
+#define local_cpu_data() __get_cpu_var(__cpu_data)
 
 #endif /* _SPARC_CPUDATA_H */
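The new local_cpu_data() accessor mirrors the sparc64 version and is what the interrupt entry points in smp_32.c below use to bump the new per-CPU counters, e.g.:

	local_cpu_data().irq_resched_count++;	/* from smp_resched_interrupt() */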
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index d7837dcb2525..7a8e6cbd640c 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -50,12 +50,19 @@ void smp_callin(void);
 void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 
+void smp_resched_interrupt(void);
+void smp_call_function_single_interrupt(void);
+void smp_call_function_interrupt(void);
+
 struct seq_file;
 void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
 BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
+BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
+BTFIXUPDEF_CALL(void, smp_ipi_single, int);
+BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
 BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
 BTFIXUPDEF_BLACKBOX(load_current)
 
@@ -73,19 +80,8 @@ static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 			unsigned long arg3, unsigned long arg4)
 { smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
 
-static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
-{
-	xc1((smpfunc_t)func, (unsigned long)info);
-	return 0;
-}
-
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-					   void *info, int wait)
-{
-	smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
-		       (unsigned long) info, 0, 0, 0);
-	return 0;
-}
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 static inline int cpu_logical_map(int cpu)
 {
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 197e1ba85484..9b89d842913c 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -206,6 +206,16 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
+#ifdef CONFIG_SMP
+	seq_printf(p, "RES: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", cpu_data(j).irq_resched_count);
+	seq_printf(p, "     IPI rescheduling interrupts\n");
+	seq_printf(p, "CAL: ");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", cpu_data(j).irq_call_count);
+	seq_printf(p, "     IPI function call interrupts\n");
+#endif
 	seq_printf(p, "NMI: ");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", cpu_data(j).counter);
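With the counters hooked up, a two-CPU sparc32 machine would report IPI activity in /proc/interrupts roughly as follows (counts invented for illustration):

RES:        42         17      IPI rescheduling interrupts
CAL:         5          9      IPI function call interrupts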
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 4a1d5b7f20d3..2710602281de 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -123,13 +123,58 @@ struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
 
 void smp_send_reschedule(int cpu)
 {
-	/* See sparc64 */
+	/*
+	 * CPU model dependent way of implementing IPI generation targeting
+	 * a single CPU. The trap handler needs only to do trap entry/return
+	 * to call schedule.
+	 */
+	BTFIXUP_CALL(smp_ipi_resched)(cpu);
 }
 
 void smp_send_stop(void)
 {
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+	/* trigger one IPI single call on one CPU */
+	BTFIXUP_CALL(smp_ipi_single)(cpu);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	int cpu;
+
+	/* trigger IPI mask call on each CPU */
+	for_each_cpu(cpu, mask)
+		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+}
+
+void smp_resched_interrupt(void)
+{
+	local_cpu_data().irq_resched_count++;
+	/*
+	 * do nothing, since it all was about calling re-schedule
+	 * routine called by interrupt return code.
+	 */
+}
+
+void smp_call_function_single_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
+}
+
+void smp_call_function_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_interrupt();
+	local_cpu_data().irq_call_count++;
+	irq_exit();
+}
+
 void smp_flush_cache_all(void)
 {
 	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
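The receiving side is also the CPU model's responsibility: its low-level interrupt trap entries must end up calling the three handlers added above. A hypothetical C-level dispatcher, reusing the made-up EXAMPLE_IPI_* levels from the sketch near the top:

/* Hypothetical dispatcher, called from the interrupt trap entry with
 * the soft-IRQ level that fired on this CPU. */
void example_ipi_dispatch(int level)
{
	switch (level) {
	case EXAMPLE_IPI_RESCHED:
		/* Counter bump only; the reschedule itself happens on
		 * return from the trap. */
		smp_resched_interrupt();
		break;
	case EXAMPLE_IPI_SINGLE:
		smp_call_function_single_interrupt();
		break;
	case EXAMPLE_IPI_MASK:
		smp_call_function_interrupt();
		break;
	}
}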