author		Ingo Molnar <mingo@elte.hu>	2008-07-15 15:55:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-15 15:55:59 -0400
commit		1a781a777b2f6ac46523fe92396215762ced624d (patch)
tree		4f34bb4aade85c0eb364b53d664ec7f6ab959006 /arch/powerpc/kernel
parent		b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent		42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff)
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:

	arch/powerpc/Kconfig
	arch/s390/kernel/time.c
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/cpu/perfctr-watchdog.c
	arch/x86/kernel/i8259_64.c
	arch/x86/kernel/ldt.c
	arch/x86/kernel/nmi_64.c
	arch/x86/kernel/smpboot.c
	arch/x86/xen/smp.c
	include/asm-x86/hw_irq_32.h
	include/asm-x86/hw_irq_64.h
	include/asm-x86/mach-default/irq_vectors.h
	include/asm-x86/mach-voyager/irq_vectors.h
	include/asm-x86/smp.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/machine_kexec_64.c	  2
-rw-r--r--	arch/powerpc/kernel/rtas.c	  2
-rw-r--r--	arch/powerpc/kernel/smp.c	234
-rw-r--r--	arch/powerpc/kernel/tau_6xx.c	  4
-rw-r--r--	arch/powerpc/kernel/time.c	  2
5 files changed, 25 insertions, 219 deletions
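
The caller-visible change repeated throughout this diff is that smp_call_function() and on_each_cpu() drop their unused "nonatomic" argument and keep only (func, info, wait). A minimal sketch of a caller before and after the merge; the worker function and its name are invented for illustration and are not part of this commit:

#include <linux/smp.h>

/* Hypothetical worker, not taken from this commit: it only exists to show
 * the new argument list.  It runs in IPI context on each remote CPU, so it
 * must be fast and non-blocking. */
static void drain_local_queue(void *info)
{
        /* per-CPU work would go here */
}

static void drain_all_queues(void)
{
        /* before the generic-ipi merge:
         *      smp_call_function(drain_local_queue, NULL, 0, 1);
         * after it, the unused "nonatomic" flag is gone:
         */
        smp_call_function(drain_local_queue, NULL, 1);  /* wait == 1 */
}

The same three-argument form shows up in every hunk below.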
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 704375bda73a..b732b5f8e356 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void)
 {
 	int my_cpu, i, notified=-1;
 
-	smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
+	smp_call_function(kexec_smp_down, NULL, /* wait */0);
 	my_cpu = get_cpu();
 
 	/* check the others cpus are now down (via paca hw cpu id == -1) */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 34843c318419..647f3e8677dc 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 	/* Call function on all CPUs.  One of us will make the
 	 * rtas call
 	 */
-	if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
 		data.error = -EINVAL;
 
 	wait_for_completion(&done);
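
on_each_cpu() follows the same trimming as smp_call_function() but also runs the function on the calling CPU, which is why rtas_ibm_suspend_me() can use it to reach every processor. A hedged sketch of a post-merge caller; the per-CPU counter and helpers are invented and not in the kernel:

#include <linux/smp.h>
#include <linux/percpu.h>

/* Illustrative only: shows on_each_cpu()'s post-merge (func, info, wait)
 * form with a made-up per-CPU counter. */
static DEFINE_PER_CPU(unsigned long, poke_count);

static void poke_cpu(void *unused)
{
        __get_cpu_var(poke_count)++;    /* bumps the local CPU's counter */
}

static void poke_all_cpus(void)
{
        /* old form: on_each_cpu(poke_cpu, NULL, 1, 1); */
        on_each_cpu(poke_cpu, NULL, 1); /* wait until every CPU has run it */
}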
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1457aa0a08f1..5191b46a611e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
 
 static volatile unsigned int cpu_callin_map[NR_CPUS];
 
-void smp_call_function_interrupt(void);
-
 int smt_enabled_at_boot = 1;
 
-static int ipi_fail_ok;
-
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
 #ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
 {
 	switch(msg) {
 	case PPC_MSG_CALL_FUNCTION:
-		smp_call_function_interrupt();
+		generic_smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
 		/* XXX Do we have to do this? */
 		set_need_resched();
 		break;
+	case PPC_MSG_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
 	case PPC_MSG_DEBUGGER_BREAK:
 		if (crash_ipi_function_ptr) {
 			crash_ipi_function_ptr(get_irq_regs());
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu)
 		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+}
+
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
@@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy)
 		;
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT	8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
-				   int nonatomic, int wait, cpumask_t map)
-{
-	struct call_data_struct data;
-	int ret = -1, num_cpus;
-	int cpu;
-	u64 timeout;
-
-	if (unlikely(smp_ops == NULL))
-		return ret;
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	/* remove 'self' from the map */
-	if (cpu_isset(smp_processor_id(), map))
-		cpu_clear(smp_processor_id(), map);
-
-	/* sanity check the map, remove any non-online processors. */
-	cpus_and(map, map, cpu_online_map);
-
-	num_cpus = cpus_weight(map);
-	if (!num_cpus)
-		goto done;
-
-	call_data = &data;
-	smp_wmb();
-	/* Send a message to all CPUs in the map */
-	for_each_cpu_mask(cpu, map)
-		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
-	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
-	/* Wait for indication that they have received the message */
-	while (atomic_read(&data.started) != num_cpus) {
-		HMT_low();
-		if (get_tb() >= timeout) {
-			printk("smp_call_function on cpu %d: other cpus not "
-			       "responding (%d)\n", smp_processor_id(),
-			       atomic_read(&data.started));
-			if (!ipi_fail_ok)
-				debugger(NULL);
-			goto out;
-		}
-	}
-
-	/* optionally wait for the CPUs to complete */
-	if (wait) {
-		while (atomic_read(&data.finished) != num_cpus) {
-			HMT_low();
-			if (get_tb() >= timeout) {
-				printk("smp_call_function on cpu %d: other "
-				       "cpus not finishing (%d/%d)\n",
-				       smp_processor_id(),
-				       atomic_read(&data.finished),
-				       atomic_read(&data.started));
-				debugger(NULL);
-				goto out;
-			}
-		}
-	}
-
- done:
-	ret = 0;
-
- out:
-	call_data = NULL;
-	HMT_medium();
-	return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
-			       int nonatomic, int wait)
-{
-	int ret;
-	spin_lock(&call_lock);
-	ret =__smp_call_function_map(func, info, nonatomic, wait,
-				     cpu_online_map);
-	spin_unlock(&call_lock);
-	return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	return __smp_call_function(func, info, nonatomic, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	cpumask_t map = CPU_MASK_NONE;
-	int ret = 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	if (!cpu_online(cpu))
-		return -EINVAL;
-
-	cpu_set(cpu, map);
-	if (cpu != get_cpu()) {
-		spin_lock(&call_lock);
-		ret = __smp_call_function_map(func, info, nonatomic, wait, map);
-		spin_unlock(&call_lock);
-	} else {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	}
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 void smp_send_stop(void)
 {
-	int nolock;
-
-	/* It's OK to fail sending the IPI, since the alternative is to
-	 * be stuck forever waiting on the other CPU to take the interrupt.
-	 *
-	 * It's better to at least continue and go through reboot, since this
-	 * function is usually called at panic or reboot time in the first
-	 * place.
-	 */
-	ipi_fail_ok = 1;
-
-	/* Don't deadlock in case we got called through panic */
-	nolock = !spin_trylock(&call_lock);
-	__smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
-	if (!nolock)
-		spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
-	void (*func) (void *info);
-	void *info;
-	int wait;
-
-	/* call_data will be NULL if the sender timed out while
-	 * waiting on us to receive the call.
-	 */
-	if (!call_data)
-		return;
-
-	func = call_data->func;
-	info = call_data->info;
-	wait = call_data->wait;
-
-	if (!wait)
-		smp_mb__before_atomic_inc();
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
-	(*func)(info);
-	if (wait) {
-		smp_mb__before_atomic_inc();
-		atomic_inc(&call_data->finished);
-	}
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;
@@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused)
 
 	secondary_cpu_time_init();
 
-	spin_lock(&call_lock);
+	ipi_call_lock();
 	cpu_set(cpu, cpu_online_map);
-	spin_unlock(&call_lock);
+	ipi_call_unlock();
 
 	local_irq_enable();
 
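
For single-CPU cross calls, the generic helpers now own the queuing and completion tracking that the deleted call_data machinery used to provide; the architecture only supplies the two message hooks added above. A sketch of how a call flows through the new path; the PVR-reading helper is hypothetical, only the dispatch chain is taken from this diff:

#include <linux/smp.h>
#include <asm/reg.h>

/* Hypothetical helper, not from this commit: reads the processor version
 * register on whichever CPU it runs on. */
static void read_remote_pvr(void *info)
{
        *(unsigned long *)info = mfspr(SPRN_PVR);
}

static unsigned long pvr_of_cpu(int cpu)
{
        unsigned long pvr = 0;

        /*
         * The generic code queues the request and calls the new powerpc
         * hook arch_send_call_function_single_ipi(cpu).  On the target CPU,
         * smp_message_recv() sees PPC_MSG_CALL_FUNC_SINGLE, hands it to
         * generic_smp_call_function_single_interrupt(), and that runs
         * read_remote_pvr() there.  wait == 1 blocks until it has finished.
         */
        smp_call_function_single(cpu, read_remote_pvr, &pvr, 1);
        return pvr;
}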
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 368a4934f7ee..c3a56d65c5a9 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused)
 
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
-	on_each_cpu(tau_timeout, NULL, 1, 0);
+	on_each_cpu(tau_timeout, NULL, 0);
 }
 
 /*
@@ -234,7 +234,7 @@ int __init TAU_init(void)
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-	on_each_cpu(TAU_init_smp, NULL, 1, 0);
+	on_each_cpu(TAU_init_smp, NULL, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 73401e83739a..f1a38a6c1e2d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -322,7 +322,7 @@ void snapshot_timebases(void)
 {
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+	on_each_cpu(snapshot_tb_and_purr, NULL, 1);
 }
 
 /*