author     Ingo Molnar <mingo@elte.hu>   2008-07-15 15:55:59 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-07-15 15:55:59 -0400
commit     1a781a777b2f6ac46523fe92396215762ced624d (patch)
tree       4f34bb4aade85c0eb364b53d664ec7f6ab959006 /arch/sh
parent     b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent     42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff)
Merge branch 'generic-ipi' into generic-ipi-for-linus

Conflicts:
	arch/powerpc/Kconfig
	arch/s390/kernel/time.c
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/cpu/perfctr-watchdog.c
	arch/x86/kernel/i8259_64.c
	arch/x86/kernel/ldt.c
	arch/x86/kernel/nmi_64.c
	arch/x86/kernel/smpboot.c
	arch/x86/xen/smp.c
	include/asm-x86/hw_irq_32.h
	include/asm-x86/hw_irq_64.h
	include/asm-x86/mach-default/irq_vectors.h
	include/asm-x86/mach-voyager/irq_vectors.h
	include/asm-x86/smp.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sh')
-rw-r--r--   arch/sh/Kconfig         1
-rw-r--r--   arch/sh/kernel/smp.c   62
2 files changed, 16 insertions(+), 47 deletions(-)
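
The hunks below fall into two groups: arch/sh/kernel/smp.c drops its private smp_call_function() implementation in favour of the generic helpers, providing only the IPI-sending hooks they need, and every caller loses the old "retry" argument because the generic smp_call_function()/on_each_cpu() take just (func, info, wait). As context only (not part of this patch), a minimal caller-side sketch; example_callers() is illustrative, while stop_this_cpu and flush_tlb_all_ipi are the callbacks visible in the diff:

#include <linux/smp.h>

static void example_callers(void)
{
	/* old arch-private API: smp_call_function(func, info, retry, wait) */
	/* new generic API:      smp_call_function(func, info, wait)        */
	smp_call_function(stop_this_cpu, NULL, 0);	/* don't wait */

	/* on_each_cpu() likewise drops "retry" and keeps only "wait" */
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);	/* wait for all CPUs */
}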
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 9a854c8e5274..3e7384f4619c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -688,6 +688,7 @@ config CRASH_DUMP
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
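
Selecting USE_GENERIC_SMP_HELPERS builds the common call-function machinery in kernel/smp.c; in exchange, the architecture must supply the two IPI-sending hooks added in the smp.c diff below. Sketched from the signatures in this patch (cpumask_t is still passed by value in this era), the expected contract is:

/* hooks the generic helpers expect the architecture to provide */
void arch_send_call_function_ipi(cpumask_t mask);
void arch_send_call_function_single_ipi(int cpu);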
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d168f57..60c50841143e 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map);
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
 
-static atomic_t cpus_booted = ATOMIC_INIT(0);
-
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
-
 static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
@@ -175,45 +168,20 @@ static void stop_this_cpu(void *unused)
 
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, 0, 1, 0);
+	smp_call_function(stop_this_cpu, 0, 0);
 }
 
-struct smp_fn_call_struct smp_fn_call = {
-	.lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
-	.finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	unsigned int nr_cpus = atomic_read(&cpus_booted);
-	int i;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&smp_fn_call.lock);
-
-	atomic_set(&smp_fn_call.finished, 0);
-	smp_fn_call.fn = func;
-	smp_fn_call.data = info;
-
-	for (i = 0; i < nr_cpus; i++)
-		if (i != smp_processor_id())
-			plat_send_ipi(i, SMP_MSG_FUNCTION);
-
-	if (wait)
-		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+	int cpu;
 
-	spin_unlock(&smp_fn_call.lock);
+	for_each_cpu_mask(cpu, mask)
+		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+}
 
-	return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
 /* Not really SMP stuff ... */
@@ -229,7 +197,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, 0, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -255,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -292,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -316,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 	fd.addr1 = start;
 	fd.addr2 = end;
-	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -335,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -359,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
 	fd.addr1 = asid;
 	fd.addr2 = vaddr;
 
-	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
 	local_flush_tlb_one(asid, vaddr);
 }
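
Not shown in this diff is the receiving side: a CPU that gets an SMP_MSG_FUNCTION or SMP_MSG_FUNCTION_SINGLE IPI is expected to hand off to the generic dispatchers in kernel/smp.c. A hypothetical sketch of that wiring follows; the handler name and switch layout are illustrative, and the SMP_MSG_* constants are assumed to come from the SH asm/smp.h, while generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt() are the real generic-IPI entry points.

#include <linux/smp.h>
#include <asm/smp.h>	/* SMP_MSG_FUNCTION, SMP_MSG_FUNCTION_SINGLE (assumed) */

/* illustrative IPI receive path; the real SH handler is not in this patch */
void smp_message_recv_sketch(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		/* run the entries queued by smp_call_function() */
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		/* run the per-CPU queue used by smp_call_function_single() */
		generic_smp_call_function_single_interrupt();
		break;
	}
}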