author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:21:23 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:21:23 -0400
commit		dd6d1844af33acb4edd0a40b1770d091a22c94be (patch)
tree		e6bd3549919773a13b770324a4dddb51b194b452 /arch/mips/kernel/smp.c
parent		19f71153b9be219756c6b2757921433a69b7975c (diff)
parent		aaf76a3245c02faba51c96b9a340c14d6bb0dcc0 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (80 commits)
  [MIPS] tlbex.c: Cleanup __init usage.
  [MIPS] WRPPMC serial support move to platform device
  [MIPS] R1: Fix hazard barriers to make kernels work on R2 also.
  [MIPS] VPE: reimplement ELF loader.
  [MIPS] cleanup WRPPMC include files
  [MIPS] Add BUG_ON assertion for attempt to run kernel on the wrong CPU type.
  [MIPS] SMP: Use ISO C struct initializer for local structs.
  [MIPS] SMP: Kill useless casts.
  [MIPS] Kill num_online_cpus() loops.
  [MIPS] SMP: Implement smp_call_function_mask().
  [MIPS] Make facility to convert CPU types to strings generally available.
  [MIPS] Convert list of CPU types from #define to enum.
  [MIPS] Optimize get_unaligned / put_unaligned implementations.
  [MIPS] checkfiles: Fix "need space after that ','" errors.
  [MIPS] Fix "no space between function name and open parenthesis" warnings.
  [MIPS] Allow hardwiring of the CPU type to a single type for optimization.
  [MIPS] tlbex: Size optimize code by declaring a few functions inline.
  [MIPS] pg-r4k.c: Dump the generated code
  [MIPS] Cobalt: Remove cobalt_machine_power_off()
  [MIPS] Cobalt: Move reset port definition to arch/mips/cobalt/reset.c
  ...
Diffstat (limited to 'arch/mips/kernel/smp.c')
-rw-r--r--	arch/mips/kernel/smp.c	123
1 file changed, 59 insertions(+), 64 deletions(-)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 73b0dab02668..432f2e376aea 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
+#include <asm/time.h>
 
 #ifdef CONFIG_MIPS_MT_SMTC
 #include <asm/mipsmtregs.h>
@@ -70,6 +71,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
+	mips_clockevent_init();
 	prom_init_secondary();
 
 	/*
@@ -95,6 +97,8 @@ struct call_data_struct *call_data;
 
 /*
  * Run a function on all other CPUs.
+ *
+ *  <mask>	cpuset_t of all processors to run the function on.
  *  <func>	The function to run. This must be fast and non-blocking.
  *  <info>	An arbitrary pointer to pass to the function.
  *  <retry>	If true, keep retrying until ready.
@@ -119,18 +123,20 @@ struct call_data_struct *call_data;
  *  Spin waiting for call_lock
  *  Deadlock			Deadlock
  */
-int smp_call_function (void (*func) (void *info), void *info, int retry,
-	int wait)
+int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
+	void *info, int retry, int wait)
 {
 	struct call_data_struct data;
-	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();
+	int cpus;
 
 	/*
 	 * Can die spectacularly if this CPU isn't yet marked online
 	 */
 	BUG_ON(!cpu_online(cpu));
 
+	cpu_clear(cpu, mask);
+	cpus = cpus_weight(mask);
 	if (!cpus)
 		return 0;
 
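For orientation, a caller of the new smp_call_function_mask() could look roughly like the sketch below. It is illustrative only and not part of this patch; do_remote and run_on_others are made-up names, and the callback obeys the fast/non-blocking rule spelled out in the comment block above.

static void do_remote(void *info)
{
	/* runs in SMP_CALL_FUNCTION IPI context on each target CPU;
	 * must be fast and must not block */
}

static void run_on_others(void)
{
	cpumask_t mask = cpu_online_map;	/* start from all online CPUs */

	/* smp_call_function_mask() clears the calling CPU from the mask
	 * itself, so including it here is harmless */
	smp_call_function_mask(mask, do_remote, NULL, 0, 1 /* wait */);
}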
@@ -149,9 +155,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for_each_online_cpu(i)
-		if (i != cpu)
-			core_send_ipi(i, SMP_CALL_FUNCTION);
+	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
 	/* FIXME: lock-up detection, backtrace on lock-up */
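core_send_ipi_mask() comes from the platform SMP support code rather than this file. On a platform that only provides the per-CPU core_send_ipi(), a generic fallback would plausibly have the shape below; this is an assumption about the helper, not code taken from the patch.

static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	/* one IPI per CPU whose bit is set in the mask */
	for_each_cpu_mask(i, mask)
		core_send_ipi(i, action);
}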
@@ -167,6 +171,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	return 0;
 }
 
+int smp_call_function(void (*func) (void *info), void *info, int retry,
+	int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+}
 
 void smp_call_function_interrupt(void)
 {
@@ -197,8 +206,7 @@ void smp_call_function_interrupt(void)
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	int retry, int wait)
 {
-	struct call_data_struct data;
-	int me;
+	int ret, me;
 
 	/*
 	 * Can die spectacularly if this CPU isn't yet marked online
@@ -217,33 +225,8 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		return 0;
 	}
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to the other CPU */
-	core_send_ipi(cpu, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != 1)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != 1)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
+				     wait);
 
 	put_cpu();
 	return 0;
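As a hedged usage sketch, smp_call_function_single(), now a thin wrapper over smp_call_function_mask() with cpumask_of_cpu(), is typically used to fetch per-CPU state. read_count and count_on below are hypothetical names, not from this patch; read_c0_count() is the usual MIPS accessor for the CP0 Count register.

static void read_count(void *arg)
{
	*(unsigned int *)arg = read_c0_count();	/* CP0 Count register */
}

static unsigned int count_on(int cpu)
{
	unsigned int val = 0;

	/* wait = 1: block until the remote CPU has run read_count() */
	smp_call_function_single(cpu, read_count, &val, 0, 1);
	return val;
}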
@@ -390,12 +373,15 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
+		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_mm(mm);
 
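The else branch leans on MIPS ASID bookkeeping: zeroing cpu_context(cpu, mm) forces the next activation of mm on that CPU to allocate a fresh ASID, which is cheaper than an IPI when no other CPU can currently be running the mm. Below is a minimal standalone userspace model of that idea, with the cpumask reduced to a plain bitmap, the calling CPU skipped explicitly, and all names hypothetical.

#include <stdio.h>

#define NR_CPUS 8

static unsigned long online_map = 0x2f;			/* CPUs 0-3 and 5 online */
static unsigned long cpu_ctx[NR_CPUS] = { 3, 7, 9, 4, 0, 6, 0, 0 };

int main(void)
{
	int self = 2, cpu;				/* pretend we run on CPU 2 */
	unsigned long mask = online_map & ~(1UL << self);

	/* invalidate the mm's context on every other online CPU; each
	 * one allocates a fresh ASID when it next activates the mm */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if ((mask & (1UL << cpu)) && cpu_ctx[cpu])
			cpu_ctx[cpu] = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: ctx=%lu\n", cpu, cpu_ctx[cpu]);
	return 0;
}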
@@ -410,7 +396,7 @@ struct flush_tlb_data {
 
 static void flush_tlb_range_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 
 	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
 }
@@ -421,17 +407,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
 	preempt_disable();
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		struct flush_tlb_data fd;
+		struct flush_tlb_data fd = {
+			.vma = vma,
+			.addr1 = start,
+			.addr2 = end,
+		};
 
-		fd.vma = vma;
-		fd.addr1 = start;
-		fd.addr2 = end;
-		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
+		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
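The fd change above is the "ISO C struct initializer" cleanup from the commit list: a designated initializer replaces piecewise assignment and zero-fills any member not named. A standalone illustration follows; the struct and values are hypothetical, mirroring flush_tlb_data.

#include <stdio.h>

struct flush_tlb_data_model {
	void *vma;
	unsigned long addr1;
	unsigned long addr2;
};

int main(void)
{
	/* members left unnamed, like .vma here, are zero-initialized */
	struct flush_tlb_data_model fd = {
		.addr1 = 0x1000,
		.addr2 = 0x2000,
	};

	printf("vma=%p range=%#lx-%#lx\n", fd.vma, fd.addr1, fd.addr2);
	return 0;
}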
@@ -439,23 +429,24 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 
 static void flush_tlb_kernel_range_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 
 	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct flush_tlb_data fd;
+	struct flush_tlb_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
 
-	fd.addr1 = start;
-	fd.addr2 = end;
-	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
 }
 
 static void flush_tlb_page_ipi(void *info)
 {
-	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+	struct flush_tlb_data *fd = info;
 
 	local_flush_tlb_page(fd->vma, fd->addr1);
 }
@@ -464,16 +455,20 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	preempt_disable();
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
-		struct flush_tlb_data fd;
+		struct flush_tlb_data fd = {
+			.vma = vma,
+			.addr1 = page,
+		};
 
-		fd.vma = vma;
-		fd.addr1 = page;
-		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
+		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, vma->vm_mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, vma->vm_mm))
+				cpu_context(cpu, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();