Diffstat (limited to 'arch/arm/kernel/smp.c')
 arch/arm/kernel/smp.c | 137 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 131 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 34892758f098..b2085735a2ba 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -36,7 +36,7 @@
  * The present bitmask indicates that the CPU is physically present.
  * The online bitmask indicates that the CPU is up and running.
  */
-cpumask_t cpu_present_mask;
+cpumask_t cpu_possible_map;
 cpumask_t cpu_online_map;
 
 /*
@@ -78,7 +78,7 @@ struct smp_call_struct {
 static struct smp_call_struct * volatile smp_call_function_data;
 static DEFINE_SPINLOCK(smp_call_function_lock);
 
-int __init __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 	pgd_t *pgd;
@@ -159,7 +159,7 @@ int __init __cpu_up(unsigned int cpu)
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void __init secondary_start_kernel(void)
+asmlinkage void __cpuinit secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
@@ -176,6 +176,7 @@ asmlinkage void __init secondary_start_kernel(void)
 	cpu_set(cpu, mm->cpu_vm_mask);
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
+	local_flush_tlb_all();
 
 	cpu_init();
 
@@ -209,7 +210,7 @@ asmlinkage void __init secondary_start_kernel(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
  */
-void __init smp_store_cpu_info(unsigned int cpuid)
+void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 {
 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
@@ -235,7 +236,8 @@ void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	cpu_set(cpu, cpu_present_mask);
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
 	cpu_set(cpu, cpu_online_map);
 }
 
@@ -355,7 +357,7 @@ void show_ipi_list(struct seq_file *p)
 
 	seq_puts(p, "IPI:");
 
-	for_each_online_cpu(cpu)
+	for_each_present_cpu(cpu)
 		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
 
 	seq_putc(p, '\n');
@@ -502,3 +504,126 @@ int __init setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+static int
+on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
+		 cpumask_t mask)
+{
+	int ret = 0;
+
+	preempt_disable();
+
+	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+	if (cpu_isset(smp_processor_id(), mask))
+		func(info);
+
+	preempt_enable();
+
+	return ret;
+}
+
+/**********************************************************************/
+
+/*
+ * TLB operations
+ */
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+	local_flush_tlb_all();
+}
+
+static inline void ipi_flush_tlb_mm(void *arg)
+{
+	struct mm_struct *mm = (struct mm_struct *)arg;
+
+	local_flush_tlb_mm(mm);
+}
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_kernel_page(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_page(ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t mask = mm->cpu_vm_mask;
+
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
+	struct tlb_args ta;
+
+	ta.ta_vma = vma;
+	ta.ta_start = uaddr;
+
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+}
+
+void flush_tlb_kernel_page(unsigned long kaddr)
+{
+	struct tlb_args ta;
+
+	ta.ta_start = kaddr;
+
+	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+		     unsigned long start, unsigned long end)
+{
+	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
+	struct tlb_args ta;
+
+	ta.ta_vma = vma;
+	ta.ta_start = start;
+	ta.ta_end = end;
+
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct tlb_args ta;
+
+	ta.ta_start = start;
+	ta.ta_end = end;
+
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+}
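
For readers unfamiliar with the cross-call pattern the new code uses, here is a standalone userspace sketch. It is not part of the patch: the flush parameters are marshalled into a struct tlb_args, and one handler invocation runs for every CPU in the mask. In the kernel the remote invocations travel as IPIs via smp_call_function_on_cpu() while the caller runs the handler locally; the sketch models both as direct calls, and the extra cpu argument to the handler exists only to label the demo output.

	/* Standalone illustration of the argument-marshalling broadcast
	 * pattern; compile with: cc -std=c99 -o tlbdemo tlbdemo.c
	 */
	#include <stdio.h>

	struct tlb_args {
		unsigned long ta_start;
		unsigned long ta_end;
	};

	/* Stand-in for the per-CPU IPI handler: unmarshal and "flush". */
	static void ipi_flush_range(int cpu, void *arg)
	{
		struct tlb_args *ta = arg;
		printf("cpu%d: flush [%#lx, %#lx)\n",
		       cpu, ta->ta_start, ta->ta_end);
	}

	/* Stand-in for on_each_cpu_mask(): run func once per CPU whose
	 * bit is set in mask.  The kernel sends IPIs for the remote
	 * CPUs; here every call is direct.
	 */
	static void demo_on_each_cpu_mask(void (*func)(int, void *),
					  void *arg, unsigned long mask)
	{
		for (int cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
			if (mask & (1UL << cpu))
				func(cpu, arg);
	}

	int main(void)
	{
		struct tlb_args ta = {
			.ta_start = 0xc0000000UL,
			.ta_end   = 0xc0100000UL,
		};

		/* Pretend CPUs 0 and 2 have used this address space. */
		demo_on_each_cpu_mask(ipi_flush_range, &ta, 0x5UL);
		return 0;
	}

Two design points in the real helper are worth noting. preempt_disable() pins the caller to one CPU, so the smp_processor_id()/cpu_isset() check and the local func(info) call cannot race with task migration; the explicit local call implies that smp_call_function_on_cpu() reaches only remote CPUs. And passing mm->cpu_vm_mask rather than broadcasting means only processors that have actually run the address space take the interrupt.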
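
On the bitmask side, the earlier hunks rely on the standard distinction between the three kernel cpumasks (a summary of conventional semantics, not spelled out in the patch): cpu_possible_map lists every CPU that could ever be brought up, cpu_present_map the CPUs physically present, and cpu_online_map the CPUs currently scheduling work. Hence smp_prepare_boot_cpu() now registers the boot CPU in all three, and show_ipi_list() switches to for_each_present_cpu(), since a present-but-offline CPU may still have an ipi_count worth reporting.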