author     Rusty Russell <rusty@rustcorp.com.au>    2009-09-24 11:34:49 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>    2009-09-23 20:04:49 -0400
commit     56f8ba83a52b9f9e3711eff8e54168ac14aa288f (patch)
tree       e030f7f3a191384268d86863ca43237a137e8f51
parent     a6a01063de6298c60f2506dc7659403e02b4b224 (diff)
cpumask: use mm_cpumask() wrapper: arm
Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
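As context for the diff below, a minimal sketch (not part of the patch) of the before/after idiom this conversion applies; the helper function name is hypothetical and the includes are indicative only:

/* Illustrative only: contrast the deprecated value-based cpu_* ops
 * with the pointer-based cpumask_* ops used via the mm_cpumask()
 * accessor. The helper name is made up for this sketch. */
#include <linux/sched.h>
#include <linux/cpumask.h>

static inline int mm_has_run_on(struct mm_struct *mm, unsigned int cpu)
{
        /* Old style: reach into the struct member and pass the mask by value:
         *
         *      return cpu_isset(cpu, mm->cpu_vm_mask);
         */

        /* New style: go through the mm_cpumask() accessor and the
         * pointer-taking op, so the caller keeps working if the
         * representation of mm->cpu_vm_mask changes later. */
        return cpumask_test_cpu(cpu, mm_cpumask(mm));
}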
-rw-r--r--  arch/arm/include/asm/cacheflush.h    8
-rw-r--r--  arch/arm/include/asm/mmu_context.h   7
-rw-r--r--  arch/arm/include/asm/tlbflush.h      4
-rw-r--r--  arch/arm/kernel/smp.c               10
-rw-r--r--  arch/arm/mm/context.c                2
-rw-r--r--  arch/arm/mm/flush.c                 10
6 files changed, 21 insertions, 20 deletions
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1a711ea8418..fd03fb63a33 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                 __cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                         vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = user_addr & PAGE_MASK;
                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
         }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                     unsigned long uaddr, void *kaddr,
                     unsigned long len, int write)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = (unsigned long)kaddr;
                 __cpuc_coherent_kern_range(addr, addr + len);
         }
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index bcdb9291ef0..de6cefb329d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
         /* check for possible thread migration */
-        if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+        if (!cpumask_empty(mm_cpumask(next)) &&
+            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                 __flush_icache_all();
 #endif
-        if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
-                        cpu_clear(cpu, prev->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
 #endif
 }
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c964f3fc3bc..a45ab5dd825 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                 if (tlb_flag(TLB_V3_FULL))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
                 if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         if (tlb_flag(TLB_WB))
                 dsb();
 
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 if (tlb_flag(TLB_V3_PAGE))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
                 if (tlb_flag(TLB_V4_U_PAGE))
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index de885fd256c..e0d32770bb3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
         read_lock(&tasklist_lock);
         for_each_process(p) {
                 if (p->mm)
-                        cpu_clear(cpu, p->mm->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
         }
         read_unlock(&tasklist_lock);
 
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         atomic_inc(&mm->mm_users);
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
-        cpu_set(cpu, mm->cpu_vm_mask);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
         cpu_switch_mm(mm->pgd, mm);
         enter_lazy_tlb(mm, current);
         local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
         if (tlb_ops_need_broadcast())
-                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
         else
                 local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                 struct tlb_args ta;
                 ta.ta_vma = vma;
                 ta.ta_start = uaddr;
-                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                 ta.ta_vma = vma;
                 ta.ta_start = start;
                 ta.ta_end = end;
-                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_range(vma, start, end);
 }
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc7438..6bda76a4319 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
         }
         spin_unlock(&cpu_asid_lock);
 
-        mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
         mm->context.id = asid;
 }
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 575f3ad722e..b27942909b2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                         __cpuc_flush_user_all();
                 return;
         }
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                         __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                 vma->vm_flags);
                 return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = user_addr & PAGE_MASK;
                         __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
                 }
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                          unsigned long len, int write)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = (unsigned long)kaddr;
                         __cpuc_coherent_kern_range(addr, addr + len);
                 }
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
         }
 
         /* VIPT non-aliasing cache */
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
             vma->vm_flags & VM_EXEC) {
                 unsigned long addr = (unsigned long)kaddr;
                 /* only flushing the kernel mapping on non-aliasing VIPT */