about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2009-09-24 11:34:49 -0400
committerRusty Russell <rusty@rustcorp.com.au>2009-09-23 20:04:50 -0400
commit49b92050f6ce436cde7f495bbb27437bcb09e238 (patch)
tree6f4ce2326a870f4494fa72d6be0fdf42c4c749d5 /arch
parent56f8ba83a52b9f9e3711eff8e54168ac14aa288f (diff)
cpumask: use mm_cpumask() wrapper: m32r
Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Hirokazu Takata <takata@linux-m32r.org> (fixes)
Diffstat (limited to 'arch')
-rw-r--r--arch/m32r/include/asm/mmu_context.h4
-rw-r--r--arch/m32r/kernel/smp.c8
2 files changed, 6 insertions, 6 deletions
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index 91909e5dd9d0..a70a3df33635 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif /* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;
@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif /* CONFIG_SMP */
 }
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 45c512bcd4ca..1b7598e6f6e8 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {
@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))
@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP
@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;
 