author | Rusty Russell <rusty@rustcorp.com.au> | 2009-09-24 11:34:51 -0400
---|---|---
committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-09-23 20:04:52 -0400
commit | 78f1c4d6b027993763a5aba83873b0462d06db8f (patch)
tree | d7a97d02e838b32c458c4ef23e9583d6faedc648 /arch/x86/mm
parent | fa40699b975131028a61aa8e095b0b17f350da40 (diff)
cpumask: use mm_cpumask() wrapper: x86
Makes code futureproof against the impending change to mm->cpu_vm_mask (to be a pointer).
It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
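As an aside, the reason the wrapper future-proofs callers is easy to see in a small userspace sketch (the struct, field and helper names below are illustrative stand-ins, not the kernel's own definitions): code that goes through an accessor keeps compiling unchanged whether the mask is embedded in the owning structure or later hung off it as a pointer, which is what mm_cpumask() buys the callers touched by this patch.

/* Minimal sketch of the accessor-wrapper idea; not kernel code. */
#include <stdio.h>

struct mask { unsigned long bits; };

struct owner {
	struct mask cpus;	/* could later become: struct mask *cpus; */
};

/* Callers use this wrapper instead of touching the field directly,
 * so only the wrapper body changes if the field becomes a pointer. */
static struct mask *owner_mask(struct owner *o)
{
	return &o->cpus;	/* would become: return o->cpus; */
}

/* Pointer-taking op, analogous in spirit to cpumask_clear_cpu(). */
static void mask_clear_bit(int bit, struct mask *m)
{
	m->bits &= ~(1UL << bit);
}

int main(void)
{
	struct owner o = { .cpus = { .bits = 0xfUL } };

	mask_clear_bit(2, owner_mask(&o));	/* caller never names the field */
	printf("bits = %#lx\n", o.cpus.bits);	/* prints 0xb */
	return 0;
}

The same reasoning motivates preferring the pointer-taking cpumask_*() ops over the older macros that operate on a cpumask_t value: once the mask lives behind a pointer, only the pointer-based interfaces still fit.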
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/tlb.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index c814e144a3f0..36fe08eeb5c3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 	else
 		leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		leave_mm(smp_processor_id());
 	}
 
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 
 	preempt_enable();
 }