author	Rusty Russell <rusty@rustcorp.com.au>	2009-03-15 23:42:48 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-03-15 23:42:48 -0400
commit	5d8c39f68e1dc78c1a958e28bc685a5bac125b21 (patch)
tree	7ddfa61f4cba4923ad6d3036947ac652035e65a2 /arch
parent	2af51a3f817a22661fcb52da7c96d078a699f40f (diff)
cpumask: use mm_cpumask() wrapper: ia64
Makes code future-proof against the impending change to mm->cpu_vm_mask. It's also a chance to use the new cpumask_ ops which take a pointer (the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
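For context, a minimal sketch of the wrapper and the pointer-taking ops this patch migrates to; this paraphrases the 2.6.30-era cpumask API (the exact definitions live in <linux/sched.h> and <linux/cpumask.h>, not in this diff):

	/* Future-safe accessor for struct mm_struct's cpu_vm_mask
	 * (sketch of the assumed <linux/sched.h> definition): */
	#define mm_cpumask(mm)	(&(mm)->cpu_vm_mask)

	/* Deprecated ops poke the struct cpumask directly: */
	cpus_clear(mm->cpu_vm_mask);
	cpu_set(cpu, mm->cpu_vm_mask);

	/* Their replacements take a pointer, so callers survive a
	 * later change in cpu_vm_mask's representation: */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(cpu, mm_cpumask(mm));

Because mm_cpumask() is the only place that names the field, a later change to mm->cpu_vm_mask touches one definition instead of every arch.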
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/include/asm/mmu_context.h	6
-rw-r--r--	arch/ia64/mm/tlb.c	2
-rw-r--r--	arch/ia64/sn/kernel/sn2/sn2_smp.c	4
3 files changed, 6 insertions, 6 deletions
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db930..7f2a456603cb 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 	/* re-check, now that we've got the lock: */
 	context = mm->context;
 	if (context == 0) {
-		cpus_clear(mm->cpu_vm_mask);
+		cpumask_clear(mm_cpumask(mm));
 		if (ia64_ctx.next >= ia64_ctx.limit) {
 			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 				ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
 
 	do {
 		context = get_mmu_context(mm);
-		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		reload_context(context);
 		/*
 		 * in the unlikely event of a TLB-flush by another thread,
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index bd9818a36b47..b9f3d7bbb338 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 
 	preempt_disable();
 #ifdef CONFIG_SMP
-	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
 		platform_global_tlb_purge(mm, start, end, nbits);
 		preempt_enable();
 		return;
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 209e1eb467da..3c2f242d90cb 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
 	unsigned long itc;
 
 	itc = ia64_get_itc();
-	smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+	smp_flush_tlb_cpumask(*mm_cpumask(mm));
 	itc = ia64_get_itc() - itc;
 	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
 	__get_cpu_var(ptcstats).shub_ipi_flushes++;
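Note the dereference in the new line above: on ia64, smp_flush_tlb_cpumask() still takes its cpumask_t by value at this point, so the pointer returned by mm_cpumask() has to be dereferenced. Assumed declaration (from arch/ia64/kernel/smp.c of this era, not part of this diff):

	void smp_flush_tlb_cpumask(cpumask_t xcpumask);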
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	nodes_clear(nodes_flushed);
 	i = 0;
 
-	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+	for_each_cpu(cpu, mm_cpumask(mm)) {
 		cnode = cpu_to_node(cpu);
 		node_set(cnode, nodes_flushed);
 		lcpu = cpu;
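For reference, the new iterator used above walks the mask through a pointer instead of copying it; a simplified sketch of its shape, assuming the <linux/cpumask.h> definition of this era:

	#define for_each_cpu(cpu, mask)				\
		for ((cpu) = -1;				\
			(cpu) = cpumask_next((cpu), (mask)),	\
			(cpu) < nr_cpu_ids;)

Iteration stops once cpumask_next() runs past nr_cpu_ids, so the loop body only sees CPUs whose bits are set in *mask.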