author	Rusty Russell <rusty@rustcorp.com.au>	2009-03-16 00:10:39 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-03-16 00:10:39 -0400
commit	81f1adf01224f5c0be5f90f43664f799c1f7bb2d (patch)
tree	b38dbf1bc7e144a44ec38a6b1787fa870eac367f /arch/sparc/kernel/smp_32.c
parent	e9b375120b593d3081c2f63e8ccf350cccc8fd68 (diff)
cpumask: use mm_cpumask() wrapper: sparc
Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
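For readers new to the API, here is a minimal before/after sketch (not part of this patch) of the idiom at a typical call site. Note the patch itself keeps the older cpu_clear()/cpus_empty() calls on the local copy; the sketch also shows the pointer-based cpumask_clear_cpu()/cpumask_empty() operators the message alludes to. The function name example_flush() is hypothetical.

#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/smp.h>

/* Hypothetical example, not from the tree. */
static void example_flush(struct mm_struct *mm)
{
	/* Before: struct assignment reaches into mm directly.
	 *	cpumask_t cpu_mask = mm->cpu_vm_mask;
	 *	cpu_clear(smp_processor_id(), cpu_mask);
	 */

	/* After: the mm_cpumask() wrapper hides the field, so this
	 * keeps working if mm->cpu_vm_mask changes representation.
	 */
	cpumask_t cpu_mask = *mm_cpumask(mm);	/* private snapshot */

	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask)) {
		/* cross-call the remaining CPUs here */
	}
}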
Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r--	arch/sparc/kernel/smp_32.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index be1ae37e773..132d81fb261 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -143,7 +143,7 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
@@ -154,12 +154,13 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask)) {
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
 			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+				cpumask_copy(mm_cpumask(mm),
+					     cpumask_of(smp_processor_id()));
 		}
 		local_flush_tlb_mm(mm);
 	}
@@ -171,7 +172,7 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
@@ -185,7 +186,7 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
@@ -198,7 +199,7 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
@@ -211,7 +212,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpumask_t cpu_mask = *mm_cpumask(mm);
 		cpu_clear(smp_processor_id(), cpu_mask);
 		if (!cpus_empty(cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
@@ -240,7 +241,7 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = mm->cpu_vm_mask;
+	cpumask_t cpu_mask = *mm_cpumask(mm);
 	cpu_clear(smp_processor_id(), cpu_mask);
 	if (!cpus_empty(cpu_mask))
 		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
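Every hunk above applies the same idiom, so as a reading aid here is a hedged sketch (not in the tree) of the pattern in isolation: snapshot the mm's CPU mask through mm_cpumask(), drop the local CPU, cross-call whatever remains, then flush locally. flush_remote() and flush_local() are hypothetical stand-ins for the xc1()/BTFIXUP_CALL machinery used in the real file.

#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/smp.h>

/* Hypothetical illustration of the pattern each hunk above repeats. */
static void smp_flush_pattern(struct mm_struct *mm,
			      void (*flush_remote)(struct mm_struct *),
			      void (*flush_local)(struct mm_struct *))
{
	cpumask_t cpu_mask = *mm_cpumask(mm);	/* snapshot, not a live view */

	/* The current CPU is flushed directly, not via cross-call. */
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		flush_remote(mm);		/* e.g. the xc1() cross-call */
	flush_local(mm);			/* e.g. local_flush_tlb_mm() */
}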