author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2011-05-16 16:38:07 -0400
committer	David S. Miller <davem@davemloft.net>			2011-05-16 16:38:07 -0400
commit		fb1fece5da027d3c7e69cf44ca8e58aaf0faf520 (patch)
tree		e2c50029304ea0eebef9ca40e8e33888900b7b72 /arch/sparc/kernel/smp_32.c
parent		55dd23eca666876e6028aa35d5e391cfced54871 (diff)
sparc: convert old cpumask API into new one
Adopt the new cpumask API. Most of the changes are trivial; the most important ones remove direct assignment of cpumasks with the = operator, such as:

cpumask_t cpu_mask = *mm_cpumask(mm);
cpus_allowed = current->cpus_allowed;

because cpumask_var_t is not safe to copy by assignment. Leaving such usage in place could block future improvements to the core cpumask code.

No functional change.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
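
For readers unfamiliar with the conversion, the before/after pattern looks roughly like the sketch below. This is illustrative only, not code taken from this commit: flush_others_old(), flush_others_new() and do_remote_flush() are hypothetical names, and struct mm_struct / mm_cpumask() are assumed from the usual kernel mm headers.

	#include <linux/cpumask.h>	/* cpumask_copy(), cpumask_clear_cpu(), cpumask_empty() */
	#include <linux/smp.h>		/* smp_processor_id() */

	static void do_remote_flush(struct mm_struct *mm);	/* hypothetical cross-call helper */

	/* Old style: copies the mask with the = operator and uses the cpu_*()
	 * helpers.  This only works while the mask is an embedded cpumask_t. */
	static void flush_others_old(struct mm_struct *mm)
	{
		cpumask_t cpu_mask = *mm_cpumask(mm);

		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			do_remote_flush(mm);
	}

	/* New style: the accessor-based cpumask_*() API.  cpumask_copy() keeps
	 * working even if the source mask is ever switched to an off-stack
	 * cpumask_var_t (a pointer under CONFIG_CPUMASK_OFFSTACK). */
	static void flush_others_new(struct mm_struct *mm)
	{
		cpumask_t cpu_mask;

		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			do_remote_flush(mm);
	}

In the real file below, the xc1()/xc2()/xc3() cross-calls play the role of do_remote_flush() in this sketch.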
Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r--	arch/sparc/kernel/smp_32.c	51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 705a94e1b8a5..139c312a41f7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -190,9 +190,10 @@ void smp_flush_tlb_all(void)
 void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
 		local_flush_cache_mm(mm);
 	}
@@ -201,9 +202,10 @@ void smp_flush_cache_mm(struct mm_struct *mm)
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask)) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask)) {
 			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
 			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
 				cpumask_copy(mm_cpumask(mm),
@@ -219,9 +221,10 @@ void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
 		local_flush_cache_range(vma, start, end);
 	}
@@ -233,9 +236,10 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
 		local_flush_tlb_range(vma, start, end);
 	}
@@ -246,9 +250,10 @@ void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
 		local_flush_cache_page(vma, page);
 	}
@@ -259,9 +264,10 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask = *mm_cpumask(mm);
-		cpu_clear(smp_processor_id(), cpu_mask);
-		if (!cpus_empty(cpu_mask))
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
 			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
 		local_flush_tlb_page(vma, page);
 	}
@@ -283,9 +289,10 @@ void smp_flush_page_to_ram(unsigned long page)
 
 void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask = *mm_cpumask(mm);
-	cpu_clear(smp_processor_id(), cpu_mask);
-	if (!cpus_empty(cpu_mask))
+	cpumask_t cpu_mask;
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+	if (!cpumask_empty(&cpu_mask))
 		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
 	local_flush_sig_insns(mm, insn_addr);
 }
@@ -439,7 +446,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	};
 
 	if (!ret) {
-		cpu_set(cpu, smp_commenced_mask);
+		cpumask_set_cpu(cpu, &smp_commenced_mask);
 		while (!cpu_online(cpu))
 			mb();
 	}