author		Rusty Russell <rusty@rustcorp.com.au>	2009-01-11 00:58:09 -0500
committer	Ingo Molnar <mingo@elte.hu>		2009-01-11 13:13:06 -0500
commit		4595f9620cda8a1e973588e743cf5f8436dd20c6
tree		98a47cff17f58262979c7d04590cb3ffc0deead9 /arch/x86/kernel/tlb_uv.c
parent		802bf931f2688ad125b73db597ce63cc842fb27a
x86: change flush_tlb_others to take a const struct cpumask
Impact: reduce stack usage, use new cpumask API.
This is made a little more tricky by uv_flush_tlb_others, which
actually alters its argument so that an IPI can be sent to the
remaining cpus in the mask.
I solve this by allocating a cpumask_var_t for this case and falling back
to IPI should this fail.
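For illustration, a minimal sketch of that fallback pattern (the wrapper
function and the helper name flush_tlb_others_ipi() are assumptions for
this sketch, not the committed code):

/* Illustrative only: uv_flush_or_ipi() and flush_tlb_others_ipi()
 * are hypothetical names, not the code this patch adds. */
static void uv_flush_or_ipi(const struct cpumask *cpumask,
			    struct mm_struct *mm, unsigned long va)
{
	cpumask_var_t after_uv_flush;

	if (alloc_cpumask_var(&after_uv_flush, GFP_ATOMIC)) {
		/* Scratch copy that uv_flush_tlb_others() may alter. */
		cpumask_copy(after_uv_flush, cpumask);
		if (!uv_flush_tlb_others(after_uv_flush, mm, va))
			/* BAU left some cpus unflushed; IPI the rest. */
			flush_tlb_others_ipi(after_uv_flush, mm, va);
		free_cpumask_var(after_uv_flush);
	} else {
		/* No memory for a scratch mask: IPI the whole mask. */
		flush_tlb_others_ipi(cpumask, mm, va);
	}
}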
To eliminate temporaries in the caller, all flush_tlb_others implementations
now do the this-cpu-elimination step themselves.
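The elimination step itself is just a mask operation; a sketch, with
flush_mask and incoming_mask as hypothetical names:

	/* Drop the local cpu from the set to be flushed remotely;
	 * each implementation now does this itself (names assumed). */
	cpumask_andnot(flush_mask, incoming_mask,
		       cpumask_of(smp_processor_id()));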
Note also the curious "cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask)",
which has been there since pre-git even though f->flush_cpumask is always
zero at this point.
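In other words (a hypothetical equivalence, since that call site is in a
file not shown in this diff):

	/* Because f->flush_cpumask is empty here, the old
	 *	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
	 * ORs with zero and so behaves as a plain copy, roughly
	 *	cpumask_copy(&f->flush_cpumask, cpumask);
	 * under the new API. */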
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
 arch/x86/kernel/tlb_uv.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e0..690dcf1a27d4 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * The cpumaskp mask contains the cpus the broadcast was sent to.
  *
  * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns 0 if some remote flushing remains to be done. The mask will have
+ * some bits still set.
  */
 int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-			   cpumask_t *cpumaskp)
+			   struct cpumask *cpumaskp)
 {
 	int completion_status = 0;
 	int right_shift;
@@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 	 * Success, so clear the remote cpu's from the mask so we don't
 	 * use the IPI method of shootdown on them.
 	 */
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		if (blade == this_blade)
 			continue;
-		cpu_clear(bit, *cpumaskp);
+		cpumask_clear_cpu(bit, cpumaskp);
 	}
-	if (!cpus_empty(*cpumaskp))
+	if (!cpumask_empty(cpumaskp))
 		return 0;
 	return 1;
 }
@@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 1 if all remote flushing was done.
  * Returns 0 if some remote flushing remains to be done.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
+int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
 			unsigned long va)
 {
 	int i;
@@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
 	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
 	i = 0;
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
 		if (blade == this_blade) {