author    Tejun Heo <tj@kernel.org>	2009-01-21 03:26:06 -0500
committer Tejun Heo <tj@kernel.org>	2009-01-21 03:26:06 -0500
commit    bdbcdd48883940bbd8d17eb01172d58a261a413a (patch)
tree      59ae56454b3ccd8fbc14c57af97ed4bbade58e51 /arch/x86/kernel/tlb_uv.c
parent    d650a5148593b65a3c3f9a344f46b91b7dfe7713 (diff)
x86: uv cleanup
Impact: cleanup
Make the following uv-related cleanups:
* Collect the visible uv-related definitions and interfaces into uv/uv.h
  and use it. This cleans up the messy situation where uv is defined
  properly on 64bit, is a dummy on 32bit generic, and is undefined on
  the rest. After this cleanup, uv is defined on 64bit and is a dummy
  on 32bit.
* Update uv_flush_tlb_others() so that it takes the cpumask of
  to-be-flushed cpus as its argument, instead of that mask minus self,
  and returns the yet-to-be-flushed cpumask, instead of modifying the
  passed-in parameter. This interface change eases a dummy
  implementation of uv_flush_tlb_others() and keeps the uv tlb flush
  related code defined in tlb_uv proper (see the caller sketch below).
Signed-off-by: Tejun Heo <tj@kernel.org>
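
The interface change is easiest to see from the caller's side. Below is a minimal, hypothetical sketch of a call site, not part of this patch: the real caller lives in the generic x86 TLB shootdown path outside this diff, and flush_tlb_others_ipi() is an assumed stand-in for the generic IPI-based fallback.

/*
 * Hypothetical caller sketch (assumes <asm/uv/uv.h> and <linux/smp.h>;
 * flush_tlb_others_ipi() is an assumed generic IPI shootdown helper).
 */
static void example_flush_tlb_others(const struct cpumask *cpumask,
				     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		/* get_cpu() disables preemption, which the new comment
		 * on uv_flush_tlb_others() requires */
		unsigned int cpu = get_cpu();

		/*
		 * Pass the full mask, current cpu included; the return
		 * value is the mask of cpus still to be flushed, or
		 * NULL if the BAU broadcast covered them all.
		 */
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}

Note that the returned mask is only dereferenced before put_cpu(), matching the new rule that the pointer is valid till preemption is re-enabled.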
Diffstat (limited to 'arch/x86/kernel/tlb_uv.c')
-rw-r--r--	arch/x86/kernel/tlb_uv.c	68
1 file changed, 40 insertions, 28 deletions
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 690dcf1a27d4..aae15dd72604 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 
 #include <asm/mmu_context.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
@@ -209,14 +210,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  *
  * Send a broadcast and wait for a broadcast message to complete.
  *
- * The cpumaskp mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast was sent to.
  *
- * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask will have
- * some bits still set.
+ * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns @flush_mask if some remote flushing remains to be done. The
+ * mask will have some bits still set.
  */
-int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-			   struct cpumask *cpumaskp)
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+					     struct bau_desc *bau_desc,
+					     struct cpumask *flush_mask)
 {
 	int completion_status = 0;
 	int right_shift;
@@ -263,59 +265,69 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 	 * Success, so clear the remote cpu's from the mask so we don't
 	 * use the IPI method of shootdown on them.
 	 */
-	for_each_cpu(bit, cpumaskp) {
+	for_each_cpu(bit, flush_mask) {
 		blade = uv_cpu_to_blade_id(bit);
 		if (blade == this_blade)
 			continue;
-		cpumask_clear_cpu(bit, cpumaskp);
+		cpumask_clear_cpu(bit, flush_mask);
 	}
-	if (!cpumask_empty(cpumaskp))
-		return 0;
-	return 1;
+	if (!cpumask_empty(flush_mask))
+		return flush_mask;
+	return NULL;
 }
 
 /**
  * uv_flush_tlb_others - globally purge translation cache of a virtual
  * address or all TLB's
- * @cpumaskp: mask of all cpu's in which the address is to be removed
+ * @cpumask: mask of all cpu's in which the address is to be removed
  * @mm: mm_struct containing virtual address range
  * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
  *
  * Purges the translation caches of all specified processors of the given
  * virtual address, or purges all TLB's on specified processors.
  *
- * The caller has derived the cpumaskp from the mm_struct and has subtracted
- * the local cpu from the mask. This function is called only if there
- * are bits set in the mask. (e.g. flush_tlb_page())
+ * The caller has derived the cpumask from the mm_struct. This function
+ * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
  *
- * The cpumaskp is converted into a nodemask of the nodes containing
+ * The cpumask is converted into a nodemask of the nodes containing
  * the cpus.
  *
- * Returns 1 if all remote flushing was done.
- * Returns 0 if some remote flushing remains to be done.
+ * Note that this function should be called with preemption disabled.
+ *
+ * Returns NULL if all remote flushing was done.
+ * Returns pointer to cpumask if some remote flushing remains to be
+ * done. The returned pointer is valid till preemption is re-enabled.
  */
-int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
-			unsigned long va)
+const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+					  struct mm_struct *mm,
+					  unsigned long va, unsigned int cpu)
 {
+	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
 	int i;
 	int bit;
 	int blade;
-	int cpu;
+	int uv_cpu;
 	int this_blade;
 	int locals = 0;
 	struct bau_desc *bau_desc;
 
-	cpu = uv_blade_processor_id();
+	WARN_ON(!in_atomic());
+
+	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+
+	uv_cpu = uv_blade_processor_id();
 	this_blade = uv_numa_blade_id();
 	bau_desc = __get_cpu_var(bau_control).descriptor_base;
-	bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;
+	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
 	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
 	i = 0;
-	for_each_cpu(bit, cpumaskp) {
+	for_each_cpu(bit, flush_mask) {
 		blade = uv_cpu_to_blade_id(bit);
 		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
 		if (blade == this_blade) {
@@ -330,17 +342,17 @@ int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
 		 * no off_node flushing; return status for local node
 		 */
 		if (locals)
-			return 0;
+			return flush_mask;
 		else
-			return 1;
+			return NULL;
 	}
 	__get_cpu_var(ptcstats).requestor++;
 	__get_cpu_var(ptcstats).ntargeted += i;
 
 	bau_desc->payload.address = va;
-	bau_desc->payload.sending_cpu = smp_processor_id();
+	bau_desc->payload.sending_cpu = cpu;
 
-	return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
+	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
 }
 
 /*
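
The commit message notes that this interface change eases a dummy implementation of uv_flush_tlb_others() on configurations where uv is a stub (32bit). A plausible sketch of such a dummy, assuming it lives with the other definitions collected into uv/uv.h (the stub itself is not shown in this diff): by returning the input mask unchanged it reports that no remote flushing was done, so the caller falls back to IPIs for every cpu in the mask.

/*
 * Hypothetical !UV dummy (sketch only; the real stub would live in
 * <asm/uv/uv.h>): nothing is flushed here, so hand the whole mask
 * back and let the caller's IPI fallback do all the work.
 */
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
		    unsigned long va, unsigned int cpu)
{
	return cpumask;
}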