author     Jeremy Fitzhardinge <jeremy@goop.org>    2007-05-02 13:27:15 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2007-05-02 13:27:15 -0400
commit     d4c104771a1c58e3de2a888b73b0ba1b54c0ae76 (patch)
tree       77ca2c6771e946aa926cb21c06702fdc7d230364 /arch/i386/kernel/smp.c
parent     63f70270ccd981ce40a8ff58c03a8c2e97e368be (diff)
[PATCH] i386: PARAVIRT: add flush_tlb_others paravirt_op
This patch adds a pv_op for flush_tlb_others. Linux running on native
hardware uses cross-CPU IPIs to flush the TLB on any CPU which may
have a particular mm's pagetable entries cached in its TLB. This is
inefficient in a paravirtualized environment, since the hypervisor
knows which real CPUs actually contain cached mappings, which may be a
small subset of a guest's VCPUs.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
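
A minimal sketch of the indirection this patch is part of: call sites keep using flush_tlb_others(), which forwards through a function pointer that native kernels point at native_flush_tlb_others() (the function renamed in the diff below). The struct member and wrapper shown here are assumptions for illustration; only native_flush_tlb_others() is actually defined in this file by the patch.

/*
 * Illustrative sketch -- not part of this diff.  Assumes a global
 * 'struct paravirt_ops paravirt_ops' with a flush_tlb_others hook;
 * only native_flush_tlb_others() is defined in arch/i386/kernel/smp.c
 * by this patch.
 */
struct paravirt_ops {
        /* ... other operations ... */
        void (*flush_tlb_others)(const cpumask_t *cpus,
                                 struct mm_struct *mm, unsigned long va);
};

/* Call sites are unchanged; the wrapper forwards through the hook. */
static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        paravirt_ops.flush_tlb_others(&cpumask, mm, va);
}

Passing the mask by pointer keeps a full cpumask_t copy out of the indirect call; the native implementation simply takes its own local copy (cpumask_t cpumask = *cpumaskp; in the diff below).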
Diffstat (limited to 'arch/i386/kernel/smp.c')
 -rw-r--r--  arch/i386/kernel/smp.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 9d84f6f001bf..892cd64130bc 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -256,7 +256,6 @@ static cpumask_t flush_cpumask;
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL 0xffffffff
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -338,7 +337,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 
         if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-                        if (flush_va == FLUSH_ALL)
+                        if (flush_va == TLB_FLUSH_ALL)
                                 local_flush_tlb();
                         else
                                 __flush_tlb_one(flush_va);
@@ -353,9 +352,11 @@ out:
         put_cpu_no_resched();
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-                                                unsigned long va)
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
+                             unsigned long va)
 {
+        cpumask_t cpumask = *cpumaskp;
+
         /*
          * A couple of (to be removed) sanity checks:
          *
@@ -417,7 +418,7 @@ void flush_tlb_current_task(void)
 
         local_flush_tlb();
         if (!cpus_empty(cpu_mask))
-                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
         preempt_enable();
 }
 
@@ -436,7 +437,7 @@ void flush_tlb_mm (struct mm_struct * mm)
                         leave_mm(smp_processor_id());
         }
         if (!cpus_empty(cpu_mask))
-                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
         preempt_enable();
 }
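
To illustrate the motivation in the commit message (hypothetical code, not from this patch or any real hypervisor backend): a paravirtualized guest could install an implementation that hands the whole flush to the hypervisor, which already knows which physical CPUs hold stale translations for the mm, instead of sending an IPI to every VCPU in the mask.

/*
 * Hypothetical backend -- the function and hypercall names below are
 * invented for illustration only.
 */
static void example_hv_flush_tlb_others(const cpumask_t *cpus,
                                        struct mm_struct *mm,
                                        unsigned long va)
{
        /* One request replaces the IPI broadcast; va may be TLB_FLUSH_ALL. */
        example_hypercall_flush_tlb(cpus, va);
}

/* Installed at boot, e.g.: paravirt_ops.flush_tlb_others = example_hv_flush_tlb_others; */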