author	Jeremy Fitzhardinge <jeremy@goop.org>	2007-05-02 13:27:15 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:15 -0400
commit	d4c104771a1c58e3de2a888b73b0ba1b54c0ae76
tree	77ca2c6771e946aa926cb21c06702fdc7d230364
parent	63f70270ccd981ce40a8ff58c03a8c2e97e368be
[PATCH] i386: PARAVIRT: add flush_tlb_others paravirt_op
This patch adds a pv_op for flush_tlb_others. Linux running on native
hardware uses cross-CPU IPIs to flush the TLB on any CPU which may
have a particular mm's pagetable entries cached in its TLB. This is
inefficient in a paravirtualized environment, since the hypervisor
knows which real CPUs actually contain cached mappings, which may be a
small subset of a guest's VCPUs.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
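
For illustration only (not part of this patch), a hypervisor backend might wire up the new hook roughly as follows. The hypercall wrapper hv_remote_tlb_flush() and its arguments are invented for this sketch; a real backend (e.g. Xen) would use its own interface.

/* Hypothetical guest backend (sketch only, not from this patch).
 * hv_remote_tlb_flush() stands in for whatever hypercall the
 * hypervisor actually exposes; its name and arguments are assumed. */
#include <linux/cpumask.h>
#include <linux/sched.h>		/* struct mm_struct */
#include <asm/paravirt.h>
#include <asm/tlbflush.h>		/* TLB_FLUSH_ALL */

/* Assumed hypercall wrapper: flush 'va' (or the whole TLB when
 * flush_all is set) on the physical CPUs backing the given vCPU mask. */
extern void hv_remote_tlb_flush(const cpumask_t *cpus, unsigned long va,
				int flush_all);

static void hv_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
				unsigned long va)
{
	/* The hypervisor can skip vCPUs whose physical CPU holds no stale
	 * entries, instead of IPI'ing every CPU in the mask as the native
	 * path does. */
	if (va == TLB_FLUSH_ALL)
		hv_remote_tlb_flush(cpus, 0, 1);
	else
		hv_remote_tlb_flush(cpus, va, 0);
}

/* Installed during early paravirt setup, e.g.:
 *	paravirt_ops.flush_tlb_others = hv_flush_tlb_others;
 */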
-rw-r--r--	arch/i386/kernel/paravirt.c	 1
-rw-r--r--	arch/i386/kernel/smp.c	13
-rw-r--r--	include/asm-i386/paravirt.h	 9
-rw-r--r--	include/asm-i386/tlbflush.h	19
4 files changed, 34 insertions, 8 deletions
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index b0ed163e6f70..c7f0cf92925f 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -300,6 +300,7 @@ struct paravirt_ops paravirt_ops = {
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_single = native_flush_tlb_single,
+	.flush_tlb_others = native_flush_tlb_others,
 
 	.map_pt_hook = paravirt_nop,
 
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 9d84f6f001bf..892cd64130bc 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -256,7 +256,6 @@ static cpumask_t flush_cpumask;
 static struct mm_struct * flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL	0xffffffff
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -338,7 +337,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(flush_va);
@@ -353,9 +352,11 @@ out:
 	put_cpu_no_resched();
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 			     unsigned long va)
 {
+	cpumask_t cpumask = *cpumaskp;
+
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
@@ -417,7 +418,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -436,7 +437,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 			leave_mm(smp_processor_id());
 	}
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 4b3d50858670..f880b06d6d56 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -15,6 +15,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
+#include <linux/cpumask.h>
 
 struct thread_struct;
 struct Xgt_desc_struct;
@@ -165,6 +166,8 @@ struct paravirt_ops
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
+	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+				 unsigned long va);
 
 	void (*map_pt_hook)(int type, pte_t *va, u32 pfn);
 
@@ -853,6 +856,12 @@ static inline void __flush_tlb_single(unsigned long addr)
 	PVOP_VCALL1(flush_tlb_single, addr);
 }
 
+static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+				    unsigned long va)
+{
+	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
+}
+
 static inline void paravirt_map_pt_hook(int type, pte_t *va, u32 pfn)
 {
 	PVOP_VCALL3(map_pt_hook, type, va, pfn);
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 4dd82840d53b..db7f77eacfa0 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -79,11 +79,15 @@
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 
+#define TLB_FLUSH_ALL	0xffffffff
+
+
 #ifndef CONFIG_SMP
 
 #define flush_tlb() __flush_tlb()
@@ -110,7 +114,12 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }
 
-#else
+static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+					   struct mm_struct *mm, unsigned long va)
+{
+}
+
+#else /* SMP */
 
 #include <asm/smp.h>
 
@@ -129,6 +138,9 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 	flush_tlb_mm(vma->vm_mm);
 }
 
+void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
+			     unsigned long va);
+
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
@@ -139,8 +151,11 @@ struct tlb_state
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+#endif /* SMP */
 
-
+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, va)		\
+	native_flush_tlb_others(&mask, mm, va)
 #endif
 
 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
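
As a usage note, SMP callers keep calling flush_tlb_others(); on native kernels the new CONFIG_PARAVIRT-guarded macro maps it to native_flush_tlb_others(), while paravirt kernels go through the PVOP wrapper added above. A rough caller-side sketch, loosely modeled on the existing flush_tlb_page() path (the lazy-mm handling is simplified here):

#include <linux/mm.h>		/* struct vm_area_struct */
#include <linux/sched.h>	/* current, struct mm_struct */
#include <linux/smp.h>		/* smp_processor_id() */
#include <asm/tlbflush.h>	/* flush_tlb_others(), __flush_tlb_one() */

/* Simplified caller-side sketch (not a verbatim copy of smp.c):
 * flush one user address locally, then on every other CPU that may
 * still hold entries for this mm. */
static void example_flush_one_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm)
		__flush_tlb_one(va);		/* local TLB */

	if (!cpus_empty(cpu_mask))
		/* Native: IPI the mask; paravirt: one call into the
		 * hypervisor via paravirt_ops.flush_tlb_others. */
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}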