author     Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>   2009-02-18 02:05:19 -0500
committer  Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>   2009-03-30 02:35:38 -0400
commit     b8bcfe997e46150fedcc3f5b26b846400122fdd9 (patch)
tree       435f1bdfa3317e13ddcd3b87176602b597639c1b
parent     a8a93f3f03b7a8008d720e8d91798efe599d416c (diff)
x86/paravirt: remove lazy mode in interrupts
Impact: simplification, robustness
Make paravirt_get_lazy_mode() always return PARAVIRT_LAZY_NONE
when in an interrupt. This prevents interrupt code from
accidentally inheriting an outer lazy state, and instead
does everything synchronously. Outer batched operations
are left deferred.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
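
To make the effect concrete, here is a minimal, hypothetical sketch (not part of this patch) of how a paravirt backend's pte-update path typically consults the lazy mode. queue_pte_update() and issue_pte_update() are made-up stand-ins for a real backend's batching and immediate-update primitives; only paravirt_get_lazy_mode() and the PARAVIRT_LAZY_* values come from the kernel. With the in_interrupt() check added below, interrupt-context updates always take the synchronous branch.

```c
#include <asm/paravirt.h>
#include <asm/pgtable.h>

/* Hypothetical backend hooks; a real backend (e.g. Xen) has its own. */
static void queue_pte_update(pte_t *ptep, pte_t pteval) { /* add to batch */ }
static void issue_pte_update(pte_t *ptep, pte_t pteval) { /* apply now */ }

static void example_set_pte(pte_t *ptep, pte_t pteval)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		/* Inside a lazy-MMU region: batch; flushed when the region ends. */
		queue_pte_update(ptep, pteval);
	} else {
		/*
		 * PARAVIRT_LAZY_NONE: apply immediately.  After this patch,
		 * in_interrupt() always reports LAZY_NONE, so interrupt code
		 * never inherits an outer lazy state.
		 */
		issue_pte_update(ptep, pteval);
	}
}
```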
-rw-r--r--  arch/x86/kernel/paravirt.c |  3
-rw-r--r--  arch/x86/mm/fault.c        |  6
-rw-r--r--  arch/x86/mm/highmem_32.c   |  2
-rw-r--r--  arch/x86/mm/iomap_32.c     |  1
-rw-r--r--  arch/x86/mm/pageattr.c     | 14
5 files changed, 5 insertions, 21 deletions
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 63dd358d8ee1..8ab250ac498b 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -282,6 +282,9 @@ void paravirt_leave_lazy_cpu(void)
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
+	if (in_interrupt())
+		return PARAVIRT_LAZY_NONE;
+
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a03b7279efa0..cfbb4a738011 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -225,12 +225,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	if (!pmd_present(*pmd_k))
 		return NULL;
 
-	if (!pmd_present(*pmd)) {
+	if (!pmd_present(*pmd))
 		set_pmd(pmd, *pmd_k);
-		arch_flush_lazy_mmu_mode();
-	} else {
+	else
 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-	}
 
 	return pmd_k;
 }
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 00f127c80b0e..e81dfa408157 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -87,7 +87,6 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
 }
@@ -117,7 +116,6 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 	}
 
-	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
 
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 04102d42ff42..b6a61f3d7ef8 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -74,7 +74,6 @@ iounmap_atomic(void *kvaddr, enum km_type type)
 	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 
-	arch_flush_lazy_mmu_mode();
 	pagefault_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 9c4294986af7..9015e5e412b5 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -824,13 +824,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
-	/*
-	 * If we're called with lazy mmu updates enabled, the
-	 * in-memory pte state may be stale. Flush pending updates to
-	 * bring them up to date.
-	 */
-	arch_flush_lazy_mmu_mode();
-
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -873,13 +866,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
-	/*
-	 * If we've been called with lazy mmu updates enabled, then
-	 * make sure that everything gets flushed out before we
-	 * return.
-	 */
-	arch_flush_lazy_mmu_mode();
-
 out:
 	return ret;
 }
