about summary refs log tree commit diff stats
path: root/arch/x86/xen/mmu.c
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2010-03-26 18:37:50 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2010-07-27 11:50:41 -0400
commitd2cb214551de8180542a04ec8c86c0c9412c5124 (patch)
tree8c9cb83c2899cc765cf5789896b19383b6e8ddef /arch/x86/xen/mmu.c
parenta0d40c80256e31b23849f2ba781b74bf0218a1fa (diff)
xen/mmu: inhibit vmap aliases rather than trying to clear them out
Rather than trying to deal with aliases once they appear, just completely inhibit them. Mostly the removal of aliases was manageable, but it comes unstuck in xen_create_contiguous_region() because it gets executed at interrupt time (as a result of dma_alloc_coherent()), which causes all sorts of confusion in the vmap code, as it was never intended to be run in interrupt context. This has the unfortunate side effect of removing all the unmap batching that the vmap code so carefully added, but that can't be helped. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--arch/x86/xen/mmu.c10
1 file changed, 3 insertions, 7 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index eb51402dd99..ef5728dde8f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
42#include <linux/highmem.h> 42#include <linux/highmem.h>
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/bug.h> 44#include <linux/bug.h>
45#include <linux/vmalloc.h>
45#include <linux/module.h> 46#include <linux/module.h>
46#include <linux/gfp.h> 47#include <linux/gfp.h>
47 48
@@ -1015,8 +1016,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
1015 read-only, and can be pinned. */ 1016 read-only, and can be pinned. */
1016static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) 1017static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
1017{ 1018{
1018 vm_unmap_aliases();
1019
1020 xen_mc_batch(); 1019 xen_mc_batch();
1021 1020
1022 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { 1021 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -1580,7 +1579,6 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
1580 if (PagePinned(virt_to_page(mm->pgd))) { 1579 if (PagePinned(virt_to_page(mm->pgd))) {
1581 SetPagePinned(page); 1580 SetPagePinned(page);
1582 1581
1583 vm_unmap_aliases();
1584 if (!PageHighMem(page)) { 1582 if (!PageHighMem(page)) {
1585 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); 1583 make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1586 if (level == PT_PTE && USE_SPLIT_PTLOCKS) 1584 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
@@ -2026,6 +2024,8 @@ void __init xen_init_mmu_ops(void)
2026 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; 2024 x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
2027 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 2025 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2028 pv_mmu_ops = xen_mmu_ops; 2026 pv_mmu_ops = xen_mmu_ops;
2027
2028 vmap_lazy_unmap = false;
2029} 2029}
2030 2030
2031/* Protected by xen_reservation_lock. */ 2031/* Protected by xen_reservation_lock. */
@@ -2165,8 +2165,6 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2165 2165
2166 memset((void *) vstart, 0, PAGE_SIZE << order); 2166 memset((void *) vstart, 0, PAGE_SIZE << order);
2167 2167
2168 vm_unmap_aliases();
2169
2170 spin_lock_irqsave(&xen_reservation_lock, flags); 2168 spin_lock_irqsave(&xen_reservation_lock, flags);
2171 2169
2172 /* 1. Zap current PTEs, remembering MFNs. */ 2170 /* 1. Zap current PTEs, remembering MFNs. */
@@ -2204,8 +2202,6 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2204 2202
2205 memset((void *) vstart, 0, PAGE_SIZE << order); 2203 memset((void *) vstart, 0, PAGE_SIZE << order);
2206 2204
2207 vm_unmap_aliases();
2208
2209 spin_lock_irqsave(&xen_reservation_lock, flags); 2205 spin_lock_irqsave(&xen_reservation_lock, flags);
2210 2206
2211 /* 1. Find start MFN of contiguous extent. */ 2207 /* 1. Find start MFN of contiguous extent. */