aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/xen/mmu.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index eb51402dd99a..ef5728dde8f3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
 
@@ -1015,8 +1016,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
-	vm_unmap_aliases();
-
 	xen_mc_batch();
 
 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -1580,7 +1579,6 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
-		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
@@ -2026,6 +2024,8 @@ void __init xen_init_mmu_ops(void)
 	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
+
+	vmap_lazy_unmap = false;
 }
 
 /* Protected by xen_reservation_lock. */
@@ -2165,8 +2165,6 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
 
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
-	vm_unmap_aliases();
-
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
 	/* 1. Zap current PTEs, remembering MFNs. */
@@ -2204,8 +2202,6 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 
 	memset((void *) vstart, 0, PAGE_SIZE << order);
 
-	vm_unmap_aliases();
-
 	spin_lock_irqsave(&xen_reservation_lock, flags);
 
 	/* 1. Find start MFN of contiguous extent. */