author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-10-28 04:23:06 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-11-07 04:05:59 -0500
commit     d05fdf316067cd311d5e7add08da26ded8a58080
tree       abf6541cdd9e19f17eab6c73c4ffb09a23f0826e /arch/x86
parent     9b46333406b9cb3397ab538485a4d57c316af0ff
xen: make sure stray alias mappings are gone before pinning
Xen requires that all mappings of pagetable pages are read-only, so
that they can't be updated illegally. As a result, if a page is being
turned into a pagetable page, we need to make sure all its mappings
are RO.
If the page had been used for ioremap or vmalloc, it may still have
left over mappings as a result of not having been lazily unmapped.
This change makes sure we explicitly mop them all up before pinning
the page.
Unlike aliases created by kmap, vmalloc aliases can exist even
for non-high pages, so we must do the flush unconditionally.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Linux Memory Management List <linux-mm@kvack.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
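[Editor's note: a minimal C sketch of the ordering this change enforces before a page may be pinned as a pagetable: flush stray vmalloc/ioremap aliases first, then make the page read-only (or drop kmap aliases for highmem), then pin it. vm_unmap_aliases(), kmap_flush_unused(), make_lowmem_page_readonly(), pin_pagetable_pfn() and MMUEXT_PIN_L1_TABLE are the interfaces used by the patch itself; the wrapper function name below is illustrative only, the PT_PTE/split-ptlock check from the real code is omitted, and the snippet assumes the headers already included by arch/x86/xen/enlighten.c.]

/* Illustrative sketch only, simplified from the patched xen_alloc_ptpage(). */
static void prepare_and_pin_pte_page(struct page *page, unsigned long pfn)
{
        /* Vmalloc/ioremap aliases can exist even for lowmem pages,
         * so this flush must be unconditional. */
        vm_unmap_aliases();

        if (!PageHighMem(page)) {
                /* Lowmem: make the kernel linear mapping read-only, then pin. */
                make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
        } else {
                /* Highmem: drop any leftover kmap aliases of this page. */
                kmap_flush_unused();
        }
}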
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/enlighten.c | 5
-rw-r--r--  arch/x86/xen/mmu.c       | 9
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b61534c7a4c4..5e4686d70f62 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -863,15 +863,16 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
         if (PagePinned(virt_to_page(mm->pgd))) {
                 SetPagePinned(page);
 
+                vm_unmap_aliases();
                 if (!PageHighMem(page)) {
                         make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                         if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-                } else
+                } else {
                         /* make sure there are no stray mappings of
                            this page */
                         kmap_flush_unused();
-                        vm_unmap_aliases();
+                }
         }
 }
 
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aba77b2b7d18..89f3b6edc65a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -850,13 +850,16 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+        vm_unmap_aliases();
+
         xen_mc_batch();
 
         if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) {
-                /* re-enable interrupts for kmap_flush_unused */
+                /* re-enable interrupts for flushing */
                 xen_mc_issue(0);
+
                 kmap_flush_unused();
-                vm_unmap_aliases();
+
                 xen_mc_batch();
         }
 