author		Stefano Stabellini <stefano.stabellini@eu.citrix.com>	2011-03-09 09:22:05 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2011-03-19 14:58:28 -0400
commit		d8aa5ec3382e6a545b8f25178d1e0992d4927f19 (patch)
tree		874b7fa5ee59a142951948fbc61f8a86d5879290 /arch/x86/xen/mmu.c
parent		14988a4d350ce3b41ecad4f63c4f44c56f5ae34d (diff)
xen: update mask_rw_pte after kernel page tables init changes
After "x86-64, mm: Put early page table high" already existing kernel page table pages can be mapped using early_ioremap too so we need to update mask_rw_pte to make sure these pages are still mapped RO. The reason why we have to do that is explain by the commit message of fef5ba797991f9335bcfc295942b684f9bf613a1: "Xen requires that all pages containing pagetable entries to be mapped read-only. If pages used for the initial pagetable are already mapped then we can change the mapping to RO. However, if they are initially unmapped, we need to make sure that when they are later mapped, they are also mapped RO. ..SNIP.. the pagetable setup code early_ioremaps the pages to write their entries, so we must make sure that mappings created in the early_ioremap fixmap area are mapped RW. (Those mappings are removed before the pages are presented to Xen as pagetable pages.)" We accomplish all this in mask_rw_pte by mapping RO all the pages mapped using early_ioremap apart from the last one that has been allocated because it is not a page table page yet (it has not been hooked into the page tables yet). Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> LKML-Reference: <alpine.DEB.2.00.1103171739050.3382@kaball-desktop> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--	arch/x86/xen/mmu.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 6e27979506c1..21058ad1e5e3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1488,10 +1488,12 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 	/*
 	 * If the new pfn is within the range of the newly allocated
 	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot, make sure it is RO.
+	 * early_ioremap fixmap slot as a freshly allocated page, make sure
+	 * it is RO.
 	 */
-	if (!is_early_ioremap_ptep(ptep) &&
-	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
+	if (((!is_early_ioremap_ptep(ptep) &&
+			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
 		pte = pte_wrprotect(pte);
 
 	return pte;