author    Stefano Stabellini <stefano.stabellini@eu.citrix.com>    2011-04-19 09:47:31 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-04-20 09:43:13 -0400
commit    ee176455e28469e2420032aab3db11ac2ae3eaa8 (patch)
tree      e8cfbf2cfd2a0617bb3a24d8851b2cde1eee2709 /arch
parent    24bdb0b62cc82120924762ae6bc85afc8c3f2b26 (diff)
xen: mask_rw_pte: do not apply the early_ioremap checks on x86_32
The two "is_early_ioremap_ptep" checks in mask_rw_pte are only used on
x86_64; in fact, early_ioremap is not used at all to set up the initial
pagetable on x86_32. Moreover, on x86_32 the two checks are wrong,
because the range pgt_buf_start..pgt_buf_end should initially be mapped
RW: the pages in that range are not pagetable pages yet and have not
been cleared yet. Afterwards, since pgt_buf_start..pgt_buf_end is part
of the initial mapping, xen_alloc_pte is capable of turning the ptes RO
when they become pagetable pages.

Fix the issue and improve the readability of the code by providing two
different implementations of mask_rw_pte for x86_32 and x86_64.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
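For context, a minimal sketch of the x86_64-only logic the commit message
refers to; the diff below elides the body of this check, so the exact
lines here are reconstructed from the mmu.c of this era (helpers such as
is_early_ioremap_ptep, pgt_buf_start and pgt_buf_end are assumed), not
quoted from this page:

	/* Sketch: the x86_64 path of mask_rw_pte after this patch.
	 * Ptes covering the newly allocated kernel pagetable area
	 * must be RO, except for the page early_ioremap is about to
	 * hand out as a freshly allocated page. */
	static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
	{
		unsigned long pfn = pte_pfn(pte);

		if ((!is_early_ioremap_ptep(ptep) &&
		     pfn >= pgt_buf_start && pfn < pgt_buf_end) ||
		    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
			pte = pte_wrprotect(pte);

		return pte;
	}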
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/xen/mmu.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a991b57f91fe..aef7af92b28b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
-#endif
+
+	return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
 
 	/*
 	 * If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 
 	return pte;
 }
+#endif /* CONFIG_X86_64 */
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
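The commit message relies on xen_alloc_pte write-protecting pgt_buf pages
once they actually become pagetable pages. A minimal sketch of that
mechanism, assuming the allocation hook of this era (xen_alloc_ptpage,
make_lowmem_page_readonly); details such as PTE pinning and highmem
handling are elided, and the exact body is a reconstruction for
illustration, not quoted from this commit:

	/* Sketch: how a page mapped RW out of pgt_buf later becomes RO.
	 * When a frame is allocated as a pagetable page, the Xen pvops
	 * alloc hook write-protects its mapping in the initial lowmem
	 * area, because Xen refuses to use a frame as a pagetable while
	 * any writable mapping of it exists. */
	static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				     unsigned level)
	{
		struct page *page = pfn_to_page(pfn);

		if (PagePinned(virt_to_page(mm->pgd))) {
			SetPagePinned(page);
			if (!PageHighMem(page))
				make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
		}
	}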