 arch/x86/xen/mmu.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a991b57f91fe..aef7af92b28b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
-#endif
+
+	return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
 
 	/*
 	 * If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 
 	return pte;
 }
+#endif /* CONFIG_X86_64 */
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
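
For reference, a sketch of how mask_rw_pte is laid out in arch/x86/xen/mmu.c after this patch, assembled from the hunks above. The patch splits the single #ifdef'd function into separate 32-bit and 64-bit definitions; the body of the 64-bit pfn-range check is outside the changed hunks and is elided here rather than guessed at.

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
#else /* CONFIG_X86_64 */
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/*
	 * If the new pfn is within the range of the newly allocated
	 * pagetables, mark it read-only (body unchanged by this patch
	 * and not shown here).
	 */

	return pte;
}
#endif /* CONFIG_X86_64 */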