Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--	arch/x86/xen/mmu.c	74
1 file changed, 32 insertions(+), 42 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 478a2de543a5..67433714b791 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
 	 * We include the PMD passed in on _both_ boundaries. */
-	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
 			pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
-	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-			       pte_val_ma(pte));
-
-	return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	unsigned long pfn;
-
-	if (xen_feature(XENFEAT_writable_page_tables) ||
-	    xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->mfn_list >= __START_KERNEL_map)
-		return pte;
-
-	/*
-	 * Pages belonging to the initial p2m list mapped outside the default
-	 * address range must be mapped read-only. This region contains the
-	 * page tables for mapping the p2m list, too, and page tables MUST be
-	 * mapped read-only.
-	 */
-	pfn = pte_pfn(pte);
-	if (pfn >= xen_start_info->first_p2m_pfn &&
-	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-		pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-	return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-		pte = mask_rw_pte(ptep, pte);
-	else
-		pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+	unsigned long pfn;
+
+	/*
+	 * Pages belonging to the initial p2m list mapped outside the default
+	 * address range must be mapped read-only. This region contains the
+	 * page tables for mapping the p2m list, too, and page tables MUST be
+	 * mapped read-only.
+	 */
+	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+	if (xen_start_info->mfn_list < __START_KERNEL_map &&
+	    pfn >= xen_start_info->first_p2m_pfn &&
+	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+		pte &= ~_PAGE_RW;
+#endif
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
+	if (pte_mfn(pte) != INVALID_P2M_ENTRY
+	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+#endif
 	native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
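The net effect of the diff: boot-time PTE construction goes through xen_make_pte_init, which clears _PAGE_RW from any PTE whose pfn falls inside the initial p2m list (that region also holds the page tables mapping the list, and page tables must be read-only under Xen PV) before doing the pfn-to-mfn translation; xen_post_allocator_init later swaps the make_pte hook back to the regular xen_make_pte. Below is a minimal, standalone C sketch of that pattern. All demo_* names, the constants, and the simplified pte layout are invented for illustration and are not the kernel's actual definitions.

/*
 * Standalone sketch (not kernel code) of the two ideas in this diff:
 * (1) an init-time make_pte that forces p2m-list pages read-only, and
 * (2) a pvops-style hook that is switched after boot-time page table
 * construction is done.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT  12
#define DEMO_PAGE_RW     0x002ULL                /* stand-in for _PAGE_RW */
#define DEMO_PFN_MASK    0x000ffffffffff000ULL   /* stand-in for PTE_PFN_MASK */

/* stand-ins for the relevant xen_start_info fields */
static uint64_t demo_first_p2m_pfn = 0x1000;
static uint64_t demo_nr_p2m_frames = 4;

/* (2) the hook, switched after init as in xen_post_allocator_init */
static uint64_t (*demo_make_pte)(uint64_t pte);

/* normal variant: pass the value through (stands in for xen_make_pte) */
static uint64_t demo_make_pte_normal(uint64_t pte)
{
	return pte;
}

/* (1) init variant: p2m-list pages lose RW (as in xen_make_pte_init) */
static uint64_t demo_make_pte_init(uint64_t pte)
{
	uint64_t pfn = (pte & DEMO_PFN_MASK) >> DEMO_PAGE_SHIFT;

	/* pages backing the initial p2m list must be mapped read-only */
	if (pfn >= demo_first_p2m_pfn &&
	    pfn < demo_first_p2m_pfn + demo_nr_p2m_frames)
		pte &= ~DEMO_PAGE_RW;

	return pte;	/* the kernel would translate pfn to mfn here */
}

int main(void)
{
	uint64_t in_p2m  = (0x1002ULL << DEMO_PAGE_SHIFT) | DEMO_PAGE_RW;
	uint64_t regular = (0x2000ULL << DEMO_PAGE_SHIFT) | DEMO_PAGE_RW;

	demo_make_pte = demo_make_pte_init;	/* boot-time hook */
	printf("init hook, p2m page:   rw=%d\n",
	       !!(demo_make_pte(in_p2m) & DEMO_PAGE_RW));	/* rw=0 */
	printf("init hook, other page: rw=%d\n",
	       !!(demo_make_pte(regular) & DEMO_PAGE_RW));	/* rw=1 */

	demo_make_pte = demo_make_pte_normal;	/* post-allocator switch */
	printf("final hook, p2m page:  rw=%d\n",
	       !!(demo_make_pte(in_p2m) & DEMO_PAGE_RW));	/* rw=1 */
	return 0;
}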