Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c  38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 39ee7182fd18..aef7af92b28b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
 	if (io_page &&
 	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
 		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-		WARN(addr != other_addr,
+		WARN_ONCE(addr != other_addr,
 			"0x%lx is using VM_IO, but it is 0x%lx!\n",
 			(unsigned long)addr, (unsigned long)other_addr);
 	} else {
 		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
 		other_addr = (_pte.pte & PTE_PFN_MASK);
-		WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
			(unsigned long)addr);
 	}
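
The hunk above only swaps WARN() for WARN_ONCE(): the condition is still evaluated on every call, but the message and backtrace are emitted at most once, so a debug check that can trip on every PTE conversion cannot flood the kernel log. A minimal sketch of the pattern, outside the patch (the helper name check_io_mapping() is hypothetical):

    #include <linux/kernel.h>

    /* Hypothetical helper showing WARN_ONCE(): it reports the first
     * mismatched VM_IO mapping it sees, then stays silent on later
     * calls, while still returning the condition each time so the
     * caller can react to every occurrence. */
    static bool check_io_mapping(unsigned long addr, unsigned long other_addr)
    {
    	return WARN_ONCE(addr != other_addr,
    			 "0x%lx is using VM_IO, but it is 0x%lx!\n",
    			 addr, other_addr);
    }
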
@@ -1473,28 +1473,35 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
-#endif
+
+	return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
 
 	/*
 	 * If the new pfn is within the range of the newly allocated
 	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot, make sure it is RO.
+	 * early_ioremap fixmap slot as a freshly allocated page, make sure
+	 * it is RO.
 	 */
-	if (!is_early_ioremap_ptep(ptep) &&
-	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
+	if (((!is_early_ioremap_ptep(ptep) &&
+			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
 		pte = pte_wrprotect(pte);
 
 	return pte;
 }
+#endif /* CONFIG_X86_64 */
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
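
The rewritten x86_64 condition is dense; unfolded, a pfn is write-protected when it lies inside the kernel pagetable buffer, except when it is seen through an early_ioremap fixmap slot and is the page most recently taken off the end of that buffer (pgt_buf_end - 1), which must stay writable while it is being filled in. A restatement of the same predicate, assuming the context of this file (the helper name should_wrprotect() is illustrative, not part of the patch):

    /* Equivalent to the x86_64 test in mask_rw_pte() above. */
    static bool should_wrprotect(pte_t *ptep, unsigned long pfn)
    {
    	if (is_early_ioremap_ptep(ptep))
    		/* A freshly allocated pagetable page is mapped through
    		 * the fixmap as pgt_buf_end - 1 and still needs writes. */
    		return pfn != (pgt_buf_end - 1);

    	/* Everything else inside the pagetable buffer must be RO. */
    	return pfn >= pgt_buf_start && pfn < pgt_buf_end;
    }

mask_rw_pte() then reduces to: if (should_wrprotect(ptep, pfn)) pte = pte_wrprotect(pte);
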
@@ -1700,9 +1707,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 		pte_t pte;
 
-		if (pfn > max_pfn_mapped)
-			max_pfn_mapped = pfn;
-
 		if (!pte_none(pte_page[pteidx]))
 			continue;
 
@@ -1760,6 +1764,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	pud_t *l3;
 	pmd_t *l2;
 
+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
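
PFN_DOWN() is (x) >> PAGE_SHIFT (include/linux/pfn.h) and __pa() converts a kernel virtual address to a physical address, so the new assignment records the page frame holding the start of xen_start_info->mfn_list as the last real pfn in the initial mappings, per the comment above. A standalone illustration of the arithmetic, with an invented address and 4 KB pages:

    #include <stdio.h>

    /* Userspace sketch of the PFN_DOWN() arithmetic; the physical
     * address below is invented for illustration. */
    #define PAGE_SHIFT	12
    #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)	/* as in include/linux/pfn.h */

    int main(void)
    {
    	unsigned long pa_mfn_list = 0x2000000UL;  /* say, __pa(...->mfn_list) */

    	/* 0x2000000 >> 12 == 0x2000: pfn 8192 becomes max_pfn_mapped */
    	printf("max_pfn_mapped = %#lx\n", PFN_DOWN(pa_mfn_list));
    	return 0;
    }
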
@@ -1864,9 +1874,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
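
The 32-bit hunk above replaces an estimate with the same exact bound the 64-bit path now uses: the old expression padded the pagetable frames at pt_base with 512 KB of slack, while the new one points at mfn_list directly. A side-by-side sketch of the two computations (all field values invented):

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

    int main(void)
    {
    	/* Invented stand-ins for __pa() of xen_start_info fields. */
    	unsigned long pa_pt_base   = 0x1c00000UL;
    	unsigned long nr_pt_frames = 8;
    	unsigned long pa_mfn_list  = 0x1b00000UL;

    	/* Old: end of the pagetable frames plus 512 KB of slack. */
    	unsigned long old_bound = PFN_DOWN(pa_pt_base +
    					   nr_pt_frames * PAGE_SIZE +
    					   512*1024);

    	/* New: exact last real pfn, as in the 64-bit path. */
    	unsigned long new_bound = PFN_DOWN(pa_mfn_list);

    	printf("old = %#lx, new = %#lx\n", old_bound, new_bound);
    	return 0;
    }
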