 arch/x86/xen/mmu_pv.c | 159 +++++++++++++++-------------------------
 1 file changed, 60 insertions(+), 99 deletions(-)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 71495f1a86d7..2ccdaba31a07 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -449,7 +449,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
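Note on the guard change: Xen PV guests cannot run with 5-level paging, so on x86-64 these builds always have exactly four page-table levels, and #ifdef CONFIG_X86_64 selects the same code as the old #if CONFIG_PGTABLE_LEVELS == 4 while reading as a plain ifdef. A minimal compile-time check of that invariant could look like this (a sketch, not part of the patch):

    #if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) && \
        CONFIG_PGTABLE_LEVELS != 4
    #error "Xen PV guests assume 4-level paging on x86-64"
    #endif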
@@ -538,7 +538,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -580,21 +580,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
 		bool last, unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int flush = 0;
+	pud_t *pud;
 
-	nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
-	for (i = 0; i < nr; i++) {
-		pud_t *pud;
 
-		if (p4d_none(p4d[i]))
-			continue;
+	if (p4d_none(*p4d))
+		return flush;
 
-		pud = pud_offset(&p4d[i], 0);
-		if (PTRS_PER_PUD > 1)
-			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-		flush |= xen_pud_walk(mm, pud, func,
-				last && i == nr - 1, limit);
-	}
+	pud = pud_offset(p4d, 0);
+	if (PTRS_PER_PUD > 1)
+		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+	flush |= xen_pud_walk(mm, pud, func, last, limit);
 	return flush;
 }
 
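The simplification above relies on the p4d level being folded on 4-level builds: PTRS_PER_P4D is 1 and p4d_index(limit) is 0, so the removed loop could only ever visit p4d[0]. An illustrative userspace model of that collapse (toy code, not kernel code):

    #include <stdio.h>

    #define PTRS_PER_P4D 1		/* folded level on 4-level x86-64 */

    static int visit(int idx)
    {
    	printf("visit p4d[%d]\n", idx);
    	return 0;
    }

    int main(void)
    {
    	int i, flush = 0;

    	/* old shape: a loop that can only run once */
    	for (i = 0; i < PTRS_PER_P4D; i++)
    		flush |= visit(i);

    	/* new shape: operate on the single entry directly */
    	flush |= visit(0);
    	return flush;
    }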
@@ -644,8 +640,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			continue;
 
 		p4d = p4d_offset(&pgd[i], 0);
-		if (PTRS_PER_P4D > 1)
-			flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
 	}
 
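Likewise, the dropped PT_P4D callback was statically dead on 4-level builds: with PTRS_PER_P4D fixed at 1, the PTRS_PER_P4D > 1 test is constant-false and the compiler already discarded the call. A minimal model of the same pattern (sketch only):

    enum { PTRS_PER_P4D = 1 };	/* folded p4d level */

    static inline int maybe_visit_p4d(int (*func)(void))
    {
    	if (PTRS_PER_P4D > 1)	/* constant-false: branch folds away */
    		return func();
    	return 0;
    }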
@@ -1176,22 +1170,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
-	unsigned int i;
 	bool unpin;
 
 	unpin = (vaddr == 2 * PGDIR_SIZE);
 	vaddr &= PMD_MASK;
 	pgd = pgd_offset_k(vaddr);
 	p4d = p4d_offset(pgd, 0);
-	for (i = 0; i < PTRS_PER_P4D; i++) {
-		if (p4d_none(p4d[i]))
-			continue;
-		xen_cleanmfnmap_p4d(p4d + i, unpin);
-	}
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		set_pgd(pgd, __pgd(0));
-		xen_cleanmfnmap_free_pgtbl(p4d, unpin);
-	}
+	if (!p4d_none(*p4d))
+		xen_cleanmfnmap_p4d(p4d, unpin);
 }
 
 static void __init xen_pagetable_p2m_free(void)
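In xen_cleanmfnmap() the same folding argument applies twice: the loop over PTRS_PER_P4D entries only ever visited p4d[0], and the IS_ENABLED(CONFIG_X86_5LEVEL) block never ran on a 4-level kernel, so neither is needed once 5-level support is out of the PV code. With the p4d level folded, p4d_offset() is essentially an identity cast; an illustrative model (not the kernel's real definitions):

    typedef struct { unsigned long pgd; } pgd_t;
    typedef struct { unsigned long p4d; } p4d_t;

    /* folded level: the "p4d table" is the pgd entry itself */
    static inline p4d_t *p4d_offset_model(pgd_t *pgd, unsigned long addr)
    {
    	(void)addr;		/* only one entry, address unused */
    	return (p4d_t *)pgd;
    }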
@@ -1692,7 +1678,7 @@ static void xen_release_pmd(unsigned long pfn)
 	xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2029,13 +2015,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
  */
 void __init xen_relocate_p2m(void)
 {
-	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
-	int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
 	pte_t *pt;
 	pmd_t *pmd;
 	pud_t *pud;
-	p4d_t *p4d = NULL;
 	pgd_t *pgd;
 	unsigned long *new_p2m;
 	int save_pud;
@@ -2045,11 +2030,7 @@ void __init xen_relocate_p2m(void)
 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
-	if (PTRS_PER_P4D > 1)
-		n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
-	else
-		n_p4d = 0;
-	n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+	n_frames = n_pte + n_pt + n_pmd + n_pud;
 
 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
 	if (!new_area) {
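The n_frames arithmetic is unchanged in effect: on a 4-level build PTRS_PER_P4D == 1, so the old code always took the n_p4d = 0 branch and the dropped term contributed nothing. A back-of-envelope check with hypothetical numbers (assuming n_pte is simply the p2m size in pages, as the surrounding code implies): an 8 GiB guest has 2M p2m entries of 8 bytes each, i.e. size = 16 MiB:

    #include <stdio.h>

    #define ROUNDUP(x, a)	(((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
    	unsigned long long size = 16ULL << 20;			/* 16 MiB p2m */
    	unsigned long long n_pte = ROUNDUP(size, 1ULL << 12) >> 12;	/* 4096 */
    	unsigned long long n_pt  = ROUNDUP(size, 1ULL << 21) >> 21;	/*    8 */
    	unsigned long long n_pmd = ROUNDUP(size, 1ULL << 30) >> 30;	/*    1 */
    	unsigned long long n_pud = ROUNDUP(size, 1ULL << 39) >> 39;	/*    1 */

    	printf("n_frames = %llu\n", n_pte + n_pt + n_pmd + n_pud);	/* 4106 */
    	return 0;
    }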
@@ -2065,76 +2046,56 @@ void __init xen_relocate_p2m(void)
 	 * To avoid any possible virtual address collision, just use
 	 * 2 * PUD_SIZE for the new area.
 	 */
-	p4d_phys = new_area;
-	pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+	pud_phys = new_area;
 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
 
 	pgd = __va(read_cr3_pa());
 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-	idx_p4d = 0;
 	save_pud = n_pud;
-	do {
-		if (n_p4d > 0) {
-			p4d = early_memremap(p4d_phys, PAGE_SIZE);
-			clear_page(p4d);
-			n_pud = min(save_pud, PTRS_PER_P4D);
-		}
-		for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
-			pud = early_memremap(pud_phys, PAGE_SIZE);
-			clear_page(pud);
-			for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
-			     idx_pmd++) {
-				pmd = early_memremap(pmd_phys, PAGE_SIZE);
-				clear_page(pmd);
-				for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
-				     idx_pt++) {
-					pt = early_memremap(pt_phys, PAGE_SIZE);
-					clear_page(pt);
-					for (idx_pte = 0;
-					     idx_pte < min(n_pte, PTRS_PER_PTE);
-					     idx_pte++) {
-						set_pte(pt + idx_pte,
-							pfn_pte(p2m_pfn, PAGE_KERNEL));
-						p2m_pfn++;
-					}
-					n_pte -= PTRS_PER_PTE;
-					early_memunmap(pt, PAGE_SIZE);
-					make_lowmem_page_readonly(__va(pt_phys));
-					pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
-							  PFN_DOWN(pt_phys));
-					set_pmd(pmd + idx_pt,
-						__pmd(_PAGE_TABLE | pt_phys));
-					pt_phys += PAGE_SIZE;
+	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+		pud = early_memremap(pud_phys, PAGE_SIZE);
+		clear_page(pud);
+		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+		     idx_pmd++) {
+			pmd = early_memremap(pmd_phys, PAGE_SIZE);
+			clear_page(pmd);
+			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+			     idx_pt++) {
+				pt = early_memremap(pt_phys, PAGE_SIZE);
+				clear_page(pt);
+				for (idx_pte = 0;
+				     idx_pte < min(n_pte, PTRS_PER_PTE);
+				     idx_pte++) {
+					set_pte(pt + idx_pte,
+						pfn_pte(p2m_pfn, PAGE_KERNEL));
+					p2m_pfn++;
 				}
-				n_pt -= PTRS_PER_PMD;
-				early_memunmap(pmd, PAGE_SIZE);
-				make_lowmem_page_readonly(__va(pmd_phys));
-				pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
-						  PFN_DOWN(pmd_phys));
-				set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
-				pmd_phys += PAGE_SIZE;
+				n_pte -= PTRS_PER_PTE;
+				early_memunmap(pt, PAGE_SIZE);
+				make_lowmem_page_readonly(__va(pt_phys));
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+						  PFN_DOWN(pt_phys));
+				set_pmd(pmd + idx_pt,
+					__pmd(_PAGE_TABLE | pt_phys));
+				pt_phys += PAGE_SIZE;
 			}
-			n_pmd -= PTRS_PER_PUD;
-			early_memunmap(pud, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(pud_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
-			if (n_p4d > 0)
-				set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
-			else
-				set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
-			pud_phys += PAGE_SIZE;
-		}
-		if (n_p4d > 0) {
-			save_pud -= PTRS_PER_P4D;
-			early_memunmap(p4d, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(p4d_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
-			set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
-			p4d_phys += PAGE_SIZE;
+			n_pt -= PTRS_PER_PMD;
+			early_memunmap(pmd, PAGE_SIZE);
+			make_lowmem_page_readonly(__va(pmd_phys));
+			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+					  PFN_DOWN(pmd_phys));
+			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+			pmd_phys += PAGE_SIZE;
 		}
-	} while (++idx_p4d < n_p4d);
+		n_pmd -= PTRS_PER_PUD;
+		early_memunmap(pud, PAGE_SIZE);
+		make_lowmem_page_readonly(__va(pud_phys));
+		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+		pud_phys += PAGE_SIZE;
+	}
 
 	/* Now copy the old p2m info to the new area. */
 	memcpy(new_p2m, xen_p2m_addr, size);
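The rewritten loop drops one nesting level but keeps the per-frame protocol intact: each table page is mapped with early_memremap(), zeroed, filled, unmapped, made read-only and pinned at its level (L1/L2/L3), and only then linked into its parent; the top level now links straight into pgd slots 2+ instead of going through a p4d page. The budget bookkeeping (n_pte, n_pt, n_pmd each decremented one level up) is what terminates the nest; a toy userspace model with 4-entry tables (illustrative only, not kernel geometry):

    #include <stdio.h>

    #define MIN(a, b)	((a) < (b) ? (a) : (b))

    enum { PTRS = 4 };	/* toy geometry: 4 entries per table, not 512 */

    int main(void)
    {
    	int n_pte = 23, n_pt = 6, n_pmd = 2, n_pud = 1;
    	int mapped = 0;

    	for (int idx_pud = 0; idx_pud < n_pud; idx_pud++) {
    		for (int idx_pmd = 0; idx_pmd < MIN(n_pmd, PTRS); idx_pmd++) {
    			for (int idx_pt = 0; idx_pt < MIN(n_pt, PTRS); idx_pt++) {
    				for (int idx_pte = 0; idx_pte < MIN(n_pte, PTRS); idx_pte++)
    					mapped++;	/* set_pte() in the real code */
    				n_pte -= PTRS;
    			}
    			n_pt -= PTRS;
    		}
    		n_pmd -= PTRS;
    	}
    	printf("mapped %d leaf entries\n", mapped);	/* prints 23 */
    	return 0;
    }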
@@ -2361,7 +2322,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.set_p4d = xen_set_p4d;
 #endif
 
@@ -2371,7 +2332,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
 	pv_mmu_ops.release_pte = xen_release_pte;
 	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2435,14 +2396,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
 	.set_p4d = xen_set_p4d_hyper,
 
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
 
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
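Both the static xen_mmu_ops table (boot-time stubs) and the later rewrite in xen_post_allocator_init() are now under the same guard, so a 32-bit build never references pud/p4d hooks that were not compiled in. Condensed from the hunks above (excerpt, not standalone code):

    #ifdef CONFIG_X86_64
    	.alloc_pud = xen_alloc_pmd_init,	/* boot-time stub */
    	.release_pud = xen_release_pmd_init,
    #endif

    	/* later, once the allocator is up: */
    #ifdef CONFIG_X86_64
    	pv_mmu_ops.alloc_pud = xen_alloc_pud;
    	pv_mmu_ops.release_pud = xen_release_pud;
    #endif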