Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 88 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 72 insertions(+), 16 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 21ed8d7f75a5..44924e551fde 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	set_page_prot(pmd, PAGE_KERNEL_RO);
 }
 
+void __init xen_setup_machphys_mapping(void)
+{
+	struct xen_machphys_mapping mapping;
+	unsigned long machine_to_phys_nr_ents;
+
+	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+		machine_to_phys_nr_ents = mapping.max_mfn + 1;
+	} else {
+		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+	}
+	machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+}
+
 #ifdef CONFIG_X86_64
 static void convert_pfn_mfn(void *v)
 {
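
Note: machine_to_phys_order computed above sizes the m2p table for the
bounds check done by m2p lookups. A minimal sketch of such a consumer
(hypothetical helper name; the real lookup lives in
arch/x86/include/asm/xen/page.h and may differ in detail):

	/* Sketch only: an MFN whose index does not fit below
	 * 1 << machine_to_phys_order lies outside the m2p table
	 * the hypervisor gave us and must not be dereferenced. */
	static inline unsigned long sketch_mfn_to_pfn(unsigned long mfn)
	{
		if (unlikely((mfn >> machine_to_phys_order) != 0))
			return ~0UL;	/* no valid PFN */
		return machine_to_phys_mapping[mfn];
	}
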
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	return pgd;
 }
 #else	/* !CONFIG_X86_64 */
-static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+
+static __init void xen_write_cr3_init(unsigned long cr3)
+{
+	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
+
+	BUG_ON(read_cr3() != __pa(initial_page_table));
+	BUG_ON(cr3 != __pa(swapper_pg_dir));
+
+	/*
+	 * We are switching to swapper_pg_dir for the first time (from
+	 * initial_page_table) and therefore need to mark that page
+	 * read-only and then pin it.
+	 *
+	 * Xen disallows sharing of kernel PMDs for PAE
+	 * guests. Therefore we must copy the kernel PMD from
+	 * initial_page_table into a new kernel PMD to be used in
+	 * swapper_pg_dir.
+	 */
+	swapper_kernel_pmd =
+		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+	memcpy(swapper_kernel_pmd, initial_kernel_pmd,
+	       sizeof(pmd_t) * PTRS_PER_PMD);
+	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
+		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
+	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
+
+	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	xen_write_cr3(cr3);
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
+
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+			  PFN_DOWN(__pa(initial_page_table)));
+	set_page_prot(initial_page_table, PAGE_KERNEL);
+	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
+
+	pv_mmu_ops.write_cr3 = &xen_write_cr3;
+}
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+	initial_kernel_pmd =
+		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
 				  512*1024);
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
 
-	xen_map_identity_early(level2_kernel_pgt, max_pfn);
+	xen_map_identity_early(initial_kernel_pmd, max_pfn);
 
-	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
-	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
-			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+	memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+	initial_page_table[KERNEL_PGD_BOUNDARY] =
+		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
 
-	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
+	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
 	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
 
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 
-	xen_write_cr3(__pa(swapper_pg_dir));
-
-	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+			  PFN_DOWN(__pa(initial_page_table)));
+	xen_write_cr3(__pa(initial_page_table));
 
 	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
 				   __pa(xen_start_info->pt_base +
 					xen_start_info->nr_pt_frames * PAGE_SIZE),
 				   "XEN PAGETABLES");
 
-	return swapper_pg_dir;
+	return initial_page_table;
 }
 #endif	/* CONFIG_X86_64 */
 
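
Note: the ordering in the hunk above matters because Xen refuses to pin
a pagetable page that is still mapped writable anywhere, so
set_page_prot(..., PAGE_KERNEL_RO) always precedes the PIN, and the
UNPIN always precedes making the old tables writable again. The pin
helpers are thin wrappers over the MMUEXT hypercall interface; roughly
(pin_pagetable_pfn is defined earlier in this file, so details may
differ):

	static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
	{
		struct mmuext_op op;

		op.cmd = cmd;			/* e.g. MMUEXT_PIN_L3_TABLE */
		op.arg1.mfn = pfn_to_mfn(pfn);	/* hypercall takes an MFN */
		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
			BUG();
	}
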
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.write_cr2 = xen_write_cr2,
 
 	.read_cr3 = xen_read_cr3,
+#ifdef CONFIG_X86_32
+	.write_cr3 = xen_write_cr3_init,
+#else
 	.write_cr3 = xen_write_cr3,
+#endif
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
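
Note: on 32-bit the ops table now starts out pointing at
xen_write_cr3_init, which runs exactly once (for the initial_page_table
to swapper_pg_dir switch) and then rewires pv_mmu_ops.write_cr3 to the
steady-state xen_write_cr3. A stripped-down illustration of that
one-shot hook pattern (hypothetical names, not kernel code):

	struct cr3_ops { void (*write_cr3)(unsigned long); };

	static void write_cr3_normal(unsigned long cr3) { /* fast path */ }

	static struct cr3_ops ops;	/* boot code installs write_cr3_init */

	static void write_cr3_init(unsigned long cr3)
	{
		/* one-time setup runs here, then the hook replaces itself */
		ops.write_cr3 = write_cr3_normal;
		write_cr3_normal(cr3);
	}
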
@@ -2358,8 +2415,6 @@ void __init xen_init_mmu_ops(void)
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
 
-	vmap_lazy_unmap = false;
-
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
 }
 
@@ -2627,7 +2682,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
+				(VM_PFNMAP | VM_RESERVED | VM_IO)));
 
 	rmd.mfn = mfn;
 	rmd.prot = prot;
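
Note: with this hunk xen_remap_domain_mfn_range() stops setting the VM
flags itself and instead BUG()s if the caller forgot them, so every
caller must set VM_IO | VM_RESERVED | VM_PFNMAP up front before
calling in. A hypothetical caller sketch (example_mmap and example_mfn
are illustrative names, not in-tree code; assumes this function's
vma/addr/mfn/nr/prot/domid signature):

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* flags must be in place before the remap call */
		vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
		return xen_remap_domain_mfn_range(vma, vma->vm_start,
						  example_mfn, 1,
						  vma->vm_page_prot,
						  DOMID_SELF);
	}
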