author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-07-26 12:47:40 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-23 11:52:14 -0400
commit		7f9140626c757b773624b97865cb53c2a8348a69 (patch)
tree		86b9035ee71b275d0fb9d23f180b03b797b3f758 /arch/x86
parent		357a3cfb147ee8e97c6f9cdc51e9a33aa56f7d99 (diff)
xen/mmu: Copy and revector the P2M tree.
Please first read the description in the "xen/p2m: Add logic to revector a P2M tree to use __va leafs" patch.

The 'xen_revector_p2m_tree()' function allocates a new P2M tree, copies the contents of the old one into it, and returns the new one.

At this stage, the __ka address space (which is what the old P2M tree was using) is partially disassembled. cleanup_highmap() has removed the PMD entries from 0-16MB and anything past _brk_end up to max_pfn_mapped (which is the end of the ramdisk).

We have revectored the P2M tree (and the one for save/restore as well) to use the new shiny __va addresses of the new MFNs. The xen_start_info has already been taken care of in 'xen_setup_kernel_pagetable()' and xen_start_info->shared_info in 'xen_setup_shared_info()', so we are free to roam and delete PMD entries - which is exactly what we are going to do. We rip out the __ka mapping for the old P2M array.

[v1: Fix smatch warnings]
[v2: memset was doing 0 instead of 0xff]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
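To make the copy-and-revector step easier to picture outside of early boot, here is a minimal userspace C model of what the commit message describes: build a fresh copy of a two-level MFN table, hand back the new top level, then poison and free the old one. Everything in it (the two-level top/leaf layout, ENTRIES_PER_PAGE, TOP_ENTRIES, revector_p2m()) is an illustrative stand-in, not the kernel's actual P2M structures and not the xen_revector_p2m_tree() implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ENTRIES_PER_PAGE 512	/* stand-in for the number of MFNs per leaf page */
#define TOP_ENTRIES	 4	/* tiny tree, just for the example */

typedef unsigned long mfn_t;

/* Allocate a new tree, copy every leaf of the old one into it, and
 * return the new top level.  The old tree is left in place so the
 * caller can poison it and release its backing store afterwards,
 * which is what the patch does with memset(..., 0xff, ...) followed
 * by memblock_free(). */
static mfn_t **revector_p2m(mfn_t **old_top)
{
	mfn_t **new_top = calloc(TOP_ENTRIES, sizeof(*new_top));
	if (!new_top)
		return NULL;

	for (int i = 0; i < TOP_ENTRIES; i++) {
		if (!old_top[i])
			continue;		/* hole in the tree */
		new_top[i] = malloc(ENTRIES_PER_PAGE * sizeof(mfn_t));
		if (!new_top[i])
			exit(1);		/* keep the sketch short */
		memcpy(new_top[i], old_top[i], ENTRIES_PER_PAGE * sizeof(mfn_t));
	}
	return new_top;
}

int main(void)
{
	/* Build a fake "old" tree with a single populated leaf. */
	mfn_t **old_top = calloc(TOP_ENTRIES, sizeof(*old_top));
	old_top[0] = malloc(ENTRIES_PER_PAGE * sizeof(mfn_t));
	for (int i = 0; i < ENTRIES_PER_PAGE; i++)
		old_top[0][i] = i;

	/* Copy and revector... */
	mfn_t **new_top = revector_p2m(old_top);
	if (!new_top)
		return 1;

	/* ...then poison and drop the old array, mirroring the
	 * memset(0xff)/memblock_free() pair in the patch. */
	memset(old_top[0], 0xff, ENTRIES_PER_PAGE * sizeof(mfn_t));
	free(old_top[0]);
	free(old_top);

	printf("entry 42 after revector: %lu\n", new_top[0][42]);
	return 0;
}

Compiled with a plain cc and run, this prints 42: the copied leaf stays reachable through the new top level even after the old array has been wiped, which is exactly the property the real patch relies on before tearing down the __ka mapping.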
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/mmu.c	57
1 file changed, 57 insertions, 0 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b44e6a88ea74..a640949f78d4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1183,9 +1183,64 @@ static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
 
 static void xen_post_allocator_init(void);
 
+#ifdef CONFIG_X86_64
+static void __init xen_cleanhighmap(unsigned long vaddr,
+				    unsigned long vaddr_end)
+{
+	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
+
+	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
+	 * We include the PMD passed in on _both_ boundaries. */
+	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+			pmd++, vaddr += PMD_SIZE) {
+		if (pmd_none(*pmd))
+			continue;
+		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
+			set_pmd(pmd, __pmd(0));
+	}
+	/* In case we did something silly, we should crash in this function
+	 * instead of somewhere later and be confusing. */
+	xen_mc_flush();
+}
+#endif
 static void __init xen_pagetable_setup_done(pgd_t *base)
 {
+#ifdef CONFIG_X86_64
+	unsigned long size;
+	unsigned long addr;
+#endif
+
 	xen_setup_shared_info();
+#ifdef CONFIG_X86_64
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		unsigned long new_mfn_list;
+
+		size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+		/* On 32-bit, we get zero so this never gets executed. */
+		new_mfn_list = xen_revector_p2m_tree();
+		if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
+			/* using __ka address and sticking INVALID_P2M_ENTRY! */
+			memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+			/* We should be in __ka space. */
+			BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+			addr = xen_start_info->mfn_list;
+			size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+			/* We roundup to the PMD, which means that if anybody at this stage is
+			 * using the __ka address of xen_start_info or xen_start_info->shared_info
+			 * they are in going to crash. Fortunatly we have already revectored
+			 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+			size = roundup(size, PMD_SIZE);
+			xen_cleanhighmap(addr, addr + size);
+
+			memblock_free(__pa(xen_start_info->mfn_list), size);
+			/* And revector! Bye bye old array */
+			xen_start_info->mfn_list = new_mfn_list;
+		}
+	}
+#endif
 	xen_post_allocator_init();
 }
 
@@ -1824,6 +1879,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 
 	/* Our (by three pages) smaller Xen pagetable that we are using */
 	memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
+	/* Revector the xen_start_info */
+	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
 }
 #else /* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
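A closing note on the second hunk: __va(__pa(ptr)) rewrites a pointer from the __ka alias (the __START_KERNEL_map based mapping this series tears down) to the direct-map alias of the same physical address. The toy model below shows that address arithmetic with made-up base constants; KA_BASE, VA_BASE, and the start_info offset are illustrative stand-ins, not the kernel's actual memory layout or macros.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the two x86-64 kernel aliases of physical memory: the
 * __ka mapping used by early boot code and the direct map returned by
 * __va().  Both bases are stand-in constants for this sketch. */
#define KA_BASE 0xffffffff80000000ULL	/* stand-in for __START_KERNEL_map */
#define VA_BASE 0xffff880000000000ULL	/* stand-in for PAGE_OFFSET        */

static uint64_t pa_from_ka(uint64_t ka) { return ka - KA_BASE; }	/* like __pa() on a __ka pointer */
static uint64_t va_from_pa(uint64_t pa) { return pa + VA_BASE; }	/* like __va()                    */

int main(void)
{
	uint64_t start_info_ka = KA_BASE + 0x1234000;	/* hypothetical __ka address */
	uint64_t start_info_va = va_from_pa(pa_from_ka(start_info_ka));

	/* Same physical page, now reachable through the direct map, so the
	 * __ka PMDs covering it can be torn down afterwards. */
	printf("__ka %#llx -> __va %#llx (pa %#llx)\n",
	       (unsigned long long)start_info_ka,
	       (unsigned long long)start_info_va,
	       (unsigned long long)pa_from_ka(start_info_ka));
	return 0;
}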