author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-12-31 12:37:52 -0500
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2014-01-06 10:44:02 -0500
commit     32df75cd148b43e007848ddbfdb1ea25535114cb (patch)
tree       9c0154cf25df8736faa3c2315172a1258e18749e /arch/x86
parent     696fd7c5b2ecb31b339019ced4fe15a3f9e7419a (diff)
xen/mmu/p2m: Refactor the xen_pagetable_init code (v2).
The revectoring and copying of the P2M only happens when !auto-xlat and on 64-bit builds. This is not obvious from the code, so let's have separate 32-bit and 64-bit functions. We also invert the check for auto-xlat to make the code flow simpler.

Suggested-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
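[Editor's note] The shape of the refactor is easy to see in miniature. Below is a compilable sketch of the before/after control flow using hypothetical stand-in names (auto_xlat, revectored, copy_p2m, clean_highmap) rather than the actual mmu.c code; the real patch in the diff that follows does the same thing with xen_feature(XENFEAT_auto_translated_physmap) and xen_pagetable_p2m_copy().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real Xen predicates and work items;
 * none of these names exist in mmu.c. */
static bool auto_xlat(void)     { return false; }
static bool revectored(void)    { return true; }
static void copy_p2m(void)      { puts("copy and revector P2M"); }
static void clean_highmap(void) { puts("clean __ka mappings"); }

/* Before: the !auto-xlat work is buried in a nested conditional and
 * escapes through a goto when no revectoring took place. */
static void pagetable_init_old(void)
{
        if (!auto_xlat()) {
                if (revectored())
                        copy_p2m();
                else
                        goto skip;
                clean_highmap();
        }
skip:
        puts("post-allocator init");
}

/* After: the work moves into its own helper; the inverted feature
 * check becomes an early-return guard, so the body reads straight
 * down and the caller stays flat. */
static void pagetable_p2m_copy(void)
{
        if (auto_xlat())
                return; /* nothing to copy for auto-translated guests */
        if (!revectored())
                return;
        copy_p2m();
        clean_highmap();
}

static void pagetable_init_new(void)
{
        pagetable_p2m_copy();
        puts("post-allocator init");
}

int main(void)
{
        pagetable_init_old();
        pagetable_init_new();
        return 0;
}

The payoff in the actual patch is the same: the 64-bit-only work collapses to a single call under #ifdef CONFIG_X86_64 in xen_pagetable_init(), and the auto-xlat case costs one early return instead of threading a goto through the function.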
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/mmu.c  70
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ce563be09cc1..c140efffe37e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1198,44 +1198,40 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
         * instead of somewhere later and be confusing. */
        xen_mc_flush();
 }
-#endif
-static void __init xen_pagetable_init(void)
+static void __init xen_pagetable_p2m_copy(void)
 {
-#ifdef CONFIG_X86_64
        unsigned long size;
        unsigned long addr;
-#endif
-       paging_init();
-       xen_setup_shared_info();
-#ifdef CONFIG_X86_64
-       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               unsigned long new_mfn_list;
+       unsigned long new_mfn_list;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return;
+
+       size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+       /* On 32-bit, we get zero so this never gets executed. */
+       new_mfn_list = xen_revector_p2m_tree();
+       if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
+               /* using __ka address and sticking INVALID_P2M_ENTRY! */
+               memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+               /* We should be in __ka space. */
+               BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+               addr = xen_start_info->mfn_list;
+               /* We roundup to the PMD, which means that if anybody at this stage is
+                * using the __ka address of xen_start_info or xen_start_info->shared_info
+                * they are in going to crash. Fortunatly we have already revectored
+                * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+               size = roundup(size, PMD_SIZE);
+               xen_cleanhighmap(addr, addr + size);

                size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+               memblock_free(__pa(xen_start_info->mfn_list), size);
+               /* And revector! Bye bye old array */
+               xen_start_info->mfn_list = new_mfn_list;
+       } else
+               return;

-               /* On 32-bit, we get zero so this never gets executed. */
-               new_mfn_list = xen_revector_p2m_tree();
-               if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
-                       /* using __ka address and sticking INVALID_P2M_ENTRY! */
-                       memset((void *)xen_start_info->mfn_list, 0xff, size);
-
-                       /* We should be in __ka space. */
-                       BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
-                       addr = xen_start_info->mfn_list;
-                       /* We roundup to the PMD, which means that if anybody at this stage is
-                        * using the __ka address of xen_start_info or xen_start_info->shared_info
-                        * they are in going to crash. Fortunatly we have already revectored
-                        * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
-                       size = roundup(size, PMD_SIZE);
-                       xen_cleanhighmap(addr, addr + size);
-
-                       size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
-                       memblock_free(__pa(xen_start_info->mfn_list), size);
-                       /* And revector! Bye bye old array */
-                       xen_start_info->mfn_list = new_mfn_list;
-               } else
-                       goto skip;
-       }
        /* At this stage, cleanup_highmap has already cleaned __ka space
         * from _brk_limit way up to the max_pfn_mapped (which is the end of
         * the ramdisk). We continue on, erasing PMD entries that point to page
@@ -1255,7 +1251,15 @@ static void __init xen_pagetable_init(void)
         * anything at this stage. */
        xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
 #endif
-skip:
+}
+#endif
+
+static void __init xen_pagetable_init(void)
+{
+       paging_init();
+       xen_setup_shared_info();
+#ifdef CONFIG_X86_64
+       xen_pagetable_p2m_copy();
 #endif
        xen_post_allocator_init();
 }