aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-08-22 13:00:10 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-08-23 11:33:27 -0400
commit51faaf2b0d5c7f44d82964f0c70b1c4e44d4e633 (patch)
tree7baa22c81606ceeb19e61d2023cc519869501fa1
parent806c312e50f122c47913145cf884f53dd09d9199 (diff)
Revert "xen/x86: Workaround 64-bit hypervisor and 32-bit initial domain." and "xen/x86: Use memblock_reserve for sensitive areas."
This reverts commit 806c312e50f122c47913145cf884f53dd09d9199 and commit 59b294403e9814e7c1154043567f0d71bac7a511. And also documents setup.c and why we want to do it that way, which is that we tried to make the memblock_reserve more selective so that it would be clear what region is reserved. Sadly we ran into the problem wherein on a 64-bit hypervisor with a 32-bit initial domain, the pt_base has the cr3 value which is not necessarily where the pagetable starts! As Jan put it: " Actually, the adjustment turns out to be correct: The page tables for a 32-on-64 dom0 get allocated in the order "first L1", "first L2", "first L3", so the offset to the page table base is indeed 2. When reading xen/include/public/xen.h's comment very strictly, this is not a violation (since there nothing is said that the first thing in the page table space is pointed to by pt_base; I admit that this seems to be implied though, namely do I think that it is implied that the page table space is the range [pt_base, pt_base + nr_pt_frames), whereas that range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames), which - without a priori knowledge - the kernel would have difficulty to figure out)." - so let's just fall back to the easy way and reserve the whole region. Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--arch/x86/xen/enlighten.c60
-rw-r--r--arch/x86/xen/p2m.c5
-rw-r--r--arch/x86/xen/setup.c27
3 files changed, 27 insertions, 65 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 511f92d79e4a..ff962d4b821e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -998,66 +998,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
998 998
999 return ret; 999 return ret;
1000} 1000}
1001/*
1002 * If the MFN is not in the m2p (provided to us by the hypervisor) this
1003 * function won't do anything. In practice this means that the XenBus
1004 * MFN won't be available for the initial domain. */
1005static unsigned long __init xen_reserve_mfn(unsigned long mfn)
1006{
1007 unsigned long pfn, end_pfn = 0;
1008
1009 if (!mfn)
1010 return end_pfn;
1011
1012 pfn = mfn_to_pfn(mfn);
1013 if (phys_to_machine_mapping_valid(pfn)) {
1014 end_pfn = PFN_PHYS(pfn) + PAGE_SIZE;
1015 memblock_reserve(PFN_PHYS(pfn), end_pfn);
1016 }
1017 return end_pfn;
1018}
1019static void __init xen_reserve_internals(void)
1020{
1021 unsigned long size;
1022 unsigned long last_phys = 0;
1023
1024 if (!xen_pv_domain())
1025 return;
1026
1027 /* xen_start_info does not exist in the M2P, hence can't use
1028 * xen_reserve_mfn. */
1029 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
1030 last_phys = __pa(xen_start_info) + PAGE_SIZE;
1031
1032 last_phys = max(xen_reserve_mfn(PFN_DOWN(xen_start_info->shared_info)), last_phys);
1033 last_phys = max(xen_reserve_mfn(xen_start_info->store_mfn), last_phys);
1034 1001
1035 if (!xen_initial_domain())
1036 last_phys = max(xen_reserve_mfn(xen_start_info->console.domU.mfn), last_phys);
1037
1038 if (xen_feature(XENFEAT_auto_translated_physmap))
1039 return;
1040
1041 /*
1042 * ALIGN up to compensate for the p2m_page pointing to an array that
1043 * can partially filled (look in xen_build_dynamic_phys_to_machine).
1044 */
1045
1046 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1047
1048 /* We could use xen_reserve_mfn here, but would end up looping quite
1049 * a lot (and call memblock_reserve for each PAGE), so lets just use
1050 * the easy way and reserve it wholesale. */
1051 memblock_reserve(__pa(xen_start_info->mfn_list), size);
1052 last_phys = max(__pa(xen_start_info->mfn_list) + size, last_phys);
1053 /* The pagetables are reserved in mmu.c */
1054
1055 /* Under 64-bit hypervisor with a 32-bit domain, the hypervisor
1056 * offsets the pt_base by two pages. Hence the reservation that is done
1057 * in mmu.c misses two pages. We correct it here if we detect this. */
1058 if (last_phys < __pa(xen_start_info->pt_base))
1059 memblock_reserve(last_phys, __pa(xen_start_info->pt_base) - last_phys);
1060}
1061void xen_setup_shared_info(void) 1002void xen_setup_shared_info(void)
1062{ 1003{
1063 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 1004 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1421,7 +1362,6 @@ asmlinkage void __init xen_start_kernel(void)
1421 xen_raw_console_write("mapping kernel into physical memory\n"); 1362 xen_raw_console_write("mapping kernel into physical memory\n");
1422 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 1363 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
1423 1364
1424 xen_reserve_internals();
1425 /* Allocate and initialize top and mid mfn levels for p2m structure */ 1365 /* Allocate and initialize top and mid mfn levels for p2m structure */
1426 xen_build_mfn_list_list(); 1366 xen_build_mfn_list_list();
1427 1367
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 6a2bfa43c8a1..e4adbfbdfada 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -388,11 +388,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
388 } 388 }
389 389
390 m2p_override_init(); 390 m2p_override_init();
391
392 /* NOTE: We cannot call memblock_reserve here for the mfn_list as there
393 * isn't enough pieces to make it work (for one - we are still using the
394 * Xen provided pagetable). Do it later in xen_reserve_internals.
395 */
396} 391}
397 392
398unsigned long get_phys_to_machine(unsigned long pfn) 393unsigned long get_phys_to_machine(unsigned long pfn)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 9efca750405d..740517be4da5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -424,6 +424,33 @@ char * __init xen_memory_setup(void)
424 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, 424 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
425 E820_RESERVED); 425 E820_RESERVED);
426 426
427 /*
428 * Reserve Xen bits:
429 * - mfn_list
430 * - xen_start_info
431 * See comment above "struct start_info" in <xen/interface/xen.h>
432 * We tried to make the memblock_reserve more selective so
433 * that it would be clear what region is reserved. Sadly we ran
434 * into the problem wherein on a 64-bit hypervisor with a 32-bit
435 * initial domain, the pt_base has the cr3 value which is not
436 * necessarily where the pagetable starts! As Jan put it: "
437 * Actually, the adjustment turns out to be correct: The page
438 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
439 * "first L2", "first L3", so the offset to the page table base is
440 * indeed 2. When reading xen/include/public/xen.h's comment
441 * very strictly, this is not a violation (since there nothing is said
442 * that the first thing in the page table space is pointed to by
443 * pt_base; I admit that this seems to be implied though, namely
444 * do I think that it is implied that the page table space is the
445 * range [pt_base, pt_base + nr_pt_frames), whereas that
446 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
447 * which - without a priori knowledge - the kernel would have
448 * difficulty to figure out)." - so let's just fall back to the
449 * easy way and reserve the whole region.
450 */
451 memblock_reserve(__pa(xen_start_info->mfn_list),
452 xen_start_info->pt_base - xen_start_info->mfn_list);
453
427 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 454 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
428 455
429 return "Xen"; 456 return "Xen";