author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-14 16:37:31 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2012-08-23 11:52:16 -0400
commit		785f62314984ea3af9dd830b020289ba2509ae69
tree		cf0c1b8da0c2b9374e537e6310b2e8b584e37f2e /arch
parent		3aca7fbc8ede0dd194317b2e3144815128ffb232
xen/mmu: Release just the MFN list, not MFN list and part of pagetables.
We call memblock_free for [start of mfn list] -> [PMD aligned end
of mfn list] instead of [start of mfn list] -> [page aligned end of
mfn list] (a worked example of the arithmetic follows the log below).
This has the disastrous effect that if at bootup the end of mfn_list
is not PMD aligned, we end up returning to memblock parts of the
region past the mfn_list array. And those parts are PTE tables, so
we see this at bootup:
Write protecting the kernel read-only data: 10240k
Freeing unused kernel memory: 1860k freed
Freeing unused kernel memory: 200k freed
(XEN) mm.c:2429:d0 Bad type (saw 1400000000000002 != exp 7000000000000000) for mfn 116a80 (pfn 14e26)
...
(XEN) mm.c:908:d0 Error getting mfn 116a83 (pfn 14e2a) from L1 entry 8000000116a83067 for l1e_owner=0, pg_owner=0
(XEN) mm.c:908:d0 Error getting mfn 4040 (pfn 5555555555555555) from L1 entry 0000000004040601 for l1e_owner=0, pg_owner=0
.. and so on.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
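
To make the alignment arithmetic concrete, here is a minimal userspace
sketch (an editorial illustration, not part of the patch): the guest
size of 300000 pages is a made-up example, and ALIGN_UP stands in for
the kernel's PAGE_ALIGN and roundup helpers.

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* x86-64 base page */
#define PMD_SIZE  (2UL * 1024 * 1024)    /* one PMD maps 2 MB */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical guest: ~1.1 GB, so the list does not end on a
	 * 2 MB boundary. */
	unsigned long nr_pages = 300000;
	unsigned long list_bytes = nr_pages * sizeof(unsigned long);

	unsigned long page_end = ALIGN_UP(list_bytes, PAGE_SIZE);
	unsigned long pmd_end  = ALIGN_UP(list_bytes, PMD_SIZE);

	printf("mfn_list size: %lu bytes\n", list_bytes);
	printf("page aligned:  %lu bytes (what memblock_free should get)\n",
	       page_end);
	printf("PMD aligned:   %lu bytes (what it got before this fix)\n",
	       pmd_end);
	printf("over-freed:    %lu bytes past the end of the array\n",
	       pmd_end - page_end);
	return 0;
}

Running it shows roughly 1.7 MB handed back to memblock beyond the end
of the array; in the failing boot that extra region held live PTE
tables, which is what the Xen log above complains about.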
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/xen/mmu.c	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3f8e963b76c0..5b2cb54425ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1227,7 +1227,6 @@ static void __init xen_pagetable_setup_done(pgd_t *base)
 	/* We should be in __ka space. */
 	BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
 	addr = xen_start_info->mfn_list;
-	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 	/* We roundup to the PMD, which means that if anybody at this stage is
 	 * using the __ka address of xen_start_info or xen_start_info->shared_info
 	 * they are in going to crash. Fortunatly we have already revectored
@@ -1235,6 +1234,7 @@ static void __init xen_pagetable_setup_done(pgd_t *base)
 	size = roundup(size, PMD_SIZE);
 	xen_cleanhighmap(addr, addr + size);
 
+	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 	memblock_free(__pa(xen_start_info->mfn_list), size);
 	/* And revector! Bye bye old array */
 	xen_start_info->mfn_list = new_mfn_list;
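
Read as a whole, the fixed sequence tears down the __ka mapping at PMD
granularity but returns to memblock only the page-aligned footprint of
the array. A condensed reconstruction of the post-fix order (context
lines elided from the hunk above, so this is a sketch, not verbatim
mmu.c):

	/* Unmap at PMD granularity: the __ka cleanup deliberately covers
	 * the whole 2 MB mappings, even past the end of the array. */
	size = roundup(size, PMD_SIZE);
	xen_cleanhighmap(addr, addr + size);

	/* Free at page granularity: recompute size so memblock gets back
	 * only the mfn_list pages, not the page tables behind them. */
	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	memblock_free(__pa(xen_start_info->mfn_list), size);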