author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>    2011-04-14 10:49:41 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>           2011-05-12 13:05:04 -0400
commit     279b706bf800b5967037f492dbe4fc5081ad5d0f
tree       608b9455cd6af84018bd5096b45e0ca90f9c7619 /arch/x86/xen/mmu.c
parent     92bdaef7b2c5d3cb8abc902faa1f7670a183dcdc
x86,xen: introduce x86_init.mapping.pagetable_reserve
Introduce a new x86_init hook called pagetable_reserve that is invoked at
the end of init_memory_mapping to reserve the range of memory addresses
used for the kernel pagetable pages and to free the remaining ones.
On native it just calls memblock_x86_reserve_range, while on Xen it also
takes care of setting the spare memory previously allocated for kernel
pagetable pages back from RO to RW, so that it can be used for other
purposes.
A detailed explanation of why this hook is needed follows.
As a consequence of the commit:

    commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
    Author: Yinghai Lu <yinghai@kernel.org>
    Date:   Fri Dec 17 16:58:28 2010 -0800

        x86-64, mm: Put early page table high
at some point init_memory_mapping is going to reach the pagetable pages
area and map those pages too (mapping them as normal memory that falls
within the range of addresses passed to init_memory_mapping as an
argument).
Some of those pages are already pagetable pages (they are in the range
pgt_buf_start-pgt_buf_end), therefore they are going to be mapped RO and
everything is fine.
Some of these pages are not pagetable pages yet (they fall in the range
pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end), so they
are going to be mapped RW. When these pages later become pagetable pages
and are hooked into the pagetable, Xen will find that the guest already
has an RW mapping of them somewhere and will fail the operation.
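To make the two sub-ranges concrete, here is a purely illustrative helper
(hypothetical, not kernel code) classifying a pfn inside the early
pagetable allocator's window at the time init_memory_mapping maps it:

    enum pgt_buf_state { PGT_IN_USE, PGT_SPARE, PGT_OUTSIDE };

    /* pgt_buf_end advances as pagetable pages are allocated, so a
     * PGT_SPARE page (mapped RW) can still become a pagetable page
     * later; that is exactly the case Xen rejects */
    static enum pgt_buf_state classify_pgt_pfn(unsigned long pfn)
    {
        if (pfn >= pgt_buf_start && pfn < pgt_buf_end)
            return PGT_IN_USE;  /* already a pagetable page, mapped RO */
        if (pfn >= pgt_buf_start && pfn < pgt_buf_top)
            return PGT_SPARE;   /* not a pagetable page yet, mapped RW */
        return PGT_OUTSIDE;
    }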
The reason Xen requires pagetables to be RO is that the hypervisor needs
to verify that the pagetables are valid before using them. The validation
operations are called "pinning" (more details in arch/x86/xen/mmu.c).
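Concretely, pinning boils down to a hypercall asking the hypervisor to
validate a frame and type it as a pagetable; a rough sketch of the shape
of the operation (xen_do_pin in this file issues it through the multicall
machinery):

    struct mmuext_op op;

    op.cmd = MMUEXT_PIN_L1_TABLE + level;   /* level 0..3 selects L1..L4 */
    op.arg1.mfn = pfn_to_mfn(pfn);
    if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
        BUG();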
In order to fix the issue we mark all the pages in the entire range
pgt_buf_start-pgt_buf_top as RO; however, when the pagetable allocation
is completed, only the range pgt_buf_start-pgt_buf_end is reserved by
init_memory_mapping. Hence the kernel is going to crash as soon as one
of the pages in the range pgt_buf_end-pgt_buf_top is reused, because
those pages are still mapped RO.
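The RO marking happens on the Xen side as the PTEs covering this region
are constructed; a simplified sketch of that filter (the real mask_rw_pte
in this file also special-cases early_ioremap fixmap slots):

    static pte_t mask_rw_pte_sketch(pte_t pte)
    {
        unsigned long pfn = pte_pfn(pte);

        /* force RO for the allocator's whole window, i.e. up to
         * pgt_buf_top rather than pgt_buf_end */
        if (pfn >= pgt_buf_start && pfn < pgt_buf_top)
            pte = pte_wrprotect(pte);

        return pte;
    }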
For this reason we need a hook to reserve the kernel pagetable pages we
used and free the other ones so that they can be reused for other
purposes.
On native this just means calling memblock_x86_reserve_range; on Xen it
also means marking RW the pagetable pages that we allocated earlier but
never actually used as pagetables.
Another way to fix this without using the hook would be to add an 'if
(xen_pv_domain())' check to the init_memory_mapping code and call the Xen
counterpart directly, but that is just nasty.
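For illustration, that rejected alternative would look roughly like this
inside the generic code (a sketch only, showing the layering violation;
xen_mapping_pagetable_reserve is the Xen-side function added below):

    if (pgt_buf_end > pgt_buf_start) {
        if (xen_pv_domain())
            xen_mapping_pagetable_reserve(PFN_PHYS(pgt_buf_start),
                                          PFN_PHYS(pgt_buf_end));
        else
            memblock_x86_reserve_range(PFN_PHYS(pgt_buf_start),
                                       PFN_PHYS(pgt_buf_end),
                                       "PGTABLE");
    }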
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cf4ef61e425b..0684f3c74d53 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+	/* reserve the range used */
+	native_pagetable_reserve(start, end);
+
+	/* set as RW the rest */
+	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+			PFN_PHYS(pgt_buf_top));
+	while (end < PFN_PHYS(pgt_buf_top)) {
+		make_lowmem_page_readwrite(__va(end));
+		end += PAGE_SIZE;
+	}
+}
+
 static void xen_post_allocator_init(void);
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -2105,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
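For context, the caller side (in arch/x86/mm/init.c, outside this
diffstat) ends init_memory_mapping by invoking the hook instead of
reserving the range directly; a sketch consistent with the description
above:

    if (!after_bootmem && pgt_buf_end > pgt_buf_start)
        x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
                                           PFN_PHYS(pgt_buf_end));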