author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-08 18:06:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-16 05:01:45 -0400
commit	8745f8b0b914cf1d617ecc49726c24011858c74e (patch)
tree	1e4460fc2bb5f9da5bf66370a0fd388f78be0649 /arch/x86/xen
parent	4560a2947e32670fc6ede108c2b032c396180649 (diff)
xen64: defer setting pagetable alloc/release ops
We need to wait until struct page is available before switching to the
proper pagetable-page alloc/release operations, since those operations
use struct page to determine whether a pagetable page is pinned.
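For context, a minimal sketch of why these hooks depend on struct page
(paraphrased, not the verbatim kernel code; the real hooks live in
arch/x86/xen/mmu.c and also handle highmem and the pinning hypercalls):

#include <linux/mm.h>           /* struct page, pfn_to_page(), virt_to_page() */
#include <linux/page-flags.h>   /* PagePinned()/SetPagePinned(), the PG_pinned helpers of this era */

/* Sketch of an alloc hook: called when a new pte page is allocated for @mm. */
static void xen_alloc_pte_sketch(struct mm_struct *mm, u32 pfn)
{
	/* pfn_to_page() indexes mem_map, which only exists once the
	 * core allocator has set up the struct page array -- hence
	 * this hook cannot be installed any earlier. */
	struct page *page = pfn_to_page(pfn);

	/* Xen requires every pagetable page reachable from a pinned
	 * pgd to be pinned (read-only to the guest) as well. */
	if (PagePinned(virt_to_page(mm->pgd)))
		SetPagePinned(page);
}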
This happened to work on 32-bit because nobody allocated new pagetable
pages in the interim between xen_pagetable_setup_done and
xen_post_allocator_init, but the 64-bit kernel needs to allocate more
pagetable levels.
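To make the window concrete, a hypothetical sketch of what a hook that
is safe in that interim has to look like: it must avoid struct page
entirely and work only through the kernel virtual address (roughly the
shape of the early _init variants of these hooks; the declaration of
make_lowmem_page_readonly is assumed from the Xen mmu code in this tree):

#include <linux/mm.h>    /* struct mm_struct */
#include <linux/pfn.h>   /* PFN_PHYS() */
#include <asm/page.h>    /* __va() */

void make_lowmem_page_readonly(void *vaddr);	/* assumed declaration */

/* Hypothetical early variant: usable before mem_map exists because it
 * never touches struct page; it only write-protects the new pagetable
 * page through its lowmem virtual address. */
static void xen_alloc_pte_early_sketch(struct mm_struct *mm, u32 pfn)
{
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}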
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/enlighten.c	25 ++++++++++++-------------
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 19c12a6c7311..da91404fc66c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -878,30 +878,29 @@ void xen_setup_shared_info(void)
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
 {
-	/* This will work as long as patching hasn't happened yet
-	   (which it hasn't) */
-	pv_mmu_ops.alloc_pte = xen_alloc_pte;
-	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
-	pv_mmu_ops.release_pte = xen_release_pte;
-	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
-	pv_mmu_ops.alloc_pud = xen_alloc_pud;
-	pv_mmu_ops.release_pud = xen_release_pud;
-#endif
-
-	pv_mmu_ops.set_pte = xen_set_pte;
-
 	xen_setup_shared_info();
 }
 
 static __init void xen_post_allocator_init(void)
 {
+	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
 #if PAGETABLE_LEVELS == 4
 	pv_mmu_ops.set_pgd = xen_set_pgd;
 #endif
 
+	/* This will work as long as patching hasn't happened yet
+	   (which it hasn't) */
+	pv_mmu_ops.alloc_pte = xen_alloc_pte;
+	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+	pv_mmu_ops.release_pte = xen_release_pte;
+	pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.alloc_pud = xen_alloc_pud;
+	pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
 	xen_mark_init_mm_pinned();
 }
 