author     Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-08 18:06:38 -0400
committer  Ingo Molnar <mingo@elte.hu>	2008-07-16 04:57:16 -0400
commit     f6e587325b3bc7e5c829a407ddc25b52c1e73851
tree       bc525b9bf2967491593b89bf022888102dfff57e
parent     e74359028d5489a281fb2c379a47b1d3cb14526e
xen64: add extra pv_mmu_ops
We need extra pv_mmu_ops for 64-bit, to deal with the extra pagetable level.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 arch/x86/xen/enlighten.c | 33
 arch/x86/xen/mmu.c       | 51
 arch/x86/xen/mmu.h       | 15
 3 files changed, 95 insertions, 4 deletions
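
For orientation before the diff: 64-bit x86 uses a four-level pagetable (pte, pmd, pud, pgd), one level more than 32-bit, so pud- and pgd-level updates need paravirt hooks of their own. The kernel reaches these hooks through indirect calls generated by its PVOP_* macros; the standalone sketch below hand-rolls that indirection with simplified, invented types to show the pattern this patch extends. It is not the kernel's actual code.

/* Minimal standalone sketch of paravirt-op dispatch. NOT kernel code:
 * the types and the dispatch wrapper are simplified assumptions; the
 * real kernel generates this indirection with PVOP_* macros. */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t pgd; } pgd_t;

struct pv_mmu_ops {
	void (*set_pgd)(pgd_t *pgdp, pgd_t val);	/* hook added for 4 levels */
};

/* Native backend: a pgd write is just a store. */
static void native_set_pgd(pgd_t *pgdp, pgd_t val)
{
	*pgdp = val;
}

/* Xen-style backend: a pinned pagetable must be updated via the
 * hypervisor, so the hook does something other than a plain store. */
static void xen_style_set_pgd(pgd_t *pgdp, pgd_t val)
{
	printf("would issue mmu_update hypercall for %p <- %llx\n",
	       (void *)pgdp, (unsigned long long)val.pgd);
}

static struct pv_mmu_ops pv_mmu_ops = { .set_pgd = native_set_pgd };

static inline void set_pgd(pgd_t *pgdp, pgd_t val)
{
	pv_mmu_ops.set_pgd(pgdp, val);	/* indirect, repointable call */
}

int main(void)
{
	pgd_t entry = { 0 };

	set_pgd(&entry, (pgd_t){ 0x1000 });	/* native path */
	pv_mmu_ops.set_pgd = xen_style_set_pgd;	/* what booting under Xen does */
	set_pgd(&entry, (pgd_t){ 0x2000 });	/* Xen path */
	return 0;
}

Booting native leaves the default (direct-store) hooks in place; booting under Xen repoints them, which is what the xen_mmu_ops table in enlighten.c below does for the new pud/pgd entries.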
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c5f0b40aa39d..afb047e30bdc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -803,6 +803,18 @@ static void xen_release_pmd(u32 pfn)
 	xen_release_ptpage(pfn, PT_PMD);
 }
 
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, u32 pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(u32 pfn)
+{
+	xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
 #ifdef CONFIG_HIGHPTE
 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 {
@@ -922,6 +934,11 @@ static __init void xen_pagetable_setup_done(pgd_t *base)
 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
 	pv_mmu_ops.release_pte = xen_release_pte;
 	pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.alloc_pud = xen_alloc_pud;
+	pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
 	pv_mmu_ops.set_pte = xen_set_pte;
 
 	xen_setup_shared_info();
@@ -937,6 +954,9 @@ static __init void xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
 
 	xen_mark_init_mm_pinned();
 }
@@ -1185,15 +1205,26 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.make_pte = xen_make_pte,
 	.make_pgd = xen_make_pgd,
 
+#ifdef CONFIG_X86_PAE
 	.set_pte_atomic = xen_set_pte_atomic,
 	.set_pte_present = xen_set_pte_at,
-	.set_pud = xen_set_pud_hyper,
 	.pte_clear = xen_pte_clear,
 	.pmd_clear = xen_pmd_clear,
+#endif /* CONFIG_X86_PAE */
+	.set_pud = xen_set_pud_hyper,
 
 	.make_pmd = xen_make_pmd,
 	.pmd_val = xen_pmd_val,
 
+#if PAGETABLE_LEVELS == 4
+	.pud_val = xen_pud_val,
+	.make_pud = xen_make_pud,
+	.set_pgd = xen_set_pgd_hyper,
+
+	.alloc_pud = xen_alloc_pte_init,
+	.release_pud = xen_release_pte_init,
+#endif /* PAGETABLE_LEVELS == 4 */
+
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
 	.exit_mmap = xen_exit_mmap,
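
One subtlety in the hunks above: the xen_mmu_ops table initially points alloc_pud/release_pud at xen_alloc_pte_init/xen_release_pte_init, and xen_pagetable_setup_done() only later swaps in xen_alloc_pud/xen_release_pud, the same two-phase scheme the existing pte/pmd hooks already use: early-boot pagetable pages need no pinning, so cheap _init variants suffice until the real pagetables are in place. A toy sketch of that swap (all names here are invented for illustration):

/* Toy sketch of the two-phase hook swap; not kernel code. */
#include <stdio.h>

struct ops { void (*alloc_pud)(unsigned long pfn); };

static void alloc_pud_early(unsigned long pfn)
{
	/* boot pagetables: nothing to pin yet */
	printf("early alloc of pfn %lu, no pinning\n", pfn);
}

static void alloc_pud_full(unsigned long pfn)
{
	/* live pagetables: mark read-only and pin as a pud page */
	printf("full alloc of pfn %lu, pin as PT_PUD\n", pfn);
}

static struct ops mmu_ops = { .alloc_pud = alloc_pud_early };

static void pagetable_setup_done(void)
{
	mmu_ops.alloc_pud = alloc_pud_full;	/* swap in the real hook */
}

int main(void)
{
	mmu_ops.alloc_pud(42);	/* early path */
	pagetable_setup_done();
	mmu_ops.alloc_pud(42);	/* full path */
	return 0;
}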
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4fca9d88bef0..d0976b87cd2c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -438,14 +438,19 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+#ifdef CONFIG_X86_PAE
 	ptep->pte_high = pte.pte_high;
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
+#else
+	*ptep = pte;
+#endif
 }
 
+#ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-	set_64bit((u64 *)ptep, pte_val_ma(pte));
+	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -459,6 +464,7 @@ void xen_pmd_clear(pmd_t *pmdp)
 {
 	set_pmd(pmdp, __pmd(0));
 }
+#endif /* CONFIG_X86_PAE */
 
 pmd_t xen_make_pmd(pmdval_t pmd)
 {
@@ -466,6 +472,49 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 	return native_make_pmd(pmd);
 }
 
+#if PAGETABLE_LEVELS == 4
+pudval_t xen_pud_val(pud_t pud)
+{
+	return pte_mfn_to_pfn(pud.pud);
+}
+
+pud_t xen_make_pud(pudval_t pud)
+{
+	pud = pte_pfn_to_mfn(pud);
+
+	return native_make_pud(pud);
+}
+
+void xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+{
+	struct mmu_update u;
+
+	preempt_disable();
+
+	xen_mc_batch();
+
+	u.ptr = virt_to_machine(ptr).maddr;
+	u.val = pgd_val_ma(val);
+	extend_mmu_update(&u);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+void xen_set_pgd(pgd_t *ptr, pgd_t val)
+{
+	/* If page is not pinned, we can just update the entry
+	   directly */
+	if (!page_pinned(ptr)) {
+		*ptr = val;
+		return;
+	}
+
+	xen_set_pgd_hyper(ptr, val);
+}
+#endif /* PAGETABLE_LEVELS == 4 */
+
 /*
   (Yet another) pagetable walker.  This one is intended for pinning a
   pagetable.  This means that it walks a pagetable and calls the
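
The shape of xen_set_pgd() above is the core Xen MMU pattern: an unpinned pagetable page is still ordinary memory the guest may write directly, but once pinned, the hypervisor validates every update, so writes must become mmu_update hypercalls. A self-contained sketch of that fast-path/slow-path split, with a stub standing in for the hypercall machinery (all names here are illustrative):

/* Sketch of the pinned-vs-unpinned update split used by xen_set_pgd().
 * NOT kernel code: the hypercall stub below only marks where
 * xen_set_pgd_hyper()'s batched mmu_update would go. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static void mmu_update_stub(uint64_t *entry, uint64_t val)
{
	printf("hypercall: update %p to %llx\n",
	       (void *)entry, (unsigned long long)val);
	*entry = val;	/* pretend the hypervisor applied it */
}

static void set_entry(uint64_t *entry, uint64_t val, bool pinned)
{
	if (!pinned) {
		*entry = val;	/* fast path: plain store */
		return;
	}
	mmu_update_stub(entry, val);	/* slow path: via the hypervisor */
}

int main(void)
{
	uint64_t pgd_entry = 0;

	set_entry(&pgd_entry, 0x1000, false);	/* unpinned: direct write */
	set_entry(&pgd_entry, 0x2000, true);	/* pinned: hypercall */
	return 0;
}

The xen_mc_batch()/xen_mc_issue() pair around the real update matters because, under lazy MMU mode, many such updates can be coalesced into a single hypercall rather than trapping on each one.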
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 7856e37f6044..19d544b0b6c6 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -32,13 +32,24 @@ pgd_t xen_make_pgd(pgdval_t);
 void xen_set_pte(pte_t *ptep, pte_t pteval);
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval);
+
+#ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
+void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void xen_pmd_clear(pmd_t *pmdp);
+#endif /* CONFIG_X86_PAE */
+
 void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
 void xen_set_pud(pud_t *ptr, pud_t val);
 void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
 void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
+
+#if PAGETABLE_LEVELS == 4
+pudval_t xen_pud_val(pud_t pud);
+pud_t xen_make_pud(pudval_t pudval);
+void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
+void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
+#endif
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,