path: root/arch/x86/xen/enlighten.c
author		Jeremy Fitzhardinge <jeremy@goop.org>	2009-01-28 17:35:01 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2009-01-30 17:51:14 -0500
commit		319f3ba52c71630865b10ac3b99dd020440d681d (patch)
tree		2445f2adc22165ad0b5ea3e6e3ed7af7db19eed0 /arch/x86/xen/enlighten.c
parent		9b7ed8faa034fc2d350e2eff5c68680eb5c43a07 (diff)
xen: move remaining mmu-related stuff into mmu.c
Impact: Cleanup

Move remaining mmu-related stuff into mmu.c.  A general cleanup, and
lay the groundwork for later patches.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--	arch/x86/xen/enlighten.c	731
1 file changed, 1 insertion(+), 730 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6b3f7eef57e3..0cd2a165f179 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -61,35 +61,6 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
 EXPORT_SYMBOL_GPL(xen_domain_type);
 
-/*
- * Identity map, in addition to plain kernel map.  This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
-
-#ifdef CONFIG_X86_64
-/* l3 pud for userspace vsyscall mapping */
-static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
-
-/*
- * Note about cr3 (pagetable base) values:
- *
- * xen_cr3 contains the current logical cr3 value; it contains the
- * last set cr3.  This may not be the current effective cr3, because
- * its update may be being lazily deferred.  However, a vcpu looking
- * at its own cr3 can use this value knowing that everything will
- * be self-consistent.
- *
- * xen_current_cr3 contains the actual vcpu cr3; it is set once the
- * hypercall to set the vcpu cr3 is complete (so it may be a little
- * out of date, but it will never be set early).  If one vcpu is
- * looking at another vcpu's cr3 value, it should use this variable.
- */
-DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */
-
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
@@ -237,7 +208,7 @@ static unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
-static void xen_leave_lazy(void)
+void xen_leave_lazy(void)
 {
 	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	xen_mc_flush();
@@ -598,76 +569,6 @@ static struct apic_ops xen_basic_apic_ops = {
 
 #endif
 
-static void xen_flush_tlb(void)
-{
-	struct mmuext_op *op;
-	struct multicall_space mcs;
-
-	preempt_disable();
-
-	mcs = xen_mc_entry(sizeof(*op));
-
-	op = mcs.args;
-	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-	preempt_enable();
-}
-
-static void xen_flush_tlb_single(unsigned long addr)
-{
-	struct mmuext_op *op;
-	struct multicall_space mcs;
-
-	preempt_disable();
-
-	mcs = xen_mc_entry(sizeof(*op));
-	op = mcs.args;
-	op->cmd = MMUEXT_INVLPG_LOCAL;
-	op->arg1.linear_addr = addr & PAGE_MASK;
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-	preempt_enable();
-}
-
-static void xen_flush_tlb_others(const struct cpumask *cpus,
-				 struct mm_struct *mm, unsigned long va)
-{
-	struct {
-		struct mmuext_op op;
-		DECLARE_BITMAP(mask, NR_CPUS);
-	} *args;
-	struct multicall_space mcs;
-
-	BUG_ON(cpumask_empty(cpus));
-	BUG_ON(!mm);
-
-	mcs = xen_mc_entry(sizeof(*args));
-	args = mcs.args;
-	args->op.arg2.vcpumask = to_cpumask(args->mask);
-
-	/* Remove us, and any offline CPUS. */
-	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
-	if (unlikely(cpumask_empty(to_cpumask(args->mask))))
-		goto issue;
-
-	if (va == TLB_FLUSH_ALL) {
-		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-	} else {
-		args->op.cmd = MMUEXT_INVLPG_MULTI;
-		args->op.arg1.linear_addr = va;
-	}
-
-	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
-
-issue:
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-
 
 static void xen_clts(void)
 {
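
All three flush routines removed above follow the same multicall batching pattern: reserve space in the per-cpu multicall buffer, fill in an mmuext_op, and issue the batch, which xen_mc_issue() defers if the vcpu is inside a lazy-MMU region. A condensed sketch of that shape, using the helpers visible in the hunk; xen_queue_mmuext_op() is a hypothetical name for illustration, not part of the patch:

	/* Sketch only: the common shape of the batched MMUEXT operations above. */
	static void xen_queue_mmuext_op(unsigned int cmd, unsigned long linear_addr)
	{
		struct mmuext_op *op;
		struct multicall_space mcs;

		preempt_disable();			/* stay on this vcpu's batch */

		mcs = xen_mc_entry(sizeof(*op));	/* reserve space in the per-cpu multicall buffer */

		op = mcs.args;
		op->cmd = cmd;				/* e.g. MMUEXT_INVLPG_LOCAL */
		op->arg1.linear_addr = linear_addr & PAGE_MASK;	/* ignored by commands without an address */
		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

		/* Flush now, unless we are inside a lazy-MMU region, in which
		   case the hypercall stays queued and is batched with later ones. */
		xen_mc_issue(PARAVIRT_LAZY_MMU);

		preempt_enable();
	}
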
@@ -693,21 +594,6 @@ static void xen_write_cr0(unsigned long cr0)
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_write_cr2(unsigned long cr2)
-{
-	percpu_read(xen_vcpu)->arch.cr2 = cr2;
-}
-
-static unsigned long xen_read_cr2(void)
-{
-	return percpu_read(xen_vcpu)->arch.cr2;
-}
-
-static unsigned long xen_read_cr2_direct(void)
-{
-	return percpu_read(xen_vcpu_info.arch.cr2);
-}
-
 static void xen_write_cr4(unsigned long cr4)
 {
 	cr4 &= ~X86_CR4_PGE;
@@ -716,71 +602,6 @@ static void xen_write_cr4(unsigned long cr4)
 	native_write_cr4(cr4);
 }
 
-static unsigned long xen_read_cr3(void)
-{
-	return percpu_read(xen_cr3);
-}
-
-static void set_current_cr3(void *v)
-{
-	percpu_write(xen_current_cr3, (unsigned long)v);
-}
-
-static void __xen_write_cr3(bool kernel, unsigned long cr3)
-{
-	struct mmuext_op *op;
-	struct multicall_space mcs;
-	unsigned long mfn;
-
-	if (cr3)
-		mfn = pfn_to_mfn(PFN_DOWN(cr3));
-	else
-		mfn = 0;
-
-	WARN_ON(mfn == 0 && kernel);
-
-	mcs = __xen_mc_entry(sizeof(*op));
-
-	op = mcs.args;
-	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
-	op->arg1.mfn = mfn;
-
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-	if (kernel) {
-		percpu_write(xen_cr3, cr3);
-
-		/* Update xen_current_cr3 once the batch has actually
-		   been submitted. */
-		xen_mc_callback(set_current_cr3, (void *)cr3);
-	}
-}
-
-static void xen_write_cr3(unsigned long cr3)
-{
-	BUG_ON(preemptible());
-
-	xen_mc_batch();  /* disables interrupts */
-
-	/* Update while interrupts are disabled, so it's atomic with
-	   respect to ipis */
-	percpu_write(xen_cr3, cr3);
-
-	__xen_write_cr3(true, cr3);
-
-#ifdef CONFIG_X86_64
-	{
-		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
-		if (user_pgd)
-			__xen_write_cr3(false, __pa(user_pgd));
-		else
-			__xen_write_cr3(false, 0);
-	}
-#endif
-
-	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
-}
-
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
 	int ret;
@@ -822,185 +643,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 	return ret;
 }
 
-/* Early in boot, while setting up the initial pagetable, assume
-   everything is pinned. */
-static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-{
-#ifdef CONFIG_FLATMEM
-	BUG_ON(mem_map);	/* should only be used early */
-#endif
-	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-}
-
-/* Early release_pte assumes that all pts are pinned, since there's
-   only init_mm and anything attached to that is pinned. */
-static void xen_release_pte_init(unsigned long pfn)
-{
-	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-}
-
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
-	struct mmuext_op op;
-	op.cmd = cmd;
-	op.arg1.mfn = pfn_to_mfn(pfn);
-	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-		BUG();
-}
-
-/* This needs to make sure the new pte page is pinned iff it's being
-   attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
-{
-	struct page *page = pfn_to_page(pfn);
-
-	if (PagePinned(virt_to_page(mm->pgd))) {
-		SetPagePinned(page);
-
-		vm_unmap_aliases();
-		if (!PageHighMem(page)) {
-			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
-			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-		} else {
-			/* make sure there are no stray mappings of
-			   this page */
-			kmap_flush_unused();
-		}
-	}
-}
-
-static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-{
-	xen_alloc_ptpage(mm, pfn, PT_PTE);
-}
-
-static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-{
-	xen_alloc_ptpage(mm, pfn, PT_PMD);
-}
-
-static int xen_pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = mm->pgd;
-	int ret = 0;
-
-	BUG_ON(PagePinned(virt_to_page(pgd)));
-
-#ifdef CONFIG_X86_64
-	{
-		struct page *page = virt_to_page(pgd);
-		pgd_t *user_pgd;
-
-		BUG_ON(page->private != 0);
-
-		ret = -ENOMEM;
-
-		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-		page->private = (unsigned long)user_pgd;
-
-		if (user_pgd != NULL) {
-			user_pgd[pgd_index(VSYSCALL_START)] =
-				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
-			ret = 0;
-		}
-
-		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
-	}
-#endif
-
-	return ret;
-}
-
-static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifdef CONFIG_X86_64
-	pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-	if (user_pgd)
-		free_page((unsigned long)user_pgd);
-#endif
-}
-
-/* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
-{
-	struct page *page = pfn_to_page(pfn);
-
-	if (PagePinned(page)) {
-		if (!PageHighMem(page)) {
-			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-		}
-		ClearPagePinned(page);
-	}
-}
-
-static void xen_release_pte(unsigned long pfn)
-{
-	xen_release_ptpage(pfn, PT_PTE);
-}
-
-static void xen_release_pmd(unsigned long pfn)
-{
-	xen_release_ptpage(pfn, PT_PMD);
-}
-
-#if PAGETABLE_LEVELS == 4
-static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-{
-	xen_alloc_ptpage(mm, pfn, PT_PUD);
-}
-
-static void xen_release_pud(unsigned long pfn)
-{
-	xen_release_ptpage(pfn, PT_PUD);
-}
-#endif
-
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	pgprot_t prot = PAGE_KERNEL;
-
-	if (PagePinned(page))
-		prot = PAGE_KERNEL_RO;
-
-	if (0 && PageHighMem(page))
-		printk("mapping highpte %lx type %d prot %s\n",
-		       page_to_pfn(page), type,
-		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-	return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
-#ifdef CONFIG_X86_32
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
-	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-			       pte_val_ma(pte));
-
-	return pte;
-}
-
-/* Init-time set_pte while constructing initial pagetables, which
-   doesn't allow RO pagetable pages to be remapped RW */
-static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
-{
-	pte = mask_rw_pte(ptep, pte);
-
-	xen_set_pte(ptep, pte);
-}
-#endif
-
-static __init void xen_pagetable_setup_start(pgd_t *base)
-{
-}
-
 void xen_setup_shared_info(void)
 {
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
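
The masking expression in mask_rw_pte() above is compact: ((old & _PAGE_RW) | ~_PAGE_RW) & new builds an all-ones mask when the old pte was writable, and a mask with only _PAGE_RW cleared when it was not, so a read-only pagetable page can never be silently remapped read-write. A standalone userspace demonstration of the bit trick (sketch only, with _PAGE_RW hard-coded to its x86 value):

	#include <stdio.h>

	#define _PAGE_RW 0x2UL	/* x86 pte bit 1: writable */

	/* Same masking as mask_rw_pte() above, applied to raw pte values. */
	static unsigned long mask_rw(unsigned long old, unsigned long new)
	{
		return ((old & _PAGE_RW) | ~_PAGE_RW) & new;
	}

	int main(void)
	{
		/* Old pte read-only: _PAGE_RW is stripped from the new value. */
		printf("%#lx\n", mask_rw(0x1UL, 0x3UL));	/* prints 0x1 */

		/* Old pte writable: the new value passes through unchanged. */
		printf("%#lx\n", mask_rw(0x3UL, 0x3UL));	/* prints 0x3 */
		return 0;
	}
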
@@ -1021,37 +663,6 @@ void xen_setup_shared_info(void)
 	xen_setup_mfn_list_list();
 }
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
-{
-	xen_setup_shared_info();
-}
-
-static __init void xen_post_allocator_init(void)
-{
-	pv_mmu_ops.set_pte = xen_set_pte;
-	pv_mmu_ops.set_pmd = xen_set_pmd;
-	pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
-	pv_mmu_ops.set_pgd = xen_set_pgd;
-#endif
-
-	/* This will work as long as patching hasn't happened yet
-	   (which it hasn't) */
-	pv_mmu_ops.alloc_pte = xen_alloc_pte;
-	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
-	pv_mmu_ops.release_pte = xen_release_pte;
-	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
-	pv_mmu_ops.alloc_pud = xen_alloc_pud;
-	pv_mmu_ops.release_pud = xen_release_pud;
-#endif
-
-#ifdef CONFIG_X86_64
-	SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
-	xen_mark_init_mm_pinned();
-}
-
 /* This is called once we have the cpu_possible_map */
 void xen_setup_vcpu_info_placement(void)
 {
@@ -1126,49 +737,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 	return ret;
 }
 
-static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
-{
-	pte_t pte;
-
-	phys >>= PAGE_SHIFT;
-
-	switch (idx) {
-	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-#ifdef CONFIG_X86_F00F_BUG
-	case FIX_F00F_IDT:
-#endif
-#ifdef CONFIG_X86_32
-	case FIX_WP_TEST:
-	case FIX_VDSO:
-# ifdef CONFIG_HIGHMEM
-	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
-# endif
-#else
-	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-	case FIX_APIC_BASE:	/* maps dummy local APIC */
-#endif
-		pte = pfn_pte(phys, prot);
-		break;
-
-	default:
-		pte = mfn_pte(phys, prot);
-		break;
-	}
-
-	__native_set_fixmap(idx, pte);
-
-#ifdef CONFIG_X86_64
-	/* Replicate changes to map the vsyscall page into the user
-	   pagetable vsyscall mapping. */
-	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
-		unsigned long vaddr = __fix_to_virt(idx);
-		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
-	}
-#endif
-}
-
 static const struct pv_info xen_info __initdata = {
 	.paravirt_enabled = 1,
 	.shared_kernel_pmd = 0,
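
The pfn_pte()/mfn_pte() split in xen_set_fixmap() above reflects Xen's two frame-number spaces: pfns are the guest's pseudo-physical frames, translated through the phys-to-machine map when the pte is built, while mfns name machine frames directly, as needed for hypervisor-provided pages. A sketch of a full pseudo-physical-to-machine address conversion built on pfn_to_mfn(), the inverse of the m2p() helper later in this patch; example_pa_to_ma() is a hypothetical name, not part of the patch:

	/* Sketch only: pseudo-physical address -> machine address. */
	static unsigned long example_pa_to_ma(unsigned long paddr)
	{
		unsigned long mfn = pfn_to_mfn(paddr >> PAGE_SHIFT);

		return (mfn << PAGE_SHIFT) | (paddr & ~PAGE_MASK);
	}
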
@@ -1264,86 +832,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
 #endif
 };
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
-	.pagetable_setup_start = xen_pagetable_setup_start,
-	.pagetable_setup_done = xen_pagetable_setup_done,
-
-	.read_cr2 = xen_read_cr2,
-	.write_cr2 = xen_write_cr2,
-
-	.read_cr3 = xen_read_cr3,
-	.write_cr3 = xen_write_cr3,
-
-	.flush_tlb_user = xen_flush_tlb,
-	.flush_tlb_kernel = xen_flush_tlb,
-	.flush_tlb_single = xen_flush_tlb_single,
-	.flush_tlb_others = xen_flush_tlb_others,
-
-	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
-
-	.pgd_alloc = xen_pgd_alloc,
-	.pgd_free = xen_pgd_free,
-
-	.alloc_pte = xen_alloc_pte_init,
-	.release_pte = xen_release_pte_init,
-	.alloc_pmd = xen_alloc_pte_init,
-	.alloc_pmd_clone = paravirt_nop,
-	.release_pmd = xen_release_pte_init,
-
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
-#ifdef CONFIG_X86_64
-	.set_pte = xen_set_pte,
-#else
-	.set_pte = xen_set_pte_init,
-#endif
-	.set_pte_at = xen_set_pte_at,
-	.set_pmd = xen_set_pmd_hyper,
-
-	.ptep_modify_prot_start = __ptep_modify_prot_start,
-	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
-
-	.pte_val = xen_pte_val,
-	.pgd_val = xen_pgd_val,
-
-	.make_pte = xen_make_pte,
-	.make_pgd = xen_make_pgd,
-
-#ifdef CONFIG_X86_PAE
-	.set_pte_atomic = xen_set_pte_atomic,
-	.set_pte_present = xen_set_pte_at,
-	.pte_clear = xen_pte_clear,
-	.pmd_clear = xen_pmd_clear,
-#endif	/* CONFIG_X86_PAE */
-	.set_pud = xen_set_pud_hyper,
-
-	.make_pmd = xen_make_pmd,
-	.pmd_val = xen_pmd_val,
-
-#if PAGETABLE_LEVELS == 4
-	.pud_val = xen_pud_val,
-	.make_pud = xen_make_pud,
-	.set_pgd = xen_set_pgd_hyper,
-
-	.alloc_pud = xen_alloc_pte_init,
-	.release_pud = xen_release_pte_init,
-#endif	/* PAGETABLE_LEVELS == 4 */
-
-	.activate_mm = xen_activate_mm,
-	.dup_mmap = xen_dup_mmap,
-	.exit_mmap = xen_exit_mmap,
-
-	.lazy_mode = {
-		.enter = paravirt_enter_lazy_mmu,
-		.leave = xen_leave_lazy,
-	},
-
-	.set_fixmap = xen_set_fixmap,
-};
-
 static void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
@@ -1386,223 +874,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
 };
 
 
-static void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
-	unsigned long top = HYPERVISOR_VIRT_START;
-	struct xen_platform_parameters pp;
-
-	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-		top = pp.virt_start;
-
-	reserve_top_address(-top);
-#endif	/* CONFIG_X86_32 */
-}
-
-/*
- * Like __va(), but returns address in the kernel mapping (which is
- * all we have until the physical memory mapping has been set up).
- */
-static void *__ka(phys_addr_t paddr)
-{
-#ifdef CONFIG_X86_64
-	return (void *)(paddr + __START_KERNEL_map);
-#else
-	return __va(paddr);
-#endif
-}
-
-/* Convert a machine address to physical address */
-static unsigned long m2p(phys_addr_t maddr)
-{
-	phys_addr_t paddr;
-
-	maddr &= PTE_PFN_MASK;
-	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
-
-	return paddr;
-}
-
-/* Convert a machine address to kernel virtual */
-static void *m2v(phys_addr_t maddr)
-{
-	return __ka(m2p(maddr));
-}
-
-static void set_page_prot(void *addr, pgprot_t prot)
-{
-	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
-	pte_t pte = pfn_pte(pfn, prot);
-
-	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
-		BUG();
-}
-
-static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
-	unsigned pmdidx, pteidx;
-	unsigned ident_pte;
-	unsigned long pfn;
-
-	ident_pte = 0;
-	pfn = 0;
-	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
-		pte_t *pte_page;
-
-		/* Reuse or allocate a page of ptes */
-		if (pmd_present(pmd[pmdidx]))
-			pte_page = m2v(pmd[pmdidx].pmd);
-		else {
-			/* Check for free pte pages */
-			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
-				break;
-
-			pte_page = &level1_ident_pgt[ident_pte];
-			ident_pte += PTRS_PER_PTE;
-
-			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
-		}
-
-		/* Install mappings */
-		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
-			pte_t pte;
-
-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-
-			if (!pte_none(pte_page[pteidx]))
-				continue;
-
-			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
-			pte_page[pteidx] = pte;
-		}
-	}
-
-	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
-		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
-	set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-
-#ifdef CONFIG_X86_64
-static void convert_pfn_mfn(void *v)
-{
-	pte_t *pte = v;
-	int i;
-
-	/* All levels are converted the same way, so just treat them
-	   as ptes. */
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		pte[i] = xen_make_pte(pte[i].pte);
-}
-
-/*
- * Set up the initial kernel pagetable.
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-						unsigned long max_pfn)
-{
-	pud_t *l3;
-	pmd_t *l2;
-
-	/* Zap identity mapping */
-	init_level4_pgt[0] = __pgd(0);
-
-	/* Pre-constructed entries are in pfn, so convert to mfn */
-	convert_pfn_mfn(init_level4_pgt);
-	convert_pfn_mfn(level3_ident_pgt);
-	convert_pfn_mfn(level3_kernel_pgt);
-
-	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-
-	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-	/* Set up identity map */
-	xen_map_identity_early(level2_ident_pgt, max_pfn);
-
-	/* Make pagetable pieces RO */
-	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
-	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
-	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
-	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-
-	/* Pin down new L4 */
-	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
-	/* Unpin Xen-provided one */
-	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-	/* Switch over */
-	pgd = init_level4_pgt;
-
-	/*
-	 * At this stage there can be no user pgd, and no page
-	 * structure to attach it to, so make sure we just set kernel
-	 * pgd.
-	 */
-	xen_mc_batch();
-	__xen_write_cr3(true, __pa(pgd));
-	xen_mc_issue(PARAVIRT_LAZY_CPU);
-
-	reserve_early(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
-
-	return pgd;
-}
-#else	/* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
-
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-						unsigned long max_pfn)
-{
-	pmd_t *kernel_pmd;
-
-	init_pg_tables_start = __pa(pgd);
-	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
-	max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
-
-	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
-
-	xen_map_identity_early(level2_kernel_pgt, max_pfn);
-
-	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
-	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
-		__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
-
-	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
-	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
-
-	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-	xen_write_cr3(__pa(swapper_pg_dir));
-
-	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
-
-	return swapper_pg_dir;
-}
-#endif	/* CONFIG_X86_64 */
-
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {