author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-08 18:07:15 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-16 05:06:06 -0400
commit	bf18bf94dc72db998d0fbebc846c07c858a59c90 (patch)
tree	d795a2c9d0521d3bdc34213e4ae2f8c0cf16375d /arch/x86
parent	6fcac6d305e8238939e169f4c52e8ec8a552a31f (diff)
xen64: set up userspace vsyscall mapping
64-bit userspace expects the vdso to be mapped at a specific fixed address, which happens to be in the middle of the kernel address space. Because we have split user and kernel pagetables, we need to make special arrangements for the vsyscall mapping to appear in the kernel part of the user pagetable.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
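In short: the patch reserves a shared l3 (pud) page, level3_user_vsyscall, covering the vsyscall fixmap range, installs it into each user pagetable when the pgd is allocated, and mirrors later fixmap updates into it. A condensed sketch of that arrangement, using only identifiers that appear in the diff below (not a standalone, buildable excerpt):

    /* Shared l3 (pud) page covering the userspace vsyscall range. */
    static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;

    /* At pgd allocation: hook it into the user pagetable's vsyscall slot. */
    user_pgd[pgd_index(VSYSCALL_START)] =
            __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);

    /* On fixmap updates: replicate vsyscall ptes into the shared pud. */
    if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE)
            set_pte_vaddr_pud(level3_user_vsyscall, __fix_to_virt(idx), pte);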
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/enlighten.c	46
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 87d36044054d..f64b8729cd07 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -57,6 +57,18 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /*
+ * Identity map, in addition to plain kernel map. This needs to be
+ * large enough to allocate page table pages to allocate the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
  * Note about cr3 (pagetable base) values:
  *
  * xen_cr3 contains the current logical cr3 value; it contains the
@@ -831,12 +843,20 @@ static int xen_pgd_alloc(struct mm_struct *mm)
 #ifdef CONFIG_X86_64
 	{
 		struct page *page = virt_to_page(pgd);
+		pgd_t *user_pgd;
 
 		BUG_ON(page->private != 0);
 
-		page->private = __get_free_page(GFP_KERNEL | __GFP_ZERO);
-		if (page->private == 0)
-			ret = -ENOMEM;
+		ret = -ENOMEM;
+
+		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		page->private = (unsigned long)user_pgd;
+
+		if (user_pgd != NULL) {
+			user_pgd[pgd_index(VSYSCALL_START)] =
+				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+			ret = 0;
+		}
 
 		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
 	}
@@ -977,6 +997,9 @@ static __init void xen_post_allocator_init(void)
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
 
+#ifdef CONFIG_X86_64
+	SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
 	xen_mark_init_mm_pinned();
 }
 
@@ -1088,6 +1111,15 @@ static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
 	}
 
 	__native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_64
+	/* Replicate changes to map the vsyscall page into the user
+	   pagetable vsyscall mapping. */
+	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+		unsigned long vaddr = __fix_to_virt(idx);
+		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+	}
+#endif
 }
 
 static const struct pv_info xen_info __initdata = {
@@ -1427,13 +1459,6 @@ static void set_page_prot(void *addr, pgprot_t prot)
 		BUG();
 }
 
-/*
- * Identity map, in addition to plain kernel map. This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
-
 static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
 	unsigned pmdidx, pteidx;
@@ -1533,6 +1558,7 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pf
 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 