author     Andi Kleen <ak@muc.de>                      2005-04-16 18:24:55 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:24:55 -0400
commit     1e01441051dda3bb01c455b6e20bce6d00563d82 (patch)
tree       5dc4c69dd4522ca569f70ead0ecbb923f1451891 /arch/x86_64/ia32
parent     35faa71484287fc150b8498cd5acae59ad17a356 (diff)
[PATCH] x86_64: Use a VMA for the 32bit vsyscall
Use a real VMA to map the 32bit vsyscall page. This interacts better with Hugh's upcoming VMA walk optimization, and it also removes some ugly special cases. Code roughly modelled after the ppc64 vdso version from Ben Herrenschmidt.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
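The approach, in outline: instead of editing page tables by hand at fault time, the 32bit vsyscall page is exposed through an ordinary VMA whose .nopage handler returns the backing page. Below is a minimal sketch of that pattern against the 2.6-era nopage interface; example_page, EXAMPLE_BASE, EXAMPLE_END and example_setup() are hypothetical placeholders, not names from this patch, and the real syscall32_setup_pages() in the diff additionally charges the mapping via security_vm_enough_memory() and sets VM_MAYWRITE so gdb can COW the page for breakpoints.

/*
 * Sketch only (2.6-era nopage interface). All "example_*" names are
 * hypothetical placeholders; see the actual hunks below for the real code.
 */
#include <linux/mm.h>
#include <linux/slab.h>

static char *example_page;		/* one kernel page backing the mapping */

static struct page *example_nopage(struct vm_area_struct *vma,
				   unsigned long addr, int *type)
{
	/* Every fault in the region resolves to the same kernel page. */
	struct page *p = virt_to_page(addr - vma->vm_start + example_page);
	get_page(p);			/* hand back a referenced page */
	return p;
}

static struct vm_operations_struct example_vm_ops = {
	.nopage = example_nopage,
};

/* Called once per exec to install the fixed mapping into the new mm. */
static int example_setup(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma)
		return -ENOMEM;
	memset(vma, 0, sizeof(*vma));

	vma->vm_mm = mm;
	vma->vm_start = EXAMPLE_BASE;
	vma->vm_end = EXAMPLE_END;
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &example_vm_ops;

	down_write(&mm->mmap_sem);
	insert_vm_struct(mm, vma);
	mm->total_vm += (EXAMPLE_END - EXAMPLE_BASE) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);
	return 0;
}

Because the fault handler hands the page out with get_page(), ordinary reference counting covers it, which is also why the final hunk can drop the old SetPageReserved() call.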
Diffstat (limited to 'arch/x86_64/ia32')
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c |  4
-rw-r--r--  arch/x86_64/ia32/syscall32.c   | 92
2 files changed, 53 insertions(+), 43 deletions(-)
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 93d568dfa762..99b522052d16 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -312,6 +312,10 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
 
 static void elf32_init(struct pt_regs *);
 
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+#define arch_setup_additional_pages syscall32_setup_pages
+extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+
 #include "../../../fs/binfmt_elf.c"
 
 static void elf32_init(struct pt_regs *regs)
diff --git a/arch/x86_64/ia32/syscall32.c b/arch/x86_64/ia32/syscall32.c
index 399ff4985099..01d8db1a1c09 100644
--- a/arch/x86_64/ia32/syscall32.c
+++ b/arch/x86_64/ia32/syscall32.c
@@ -9,6 +9,7 @@
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/stringify.h>
+#include <linux/security.h>
 #include <asm/proto.h>
 #include <asm/tlbflush.h>
 #include <asm/ia32_unistd.h>
@@ -30,51 +31,57 @@ extern int sysctl_vsyscall32;
 char *syscall32_page;
 static int use_sysenter = -1;
 
-/*
- * Map the 32bit vsyscall page on demand.
- *
- * RED-PEN: This knows too much about high level VM.
- *
- * Alternative would be to generate a vma with appropriate backing options
- * and let it be handled by generic VM.
- */
-int __map_syscall32(struct mm_struct *mm, unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pte_t *pte;
-	pmd_t *pmd;
-	int err = -ENOMEM;
-
-	spin_lock(&mm->page_table_lock);
-	pgd = pgd_offset(mm, address);
-	pud = pud_alloc(mm, pgd, address);
-	if (pud) {
-		pmd = pmd_alloc(mm, pud, address);
-		if (pmd && (pte = pte_alloc_map(mm, pmd, address)) != NULL) {
-			if (pte_none(*pte)) {
-				set_pte(pte,
-					mk_pte(virt_to_page(syscall32_page),
-					       PAGE_KERNEL_VSYSCALL32));
-			}
-			/* Flush only the local CPU. Other CPUs taking a fault
-			   will just end up here again
-			   This probably not needed and just paranoia. */
-			__flush_tlb_one(address);
-			err = 0;
-		}
-	}
-	spin_unlock(&mm->page_table_lock);
-	return err;
+static struct page *
+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
+{
+	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
+	get_page(p);
+	return p;
 }
 
-int map_syscall32(struct mm_struct *mm, unsigned long address)
+/* Prevent VMA merging */
+static void syscall32_vma_close(struct vm_area_struct *vma)
 {
-	int err;
-	down_read(&mm->mmap_sem);
-	err = __map_syscall32(mm, address);
-	up_read(&mm->mmap_sem);
-	return err;
+}
+
+static struct vm_operations_struct syscall32_vm_ops = {
+	.close = syscall32_vma_close,
+	.nopage = syscall32_nopage,
+};
+
+struct linux_binprm;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
+{
+	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (!vma)
+		return -ENOMEM;
+	if (security_vm_enough_memory(npages)) {
+		kmem_cache_free(vm_area_cachep, vma);
+		return -ENOMEM;
+	}
+
+	memset(vma, 0, sizeof(struct vm_area_struct));
+	/* Could randomize here */
+	vma->vm_start = VSYSCALL32_BASE;
+	vma->vm_end = VSYSCALL32_END;
+	/* MAYWRITE to allow gdb to COW and set breakpoints */
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
+	vma->vm_flags |= mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_ops = &syscall32_vm_ops;
+	vma->vm_mm = mm;
+
+	down_write(&mm->mmap_sem);
+	insert_vm_struct(mm, vma);
+	mm->total_vm += npages;
+	up_write(&mm->mmap_sem);
+	return 0;
 }
 
 static int __init init_syscall32(void)
@@ -82,7 +89,6 @@ static int __init init_syscall32(void)
 	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!syscall32_page)
 		panic("Cannot allocate syscall32 page");
-	SetPageReserved(virt_to_page(syscall32_page));
 	if (use_sysenter > 0) {
 		memcpy(syscall32_page, syscall32_sysenter,
 		       syscall32_sysenter_end - syscall32_sysenter);