author     Paul Mundt <lethal@linux-sh.org>    2007-02-08 17:20:44 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-09 12:25:47 -0500
commit     2affc857efdf7dacace234b63d289d67260c95a6 (patch)
tree       e37d7158f9a0a477460913b06e94d48e5eacaf2c /arch/sh/kernel/vsyscall
parent     c13e4ca247311c294b032089e0d05e96f2708c16 (diff)
[PATCH] SH vdso: use install_special_mapping()
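This drops the open-coded vDSO mapping logic (nopage handler, vm_operations_struct, insert_vm_struct()) in favour of the generic install_special_mapping() helper. A minimal sketch of the resulting mapping path, condensed from the hunks below (the helper prototype is quoted for context and reflects the mm/mmap.c API of this kernel series):

	/* int install_special_mapping(struct mm_struct *mm, unsigned long addr,
	 *                             unsigned long len, unsigned long vm_flags,
	 *                             struct page **pages);
	 */
	static struct page *syscall_pages[1];	/* single vDSO page, filled in by vsyscall_init() */

	/* Map the page read/exec; MAYWRITE is kept so gdb can COW it and set breakpoints. */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
				      VM_ALWAYSDUMP,
				      syscall_pages);
	if (unlikely(ret))
		goto up_fail;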
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/sh/kernel/vsyscall')
-rw-r--r--	arch/sh/kernel/vsyscall/vsyscall.c	58
1 file changed, 9 insertions(+), 49 deletions(-)
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index deb46941f315..7b0f66f03319 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -37,11 +37,12 @@ __setup("vdso=", vdso_setup);
  * of the ELF DSO images included therein.
  */
 extern const char vsyscall_trapa_start, vsyscall_trapa_end;
-static void *syscall_page;
+static struct page *syscall_pages[1];
 
 int __init vsyscall_init(void)
 {
-	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+	syscall_pages[0] = virt_to_page(syscall_page);
 
 	/*
 	 * XXX: Map this page to a fixmap entry if we get around
@@ -55,37 +56,10 @@ int __init vsyscall_init(void)
 	return 0;
 }
 
-static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
-				       unsigned long address, int *type)
-{
-	unsigned long offset = address - vma->vm_start;
-	struct page *page;
-
-	if (address < vma->vm_start || address > vma->vm_end)
-		return NOPAGE_SIGBUS;
-
-	page = virt_to_page(syscall_page + offset);
-
-	get_page(page);
-
-	return page;
-}
-
-/* Prevent VMA merging */
-static void syscall_vma_close(struct vm_area_struct *vma)
-{
-}
-
-static struct vm_operations_struct syscall_vm_ops = {
-	.nopage	= syscall_vma_nopage,
-	.close	= syscall_vma_close,
-};
-
 /* Setup a VMA at program startup for the vsyscall page */
 int arch_setup_additional_pages(struct linux_binprm *bprm,
 				int executable_stack)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr;
 	int ret;
@@ -97,30 +71,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 		goto up_fail;
 	}
 
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (!vma) {
-		ret = -ENOMEM;
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+				      VM_READ | VM_EXEC |
+				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
+				      VM_ALWAYSDUMP,
+				      syscall_pages);
+	if (unlikely(ret))
 		goto up_fail;
-	}
-
-	vma->vm_start = addr;
-	vma->vm_end = addr + PAGE_SIZE;
-	/* MAYWRITE to allow gdb to COW and set breakpoints */
-	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-	vma->vm_flags |= mm->def_flags;
-	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-	vma->vm_ops = &syscall_vm_ops;
-	vma->vm_mm = mm;
-
-	ret = insert_vm_struct(mm, vma);
-	if (unlikely(ret)) {
-		kmem_cache_free(vm_area_cachep, vma);
-		goto up_fail;
-	}
 
 	current->mm->context.vdso = (void *)addr;
 
-	mm->total_vm++;
 up_fail:
 	up_write(&mm->mmap_sem);
 	return ret;