author		Paul Mundt <lethal@linux-sh.org>	2006-09-27 05:33:49 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2006-09-27 05:33:49 -0400
commit		19f9a34f87c48bbd270d617d1c986d0c23866a1a (patch)
tree		19f32122aec9c16cbbf8e3331e81040a4850cb8d
parent		8c12b5dc13bf8516303a8224ab4e9708b33d5b00 (diff)
sh: Initial vsyscall page support.
This implements initial support for the vsyscall page on SH. At the
moment we leave it configurable due to having nommu to support from
the same code base. We hook it up for the signal trampoline return
at present, with more to be added later, once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--  arch/sh/kernel/Makefile                       |   1
-rw-r--r--  arch/sh/kernel/process.c                      |   2
-rw-r--r--  arch/sh/kernel/signal.c                       |  17
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile              |  36
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-note.S       |  25
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S  |  39
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-syscall.S    |  10
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S      |  42
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c            | 150
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.lds.S        |  74
-rw-r--r--  arch/sh/mm/Kconfig                            |  13
-rw-r--r--  arch/sh/mm/init.c                             |   3
-rw-r--r--  arch/sh/mm/tlb-flush.c                        |  18
-rw-r--r--  include/asm-sh/auxvec.h                       |  14
-rw-r--r--  include/asm-sh/elf.h                          |  20
-rw-r--r--  include/asm-sh/mmu.h                          |   7
-rw-r--r--  include/asm-sh/mmu_context.h                  |   8
-rw-r--r--  include/asm-sh/page.h                         |   5
-rw-r--r--  include/asm-sh/processor.h                    |   6
19 files changed, 473 insertions(+), 17 deletions(-)
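
How userland sees this patch: the kernel advertises the mapped page through
the ELF auxiliary vector (AT_SYSINFO_EHDR, wired up in include/asm-sh/auxvec.h
and include/asm-sh/elf.h below). A minimal sketch, assuming a libc with
getauxval() -- a glibc 2.16 addition that postdates this patch; contemporary
code would walk the auxv past envp by hand:

/*
 * Sketch: find the vDSO page the kernel mapped for this process.
 * getauxval() is an assumption here, used for brevity.
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (vdso)
                printf("[vdso] mapped at %#lx\n", vdso);
        else
                printf("no vDSO (CONFIG_VSYSCALL=n, nommu, or vdso=0)\n");
        return 0;
}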
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0e2148f63e7e..5da88a43d350 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -9,6 +9,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
 	io.o io_generic.o sh_ksyms.o syscalls.o
 
 obj-y				+= cpu/ timers/
+obj-$(CONFIG_VSYSCALL)		+= vsyscall/
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 630ec1af2483..0b1d5dd7a93b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -355,7 +355,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
 	else if (next->thread.ubc_pc && next->mm) {
 		int asid = 0;
 #ifdef CONFIG_MMU
-		asid |= next->mm->context & MMU_CONTEXT_ASID_MASK;
+		asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
 #endif
 		ubc_set_tracing(asid, next->thread.ubc_pc);
 	} else {
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 2f1c9545b49f..5213f5bc6ce0 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -8,7 +8,6 @@
  * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
  *
  */
-
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -21,6 +20,7 @@
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/tty.h>
+#include <linux/elf.h>
 #include <linux/personality.h>
 #include <linux/binfmts.h>
 
@@ -29,8 +29,6 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 
-#undef DEBUG
-
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 /*
@@ -312,6 +310,11 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
 	return (void __user *)((sp - frame_size) & -8ul);
 }
 
+/* These symbols are defined with the addresses in the vsyscall page.
+   See vsyscall-trapa.S. */
+extern void __user __kernel_sigreturn;
+extern void __user __kernel_rt_sigreturn;
+
 static int setup_frame(int sig, struct k_sigaction *ka,
 			sigset_t *set, struct pt_regs *regs)
 {
@@ -340,6 +343,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	   already in userspace. */
 	if (ka->sa.sa_flags & SA_RESTORER) {
 		regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+	} else if (likely(current->mm->context.vdso)) {
+		regs->pr = VDSO_SYM(&__kernel_sigreturn);
+#endif
 	} else {
 		/* Generate return code (system call to sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
@@ -416,6 +423,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	   already in userspace. */
 	if (ka->sa.sa_flags & SA_RESTORER) {
 		regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+	} else if (likely(current->mm->context.vdso)) {
+		regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
+#endif
 	} else {
 		/* Generate return code (system call to rt_sigreturn) */
 		err |= __put_user(MOVW(7), &frame->retcode[0]);
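
The VDSO_SYM() calls above work because of the ld -R trick in the new
vsyscall Makefile (next file): kernel code links against vsyscall-syms.o, so
&__kernel_sigreturn evaluates to the symbol's link-time offset inside the
one-page DSO, and the user-visible address is that offset plus the
per-process mapping base. Restated as a sketch -- these macros appear
verbatim in the include/asm-sh/elf.h hunk further below:

/* User address of a vDSO symbol = per-process mapping base
 * + the symbol's small link-time offset within the DSO image. */
#define VDSO_BASE	((unsigned long)current->mm->context.vdso)
#define VDSO_SYM(x)	(VDSO_BASE + (unsigned long)(x))

/* e.g. in setup_frame(): regs->pr = VDSO_SYM(&__kernel_sigreturn); */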
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
new file mode 100644
index 000000000000..4bbce1cfa359
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -0,0 +1,36 @@
+obj-y += vsyscall.o vsyscall-syscall.o
+
+$(obj)/vsyscall-syscall.o: \
+	$(foreach F,trapa,$(obj)/vsyscall-$F.so)
+
+# Teach kbuild about targets
+targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-note.o vsyscall.lds
+
+# The DSO images are built using a special linker script
+quiet_cmd_syscall = SYSCALL $@
+      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+		          -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+export CPPFLAGS_vsyscall.lds += -P -C -Ush
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+		 $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
+
+$(obj)/vsyscall-trapa.so: \
+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+	$(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+			$(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
+	$(call if_changed,syscall)
diff --git a/arch/sh/kernel/vsyscall/vsyscall-note.S b/arch/sh/kernel/vsyscall/vsyscall-note.S
new file mode 100644
index 000000000000..d4b5be4f3d5f
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-note.S
@@ -0,0 +1,25 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+
+#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type)	\
+	.section name, flags;				\
+	.balign 4;					\
+	.long 1f - 0f;		/* name length */	\
+	.long 3f - 2f;		/* data length */	\
+	.long type;		/* note type */		\
+0:	.asciz vendor;		/* vendor name */	\
+1:	.balign 4;					\
+2:
+
+#define ASM_ELF_NOTE_END				\
+3:	.balign 4;		/* pad out section */	\
+	.previous
+
+	ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
+	.long LINUX_VERSION_CODE
+	ASM_ELF_NOTE_END
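
The macro pair above hand-assembles the standard ELF note record layout.
A sketch of reading it back from the mapped page via the PT_NOTE program
header, assuming getauxval() (glibc 2.16+, newer than this patch) and the
32-bit ELF types used on SH:

/* Sketch: dump the .note.kernel-version record from the vDSO. */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        unsigned long base = getauxval(AT_SYSINFO_EHDR);
        Elf32_Ehdr *eh = (Elf32_Ehdr *)base;
        Elf32_Phdr *ph;
        int i;

        if (!base)
                return 1;
        ph = (Elf32_Phdr *)(base + eh->e_phoff);
        for (i = 0; i < eh->e_phnum; i++) {
                if (ph[i].p_type != PT_NOTE)
                        continue;
                /* First note record: header, padded name, descriptor */
                Elf32_Nhdr *nh = (Elf32_Nhdr *)(base + ph[i].p_offset);
                printf("note: name %.*s, type %u, desc %u bytes\n",
                       (int)nh->n_namesz, (const char *)(nh + 1),
                       (unsigned)nh->n_type, (unsigned)nh->n_descsz);
        }
        return 0;
}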
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
new file mode 100644
index 000000000000..555a64f124ca
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -0,0 +1,39 @@
+#include <asm/unistd.h>
+
+	.text
+	.balign 32
+	.globl __kernel_sigreturn
+	.type __kernel_sigreturn,@function
+__kernel_sigreturn:
+.LSTART_sigreturn:
+	mov.w	1f, r3
+	trapa	#0x10
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+
+1:	.short	__NR_sigreturn
+.LEND_sigreturn:
+	.size __kernel_sigreturn,.-.LSTART_sigreturn
+
+	.balign 32
+	.globl __kernel_rt_sigreturn
+	.type __kernel_rt_sigreturn,@function
+__kernel_rt_sigreturn:
+.LSTART_rt_sigreturn:
+	mov.w	1f, r3
+	trapa	#0x10
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+
+1:	.short	__NR_rt_sigreturn
+.LEND_rt_sigreturn:
+	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+
+	.section .eh_frame,"a",@progbits
+	.previous
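
Userland never calls these labels directly; setup_frame() above plants
their address in pr whenever a handler is installed without SA_RESTORER.
A minimal sketch of the case that now takes the vDSO path (plain
sigaction(2) usage, nothing patch-specific in the code itself):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
        /* returning from here runs the sigreturn trampoline */
        (void)sig;
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sigemptyset(&sa.sa_mask);
        /* No SA_RESTORER: with CONFIG_VSYSCALL and a mapped vDSO the
         * kernel points the handler return at __kernel_sigreturn above
         * instead of writing a trampoline into the stack frame. */
        sigaction(SIGUSR1, &sa, NULL);
        kill(getpid(), SIGUSR1);
        return 0;
}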
diff --git a/arch/sh/kernel/vsyscall/vsyscall-syscall.S b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
new file mode 100644
index 000000000000..c2ac7f0282b3
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+
+__INITDATA
+
+	.globl vsyscall_trapa_start, vsyscall_trapa_end
+vsyscall_trapa_start:
+	.incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
+vsyscall_trapa_end:
+
+__FINIT
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
new file mode 100644
index 000000000000..3b6eb34c43fa
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -0,0 +1,42 @@
+	.text
+	.globl __kernel_vsyscall
+	.type __kernel_vsyscall,@function
+__kernel_vsyscall:
+.LSTART_vsyscall:
+	/* XXX: We'll have to do something here once we opt to use the vDSO
+	 * page for something other than the signal trampoline.. as well as
+	 * fill out .eh_frame -- PFM. */
+.LEND_vsyscall:
+	.size __kernel_vsyscall,.-.LSTART_vsyscall
+	.previous
+
+	.section .eh_frame,"a",@progbits
+.LCIE:
+	.ualong	.LCIE_end - .LCIE_start
+.LCIE_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zRS"		/* NUL-terminated augmentation string */
+	.uleb128 0x1		/* Code alignment factor */
+	.sleb128 -4		/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+				/* Augmentation length and data (none) */
+	.byte	0xc		/* DW_CFA_def_cfa */
+	.uleb128 0xf		/* r15 */
+	.uleb128 0x0		/* offset 0 */
+
+	.align 2
+.LCIE_end:
+
+	.ualong	.LFDE_end-.LFDE_start	/* Length FDE */
+.LFDE_start:
+	.ualong	.LCIE			/* CIE pointer */
+	.ualong	.LSTART_vsyscall-.	/* start address */
+	.ualong	.LEND_vsyscall-.LSTART_vsyscall
+	.uleb128 0
+	.align 2
+.LFDE_end:
+	.previous
+
+/* Get the common code for the sigreturn entry points */
+#include "vsyscall-sigreturn.S"
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
new file mode 100644
index 000000000000..075d6cc1a2d7
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -0,0 +1,150 @@
+/*
+ * arch/sh/kernel/vsyscall.c
+ *
+ *  Copyright (C) 2006 Paul Mundt
+ *
+ * vDSO randomization
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/elf.h>
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+	vdso_enabled = simple_strtoul(s, NULL, 0);
+	return 1;
+}
+__setup("vdso=", vdso_setup);
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_trapa_start, vsyscall_trapa_end;
+static void *syscall_page;
+
+int __init vsyscall_init(void)
+{
+	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+
+	/*
+	 * XXX: Map this page to a fixmap entry if we get around
+	 * to adding the page to ELF core dumps
+	 */
+
+	memcpy(syscall_page,
+	       &vsyscall_trapa_start,
+	       &vsyscall_trapa_end - &vsyscall_trapa_start);
+
+	return 0;
+}
+
+static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
+				       unsigned long address, int *type)
+{
+	unsigned long offset = address - vma->vm_start;
+	struct page *page;
+
+	if (address < vma->vm_start || address > vma->vm_end)
+		return NOPAGE_SIGBUS;
+
+	page = virt_to_page(syscall_page + offset);
+
+	get_page(page);
+
+	return page;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+	.nopage	= syscall_vma_nopage,
+	.close	= syscall_vma_close,
+};
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+				int executable_stack)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+	if (!vma) {
+		ret = -ENOMEM;
+		goto up_fail;
+	}
+
+	vma->vm_start = addr;
+	vma->vm_end = addr + PAGE_SIZE;
+	/* MAYWRITE to allow gdb to COW and set breakpoints */
+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+	vma->vm_flags |= mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+	vma->vm_ops = &syscall_vm_ops;
+	vma->vm_mm = mm;
+
+	ret = insert_vm_struct(mm, vma);
+	if (unlikely(ret)) {
+		kmem_cache_free(vm_area_cachep, vma);
+		goto up_fail;
+	}
+
+	current->mm->context.vdso = (void *)addr;
+
+	mm->total_vm++;
+up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+		return "[vdso]";
+
+	return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *task)
+{
+	return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long address)
+{
+	return 0;
+}
+
+int in_gate_area_no_task(unsigned long address)
+{
+	return 0;
+}
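
Since arch_vma_name() above tags the single-page VMA, the mapping is
visible as "[vdso]" in /proc/<pid>/maps. A small sketch that prints the
line for the current process:

/* Sketch: show the [vdso] line that arch_vma_name() produces. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[vdso]"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}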
diff --git a/arch/sh/kernel/vsyscall/vsyscall.lds.S b/arch/sh/kernel/vsyscall/vsyscall.lds.S
new file mode 100644
index 000000000000..b13c3d439fee
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.lds.S
@@ -0,0 +1,74 @@
+/*
+ * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page). This script controls its layout.
+ */
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#else
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+#endif
+OUTPUT_ARCH(sh)
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__kernel_vsyscall);
+
+SECTIONS
+{
+	. = SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	/* This linker script is used both with -r and with -shared.
+	   For the layouts to match, we need to skip more than enough
+	   space for the dynamic symbol table et al. If this amount
+	   is insufficient, ld -shared will barf. Just increase it here. */
+	. = 0x400;
+
+	.text		: { *(.text) }			:text	=0x90909090
+	.note		: { *(.note.*) }		:text	:note
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+	.useless	: {
+		*(.got.plt) *(.got)
+		*(.data .data.* .gnu.linkonce.d.*)
+		*(.dynbss)
+		*(.bss .bss.* .gnu.linkonce.b.*)
+	}						:text
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD FILEHDR PHDRS FLAGS(5);	/* PF_R|PF_X */
+	dynamic		PT_DYNAMIC FLAGS(4);		/* PF_R */
+	note		PT_NOTE FLAGS(4);		/* PF_R */
+	eh_frame_hdr	0x6474e550;	/* PT_GNU_EH_FRAME, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+	LINUX_2.6 {
+	global:
+		__kernel_vsyscall;
+		__kernel_sigreturn;
+		__kernel_rt_sigreturn;
+
+	local: *;
+	};
+}
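
The VERSION block exports exactly three symbols, and the script keeps the
whole DSO in one read-only page starting at link address 0, so a process
can resolve those symbols itself by walking the dynamic symbol table. A
hedged sketch: assumes 32-bit ELF, a plain DT_HASH table (.gnu.hash is
only added when ld supports it), and link addresses starting at 0 so that
base + d_ptr/st_value yields runtime pointers, as this script arranges;
getauxval() again stands in for walking the auxv by hand:

/* Sketch: resolve an exported vDSO symbol the way a libc could. */
#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

static void *vdso_sym(const char *name)
{
        uintptr_t base = getauxval(AT_SYSINFO_EHDR);
        Elf32_Ehdr *eh;
        Elf32_Phdr *ph;
        Elf32_Dyn *dyn = NULL;
        Elf32_Sym *symtab = NULL;
        Elf32_Word *hash = NULL;
        const char *strtab = NULL;
        Elf32_Word i;

        if (!base)
                return NULL;
        eh = (Elf32_Ehdr *)base;
        ph = (Elf32_Phdr *)(base + eh->e_phoff);
        for (i = 0; i < eh->e_phnum; i++)
                if (ph[i].p_type == PT_DYNAMIC)
                        dyn = (Elf32_Dyn *)(base + ph[i].p_offset);
        for (; dyn && dyn->d_tag != DT_NULL; dyn++) {
                if (dyn->d_tag == DT_SYMTAB)
                        symtab = (Elf32_Sym *)(base + dyn->d_un.d_ptr);
                else if (dyn->d_tag == DT_STRTAB)
                        strtab = (const char *)(base + dyn->d_un.d_ptr);
                else if (dyn->d_tag == DT_HASH)
                        hash = (Elf32_Word *)(base + dyn->d_un.d_ptr);
        }
        if (!symtab || !strtab || !hash)
                return NULL;
        for (i = 0; i < hash[1]; i++)   /* hash[1] == nchain == #syms */
                if (!strcmp(strtab + symtab[i].st_name, name))
                        return (void *)(base + symtab[i].st_value);
        return NULL;
}

int main(void)
{
        printf("__kernel_rt_sigreturn at %p\n",
               vdso_sym("__kernel_rt_sigreturn"));
        return 0;
}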
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index b445d02075e8..9dd606464d23 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -223,6 +223,19 @@ config 32BIT
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config VSYSCALL
+	bool "Support vsyscall page"
+	depends on MMU
+	default y
+	help
+	  This will enable support for the kernel mapping a vDSO page
+	  in process space, and subsequently handing down the entry point
+	  to the libc through the ELF auxiliary vector.
+
+	  From the kernel side this is used for the signal trampoline.
+	  For systems with an MMU that can afford to give up a page,
+	  (the default value) say Y.
+
 choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ad182b31d846..7154d1ce9785 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -286,6 +286,9 @@ void __init mem_init(void)
 		initsize >> 10);
 
 	p3_cache_init();
+
+	/* Initialize the vDSO */
+	vsyscall_init();
 }
 
 void free_initmem(void)
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index fd7e42bcaa40..73ec7f6084fa 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,12 +14,12 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -39,20 +39,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
39{ 39{
40 struct mm_struct *mm = vma->vm_mm; 40 struct mm_struct *mm = vma->vm_mm;
41 41
42 if (mm->context != NO_CONTEXT) { 42 if (mm->context.id != NO_CONTEXT) {
43 unsigned long flags; 43 unsigned long flags;
44 int size; 44 int size;
45 45
46 local_irq_save(flags); 46 local_irq_save(flags);
47 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 47 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
48 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ 48 if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
49 mm->context = NO_CONTEXT; 49 mm->context.id = NO_CONTEXT;
50 if (mm == current->mm) 50 if (mm == current->mm)
51 activate_context(mm); 51 activate_context(mm);
52 } else { 52 } else {
53 unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK; 53 unsigned long asid;
54 unsigned long saved_asid = MMU_NO_ASID; 54 unsigned long saved_asid = MMU_NO_ASID;
55 55
56 asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
56 start &= PAGE_MASK; 57 start &= PAGE_MASK;
57 end += (PAGE_SIZE - 1); 58 end += (PAGE_SIZE - 1);
58 end &= PAGE_MASK; 59 end &= PAGE_MASK;
@@ -81,9 +82,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
+		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
@@ -101,11 +103,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
+		mm->context.id = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 		local_irq_restore(flags);
diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h
index fc21e4db5881..1b6916e63e90 100644
--- a/include/asm-sh/auxvec.h
+++ b/include/asm-sh/auxvec.h
@@ -1,4 +1,18 @@
 #ifndef __ASM_SH_AUXVEC_H
 #define __ASM_SH_AUXVEC_H
 
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them.
+ */
+
+#ifdef CONFIG_VSYSCALL
+/*
+ * Only define this in the vsyscall case, the entry point to
+ * the vsyscall page gets placed here. The kernel will attempt
+ * to build a gate VMA we don't care about otherwise..
+ */
+#define AT_SYSINFO_EHDR	33
+#endif
+
 #endif /* __ASM_SH_AUXVEC_H */
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index cc8e5e767345..3a07ab40ac4d 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -121,4 +121,24 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int executable_stack);
+
+extern unsigned int vdso_enabled;
+extern void __kernel_vsyscall;
+
+#define VDSO_BASE		((unsigned long)current->mm->context.vdso)
+#define VDSO_SYM(x)		(VDSO_BASE + (unsigned long)(x))
+
+#define ARCH_DLINFO						\
+do {								\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+} while (0)
+#endif /* CONFIG_VSYSCALL */
+
 #endif /* __ASM_SH_ELF_H */
diff --git a/include/asm-sh/mmu.h b/include/asm-sh/mmu.h
index 6383dc84501e..cf47df79bb94 100644
--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h
@@ -11,7 +11,12 @@ typedef struct {
 #else
 
 /* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t		id;
+	void			*vdso;
+} mm_context_t;
 
 #endif /* CONFIG_MMU */
 
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 87678ba8d6b6..c7088efe579a 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -49,7 +49,7 @@ get_mmu_context(struct mm_struct *mm)
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
-	if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;
 
@@ -68,7 +68,7 @@ get_mmu_context(struct mm_struct *mm)
 		if (!mc)
 			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context = mc;
+	mm->context.id = mc;
 }
 
 /*
@@ -78,7 +78,7 @@ get_mmu_context(struct mm_struct *mm)
 static __inline__ int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
-	mm->context = NO_CONTEXT;
+	mm->context.id = NO_CONTEXT;
 
 	return 0;
 }
@@ -123,7 +123,7 @@ static __inline__ unsigned long get_asid(void)
 static __inline__ void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
-	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
 /* MMU_TTB can be used for optimizing the fault handling.
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index acf6977b4042..3d8dae31a6f6 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -117,5 +117,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+/* vDSO support */
+#ifdef CONFIG_VSYSCALL
+#define __HAVE_ARCH_GATE_AREA
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index b7cba4e91a72..474773853cd1 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -276,5 +276,11 @@ static inline void prefetch(void *x)
 #define prefetchw(x)	prefetch(x)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+extern int vsyscall_init(void);
+#else
+#define vsyscall_init() do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_H */