Diffstat (limited to 'include')
-rw-r--r--  include/asm-sh/auxvec.h       14
-rw-r--r--  include/asm-sh/elf.h          20
-rw-r--r--  include/asm-sh/mmu.h           7
-rw-r--r--  include/asm-sh/mmu_context.h   8
-rw-r--r--  include/asm-sh/page.h          5
-rw-r--r--  include/asm-sh/processor.h     6
6 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h
index fc21e4db5881..1b6916e63e90 100644
--- a/include/asm-sh/auxvec.h
+++ b/include/asm-sh/auxvec.h
@@ -1,4 +1,18 @@
 #ifndef __ASM_SH_AUXVEC_H
 #define __ASM_SH_AUXVEC_H
 
+/*
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them.
+ */
+
+#ifdef CONFIG_VSYSCALL
+/*
+ * Only define this in the vsyscall case, the entry point to
+ * the vsyscall page gets placed here. The kernel will attempt
+ * to build a gate VMA we don't care about otherwise..
+ */
+#define AT_SYSINFO_EHDR	33
+#endif
+
 #endif /* __ASM_SH_AUXVEC_H */
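
Note: with AT_SYSINFO_EHDR exported, userspace can find the vDSO base in its auxiliary vector. A minimal, self-contained illustration (not part of this commit; getauxval() is a much later glibc addition, shown here only as the simplest way to read the entry):

	#include <stdio.h>
	#include <elf.h>		/* AT_SYSINFO_EHDR */
	#include <sys/auxv.h>		/* getauxval(), glibc >= 2.16 */

	int main(void)
	{
		/* Base address of the ELF image the kernel mapped as the vDSO. */
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

		if (vdso)
			printf("vDSO mapped at %#lx\n", vdso);
		else
			printf("no AT_SYSINFO_EHDR entry in the auxiliary vector\n");
		return 0;
	}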
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h
index cc8e5e767345..3a07ab40ac4d 100644
--- a/include/asm-sh/elf.h
+++ b/include/asm-sh/elf.h
@@ -121,4 +121,24 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)	dump_task_fpu(tsk, elf_fpregs)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+/* vDSO has arch_setup_additional_pages */
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int executable_stack);
+
+extern unsigned int vdso_enabled;
+extern void __kernel_vsyscall;
+
+#define VDSO_BASE		((unsigned long)current->mm->context.vdso)
+#define VDSO_SYM(x)		(VDSO_BASE + (unsigned long)(x))
+
+#define ARCH_DLINFO						\
+do {								\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+} while (0)
+#endif /* CONFIG_VSYSCALL */
+
 #endif /* __ASM_SH_ELF_H */
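
For readers unfamiliar with ARCH_DLINFO: fs/binfmt_elf.c expands it while building the auxiliary vector of a newly exec'd process, and NEW_AUX_ENT(id, val) appends one (id, value) pair. A simplified, self-contained model of that expansion (plain userspace C, not kernel code; the base address used is purely illustrative):

	#include <stdio.h>

	#define AT_NULL			0
	#define AT_SYSINFO_EHDR		33

	static unsigned long auxv[32];
	static int auxc;

	/* Stand-in for the kernel's NEW_AUX_ENT(): append one (id, value) pair. */
	#define NEW_AUX_ENT(id, val)			\
	do {						\
		auxv[auxc++] = (id);			\
		auxv[auxc++] = (unsigned long)(val);	\
	} while (0)

	static unsigned int vdso_enabled = 1;

	int main(void)
	{
		unsigned long vdso_base = 0x29550000UL;	/* illustrative value */

		/* Mirrors the ARCH_DLINFO block added above. */
		if (vdso_enabled)
			NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_base);
		NEW_AUX_ENT(AT_NULL, 0);

		for (int i = 0; i < auxc; i += 2)
			printf("auxv[%d] = (%lu, %#lx)\n", i / 2, auxv[i], auxv[i + 1]);
		return 0;
	}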
diff --git a/include/asm-sh/mmu.h b/include/asm-sh/mmu.h
index 6383dc84501e..cf47df79bb94 100644
--- a/include/asm-sh/mmu.h
+++ b/include/asm-sh/mmu.h
@@ -11,7 +11,12 @@ typedef struct {
 #else
 
 /* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t		id;
+	void			*vdso;
+} mm_context_t;
 
 #endif /* CONFIG_MMU */
 
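
The new vdso member gives each mm somewhere to record where its vDSO page ended up, which is what VDSO_BASE in elf.h reads back. A rough sketch of how an arch_setup_additional_pages() implementation might fill it in, assuming a prebuilt page array and the generic install_special_mapping() helper; the real arch/sh code is outside this diff:

	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/binfmts.h>
	#include <linux/err.h>

	/* Assumed page array holding the prebuilt vDSO image (sketch only). */
	static struct page *vdso_pages[1];

	int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
	{
		struct mm_struct *mm = current->mm;
		unsigned long addr;
		int ret;

		down_write(&mm->mmap_sem);	/* mmap_sem in kernels of this era */

		/* Find a free slot in the address space for one vDSO page. */
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}

		/* Map it and remember the base so VDSO_BASE/VDSO_SYM() work later. */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ | VM_EXEC |
					      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
					      vdso_pages);
		if (ret == 0)
			mm->context.vdso = (void *)addr;

	up_fail:
		up_write(&mm->mmap_sem);
		return ret;
	}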
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 87678ba8d6b6..c7088efe579a 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -49,7 +49,7 @@ get_mmu_context(struct mm_struct *mm)
 	unsigned long mc = mmu_context_cache;
 
 	/* Check if we have old version of context. */
-	if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;
 
@@ -68,7 +68,7 @@ get_mmu_context(struct mm_struct *mm)
 		if (!mc)
 			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context = mc;
+	mm->context.id = mc;
 }
 
 /*
@@ -78,7 +78,7 @@ get_mmu_context(struct mm_struct *mm)
 static __inline__ int init_new_context(struct task_struct *tsk,
 				       struct mm_struct *mm)
 {
-	mm->context = NO_CONTEXT;
+	mm->context.id = NO_CONTEXT;
 
 	return 0;
 }
@@ -123,7 +123,7 @@ static __inline__ unsigned long get_asid(void)
 static __inline__ void activate_context(struct mm_struct *mm)
 {
 	get_mmu_context(mm);
-	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
 /* MMU_TTB can be used for optimizing the fault handling.
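
For reference, the masks used in these hunks are defined near the top of the same header (not shown in the quoted context); on SH the context word packs the 8-bit hardware ASID in the low byte and a generation counter above it, roughly:

	#define MMU_CONTEXT_ASID_MASK		0x000000ff	/* hardware ASID */
	#define MMU_CONTEXT_VERSION_MASK	0xffffff00	/* generation counter */
	#define MMU_CONTEXT_FIRST_VERSION	0x00000100
	#define NO_CONTEXT			0

Only the accessors change in this patch: the bare mm->context word becomes mm->context.id, with the new vdso pointer carried alongside it.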
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index acf6977b4042..3d8dae31a6f6 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -117,5 +117,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+/* vDSO support */
+#ifdef CONFIG_VSYSCALL
+#define __HAVE_ARCH_GATE_AREA
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
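
Defining __HAVE_ARCH_GATE_AREA tells generic mm code that the architecture supplies its own gate-area hooks instead of the defaults in mm/memory.c. A sketch of the hooks an arch of this era is then expected to provide; returning NULL/0 is the natural choice when the vDSO is an ordinary per-mm mapping rather than a fixed gate page (prototypes as they looked around 2.6.19, sketch only):

	#include <linux/mm.h>
	#include <linux/sched.h>

	struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
	{
		/* No fixed gate VMA: the vDSO is a normal special mapping. */
		return NULL;
	}

	int in_gate_area(struct task_struct *tsk, unsigned long addr)
	{
		return 0;
	}

	int in_gate_area_no_task(unsigned long addr)
	{
		return 0;
	}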
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index b7cba4e91a72..474773853cd1 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -276,5 +276,11 @@ static inline void prefetch(void *x)
 #define prefetchw(x)	prefetch(x)
 #endif
 
+#ifdef CONFIG_VSYSCALL
+extern int vsyscall_init(void);
+#else
+#define vsyscall_init() do { } while (0)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_H */
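
vsyscall_init() is meant to be called once during boot to prepare the vDSO page; with CONFIG_VSYSCALL disabled it compiles away to nothing. A rough sketch of such an init routine, assuming the vDSO image is linked into the kernel between two marker symbols (the names vsyscall_text_start/vsyscall_text_end are illustrative, not taken from this diff):

	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/string.h>
	#include <linux/init.h>

	/* Assumed linker-provided markers around the built-in vDSO image. */
	extern const char vsyscall_text_start[], vsyscall_text_end[];

	static struct page *syscall_pages[1];

	int __init vsyscall_init(void)
	{
		void *syscall_page = (void *)get_zeroed_page(GFP_KERNEL);

		if (!syscall_page)
			return -ENOMEM;

		/* Keep the backing page around so it can be mapped into each mm. */
		syscall_pages[0] = virt_to_page(syscall_page);

		/* Copy the vDSO text into the page that userspace will see. */
		memcpy(syscall_page, vsyscall_text_start,
		       vsyscall_text_end - vsyscall_text_start);

		return 0;
	}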