author    Paul Mundt <lethal@linux-sh.org>  2006-09-27 05:33:49 -0400
committer Paul Mundt <lethal@linux-sh.org>  2006-09-27 05:33:49 -0400
commit    19f9a34f87c48bbd270d617d1c986d0c23866a1a (patch)
tree      19f32122aec9c16cbbf8e3331e81040a4850cb8d /arch/sh/mm
parent    8c12b5dc13bf8516303a8224ab4e9708b33d5b00 (diff)
sh: Initial vsyscall page support.
This implements initial support for the vsyscall page on SH. At the
moment we leave it configurable due to having nommu to support from
the same code base. We hook it up for the signal trampoline return
at present, with more to be added later, once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
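For context, a minimal userspace sketch of how a libc or test program could pick the vDSO location out of the ELF auxiliary vector once uClibc catches up. AT_SYSINFO_EHDR is an assumption here (the commit message only says the entry point is handed down "through the ELF auxiliary vector"), and getauxval() is a glibc accessor that postdates this commit:

	/* Hypothetical userspace sketch: locate the kernel-mapped vDSO
	 * page via the ELF auxiliary vector. AT_SYSINFO_EHDR is an
	 * assumption; the commit only confirms that the entry point is
	 * passed via the auxiliary vector. */
	#include <stdio.h>
	#include <elf.h>
	#include <sys/auxv.h>	/* getauxval(), glibc >= 2.16 */

	int main(void)
	{
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

		if (vdso)
			printf("vDSO mapped at %#lx\n", vdso);
		else
			printf("no vDSO entry in the auxiliary vector\n");
		return 0;
	}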
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/Kconfig	13
-rw-r--r--	arch/sh/mm/init.c	3
-rw-r--r--	arch/sh/mm/tlb-flush.c	18
3 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index b445d02075e8..9dd606464d23 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -223,6 +223,19 @@ config 32BIT
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config VSYSCALL
+	bool "Support vsyscall page"
+	depends on MMU
+	default y
+	help
+	  This will enable support for the kernel mapping a vDSO page
+	  in process space, and subsequently handing down the entry point
+	  to the libc through the ELF auxiliary vector.
+
+	  From the kernel side this is used for the signal trampoline.
+	  For systems with an MMU that can afford to give up a page,
+	  (the default value) say Y.
+
 choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ad182b31d846..7154d1ce9785 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -286,6 +286,9 @@ void __init mem_init(void)
 		initsize >> 10);
 
 	p3_cache_init();
+
+	/* Initialize the vDSO */
+	vsyscall_init();
 }
 
 void free_initmem(void)
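Note that mem_init() calls vsyscall_init() unconditionally even though VSYSCALL depends on MMU, so a header elsewhere in the series presumably supplies a no-op fallback for the nommu/!CONFIG_VSYSCALL case. A minimal sketch of such a stub, not part of this hunk; only the vsyscall_init() name is confirmed by the diff:

	/* Assumed fallback in a shared header: compile out the vDSO
	 * setup when CONFIG_VSYSCALL is not set. */
	#ifdef CONFIG_VSYSCALL
	extern int vsyscall_init(void);
	#else
	#define vsyscall_init() do { } while (0)
	#endif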
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index fd7e42bcaa40..73ec7f6084fa 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,12 +14,12 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
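The recurring context to context.id conversion throughout this file implies that the SH mm_context_t changed from a bare unsigned long into a struct, presumably so per-mm vDSO state can sit alongside the MMU context ID. A sketch of what the new type plausibly looks like; only the .id member is confirmed by this diff:

	/* Assumed shape of the new SH mm_context_t. */
	typedef struct {
		unsigned long id;	/* MMU context ID + ASID, masked
					 * with MMU_CONTEXT_ASID_MASK in
					 * the flush paths */
		void *vdso;		/* hypothetical per-mm vDSO state */
	} mm_context_t;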
@@ -39,20 +39,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 {
 	struct mm_struct *mm = vma->vm_mm;
 
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
+			mm->context.id = NO_CONTEXT;
 			if (mm == current->mm)
 				activate_context(mm);
 		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
+			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -81,9 +82,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
+		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
@@ -101,11 +103,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
+		mm->context.id = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 		local_irq_restore(flags);
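All of the flush paths above follow the same ASID dance: if the target address space is not the one currently live in the MMU, temporarily switch ASIDs, flush the entry, then switch back. A condensed sketch of that pattern; get_asid() and MMU_NO_ASID appear in this diff, while set_asid() and __flush_tlb_page() are assumptions drawn from the surrounding SH code:

	/* Condensed sketch of the per-ASID flush pattern used above;
	 * depends on the SH mmu_context helpers, so it is illustrative
	 * rather than standalone. */
	static void flush_page_in_asid(unsigned long asid, unsigned long page)
	{
		unsigned long saved_asid = MMU_NO_ASID;

		if (get_asid() != asid) {
			/* Target mm is not current: borrow its ASID. */
			saved_asid = get_asid();
			set_asid(asid);
		}

		__flush_tlb_page(asid, page);	/* flush one entry */

		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);	/* restore caller's ASID */
	}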