Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Kconfig     | 13 +++++++++++++
 arch/sh/mm/init.c      |  3 +++
 arch/sh/mm/tlb-flush.c | 18 ++++++++++--------
 3 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index b445d02075e8..9dd606464d23 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -223,6 +223,19 @@ config 32BIT
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config VSYSCALL
+	bool "Support vsyscall page"
+	depends on MMU
+	default y
+	help
+	  This will enable support for the kernel mapping a vDSO page
+	  in process space, and subsequently handing down the entry point
+	  to the libc through the ELF auxiliary vector.
+
+	  From the kernel side this is used for the signal trampoline.
+	  For systems with an MMU that can afford to give up a page,
+	  (the default value) say Y.
+
 choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
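The help text above says the vDSO entry point is handed to libc through the ELF auxiliary vector. As an illustration (not part of this patch, and assuming a libc that provides getauxval(3)), a userspace program can query that vector directly:

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), AT_SYSINFO_EHDR */

int main(void)
{
	/* AT_SYSINFO_EHDR holds the base address of the vDSO ELF image
	 * the kernel mapped into this process, or 0 if there is none. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO mapped at 0x%lx\n", vdso);
	else
		printf("no vDSO page in this process\n");

	return 0;
}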
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ad182b31d846..7154d1ce9785 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -286,6 +286,9 @@ void __init mem_init(void)
 		initsize >> 10);
 
 	p3_cache_init();
+
+	/* Initialize the vDSO */
+	vsyscall_init();
 }
 
 void free_initmem(void)
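mem_init() now calls vsyscall_init() once at boot. The function itself lies outside this diffstat (which is limited to arch/sh/mm); as a rough, hypothetical sketch, such an init step typically grabs and populates the page that will later be mapped into each process:

/* Hypothetical sketch only -- the real vsyscall_init() is added
 * elsewhere in this series.  vdso_image_start/vdso_image_end are
 * assumed linker symbols bounding a prebuilt vDSO image. */
static struct page *vdso_page;

int __init vsyscall_init(void)
{
	void *page = (void *)get_zeroed_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	/* Install the trampoline code into the page that will be
	 * mapped into user space at exec time. */
	memcpy(page, vdso_image_start, vdso_image_end - vdso_image_start);
	vdso_page = virt_to_page(page);

	return 0;
}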
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index fd7e42bcaa40..73ec7f6084fa 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,12 +14,12 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
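Every hunk in this file swaps mm->context for mm->context.id, which implies the accompanying header change (outside this diffstat) turned mm_context_t from a bare integer into a struct, presumably so per-mm vDSO state can live next to the ASID. A hedged reconstruction:

/* Before: the context was just the ASID word. */
typedef unsigned long mm_context_t;

/* After (assumed): the ASID becomes a named field, leaving room
 * for additional per-mm state such as the vDSO base address. */
typedef struct {
	unsigned long id;	/* MMU context / ASID */
	void *vdso;		/* assumption: per-mm vDSO mapping */
} mm_context_t;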
@@ -39,20 +39,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 {
 	struct mm_struct *mm = vma->vm_mm;
 
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
+			mm->context.id = NO_CONTEXT;
 			if (mm == current->mm)
 				activate_context(mm);
 		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
+			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -81,9 +82,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
+		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
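In both range-flush paths the body of the per-page loop falls outside these hunks, but the saved_asid/get_asid() bookkeeping visible above implies the usual pattern: switch to the target ASID, flush each page, then switch back. A sketch under assumed helper names:

/* Sketch of the loop implied above (helper names assumed);
 * __flush_tlb_page() invalidates a single (ASID, page) pair. */
set_asid(asid);			/* tag lookups with the target ASID */
while (start < end) {
	__flush_tlb_page(asid, start);
	start += PAGE_SIZE;
}
if (saved_asid != MMU_NO_ASID)
	set_asid(saved_asid);	/* switch back to the interrupted ASID */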
@@ -101,11 +103,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
+		mm->context.id = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 		local_irq_restore(flags);
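Finally, both flush_tlb_mm() and the too-large-range fallback in flush_tlb_range() avoid touching individual TLB entries at all: writing NO_CONTEXT into context.id and re-activating forces a fresh ASID, so entries tagged with the old one can never match again. A simplified sketch of that mechanism (not the actual sh implementation):

/* Simplified sketch; get_mmu_context()/set_asid() are the helpers
 * this pattern relies on in the sh MMU code. */
static void activate_context(struct mm_struct *mm)
{
	/* Hands out a new ASID when context.id == NO_CONTEXT,
	 * otherwise keeps the current one. */
	get_mmu_context(mm);

	/* Load the (possibly new) ASID into the MMU; stale entries
	 * carry the old ASID and will simply never hit. */
	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
}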