author		Paul Mundt <lethal@linux-sh.org>	2009-05-07 03:38:16 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-05-07 03:38:16 -0400
commit		ee1acbfabd5270b40ce2cfdc202070b7ca91cdff (patch)
tree		a2247c699ce56431ddb0abb1ed252ea46781a110
parent		40c8bca76ecaa6b663d403d34f0fcd422bbdbffd (diff)
sh: Handle shm_align_mask also for HAVE_ARCH_UNMAPPED_AREA_TOPDOWN.
Presently shm_align_mask is only looked at in the bottom-up case, but we
still want it for proper colouring constraints in the topdown case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
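Background for the colouring logic below: on SH parts with virtually-indexed,
aliasing data caches, two virtual mappings of one physical page only stay
coherent if they agree in the cache-index bits covered by shm_align_mask.
That is the invariant both the bottom-up hook and the new topdown hook
enforce for shared mappings. A minimal standalone sketch of the check
(colour_ok() is an invented name; the 0x3fff mask and PAGE_SHIFT of 12 are
assumed SH-4-style values, not taken from this patch):

#include <stdio.h>

#define PAGE_SHIFT	12
static const unsigned long shm_align_mask = 0x3fff;	/* assumed: 16 KiB - 1 */

/* Mirrors the MAP_FIXED test in the patch: a MAP_SHARED mapping is only
 * acceptable if addr lands on the same colour as the file offset. */
static int colour_ok(unsigned long addr, unsigned long pgoff)
{
	return ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask) == 0;
}

int main(void)
{
	printf("%d\n", colour_ok(0x5000, 1));	/* 1: delta 0x4000, colour bits 0 */
	printf("%d\n", colour_ok(0x6000, 1));	/* 0: delta 0x5000, colour bits 0x1000 */
	return 0;
}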
-rw-r--r--	arch/sh/include/asm/cacheflush.h |   2 -
-rw-r--r--	arch/sh/include/asm/pgtable.h    |   4 +
-rw-r--r--	arch/sh/mm/mmap.c                | 136 +-
3 files changed, 136 insertions(+), 6 deletions(-)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 09acbc32d6c7..4c5462daa74c 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -75,7 +75,5 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index b517ae08b9c0..2a011b18090b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -154,6 +154,10 @@ extern void kmap_coherent_init(void);
 #define kmap_coherent_init()	do { } while (0)
 #endif
 
+/* arch/sh/mm/mmap.c */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 #include <asm-generic/pgtable.h>
 
 #endif /* __ASM_SH_PGTABLE_H */
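For context: in kernels of this vintage the generic arch_get_unmapped_area()
and arch_get_unmapped_area_topdown() in mm/mmap.c are compiled only under
#ifndef HAVE_ARCH_UNMAPPED_AREA and #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
respectively, so defining both macros here, where mm code can see them, is
what swaps in the arch/sh implementations below. It also moves
HAVE_ARCH_UNMAPPED_AREA out of cacheflush.h so both switches sit together.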
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 931f4d003fa0..1b5fdfb4e0c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008 Paul Mundt
+ * Copyright (C) 2008 - 2009 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -21,9 +21,26 @@ EXPORT_SYMBOL(shm_align_mask);
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
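The conversion above also introduces COLOUR_ALIGN_DOWN() for the topdown
walk: it picks the colour-matching address at or below the candidate,
dropping a full alias window when the in-window candidate would overshoot.
A standalone illustration under the same assumed mask and page size as
above (the lower-case name and the concrete values are assumptions, not
the patch's):

#include <assert.h>

#define PAGE_SHIFT	12
static const unsigned long shm_align_mask = 0x3fff;	/* assumed */

static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	/* Prefer the candidate at or below addr; else drop one window. */
	return (base + off <= addr) ? base + off : base - off;
}

int main(void)
{
	/* pgoff 1 needs colour 0x1000 within each 16 KiB window. */
	assert(colour_align_down(0x5800, 1) == 0x5000);	/* fits in this window */
	assert(colour_align_down(0x4800, 1) == 0x3000);	/* must drop below */
	return 0;
}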
@@ -103,6 +120,117 @@ full_search:
 			addr = COLOUR_ALIGN(addr, pgoff);
 		}
 	}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_colour_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+	if (do_colour_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
 #endif /* CONFIG_MMU */
 
 /*
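To observe the constraint from userspace, a hypothetical probe (not part of
the patch) can map the same file offset twice with MAP_SHARED; on an
aliasing SH kernel the two addresses should agree in the colour bits
whichever hook serviced the requests. The 0x3fff mask is again an assumed
value, as is the scratch-file path:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const unsigned long mask = 0x3fff;	/* assumed colour mask */
	int fd = open("/tmp/colour-test", O_RDWR | O_CREAT, 0600);
	void *a, *b;

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;

	/* Expect 0 when the kernel enforces the colouring constraint. */
	printf("colour delta: %#lx\n",
	       ((unsigned long)a ^ (unsigned long)b) & mask);
	return 0;
}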