Diffstat (limited to 'arch/sparc64/kernel/sys_sparc.c')

-rw-r--r--   arch/sparc64/kernel/sys_sparc.c   297

1 file changed, 263 insertions(+), 34 deletions(-)
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 5f8c822a2b4a..7a869138c37f 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -25,25 +25,93 @@
 #include <linux/syscalls.h>
 #include <linux/ipc.h>
 #include <linux/personality.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/ipc.h>
 #include <asm/utrap.h>
 #include <asm/perfctr.h>
+#include <asm/a.out.h>
 
 /* #define DEBUG_UNIMP_SYSCALL */
 
-/* XXX Make this per-binary type, this way we can detect the type of
- * XXX a binary. Every Sparc executable calls this very early on.
- */
 asmlinkage unsigned long sys_getpagesize(void)
 {
         return PAGE_SIZE;
 }
 
-#define COLOUR_ALIGN(addr,pgoff)                \
-        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
-         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+
+/* Does addr --> addr+len fall within 4GB of the VA-space hole or
+ * overflow past the end of the 64-bit address space?
+ */
+static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
+{
+        unsigned long va_exclude_start, va_exclude_end;
+
+        va_exclude_start = VA_EXCLUDE_START;
+        va_exclude_end = VA_EXCLUDE_END;
+
+        if (unlikely(len >= va_exclude_start))
+                return 1;
+
+        if (unlikely((addr + len) < addr))
+                return 1;
+
+        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
+                     ((addr + len) >= va_exclude_start &&
+                      (addr + len) < va_exclude_end)))
+                return 1;
+
+        return 0;
+}
+
+/* Does start,end straddle the VA-space hole? */
+static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
+{
+        unsigned long va_exclude_start, va_exclude_end;
+
+        va_exclude_start = VA_EXCLUDE_START;
+        va_exclude_end = VA_EXCLUDE_END;
+
+        if (likely(start < va_exclude_start && end < va_exclude_start))
+                return 0;
+
+        if (likely(start >= va_exclude_end && end >= va_exclude_end))
+                return 0;
+
+        return 1;
+}
+
+/* These functions differ from the default implementations in
+ * mm/mmap.c in two ways:
+ *
+ * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
+ *    for fixed such mappings we just validate what the user gave us.
+ * 2) For 64-bit tasks we avoid mapping anything within 4GB of
+ *    the spitfire/niagara VA-hole.
+ */
+
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+                                         unsigned long pgoff)
+{
+        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
+        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+        return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+                                              unsigned long pgoff)
+{
+        unsigned long base = addr & ~(SHMLBA-1);
+        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
+
+        if (base + off <= addr)
+                return base + off;
+        return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
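The two helpers added above encode the UltraSPARC VA hole (no mappable addresses between 0x0000080000000000 and 0xfffff80000000000) plus a 4GB guard band on each side. A minimal user-space sketch of the same check, assuming a 64-bit unsigned long; the constants are copied from the hunk, while main() and the test ranges are invented for illustration:

    #include <stdio.h>

    #define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
    #define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

    /* Same logic as the kernel helper, minus the unlikely() hints. */
    static int invalid_64bit_range(unsigned long addr, unsigned long len)
    {
            if (len >= VA_EXCLUDE_START)
                    return 1;   /* longer than the whole usable low region */
            if (addr + len < addr)
                    return 1;   /* wraps past the top of the address space */
            if ((addr >= VA_EXCLUDE_START && addr < VA_EXCLUDE_END) ||
                (addr + len >= VA_EXCLUDE_START && addr + len < VA_EXCLUDE_END))
                    return 1;   /* start or end lands in the excluded band */
            return 0;
    }

    int main(void)
    {
            /* Well below the guard band: accepted (prints 0). */
            printf("%d\n", invalid_64bit_range(0x0000070000000000UL, 1UL << 20));
            /* Crosses into the excluded band: rejected (prints 1). */
            printf("%d\n", invalid_64bit_range(VA_EXCLUDE_START - (1UL << 20),
                                               1UL << 21));
            return 0;
    }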
@@ -64,8 +132,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
         }
 
         if (test_thread_flag(TIF_32BIT))
-                task_size = 0xf0000000UL;
-        if (len > task_size || len > -PAGE_OFFSET)
+                task_size = STACK_TOP32;
+        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                 return -ENOMEM;
 
         do_color_align = 0;
@@ -84,11 +152,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                 return addr;
         }
 
-        if (len <= mm->cached_hole_size) {
+        if (len > mm->cached_hole_size) {
+                start_addr = addr = mm->free_area_cache;
+        } else {
+                start_addr = addr = TASK_UNMAPPED_BASE;
                 mm->cached_hole_size = 0;
-                mm->free_area_cache = TASK_UNMAPPED_BASE;
         }
-        start_addr = addr = mm->free_area_cache;
 
         task_size -= len;
 
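The rewritten block changes how the bottom-up search is seeded: cached_hole_size records the largest free gap seen below free_area_cache, so a request larger than that gap can safely resume from the cache, while a smaller one must rescan from the bottom. A sketch of just that decision; the struct and constant are illustrative stand-ins for the mm_struct fields, not kernel definitions:

    /* Stand-ins for mm_struct fields; values are illustrative. */
    struct mm_cache {
            unsigned long free_area_cache;  /* where the last search ended */
            unsigned long cached_hole_size; /* largest hole seen below it  */
    };

    #define TASK_UNMAPPED_BASE_EXAMPLE 0x0000000070000000UL

    static unsigned long pick_search_start(struct mm_cache *mm, unsigned long len)
    {
            if (len > mm->cached_hole_size)
                    /* Every hole below the cache is too small: resume there. */
                    return mm->free_area_cache;
            /* A large-enough hole may exist lower down: rescan from the base. */
            mm->cached_hole_size = 0;
            return TASK_UNMAPPED_BASE_EXAMPLE;
    }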
@@ -100,11 +169,12 @@ full_search:
 
         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                 /* At this point:  (!vma || addr < vma->vm_end). */
-                if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
-                        addr = PAGE_OFFSET;
-                        vma = find_vma(mm, PAGE_OFFSET);
+                if (addr < VA_EXCLUDE_START &&
+                    (addr + len) >= VA_EXCLUDE_START) {
+                        addr = VA_EXCLUDE_END;
+                        vma = find_vma(mm, VA_EXCLUDE_END);
                 }
-                if (task_size < addr) {
+                if (unlikely(task_size < addr)) {
                         if (start_addr != TASK_UNMAPPED_BASE) {
                                 start_addr = addr = TASK_UNMAPPED_BASE;
                                 mm->cached_hole_size = 0;
@@ -112,7 +182,7 @@ full_search:
                         }
                         return -ENOMEM;
                 }
-                if (!vma || addr + len <= vma->vm_start) {
+                if (likely(!vma || addr + len <= vma->vm_start)) {
                         /*
                          * Remember the place where we stopped the search:
                          */
@@ -128,6 +198,121 @@ full_search:
         }
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                               const unsigned long len, const unsigned long pgoff,
+                               const unsigned long flags)
+{
+        struct vm_area_struct *vma;
+        struct mm_struct *mm = current->mm;
+        unsigned long task_size = STACK_TOP32;
+        unsigned long addr = addr0;
+        int do_color_align;
+
+        /* This should only ever run for 32-bit processes. */
+        BUG_ON(!test_thread_flag(TIF_32BIT));
+
+        if (flags & MAP_FIXED) {
+                /* We do not accept a shared mapping if it would violate
+                 * cache aliasing constraints.
+                 */
+                if ((flags & MAP_SHARED) &&
+                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+                        return -EINVAL;
+                return addr;
+        }
+
+        if (unlikely(len > task_size))
+                return -ENOMEM;
+
+        do_color_align = 0;
+        if (filp || (flags & MAP_SHARED))
+                do_color_align = 1;
+
+        /* requesting a specific address */
+        if (addr) {
+                if (do_color_align)
+                        addr = COLOUR_ALIGN(addr, pgoff);
+                else
+                        addr = PAGE_ALIGN(addr);
+
+                vma = find_vma(mm, addr);
+                if (task_size - len >= addr &&
+                    (!vma || addr + len <= vma->vm_start))
+                        return addr;
+        }
+
+        /* check if free_area_cache is useful for us */
+        if (len <= mm->cached_hole_size) {
+                mm->cached_hole_size = 0;
+                mm->free_area_cache = mm->mmap_base;
+        }
+
+        /* either no address requested or can't fit in requested address hole */
+        addr = mm->free_area_cache;
+        if (do_color_align) {
+                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+                addr = base + len;
+        }
+
+        /* make sure it can fit in the remaining address space */
+        if (likely(addr > len)) {
+                vma = find_vma(mm, addr-len);
+                if (!vma || addr <= vma->vm_start) {
+                        /* remember the address as a hint for next time */
+                        return (mm->free_area_cache = addr-len);
+                }
+        }
+
+        if (unlikely(mm->mmap_base < len))
+                goto bottomup;
+
+        addr = mm->mmap_base-len;
+        if (do_color_align)
+                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+        do {
+                /*
+                 * Lookup failure means no vma is above this address,
+                 * else if new region fits below vma->vm_start,
+                 * return with success:
+                 */
+                vma = find_vma(mm, addr);
+                if (likely(!vma || addr+len <= vma->vm_start)) {
+                        /* remember the address as a hint for next time */
+                        return (mm->free_area_cache = addr);
+                }
+
+                /* remember the largest hole we saw so far */
+                if (addr + mm->cached_hole_size < vma->vm_start)
+                        mm->cached_hole_size = vma->vm_start - addr;
+
+                /* try just below the current vma->vm_start */
+                addr = vma->vm_start-len;
+                if (do_color_align)
+                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+        } while (likely(len < vma->vm_start));
+
+bottomup:
+        /*
+         * A failed mmap() very likely causes application failure,
+         * so fall back to the bottom-up function here. This scenario
+         * can happen with large stack limits and large mmap()
+         * allocations.
+         */
+        mm->cached_hole_size = ~0UL;
+        mm->free_area_cache = TASK_UNMAPPED_BASE;
+        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+        /*
+         * Restore the topdown base:
+         */
+        mm->free_area_cache = mm->mmap_base;
+        mm->cached_hole_size = ~0UL;
+
+        return addr;
+}
+
 /* Try to align mapping such that we align it as much as possible. */
 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
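Both allocators lean on the colour-alignment helpers from the first hunk: a shared mapping must sit at the same offset within an SHMLBA-sized window as its file offset, so every alias of a page lands in the same D-cache colour. A worked user-space example; PAGE_SHIFT and SHMLBA here are illustrative stand-ins, not the sparc64 header values:

    #include <stdio.h>

    /* Illustrative values only; the real ones come from the sparc64 headers. */
    #define PAGE_SHIFT 13UL                 /* 8KB pages */
    #define SHMLBA     (4UL << PAGE_SHIFT)  /* assume a 4-page colour window */

    static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
    {
            unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
            unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

            return base + off;
    }

    static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
    {
            unsigned long base = addr & ~(SHMLBA - 1);
            unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

            /* Stay at or below addr, as a top-down search requires. */
            if (base + off <= addr)
                    return base + off;
            return base - off;
    }

    int main(void)
    {
            /* pgoff 3 => the mapping must start 3 pages into a colour window. */
            printf("up:   %#lx\n", colour_align(0x40000000UL, 3));      /* 0x40006000 */
            printf("down: %#lx\n", colour_align_down(0x40000000UL, 3)); /* 0x3fffa000 */
            return 0;
    }

Both results are congruent to pgoff << PAGE_SHIFT modulo SHMLBA; the _DOWN variant additionally never returns an address above the one passed in, which the top-down walk above depends on.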
@@ -171,15 +356,57 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
         return addr;
 }
 
+/* Essentially the same as PowerPC... */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+        unsigned long random_factor = 0UL;
+
+        if (current->flags & PF_RANDOMIZE) {
+                random_factor = get_random_int();
+                if (test_thread_flag(TIF_32BIT))
+                        random_factor &= ((1 * 1024 * 1024) - 1);
+                else
+                        random_factor = ((random_factor << PAGE_SHIFT) &
+                                         0xffffffffUL);
+        }
+
+        /*
+         * Fall back to the standard layout if the personality
+         * bit is set, or if the expected stack growth is unlimited:
+         */
+        if (!test_thread_flag(TIF_32BIT) ||
+            (current->personality & ADDR_COMPAT_LAYOUT) ||
+            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+            sysctl_legacy_va_layout) {
+                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+                mm->get_unmapped_area = arch_get_unmapped_area;
+                mm->unmap_area = arch_unmap_area;
+        } else {
+                /* We know it's 32-bit */
+                unsigned long task_size = STACK_TOP32;
+                unsigned long gap;
+
+                gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+                if (gap < 128 * 1024 * 1024)
+                        gap = 128 * 1024 * 1024;
+                if (gap > (task_size / 6 * 5))
+                        gap = (task_size / 6 * 5);
+
+                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
+                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+                mm->unmap_area = arch_unmap_area_topdown;
+        }
+}
+
 asmlinkage unsigned long sparc_brk(unsigned long brk)
 {
         /* People could try to be nasty and use ta 0x6d in 32bit programs */
-        if (test_thread_flag(TIF_32BIT) &&
-            brk >= 0xf0000000UL)
+        if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
                 return current->mm->brk;
 
-        if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
+        if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
                 return current->mm->brk;
+
         return sys_brk(brk);
 }
 
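For the top-down case, arch_pick_mmap_layout() leaves a stack gap clamped to between 128MB and 5/6 of the 32-bit task size, then drops mmap_base below STACK_TOP32 by that gap plus the random factor. A sketch of the arithmetic; the page size and sample inputs are illustrative:

    #include <stdio.h>

    #define STACK_TOP32   0xf0000000UL
    #define PAGE_SIZE_EX  8192UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE_EX - 1) & ~(PAGE_SIZE_EX - 1))

    /* Mirrors the clamping in the hunk above; rlim_stack and
     * random_factor are sample inputs, not kernel values. */
    static unsigned long topdown_mmap_base(unsigned long rlim_stack,
                                           unsigned long random_factor)
    {
            unsigned long gap = rlim_stack;

            if (gap < 128 * 1024 * 1024)
                    gap = 128 * 1024 * 1024;
            if (gap > STACK_TOP32 / 6 * 5)
                    gap = STACK_TOP32 / 6 * 5;

            return PAGE_ALIGN(STACK_TOP32 - gap - random_factor);
    }

    int main(void)
    {
            /* Typical 8MB stack limit, no randomization: the gap clamps
             * up to 128MB, putting the base at 0xe8000000. */
            printf("%#lx\n", topdown_mmap_base(8 * 1024 * 1024, 0));
            return 0;
    }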
@@ -340,13 +567,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
         retval = -EINVAL;
 
         if (test_thread_flag(TIF_32BIT)) {
-                if (len > 0xf0000000UL ||
-                    ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
+                if (len >= STACK_TOP32)
+                        goto out_putf;
+
+                if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
                         goto out_putf;
         } else {
-                if (len > -PAGE_OFFSET ||
-                    ((flags & MAP_FIXED) &&
-                     addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+                if (len >= VA_EXCLUDE_START)
+                        goto out_putf;
+
+                if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
                         goto out_putf;
         }
 
@@ -365,9 +595,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len)
 {
         long ret;
 
-        if (len > -PAGE_OFFSET ||
-            (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+        if (invalid_64bit_range(addr, len))
                 return -EINVAL;
+
         down_write(&current->mm->mmap_sem);
         ret = do_munmap(current->mm, addr, len);
         up_write(&current->mm->mmap_sem);
@@ -384,18 +614,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
 {
         struct vm_area_struct *vma;
         unsigned long ret = -EINVAL;
+
         if (test_thread_flag(TIF_32BIT))
                 goto out;
-        if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
+        if (unlikely(new_len >= VA_EXCLUDE_START))
                 goto out;
-        if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
+        if (unlikely(invalid_64bit_range(addr, old_len)))
                 goto out;
+
         down_write(&current->mm->mmap_sem);
         if (flags & MREMAP_FIXED) {
-                if (new_addr < PAGE_OFFSET &&
-                    new_addr + new_len > -PAGE_OFFSET)
+                if (invalid_64bit_range(new_addr, new_len))
                         goto out_sem;
-        } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
+        } else if (invalid_64bit_range(addr, new_len)) {
                 unsigned long map_flags = 0;
                 struct file *file = NULL;
 
@@ -554,12 +785,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
         }
         if (!current_thread_info()->utraps) {
                 current_thread_info()->utraps =
-                        kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                 if (!current_thread_info()->utraps)
                         return -ENOMEM;
                 current_thread_info()->utraps[0] = 1;
-                memset(current_thread_info()->utraps+1, 0,
-                       UT_TRAP_INSTRUCTION_31*sizeof(long));
         } else {
                 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                     current_thread_info()->utraps[0] > 1) {
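The last hunk is a cleanup rather than a behaviour change: kzalloc() returns zeroed memory, making the explicit memset() of slots 1..UT_TRAP_INSTRUCTION_31 redundant; only utraps[0], the reference count, still needs a non-zero value. The same shape in user-space terms, with calloc() standing in for kzalloc() and the table size invented for illustration:

    #include <stdlib.h>

    enum { N_UTRAPS = 32 };   /* illustrative; the kernel uses UT_TRAP_INSTRUCTION_31 */

    static long *alloc_utrap_table(void)
    {
            /* calloc() hands back zeroed storage, like kzalloc(). */
            long *t = calloc(N_UTRAPS + 1, sizeof(long));

            if (!t)
                    return NULL;
            t[0] = 1;   /* slot 0 carries the reference count */
            return t;   /* slots 1..N_UTRAPS are already zero */
    }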