Diffstat (limited to 'arch/sparc/kernel/sys_sparc_64.c')
 -rw-r--r--  arch/sparc/kernel/sys_sparc_64.c  914
 1 file changed, 914 insertions, 0 deletions
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
new file mode 100644
index 000000000000..39749e32dc7e
--- /dev/null
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -0,0 +1,914 @@
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
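
/* For illustration, the window excluded above works out to
 *	VA_EXCLUDE_START = 0x0000080000000000 - 0x100000000 = 0x000007ff00000000
 *	VA_EXCLUDE_END   = 0xfffff80000000000 + 0x100000000 = 0xfffff80100000000
 * i.e. the hardware VA hole padded by 4GB on either side.  A request such as
 * addr = 0x000007fe00000000 with len = 0x200000000 ends exactly at the start
 * of that window, so invalid_64bit_range() rejects it.
 */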

/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;
	return base - off;
}
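
/* Worked example (purely illustrative values: take PAGE_SHIFT = 13 and
 * SHMLBA = 0x4000 for the sake of the arithmetic): a hint of addr = 0x70000123
 * with pgoff = 3 gives
 *	base = (0x70000123 + 0x3fff) & ~0x3fff = 0x70004000
 *	off  = (3 << 13) & 0x3fff              = 0x2000
 * so COLOUR_ALIGN() returns 0x70006000.  The result minus
 * (pgoff << PAGE_SHIFT) is then SHMLBA-aligned, i.e. the mapping gets the
 * same D-cache colour as the file offset it maps.
 */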

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

/* Try to align the mapping to as large a boundary as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
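
/* For illustration: a framebuffer mapping of, say, 5MB first asks the generic
 * allocator for len + (4MB - PAGE_SIZE) bytes and rounds the result up to a
 * 4MB boundary; if that over-sized request cannot be placed, the loop retries
 * with 512K and then 64K alignment goals before settling for plain page
 * alignment.
 */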

/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			random_factor &= ((1 * 1024 * 1024) - 1);
		else
			random_factor = ((random_factor << PAGE_SHIFT) &
					 0xffffffffUL);
	}

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;
		unsigned long gap;

		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
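
/* For illustration: in the top-down case the gap kept free for stack growth
 * is the stack rlimit clamped to the range [128MB, 5/6 of the 32-bit task
 * size].  With a typical 8MB stack rlimit the clamp raises the gap to 128MB,
 * so mm->mmap_base becomes PAGE_ALIGN(STACK_TOP32 - 128MB - random_factor)
 * and mappings are then placed downward from there.
 */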

asmlinkage unsigned long sparc_brk(unsigned long brk)
{
	/* People could try to be nasty and use ta 0x6d in 32-bit programs */
	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
		return current->mm->brk;

	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
		return current->mm->brk;

	return sys_brk(brk);
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage long sparc_pipe(struct pt_regs *regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the demultiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
			unsigned long third, void __user *ptr, long fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
					     (const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

asmlinkage long sparc64_newuname(struct new_utsname __user *name)
{
	int ret = sys_newuname(name);

	if (current->personality == PER_LINUX32 && !ret) {
		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
		       ? -EFAULT : 0);
	}
	return ret;
}

asmlinkage long sparc64_personality(unsigned long personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	struct file * file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	len = PAGE_ALIGN(len);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap(file, addr, len, prot, flags, off);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}

asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
	long ret;

	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sys64_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;
	if (unlikely(new_len >= VA_EXCLUDE_START))
		goto out;
	if (unlikely(sparc_mmap_check(addr, old_len)))
		goto out;
	if (unlikely(sparc_mmap_check(new_addr, new_len)))
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

/* We get here via sys_nis_syscall so that it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if something gets stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

asmlinkage long sys_getdomainname(char __user *name, int len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

asmlinkage long sys_utrap_install(utrap_entry_t type,
				  utrap_handler_t new_p,
				  utrap_handler_t new_d,
				  utrap_handler_t __user *old_p,
				  utrap_handler_t __user *old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
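
/* For illustration: slot 0 of the utraps array serves as a reference count
 * (set to 1 when freshly allocated above, decremented on the old array when
 * a shared table is duplicated).  So when the table is referenced more than
 * once, changing a handler first makes a private copy rather than editing
 * the shared table in place.
 */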

asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
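
/* For reference: the three accepted values follow the SPARC V9 memory models
 * (0 = TSO, 1 = PSO, 2 = RSO); the value is shifted into the TSTATE.MM field,
 * which the ~TSTATE_MM mask above clears first.
 */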

asmlinkage long sys_rt_sigaction(int sig,
				 const struct sigaction __user *act,
				 struct sigaction __user *oact,
				 void __user *restorer,
				 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
	unsigned long pic, tmp;

	read_pic(pic);
	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
	__put_user(tmp, current_thread_info()->user_cntd0);
	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
	__put_user(tmp, current_thread_info()->user_cntd1);
	reset_pic();
}

asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	int err = 0;

	switch(opcode) {
	case PERFCTR_ON:
		current_thread_info()->pcr_reg = arg2;
		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		write_pcr(arg2);
		reset_pic();
		set_thread_flag(TIF_PERFCTR);
		break;

	case PERFCTR_OFF:
		err = -EINVAL;
		if (test_thread_flag(TIF_PERFCTR)) {
			current_thread_info()->user_cntd0 =
				current_thread_info()->user_cntd1 = NULL;
			current_thread_info()->pcr_reg = 0;
			write_pcr(0);
			clear_thread_flag(TIF_PERFCTR);
			err = 0;
		}
		break;

	case PERFCTR_READ: {
		unsigned long pic, tmp;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		read_pic(pic);
		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
		err |= __put_user(tmp, current_thread_info()->user_cntd0);
		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
		err |= __put_user(tmp, current_thread_info()->user_cntd1);
		reset_pic();
		break;
	}

	case PERFCTR_CLRPIC:
		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;

	case PERFCTR_SETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
		write_pcr(current_thread_info()->pcr_reg);
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;
	}

	case PERFCTR_GETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
		break;
	}

	default:
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
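
	/* "t 0x6d" is the 64-bit Linux syscall trap: the syscall number sits in
	 * %g1 and the arguments in %o0-%o2.  On return the carry bit flags
	 * failure, so "sub %g0, %o0, %0" provisionally negates %o0 (turning an
	 * errno into the usual negative return value) and "movcc %xcc, %o0, %0"
	 * keeps the raw %o0 instead when the carry is clear, i.e. on success.
	 */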
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}