-rw-r--r--  include/asm-sh/uaccess.h                                              | 564
-rw-r--r--  include/asm-sh/uaccess_32.h                                           | 575
-rw-r--r--  include/asm-sh/uaccess_64.h (renamed from include/asm-sh64/uaccess.h) |   0
3 files changed, 578 insertions, 561 deletions
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index 77c391fa93d6..ff24ce95b238 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -1,563 +1,5 @@
-/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
- *
- * User space memory access functions
- *
- * Copyright (C) 1999, 2002 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * Based on:
- *     MIPS implementation version 1.15 by
- *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
- *     and i386 version.
- */
-#ifndef __ASM_SH_UACCESS_H
-#define __ASM_SH_UACCESS_H
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons (Data Segment Register?), these macros are misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#define get_ds() (KERNEL_DS)
-
-#if !defined(CONFIG_MMU)
-/* NOMMU is always true */
-#define __addr_ok(addr) (1)
-
-static inline mm_segment_t get_fs(void)
-{
-        return USER_DS;
-}
-
-static inline void set_fs(mm_segment_t s)
-{
-}
-
-/*
- * __access_ok: Check if address with size is OK or not.
- *
- * If we don't have an MMU (or if its disabled) the only thing we really have
- * to look out for is if the address resides somewhere outside of what
- * available RAM we have.
- *
- * TODO: This check could probably also stand to be restricted somewhat more..
- * though it still does the Right Thing(tm) for the time being.
- */
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-        return ((addr >= memory_start) && ((addr + size) < memory_end));
-}
-#else /* CONFIG_MMU */
-#define __addr_ok(addr) \
-        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-/*
- * __access_ok: Check if address with size is OK or not.
- *
- * Uhhuh, this needs 33-bit arithmetic. We have a carry..
- *
- * sum := addr + size; carry? --> flag = true;
- * if (sum >= addr_limit) flag = true;
- */
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-        unsigned long flag, sum;
-
-        __asm__("clrt\n\t"
-                "addc %3, %1\n\t"
-                "movt %0\n\t"
-                "cmp/hi %4, %1\n\t"
-                "rotcl %0"
-                :"=&r" (flag), "=r" (sum)
-                :"1" (addr), "r" (size),
-                 "r" (current_thread_info()->addr_limit.seg)
-                :"t");
-        return flag == 0;
-
-}
-#endif /* CONFIG_MMU */
-
-static inline int access_ok(int type, const void __user *p, unsigned long size)
-{
-        unsigned long addr = (unsigned long)p;
-        return __access_ok(addr, size);
-}
-
-/*
- * Uh, these should become the main single-value transfer routines ...
- * They automatically use the right size if we just have the right
- * pointer type ...
- *
- * As SuperH uses the same address space for kernel and user data, we
- * can just do these as direct assignments.
- *
- * Careful to not
- * (a) re-use the arguments for side effects (sizeof is ok)
- * (b) require any knowledge of processes at this stage
- */
-#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
-#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the user has to do the
- * checks by hand with "access_ok()")
- */
-#define __put_user(x,ptr) \
-        __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) \
-        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-#define __get_user_size(x,ptr,size,retval) \
-do { \
-        retval = 0; \
-        __chk_user_ptr(ptr); \
-        switch (size) { \
-        case 1: \
-                __get_user_asm(x, ptr, retval, "b"); \
-                break; \
-        case 2: \
-                __get_user_asm(x, ptr, retval, "w"); \
-                break; \
-        case 4: \
-                __get_user_asm(x, ptr, retval, "l"); \
-                break; \
-        default: \
-                __get_user_unknown(); \
-                break; \
-        } \
-} while (0)
-
-#define __get_user_nocheck(x,ptr,size) \
-({ \
-        long __gu_err, __gu_val; \
-        __get_user_size(__gu_val, (ptr), (size), __gu_err); \
-        (x) = (__typeof__(*(ptr)))__gu_val; \
-        __gu_err; \
-})
-
-#ifdef CONFIG_MMU
-#define __get_user_check(x,ptr,size) \
-({ \
-        long __gu_err, __gu_val; \
-        __chk_user_ptr(ptr); \
-        switch (size) { \
-        case 1: \
-                __get_user_1(__gu_val, (ptr), __gu_err); \
-                break; \
-        case 2: \
-                __get_user_2(__gu_val, (ptr), __gu_err); \
-                break; \
-        case 4: \
-                __get_user_4(__gu_val, (ptr), __gu_err); \
-                break; \
-        default: \
-                __get_user_unknown(); \
-                break; \
-        } \
-        \
-        (x) = (__typeof__(*(ptr)))__gu_val; \
-        __gu_err; \
-})
-
-#define __get_user_1(x,addr,err) ({ \
-__asm__("stc r7_bank, %1\n\t" \
-        "mov.l @(8,%1), %1\n\t" \
-        "and %2, %1\n\t" \
-        "cmp/pz %1\n\t" \
-        "bt/s 1f\n\t" \
-        " mov #0, %0\n\t" \
-        "0:\n" \
-        "mov #-14, %0\n\t" \
-        "bra 2f\n\t" \
-        " mov #0, %1\n" \
-        "1:\n\t" \
-        "mov.b @%2, %1\n\t" \
-        "extu.b %1, %1\n" \
-        "2:\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 0b\n\t" \
-        ".previous" \
-        : "=&r" (err), "=&r" (x) \
-        : "r" (addr) \
-        : "t"); \
-})
-
-#define __get_user_2(x,addr,err) ({ \
-__asm__("stc r7_bank, %1\n\t" \
-        "mov.l @(8,%1), %1\n\t" \
-        "and %2, %1\n\t" \
-        "cmp/pz %1\n\t" \
-        "bt/s 1f\n\t" \
-        " mov #0, %0\n\t" \
-        "0:\n" \
-        "mov #-14, %0\n\t" \
-        "bra 2f\n\t" \
-        " mov #0, %1\n" \
-        "1:\n\t" \
-        "mov.w @%2, %1\n\t" \
-        "extu.w %1, %1\n" \
-        "2:\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 0b\n\t" \
-        ".previous" \
-        : "=&r" (err), "=&r" (x) \
-        : "r" (addr) \
-        : "t"); \
-})
-
-#define __get_user_4(x,addr,err) ({ \
-__asm__("stc r7_bank, %1\n\t" \
-        "mov.l @(8,%1), %1\n\t" \
-        "and %2, %1\n\t" \
-        "cmp/pz %1\n\t" \
-        "bt/s 1f\n\t" \
-        " mov #0, %0\n\t" \
-        "0:\n" \
-        "mov #-14, %0\n\t" \
-        "bra 2f\n\t" \
-        " mov #0, %1\n" \
-        "1:\n\t" \
-        "mov.l @%2, %1\n\t" \
-        "2:\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 0b\n\t" \
-        ".previous" \
-        : "=&r" (err), "=&r" (x) \
-        : "r" (addr) \
-        : "t"); \
-})
-#else /* CONFIG_MMU */
-#define __get_user_check(x,ptr,size) \
-({ \
-        long __gu_err, __gu_val; \
-        if (__access_ok((unsigned long)(ptr), (size))) { \
-                __get_user_size(__gu_val, (ptr), (size), __gu_err); \
-                (x) = (__typeof__(*(ptr)))__gu_val; \
-        } else \
-                __gu_err = -EFAULT; \
-        __gu_err; \
-})
-#endif
-
-#define __get_user_asm(x, addr, err, insn) \
-({ \
-__asm__ __volatile__( \
-        "1:\n\t" \
-        "mov." insn " %2, %1\n\t" \
-        "mov #0, %0\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3:\n\t" \
-        "mov #0, %1\n\t" \
-        "mov.l 4f, %0\n\t" \
-        "jmp @%0\n\t" \
-        " mov %3, %0\n" \
-        "4: .long 2b\n\t" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 3b\n\t" \
-        ".previous" \
-        :"=&r" (err), "=&r" (x) \
-        :"m" (__m(addr)), "i" (-EFAULT)); })
-
-extern void __get_user_unknown(void);
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
-        retval = 0; \
-        __chk_user_ptr(ptr); \
-        switch (size) { \
-        case 1: \
-                __put_user_asm(x, ptr, retval, "b"); \
-                break; \
-        case 2: \
-                __put_user_asm(x, ptr, retval, "w"); \
-                break; \
-        case 4: \
-                __put_user_asm(x, ptr, retval, "l"); \
-                break; \
-        case 8: \
-                __put_user_u64(x, ptr, retval); \
-                break; \
-        default: \
-                __put_user_unknown(); \
-        } \
-} while (0)
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
-        long __pu_err; \
-        __put_user_size((x),(ptr),(size),__pu_err); \
-        __pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
-        long __pu_err = -EFAULT; \
-        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-        \
-        if (__access_ok((unsigned long)__pu_addr,size)) \
-                __put_user_size((x),__pu_addr,(size),__pu_err); \
-        __pu_err; \
-})
-
-#define __put_user_asm(x, addr, err, insn) \
-({ \
-__asm__ __volatile__( \
-        "1:\n\t" \
-        "mov." insn " %1, %2\n\t" \
-        "mov #0, %0\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3:\n\t" \
-        "nop\n\t" \
-        "mov.l 4f, %0\n\t" \
-        "jmp @%0\n\t" \
-        "mov %3, %0\n" \
-        "4: .long 2b\n\t" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 3b\n\t" \
-        ".previous" \
-        :"=&r" (err) \
-        :"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
-        :"memory"); })
-
-#if defined(__LITTLE_ENDIAN__)
-#define __put_user_u64(val,addr,retval) \
-({ \
-__asm__ __volatile__( \
-        "1:\n\t" \
-        "mov.l %R1,%2\n\t" \
-        "mov.l %S1,%T2\n\t" \
-        "mov #0,%0\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3:\n\t" \
-        "nop\n\t" \
-        "mov.l 4f,%0\n\t" \
-        "jmp @%0\n\t" \
-        " mov %3,%0\n" \
-        "4: .long 2b\n\t" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 3b\n\t" \
-        ".previous" \
-        : "=r" (retval) \
-        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
-        : "memory"); })
+#ifdef CONFIG_SUPERH32
+# include "uaccess_32.h"
 #else
-#define __put_user_u64(val,addr,retval) \
-({ \
-__asm__ __volatile__( \
-        "1:\n\t" \
-        "mov.l %S1,%2\n\t" \
-        "mov.l %R1,%T2\n\t" \
-        "mov #0,%0\n" \
-        "2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3:\n\t" \
-        "nop\n\t" \
-        "mov.l 4f,%0\n\t" \
-        "jmp @%0\n\t" \
-        " mov %3,%0\n" \
-        "4: .long 2b\n\t" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n\t" \
-        ".long 1b, 3b\n\t" \
-        ".previous" \
-        : "=r" (retval) \
-        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
-        : "memory"); })
+# include "uaccess_64.h"
 #endif
-
-extern void __put_user_unknown(void);
-
-/* Generic arbitrary sized copy. */
-/* Return the number of bytes NOT copied */
-__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
-
-#define copy_to_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
-
-#define copy_from_user(to,from,n) ({ \
-void *__copy_to = (void *) (to); \
-void *__copy_from = (void *) (from); \
-__kernel_size_t __copy_size = (__kernel_size_t) (n); \
-__kernel_size_t __copy_res; \
-if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
-__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
-} else __copy_res = __copy_size; \
-__copy_res; })
-
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-        return __copy_user(to, (__force void *)from, n);
-}
-
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-        return __copy_user((__force void *)to, from, n);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/*
- * Clear the area and return remaining number of bytes
- * (on failure. Usually it's 0.)
- */
-extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
-
-#define clear_user(addr,n) ({ \
-void * __cl_addr = (addr); \
-unsigned long __cl_size = (n); \
-if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
-__cl_size = __clear_user(__cl_addr, __cl_size); \
-__cl_size; })
-
-static __inline__ int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
-        __kernel_size_t res;
-        unsigned long __dummy, _d, _s;
-
-        __asm__ __volatile__(
-                "9:\n"
-                "mov.b @%2+, %1\n\t"
-                "cmp/eq #0, %1\n\t"
-                "bt/s 2f\n"
-                "1:\n"
-                "mov.b %1, @%3\n\t"
-                "dt %7\n\t"
-                "bf/s 9b\n\t"
-                " add #1, %3\n\t"
-                "2:\n\t"
-                "sub %7, %0\n"
-                "3:\n"
-                ".section .fixup,\"ax\"\n"
-                "4:\n\t"
-                "mov.l 5f, %1\n\t"
-                "jmp @%1\n\t"
-                " mov %8, %0\n\t"
-                ".balign 4\n"
-                "5: .long 3b\n"
-                ".previous\n"
-                ".section __ex_table,\"a\"\n"
-                " .balign 4\n"
-                " .long 9b,4b\n"
-                ".previous"
-                : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
-                : "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
-                  "i" (-EFAULT)
-                : "memory", "t");
-
-        return res;
-}
-
-#define strncpy_from_user(dest,src,count) ({ \
-unsigned long __sfu_src = (unsigned long) (src); \
-int __sfu_count = (int) (count); \
-long __sfu_res = -EFAULT; \
-if(__access_ok(__sfu_src, __sfu_count)) { \
-__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
-} __sfu_res; })
-
-/*
- * Return the size of a string (including the ending 0!)
- */
-static __inline__ long __strnlen_user(const char __user *__s, long __n)
-{
-        unsigned long res;
-        unsigned long __dummy;
-
-        __asm__ __volatile__(
-                "9:\n"
-                "cmp/eq %4, %0\n\t"
-                "bt 2f\n"
-                "1:\t"
-                "mov.b @(%0,%3), %1\n\t"
-                "tst %1, %1\n\t"
-                "bf/s 9b\n\t"
-                " add #1, %0\n"
-                "2:\n"
-                ".section .fixup,\"ax\"\n"
-                "3:\n\t"
-                "mov.l 4f, %1\n\t"
-                "jmp @%1\n\t"
-                " mov #0, %0\n"
-                ".balign 4\n"
-                "4: .long 2b\n"
-                ".previous\n"
-                ".section __ex_table,\"a\"\n"
-                " .balign 4\n"
-                " .long 1b,3b\n"
-                ".previous"
-                : "=z" (res), "=&r" (__dummy)
-                : "0" (0), "r" (__s), "r" (__n)
-                : "t");
-        return res;
-}
-
-static __inline__ long strnlen_user(const char __user *s, long n)
-{
-        if (!__addr_ok(s))
-                return 0;
-        else
-                return __strnlen_user(s, n);
-}
-
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-        unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
-#endif /* __ASM_SH_UACCESS_H */
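Note: the MMU-case __access_ok() deleted above implements the 33-bit range check its comment describes: add addr and size, catch the carry out of bit 31, and compare the sum against the thread's addr_limit. A minimal portable-C sketch of the same idea, for illustration only (the function name and the explicit limit parameter are ours, not part of the patch; the real code reads the limit from current_thread_info()):

        /* Sketch of the carry-aware check done by the SH inline assembly. */
        static inline int range_ok(unsigned long addr, unsigned long size,
                                   unsigned long limit)
        {
                unsigned long sum = addr + size;

                /* fail on 32-bit wraparound (the "carry"), or when the end
                 * of the range lies above the address limit */
                return sum >= addr && sum <= limit;
        }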
diff --git a/include/asm-sh/uaccess_32.h b/include/asm-sh/uaccess_32.h
new file mode 100644
index 000000000000..f18a1a5c95c0
--- /dev/null
+++ b/include/asm-sh/uaccess_32.h
@@ -0,0 +1,575 @@
+/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
+ *
+ * User space memory access functions
+ *
+ * Copyright (C) 1999, 2002 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * Based on:
+ *     MIPS implementation version 1.15 by
+ *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
+ *     and i386 version.
+ */
+#ifndef __ASM_SH_UACCESS_H
+#define __ASM_SH_UACCESS_H
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL)
+#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define get_ds() (KERNEL_DS)
+
+#if !defined(CONFIG_MMU)
+/* NOMMU is always true */
+#define __addr_ok(addr) (1)
+
+static inline mm_segment_t get_fs(void)
+{
+        return USER_DS;
+}
+
+static inline void set_fs(mm_segment_t s)
+{
+}
+
+/*
+ * __access_ok: Check if address with size is OK or not.
+ *
+ * If we don't have an MMU (or if its disabled) the only thing we really have
+ * to look out for is if the address resides somewhere outside of what
+ * available RAM we have.
+ *
+ * TODO: This check could probably also stand to be restricted somewhat more..
+ * though it still does the Right Thing(tm) for the time being.
+ */
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+        return ((addr >= memory_start) && ((addr + size) < memory_end));
+}
+#else /* CONFIG_MMU */
+#define __addr_ok(addr) \
+        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
+
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+/*
+ * __access_ok: Check if address with size is OK or not.
+ *
+ * We do three checks:
+ * (1) is it user space?
+ * (2) addr + size --> carry?
+ * (3) addr + size >= 0x80000000 (PAGE_OFFSET)
+ *
+ * (1) (2) (3) | RESULT
+ *  0   0   0  |  ok
+ *  0   0   1  |  ok
+ *  0   1   0  |  bad
+ *  0   1   1  |  bad
+ *  1   0   0  |  ok
+ *  1   0   1  |  bad
+ *  1   1   0  |  bad
+ *  1   1   1  |  bad
+ */
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+        unsigned long flag, tmp;
+
+        __asm__("stc r7_bank, %0\n\t"
+                "mov.l @(8,%0), %0\n\t"
+                "clrt\n\t"
+                "addc %2, %1\n\t"
+                "and %1, %0\n\t"
+                "rotcl %0\n\t"
+                "rotcl %0\n\t"
+                "and #3, %0"
+                : "=&z" (flag), "=r" (tmp)
+                : "r" (addr), "1" (size)
+                : "t");
+
+        return flag == 0;
+}
+#endif /* CONFIG_MMU */
+
+static inline int access_ok(int type, const void __user *p, unsigned long size)
+{
+        unsigned long addr = (unsigned long)p;
+        return __access_ok(addr, size);
+}
+
+/*
+ * Uh, these should become the main single-value transfer routines ...
+ * They automatically use the right size if we just have the right
+ * pointer type ...
+ *
+ * As SuperH uses the same address space for kernel and user data, we
+ * can just do these as direct assignments.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the user has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x,ptr) \
+        __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) \
+        __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+        retval = 0; \
+        __chk_user_ptr(ptr); \
+        switch (size) { \
+        case 1: \
+                __get_user_asm(x, ptr, retval, "b"); \
+                break; \
+        case 2: \
+                __get_user_asm(x, ptr, retval, "w"); \
+                break; \
+        case 4: \
+                __get_user_asm(x, ptr, retval, "l"); \
+                break; \
+        default: \
+                __get_user_unknown(); \
+                break; \
+        } \
+} while (0)
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+        long __gu_err, __gu_val; \
+        __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+        (x) = (__typeof__(*(ptr)))__gu_val; \
+        __gu_err; \
+})
+
+#ifdef CONFIG_MMU
+#define __get_user_check(x,ptr,size) \
+({ \
+        long __gu_err, __gu_val; \
+        __chk_user_ptr(ptr); \
+        switch (size) { \
+        case 1: \
+                __get_user_1(__gu_val, (ptr), __gu_err); \
+                break; \
+        case 2: \
+                __get_user_2(__gu_val, (ptr), __gu_err); \
+                break; \
+        case 4: \
+                __get_user_4(__gu_val, (ptr), __gu_err); \
+                break; \
+        default: \
+                __get_user_unknown(); \
+                break; \
+        } \
+        \
+        (x) = (__typeof__(*(ptr)))__gu_val; \
+        __gu_err; \
+})
+
+#define __get_user_1(x,addr,err) ({ \
+__asm__("stc r7_bank, %1\n\t" \
+        "mov.l @(8,%1), %1\n\t" \
+        "and %2, %1\n\t" \
+        "cmp/pz %1\n\t" \
+        "bt/s 1f\n\t" \
+        " mov #0, %0\n\t" \
+        "0:\n" \
+        "mov #-14, %0\n\t" \
+        "bra 2f\n\t" \
+        " mov #0, %1\n" \
+        "1:\n\t" \
+        "mov.b @%2, %1\n\t" \
+        "extu.b %1, %1\n" \
+        "2:\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 0b\n\t" \
+        ".previous" \
+        : "=&r" (err), "=&r" (x) \
+        : "r" (addr) \
+        : "t"); \
+})
+
+#define __get_user_2(x,addr,err) ({ \
+__asm__("stc r7_bank, %1\n\t" \
+        "mov.l @(8,%1), %1\n\t" \
+        "and %2, %1\n\t" \
+        "cmp/pz %1\n\t" \
+        "bt/s 1f\n\t" \
+        " mov #0, %0\n\t" \
+        "0:\n" \
+        "mov #-14, %0\n\t" \
+        "bra 2f\n\t" \
+        " mov #0, %1\n" \
+        "1:\n\t" \
+        "mov.w @%2, %1\n\t" \
+        "extu.w %1, %1\n" \
+        "2:\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 0b\n\t" \
+        ".previous" \
+        : "=&r" (err), "=&r" (x) \
+        : "r" (addr) \
+        : "t"); \
+})
+
+#define __get_user_4(x,addr,err) ({ \
+__asm__("stc r7_bank, %1\n\t" \
+        "mov.l @(8,%1), %1\n\t" \
+        "and %2, %1\n\t" \
+        "cmp/pz %1\n\t" \
+        "bt/s 1f\n\t" \
+        " mov #0, %0\n\t" \
+        "0:\n" \
+        "mov #-14, %0\n\t" \
+        "bra 2f\n\t" \
+        " mov #0, %1\n" \
+        "1:\n\t" \
+        "mov.l @%2, %1\n\t" \
+        "2:\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 0b\n\t" \
+        ".previous" \
+        : "=&r" (err), "=&r" (x) \
+        : "r" (addr) \
+        : "t"); \
+})
+#else /* CONFIG_MMU */
+#define __get_user_check(x,ptr,size) \
+({ \
+        long __gu_err, __gu_val; \
+        if (__access_ok((unsigned long)(ptr), (size))) { \
+                __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+                (x) = (__typeof__(*(ptr)))__gu_val; \
+        } else \
+                __gu_err = -EFAULT; \
+        __gu_err; \
+})
+#endif
+
+#define __get_user_asm(x, addr, err, insn) \
+({ \
+__asm__ __volatile__( \
+        "1:\n\t" \
+        "mov." insn " %2, %1\n\t" \
+        "mov #0, %0\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3:\n\t" \
+        "mov #0, %1\n\t" \
+        "mov.l 4f, %0\n\t" \
+        "jmp @%0\n\t" \
+        " mov %3, %0\n" \
+        "4: .long 2b\n\t" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 3b\n\t" \
+        ".previous" \
+        :"=&r" (err), "=&r" (x) \
+        :"m" (__m(addr)), "i" (-EFAULT)); })
+
+extern void __get_user_unknown(void);
+
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+        retval = 0; \
+        __chk_user_ptr(ptr); \
+        switch (size) { \
+        case 1: \
+                __put_user_asm(x, ptr, retval, "b"); \
+                break; \
+        case 2: \
+                __put_user_asm(x, ptr, retval, "w"); \
+                break; \
+        case 4: \
+                __put_user_asm(x, ptr, retval, "l"); \
+                break; \
+        case 8: \
+                __put_user_u64(x, ptr, retval); \
+                break; \
+        default: \
+                __put_user_unknown(); \
+        } \
+} while (0)
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+        long __pu_err; \
+        __put_user_size((x),(ptr),(size),__pu_err); \
+        __pu_err; \
+})
+
+#define __put_user_check(x,ptr,size) \
+({ \
+        long __pu_err = -EFAULT; \
+        __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+        \
+        if (__access_ok((unsigned long)__pu_addr,size)) \
+                __put_user_size((x),__pu_addr,(size),__pu_err); \
+        __pu_err; \
+})
+
+#define __put_user_asm(x, addr, err, insn) \
+({ \
+__asm__ __volatile__( \
+        "1:\n\t" \
+        "mov." insn " %1, %2\n\t" \
+        "mov #0, %0\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3:\n\t" \
+        "nop\n\t" \
+        "mov.l 4f, %0\n\t" \
+        "jmp @%0\n\t" \
+        "mov %3, %0\n" \
+        "4: .long 2b\n\t" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 3b\n\t" \
+        ".previous" \
+        :"=&r" (err) \
+        :"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
+        :"memory"); })
+
+#if defined(__LITTLE_ENDIAN__)
+#define __put_user_u64(val,addr,retval) \
+({ \
+__asm__ __volatile__( \
+        "1:\n\t" \
+        "mov.l %R1,%2\n\t" \
+        "mov.l %S1,%T2\n\t" \
+        "mov #0,%0\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3:\n\t" \
+        "nop\n\t" \
+        "mov.l 4f,%0\n\t" \
+        "jmp @%0\n\t" \
+        " mov %3,%0\n" \
+        "4: .long 2b\n\t" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 3b\n\t" \
+        ".previous" \
+        : "=r" (retval) \
+        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
+        : "memory"); })
+#else
+#define __put_user_u64(val,addr,retval) \
+({ \
+__asm__ __volatile__( \
+        "1:\n\t" \
+        "mov.l %S1,%2\n\t" \
+        "mov.l %R1,%T2\n\t" \
+        "mov #0,%0\n" \
+        "2:\n" \
+        ".section .fixup,\"ax\"\n" \
+        "3:\n\t" \
+        "nop\n\t" \
+        "mov.l 4f,%0\n\t" \
+        "jmp @%0\n\t" \
+        " mov %3,%0\n" \
+        "4: .long 2b\n\t" \
+        ".previous\n" \
+        ".section __ex_table,\"a\"\n\t" \
+        ".long 1b, 3b\n\t" \
+        ".previous" \
+        : "=r" (retval) \
+        : "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
+        : "memory"); })
+#endif
+
+extern void __put_user_unknown(void);
+
+/* Generic arbitrary sized copy. */
+/* Return the number of bytes NOT copied */
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
+
+#define copy_to_user(to,from,n) ({ \
+void *__copy_to = (void *) (to); \
+__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+__kernel_size_t __copy_res; \
+if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
+__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
+} else __copy_res = __copy_size; \
+__copy_res; })
+
+#define copy_from_user(to,from,n) ({ \
+void *__copy_to = (void *) (to); \
+void *__copy_from = (void *) (from); \
+__kernel_size_t __copy_size = (__kernel_size_t) (n); \
+__kernel_size_t __copy_res; \
+if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
+__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
+} else __copy_res = __copy_size; \
+__copy_res; })
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+        return __copy_user(to, (__force void *)from, n);
+}
+
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+        return __copy_user((__force void *)to, from, n);
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+/*
+ * Clear the area and return remaining number of bytes
+ * (on failure. Usually it's 0.)
+ */
+extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
+
+#define clear_user(addr,n) ({ \
+void * __cl_addr = (addr); \
+unsigned long __cl_size = (n); \
+if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
+__cl_size = __clear_user(__cl_addr, __cl_size); \
+__cl_size; })
+
+static __inline__ int
+__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
+{
+        __kernel_size_t res;
+        unsigned long __dummy, _d, _s;
+
+        __asm__ __volatile__(
+                "9:\n"
+                "mov.b @%2+, %1\n\t"
+                "cmp/eq #0, %1\n\t"
+                "bt/s 2f\n"
+                "1:\n"
+                "mov.b %1, @%3\n\t"
+                "dt %7\n\t"
+                "bf/s 9b\n\t"
+                " add #1, %3\n\t"
+                "2:\n\t"
+                "sub %7, %0\n"
+                "3:\n"
+                ".section .fixup,\"ax\"\n"
+                "4:\n\t"
+                "mov.l 5f, %1\n\t"
+                "jmp @%1\n\t"
+                " mov %8, %0\n\t"
+                ".balign 4\n"
+                "5: .long 3b\n"
+                ".previous\n"
+                ".section __ex_table,\"a\"\n"
+                " .balign 4\n"
+                " .long 9b,4b\n"
+                ".previous"
+                : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
+                : "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
+                  "i" (-EFAULT)
+                : "memory", "t");
+
+        return res;
+}
+
+#define strncpy_from_user(dest,src,count) ({ \
+unsigned long __sfu_src = (unsigned long) (src); \
+int __sfu_count = (int) (count); \
+long __sfu_res = -EFAULT; \
+if(__access_ok(__sfu_src, __sfu_count)) { \
+__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
+} __sfu_res; })
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+static __inline__ long __strnlen_user(const char __user *__s, long __n)
+{
+        unsigned long res;
+        unsigned long __dummy;
+
+        __asm__ __volatile__(
+                "9:\n"
+                "cmp/eq %4, %0\n\t"
+                "bt 2f\n"
+                "1:\t"
+                "mov.b @(%0,%3), %1\n\t"
+                "tst %1, %1\n\t"
+                "bf/s 9b\n\t"
+                " add #1, %0\n"
+                "2:\n"
+                ".section .fixup,\"ax\"\n"
+                "3:\n\t"
+                "mov.l 4f, %1\n\t"
+                "jmp @%1\n\t"
+                " mov #0, %0\n"
+                ".balign 4\n"
+                "4: .long 2b\n"
+                ".previous\n"
+                ".section __ex_table,\"a\"\n"
+                " .balign 4\n"
+                " .long 1b,3b\n"
+                ".previous"
+                : "=z" (res), "=&r" (__dummy)
+                : "0" (0), "r" (__s), "r" (__n)
+                : "t");
+        return res;
+}
+
+static __inline__ long strnlen_user(const char __user *s, long n)
+{
+        if (!__addr_ok(s))
+                return 0;
+        else
+                return __strnlen_user(s, n);
+}
+
+#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+        unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif /* __ASM_SH_UACCESS_H */
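Note: the __ex_table entries emitted by the assembly above (the ".long 1b, 3b" pairs) are consumed at fault time through fixup_exception(), declared at the bottom of the header. A hedged sketch of that consuming side; search_exception_tables() is the generic kernel lookup helper, but this body is illustrative rather than necessarily the actual arch/sh implementation:

        /* If the faulting PC has an exception-table entry, branch to its
         * fixup stub (which sets -EFAULT and resumes) instead of oopsing. */
        int fixup_exception(struct pt_regs *regs)
        {
                const struct exception_table_entry *fixup;

                fixup = search_exception_tables(regs->pc);
                if (fixup) {
                        regs->pc = fixup->fixup;
                        return 1;
                }

                return 0;
        }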
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh/uaccess_64.h
index 644c67b65f94..644c67b65f94 100644
--- a/include/asm-sh64/uaccess.h
+++ b/include/asm-sh/uaccess_64.h
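For reference, a short usage sketch of the single-value transfer API that both the _32 and _64 variants provide; the function and variable names below are illustrative, not from this commit:

        /* Read an int from user space, increment it, and write it back.
         * get_user()/put_user() return 0 on success and -EFAULT when the
         * pointer fails the access_ok()-style range check or faults. */
        static int bump_user_word(int __user *uptr)
        {
                int val;

                if (get_user(val, uptr))
                        return -EFAULT;

                return put_user(val + 1, uptr);
        }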
