Diffstat (limited to 'include/asm-sparc64/uaccess.h')
 include/asm-sparc64/uaccess.h | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 80a65d7e3dbf..203e8eee6351 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
  * with the main instruction path. This means when everything is well,
  * we don't even have to jump over them. Further, they do not intrude
  * on our cache or tlb entries.
- *
- * There is a special way to mark a range of potentially faulting
- * insns (like twenty ldd/std's with no intervening other instructions):
- * you specify the address of the first insn and 0 in fixup, and in the
- * next exception_table_entry you specify the last potentially faulting
- * insn + 1 and, in fixup, the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. the index of the faulting instruction in the range).
  */
 
-struct exception_table_entry
-{
-	unsigned insn, fixup;
+struct exception_table_entry {
+	unsigned int insn, fixup;
 };
 
-/* Special extable search, which handles ranges.  Returns the fixup. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 extern void __ret_efault(void);
+extern void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
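With the range encoding gone, every table entry now pairs one potentially
faulting instruction with its fixup address, so the fault-time lookup reduces
to a plain search of a sorted table. The sketch below is illustrative only --
search_table() and its arguments are hypothetical names, not the kernel's
API -- but it shows the kind of binary search by trapping PC that the
simplified struct exception_table_entry permits:

/*
 * Illustrative sketch only -- search_table() is a hypothetical name.
 * Entries are sorted by faulting-instruction address, so the trap
 * handler can binary-search by the PC that took the fault.  The
 * 32-bit insn/fixup fields suffice on the assumption that sparc64
 * keeps kernel text at 32-bit-reachable addresses.
 */
static const struct exception_table_entry *
search_table(const struct exception_table_entry *first,
	     const struct exception_table_entry *last,
	     unsigned long pc)
{
	while (first <= last) {
		const struct exception_table_entry *mid =
			first + (last - first) / 2;

		if (mid->insn == pc)
			return mid;	/* resume execution at mid->fixup */
		if (mid->insn < pc)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return NULL;		/* no fixup: a genuine kernel fault */
}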
@@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
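The only change to the three copy routines is the unlikely() annotation on
the slow-path test. That macro comes from <linux/compiler.h> and expands to
GCC's __builtin_expect, telling the compiler to lay the fixup call out of
line so the no-fault fast path falls straight through:

/* From <linux/compiler.h>: the branch-prediction hints used above. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)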
@@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
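From a caller's point of view nothing changes: the assembly fast path runs
first, and only on a fault does the fixup routine compute how many bytes
were left uncopied. A minimal usage sketch follows; example_read_config()
and its buffer are made up for illustration:

#include <linux/string.h>
#include <asm/uaccess.h>

/*
 * Hypothetical caller, for illustration only.  A non-zero return from
 * copy_to_user() is the count of bytes that could not be copied, which
 * callers conventionally turn into -EFAULT.
 */
static long example_read_config(void __user *ubuf, unsigned long len)
{
	char kbuf[64];

	memset(kbuf, 0, sizeof(kbuf));
	if (len > sizeof(kbuf))
		len = sizeof(kbuf);
	if (copy_to_user(ubuf, kbuf, len))	/* fixup path runs only on fault */
		return -EFAULT;
	return len;
}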