| author | David S. Miller <davem@sunset.davemloft.net> | 2005-09-29 00:06:47 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2005-09-29 00:06:47 -0400 |
| commit | efdc1e2083e04cc70721d55803889b346c1a3de2 (patch) | |
| tree | 9f24fab33f795a69bb2dc43a8f3613392762ff02 /include/asm-sparc64 | |
| parent | 5fd29752f09cabff582f65c0ce35518db4c64937 (diff) | |
[SPARC64]: Simplify user fault fixup handling.
Instead of doing byte-at-a-time user accesses to figure
out where the fault occurred, read the saved fault_address
from the current thread structure.
For the sake of defensive programming, if the fault_address
does not fall into the user buffer range, simply assume the
whole area faulted. This will cause the fixup for
copy_from_user() to clear the entire kernel side buffer.
Signed-off-by: David S. Miller <davem@davemloft.net>
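To make the described fixup concrete, here is a minimal sketch of the idea, assuming a thread_info field named fault_address as described above; the helper names and exact layout are illustrative assumptions, not the actual arch/sparc64 implementation. The fixup computes how much of the user buffer lies at or beyond the faulting address and, for copy_from_user(), zeroes the corresponding tail of the kernel buffer.

```c
#include <linux/string.h>
#include <linux/thread_info.h>

/*
 * Illustrative sketch only: figure out how many bytes remain uncopied,
 * based on the fault address recorded in the current thread structure.
 * compute_remaining() and copy_from_user_fixup_sketch() are hypothetical
 * names, not the real arch/sparc64 helpers.
 */
static unsigned long compute_remaining(unsigned long start, unsigned long size)
{
	unsigned long fault_addr = current_thread_info()->fault_address;

	/*
	 * Defensive programming: if the recorded fault address does not
	 * fall inside the user buffer, assume the whole area faulted.
	 */
	if (fault_addr < start || fault_addr >= start + size)
		return size;

	/* Only the tail starting at the faulting byte is unusable. */
	return start + size - fault_addr;
}

static unsigned long copy_from_user_fixup_sketch(void *to,
		const void __user *from, unsigned long size)
{
	unsigned long left = compute_remaining((unsigned long) from, size);

	/*
	 * Callers of copy_from_user() expect the destination to be zeroed
	 * where nothing was copied; clearing the whole remainder covers
	 * both the in-range case and the "assume everything faulted" case.
	 */
	memset((char *) to + (size - left), 0, left);
	return left;
}
```

With the fault address available, the copy_*_user() wrappers in uaccess.h only have to invoke the slow fixup path on the rare occasion that the fast-path copy reports a fault, which is what the hunks below annotate with unlikely().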
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r-- | include/asm-sparc64/uaccess.h | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index bc8ddbb1cbed..203e8eee6351 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -251,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -267,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -283,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
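The only source change in uaccess.h itself is wrapping the fault check in unlikely(), which tells the compiler that a non-zero return from the fast-path copy is the rare case, so the common no-fault path is laid out as straight-line fall-through code. In the kernel these hints are thin wrappers around GCC's __builtin_expect(); the definitions in include/linux/compiler.h are essentially:

```c
/* Branch-prediction hints, shown here for reference. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
```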