author     Michael S. Tsirkin <mst@redhat.com>    2013-05-26 10:30:47 -0400
committer  Ingo Molnar <mingo@kernel.org>         2013-05-28 03:41:07 -0400
commit     b607ae78ac8a78f8e5e36817500e7c311519f032 (patch)
tree       a64b7f5c4f4651a55c9f4fe9edd5992a4596e408 /arch/frv
parent     56d2ef789f7c424918abdf6b95d84a64c1473220 (diff)
frv: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-3-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
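
For context on the annotation change: might_sleep() only documents that a function may block, while might_fault() additionally documents that the only way it can block is by taking a page fault on the user address, which debug checking can use to catch user-access calls made while page faults are disabled. Below is a minimal standalone sketch of that distinction, not kernel code; the names toy_copy_to_user and pagefaults_disabled are made up for illustration.

#include <assert.h>
#include <stdio.h>
#include <string.h>

static int pagefaults_disabled;	/* stand-in for "page faults currently disabled" state */

static void might_sleep(void)
{
	/* generic annotation: "this call may block"; nothing stricter to check here */
}

static void might_fault(void)
{
	/* stricter annotation: the only way this blocks is via a page fault,
	 * so faulting must currently be legal */
	might_sleep();
	assert(!pagefaults_disabled && "user access while page faults are disabled");
}

/* toy stand-in for __copy_to_user(): annotate first, then copy */
static unsigned long toy_copy_to_user(void *to, const void *from, unsigned long n)
{
	might_fault();
	memcpy(to, from, n);
	return 0;	/* 0 = all bytes copied, mirroring the kernel convention */
}

int main(void)
{
	char dst[16];

	toy_copy_to_user(dst, "hello", 6);	/* fine: page faults allowed */
	pagefaults_disabled = 1;
	/* toy_copy_to_user(dst, "boom", 5); would now trip the assertion */
	printf("%s\n", dst);
	return 0;
}
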
Diffstat (limited to 'arch/frv')
-rw-r--r--  arch/frv/include/asm/uaccess.h | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 0b67ec5b4414..3ac9a59d65d4 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -280,14 +280,14 @@ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }

 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	return __copy_from_user_inatomic(to, from, n);
 }
