author	Nick Piggin <npiggin@suse.de>	2008-09-10 07:37:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-10 07:48:49 -0400
commit	c10d38dda1774ed4540380333cabd229eff37094 (patch)
tree	be9649dab7c0017c0a700f146db70f730ad819a7 /include/asm-x86/uaccess_64.h
parent	76b189e91845eab3a9d52bb97f971d312d25652d (diff)
x86: some lock annotations for user copy paths
copy_to/from_user and all its variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem/pagecache.
Unfortunately, this often escapes lockdep because a common pattern is to
use it to read in some arguments just set up from userspace, or to write
data back to a hot buffer. In those cases, page reclaim is unlikely to get
a window in which to cause copy_*_user to fault.
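As a hedged illustration of that pattern (all names below are hypothetical,
not from this patch): an ioctl handler copies in an argument block that the
caller wrote just before entering the kernel, so the pages are almost
always resident, the copy almost never faults, and lockdep almost never
observes the implicit mmap_sem dependency at runtime.

#include <linux/fs.h>
#include <linux/uaccess.h>

struct example_args {			/* hypothetical argument block */
	int op;
	unsigned long len;
};

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct example_args args;

	/*
	 * copy_from_user() may fault, and the fault path takes mmap_sem
	 * for reading. Because userspace touched *arg moments ago, a
	 * fault here is rare, so the dependency stays invisible to
	 * lockdep unless it is annotated explicitly.
	 */
	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	return 0;
}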
With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point.
Boots and runs OK so far.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
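For context, might_lock_read() is a lockdep-only annotation: it records an
acquire/release of the lock's dependency map without ever blocking, so the
"may take this lock for reading" edge is reported on every call rather than
only when a fault actually happens. The sketch below is paraphrased from
the lockdep primitive introduced alongside this work; the exact
lock_acquire() arguments may differ by tree.

/* Paraphrased sketch of the primitive (cf. include/linux/lockdep.h) */
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)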
Diffstat (limited to 'include/asm-x86/uaccess_64.h')
-rw-r--r--	include/asm-x86/uaccess_64.h	12
1 file changed, 12 insertions, 0 deletions
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b5..40a7205fe576 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -28,6 +28,10 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -70,6 +74,10 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -112,6 +120,10 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
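Two details of the annotation are worth noting. The if (current->mm) guard
exists because kernel threads run with current->mm == NULL, so there is no
mmap_sem to record a dependency on. And, as the changelog says, the atomic
variants are deliberately left unannotated: they are used from contexts
where faulting, and hence taking mmap_sem, must not happen. A hedged sketch
of such a caller follows (example_peek is hypothetical):

#include <linux/uaccess.h>

/* Probe user memory from a non-sleeping context: faults are disabled,
 * so the copy fails fast instead of taking mmap_sem. */
static int example_peek(void *dst, const void __user *src, unsigned n)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n); /* no might_sleep() */
	pagefault_enable();

	return ret;
}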