author:    Nick Piggin <npiggin@suse.de>    2008-09-10 07:37:17 -0400
committer: Ingo Molnar <mingo@elte.hu>      2008-09-10 07:48:49 -0400
commit:    c10d38dda1774ed4540380333cabd229eff37094
tree:      be9649dab7c0017c0a700f146db70f730ad819a7
parent:    76b189e91845eab3a9d52bb97f971d312d25652d
x86: some lock annotations for user copy paths
copy_to/from_user and all their variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem/pagecache.
Unfortunately, this often escapes lockdep, because a common pattern is to
use copy_*_user to read in arguments that userspace has just set up, or to
write data back to a hot buffer. In those cases page reclaim is unlikely to
get a window in which to cause copy_*_user to fault, so the lock dependency
is almost never exercised where lockdep can see it.
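As an illustration of that pattern (a made-up driver, not part of this
patch): an ioctl handler that copies in its argument block while holding a
driver mutex creates a dev->lock -> mmap_sem ordering that lockdep normally
only learns about on the rare occasions the copy actually faults.

/* Illustration only -- hypothetical driver, not from this patch. */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

struct example_args { int cmd; int val; };	/* made-up types */
struct example_dev  { struct mutex lock; struct example_args args; };

static long example_ioctl(struct example_dev *dev, void __user *uarg)
{
	long ret = 0;

	mutex_lock(&dev->lock);
	/*
	 * copy_from_user() can fault and take mmap_sem while dev->lock is
	 * held.  If any other path takes dev->lock under mmap_sem, that is
	 * a potential deadlock -- but userspace has just written *uarg, so
	 * the copy almost never faults here and lockdep rarely sees the
	 * dependency without an explicit annotation.
	 */
	if (copy_from_user(&dev->args, uarg, sizeof(dev->args)))
		ret = -EFAULT;
	mutex_unlock(&dev->lock);

	return ret;
}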
With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point.
Boots and runs OK so far.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/uaccess_32.h')
-rw-r--r--   include/asm-x86/uaccess_32.h | 10
1 file changed, 8 insertions, 2 deletions
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 6fdef39a0bcb..d725e2d703f7 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -82,8 +82,10 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_to_user_inatomic(to, from, n);
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -138,6 +140,8 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -160,6 +164,8 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 	const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
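The annotation added in all three helpers is the same check-and-annotate
sequence; factored into a helper of its own it would look like the sketch
below (the name example_might_fault() is illustrative, not something this
patch introduces).

#include <linux/kernel.h>	/* might_sleep() */
#include <linux/lockdep.h>	/* might_lock_read() */
#include <linux/sched.h>	/* current, struct mm_struct */

/*
 * Sketch of the sequence the diff adds.  might_lock_read() records
 * "this context may take mmap_sem for read" on every call, not only
 * on the rare calls where the user copy actually faults.  Kernel
 * threads have no mm, hence the current->mm test.
 */
static inline void example_might_fault(void)
{
	might_sleep();
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}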