author		Nick Piggin <npiggin@suse.de>	2008-09-10 07:37:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-10 07:48:49 -0400
commit		c10d38dda1774ed4540380333cabd229eff37094 (patch)
tree		be9649dab7c0017c0a700f146db70f730ad819a7 /arch
parent		76b189e91845eab3a9d52bb97f971d312d25652d (diff)
x86: some lock annotations for user copy paths
copy_to/from_user and all its variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem/pagecache.
Unfortunately, this often escapes lockdep because a common pattern is to
use it to read in some arguments just set up from userspace, or write data
back to a hot buffer. In those cases, page reclaim is unlikely to get a
window in which to make copy_*_user fault.
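As a minimal sketch of that pattern (illustrative only; example_read_args()
and struct example_args are invented names, not part of this patch):

static int example_read_args(struct example_args *args, const void __user *uptr)
{
	/*
	 * The caller set this buffer up from userspace moments ago, so it
	 * is almost certainly resident: copy_from_user() can fault (and
	 * then take mmap_sem for read), but in practice it rarely does,
	 * so lockdep rarely observes that lock dependency.
	 */
	if (copy_from_user(args, uptr, sizeof(*args)))
		return -EFAULT;
	return 0;
}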
With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point.
Boots and runs OK so far.
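For reference, the annotation pattern added ahead of each faulting user access
looks like the sketch below (the helper name is invented; might_sleep() and
might_lock_read() are the existing primitives, and the current->mm check skips
kernel threads, which have no user address space to fault on):

static unsigned long example_copy(void *dst, const void __user *src,
				  unsigned long n)
{
	might_sleep();				/* the copy may fault and block */
	if (current->mm)			/* kernel threads have no mm */
		might_lock_read(&current->mm->mmap_sem);

	return __copy_from_user(dst, src, n);	/* may take mmap_sem on a fault */
}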
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/lib/usercopy_32.c	7
-rw-r--r--	arch/x86/lib/usercopy_64.c	4
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e60944971a..8eedde2a9cac 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,6 +33,8 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 do { \
 	int __d0, __d1, __d2; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		"	testl %1,%1\n" \
 		"	jz 2f\n" \
@@ -120,6 +122,8 @@ EXPORT_SYMBOL(strncpy_from_user);
 do { \
 	int __d0; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		"0:	rep; stosl\n" \
 		"	movl %2,%0\n" \
@@ -148,7 +152,6 @@ do { \
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
 		__do_clear_user(to, n);
 	return n;
@@ -191,6 +194,8 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long res, tmp;
 
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 
 	__asm__ __volatile__(
 	"	testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7c718b..847d12945998 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -16,6 +16,8 @@
 do { \
 	long __d0, __d1, __d2; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		"	testq %1,%1\n" \
 		"	jz 2f\n" \
@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(