author		Nick Piggin <npiggin@suse.de>	2008-09-10 07:37:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-10 07:48:49 -0400
commit		c10d38dda1774ed4540380333cabd229eff37094 (patch)
tree		be9649dab7c0017c0a700f146db70f730ad819a7 /include
parent		76b189e91845eab3a9d52bb97f971d312d25652d (diff)
x86: some lock annotations for user copy paths
copy_to/from_user and all its variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem/pagecache.

Unfortunately, this often escapes lockdep because a common pattern is to use
it to read in some arguments just set up from userspace, or write data back
to a hot buffer. In those cases, it will be unlikely for page reclaim to get
a window in to cause copy_*_user to fault.

With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point. Boots and runs
OK so far.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
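To make the failure mode concrete: without these annotations, lockdep only
learns that a user copy can take mmap_sem for read when a fault actually
happens during the copy. The sketch below is hypothetical driver code, not
part of this patch (the struct, function name, and ioctl shape are invented
for illustration); it shows the bug class the annotations flush out
deterministically.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl argument block, for illustration only. */
struct my_args {
	unsigned long addr;
	unsigned long len;
};

static long bad_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct my_args a;
	long ret = 0;

	/*
	 * BUG: mmap_sem held for write across a faultable user copy.
	 * If copy_from_user() faults, the fault handler takes mmap_sem
	 * for read and the task deadlocks against itself. With this
	 * patch, might_lock_read() reports the dependency on every
	 * call, whether or not the copy happens to fault.
	 */
	down_write(&current->mm->mmap_sem);
	if (copy_from_user(&a, (void __user *)arg, sizeof(a)))
		ret = -EFAULT;
	/* ... work on current->mm using a.addr and a.len ... */
	up_write(&current->mm->mmap_sem);
	return ret;
}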
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86/uaccess.h	14
-rw-r--r--	include/asm-x86/uaccess_32.h	10
-rw-r--r--	include/asm-x86/uaccess_64.h	12
3 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 5f702d1d5218..ad29752a1713 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -8,6 +8,8 @@
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
+#include <linux/lockdep.h>
+#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 
@@ -157,6 +159,9 @@ extern int __get_user_bad(void);
 	int __ret_gu;						\
 	unsigned long __val_gu;					\
 	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
@@ -241,6 +246,9 @@ extern void __put_user_8(void);
 	int __ret_pu;						\
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
 	__pu_val = x;						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
@@ -265,6 +273,9 @@ extern void __put_user_8(void);
 #define __put_user_size(x, ptr, size, retval, errret)		\
 do {								\
 	retval = 0;						\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
 	__chk_user_ptr(ptr);					\
 	switch (size) {						\
 	case 1:							\
@@ -317,6 +328,9 @@ do { \
 #define __get_user_size(x, ptr, size, retval, errret)		\
 do {								\
 	retval = 0;						\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
 	__chk_user_ptr(ptr);					\
 	switch (size) {						\
 	case 1:							\
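The same three lines recur at every entry point. For reference,
might_lock_read() (added to <linux/lockdep.h> shortly before this patch)
acquires and immediately releases the lock's lockdep map without ever
touching the lock itself, so the read dependency on mmap_sem is recorded on
every call. Roughly, under CONFIG_PROVE_LOCKING (a paraphrased sketch from
memory, not verbatim; see lockdep.h for the real definition):

#define might_lock_read(lock)						\
do {									\
	/* Claim a read acquisition purely for lockdep's benefit... */	\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	/* ...and release it immediately; the lock is never held. */	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)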
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 6fdef39a0bcb..d725e2d703f7 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -82,8 +82,10 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_to_user_inatomic(to, from, n);
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -138,6 +140,8 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -160,6 +164,8 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				  const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b5..40a7205fe576 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -28,6 +28,10 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -70,6 +74,10 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -112,6 +120,10 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
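A natural follow-up (hypothetical here, not part of this patch) is to
collect the repeated annotation into a single helper, which also documents
why the current->mm test exists: kernel threads have no user address space
(current->mm == NULL), so a "user" copy issued by one, e.g. under
set_fs(KERNEL_DS), can never fault and take mmap_sem.

/*
 * Hypothetical helper gathering the annotation added at each copy
 * entry point above; the name and placement are illustrative only.
 */
static inline void might_fault(void)
{
	/* User copies may sleep on a page fault... */
	might_sleep();
	/*
	 * ...and the fault path takes mmap_sem for read, but only for
	 * tasks that actually have a user address space.
	 */
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}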