-rw-r--r-- | arch/x86/lib/usercopy_32.c   |  7 | ++++++-
-rw-r--r-- | arch/x86/lib/usercopy_64.c   |  4 | ++++
-rw-r--r-- | include/asm-x86/uaccess.h    | 14 | ++++++++++++++
-rw-r--r-- | include/asm-x86/uaccess_32.h | 10 | ++++++++--
-rw-r--r-- | include/asm-x86/uaccess_64.h | 12 | ++++++++++++
5 files changed, 44 insertions, 3 deletions
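
Every hunk below adds the same annotation to a user-copy path: might_sleep(), plus a lockdep hint that the page-fault handler may take current->mm->mmap_sem for read, guarded by an mm check so kernel threads (which have no mm) are skipped. As a rough sketch of the pattern being open-coded at each site (the helper name here is hypothetical, not something this patch adds), it amounts to:

	/* Hypothetical consolidation of the annotation repeated in this patch. */
	#include <linux/kernel.h>
	#include <linux/lockdep.h>
	#include <linux/sched.h>

	static inline void annotate_user_copy(void)
	{
		might_sleep();			/* the copy may fault and block */
		if (current->mm)		/* kernel threads have no mm */
			might_lock_read(&current->mm->mmap_sem);
	}

might_lock_read() compiles away unless lockdep is enabled, so the annotation costs nothing in production builds while letting CONFIG_PROVE_LOCKING kernels report mmap_sem misuse even when the copy never actually faults.
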
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e60944971a..8eedde2a9cac 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,6 +33,8 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 do { \
 	int __d0, __d1, __d2; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		" testl %1,%1\n" \
 		" jz 2f\n" \
@@ -120,6 +122,8 @@ EXPORT_SYMBOL(strncpy_from_user);
 do { \
 	int __d0; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		"0: rep; stosl\n" \
 		" movl %2,%0\n" \
@@ -148,7 +152,6 @@ do { \
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
 		__do_clear_user(to, n);
 	return n;
@@ -191,6 +194,8 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long res, tmp;
 
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 
 	__asm__ __volatile__(
 		" testl %0, %0\n"
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index f4df6e7c718b..847d12945998 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -16,6 +16,8 @@
 do { \
 	long __d0, __d1, __d2; \
 	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__asm__ __volatile__( \
 		" testq %1,%1\n" \
 		" jz 2f\n" \
@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 5f702d1d5218..ad29752a1713 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -8,6 +8,8 @@
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
+#include <linux/lockdep.h>
+#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 
@@ -157,6 +159,9 @@ extern int __get_user_bad(void);
 	int __ret_gu; \
 	unsigned long __val_gu; \
 	__chk_user_ptr(ptr); \
+	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	switch (sizeof(*(ptr))) { \
 	case 1: \
 		__get_user_x(1, __ret_gu, __val_gu, ptr); \
@@ -241,6 +246,9 @@ extern void __put_user_8(void);
 	int __ret_pu; \
 	__typeof__(*(ptr)) __pu_val; \
 	__chk_user_ptr(ptr); \
+	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__pu_val = x; \
 	switch (sizeof(*(ptr))) { \
 	case 1: \
@@ -265,6 +273,9 @@ extern void __put_user_8(void);
 #define __put_user_size(x, ptr, size, retval, errret) \
 do { \
 	retval = 0; \
+	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__chk_user_ptr(ptr); \
 	switch (size) { \
 	case 1: \
@@ -317,6 +328,9 @@ do { \
 #define __get_user_size(x, ptr, size, retval, errret) \
 do { \
 	retval = 0; \
+	might_sleep(); \
+	if (current->mm) \
+		might_lock_read(&current->mm->mmap_sem); \
 	__chk_user_ptr(ptr); \
 	switch (size) { \
 	case 1: \
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 6fdef39a0bcb..d725e2d703f7 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -82,8 +82,10 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
-	return __copy_to_user_inatomic(to, from, n);
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -138,6 +140,8 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -160,6 +164,8 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b5..40a7205fe576 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -28,6 +28,10 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -70,6 +74,10 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -112,6 +120,10 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
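
For reference, the class of bug these annotations are meant to surface is a caller that touches user memory while already holding mmap_sem: if the copy faults, the fault handler tries to take mmap_sem again and the task deadlocks. A minimal sketch of such a caller (the function and buffer names are made up for illustration, not taken from this patch):

	#include <linux/mm.h>
	#include <linux/sched.h>
	#include <linux/uaccess.h>

	/* Deliberately buggy: copies from user space with mmap_sem held. */
	static int buggy_copy(void __user *uptr, char *kbuf, size_t len)
	{
		struct mm_struct *mm = current->mm;
		int ret = 0;

		down_write(&mm->mmap_sem);		/* take mmap_sem for write ... */
		if (copy_from_user(kbuf, uptr, len))	/* ... then copy: a fault would deadlock */
			ret = -EFAULT;
		up_write(&mm->mmap_sem);
		return ret;
	}

Previously this only blew up if the copy happened to fault; with might_lock_read() in the copy paths, lockdep can flag the write-lock-then-fault pattern on the first run, whether or not a fault actually occurs.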