aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-07-02 19:19:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-07-02 19:19:24 -0400
commite13053f50664d3d614bbc9b8c83abdad979ac7c9 (patch)
tree07ee41cd50ba26bd7ec92255184f80aff70a2e9a /arch
parent2d722f6d5671794c0de0e29e3da75006ac086718 (diff)
parent662bbcb2747c2422cf98d3d97619509379eee466 (diff)
Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull voluntary preemption fixes from Ingo Molnar: "This tree contains a speedup which is achieved through better might_sleep()/might_fault() preemption point annotations for uaccess functions, by Michael S. Tsirkin: 1. The only reason uaccess routines might sleep is if they fault. Make this explicit for all architectures. 2. A voluntary preemption point in uaccess functions means compiler can't inline them efficiently, this breaks assumptions that they are very fast and small that e.g. net code seems to make. Remove this preemption point so behaviour matches with what callers assume. 3. Accesses (e.g through socket ops) to kernel memory with KERNEL_DS like net/sunrpc does will never sleep. Remove an unconditional might_sleep() in the might_fault() inline in kernel.h (used when PROVE_LOCKING is not set). 4. Accesses with pagefault_disable() return EFAULT but won't cause caller to sleep. Check for that and thus avoid might_sleep() when PROVE_LOCKING is set. These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y kernels, here's a network bandwidth measurement between a virtual machine and the host: before: incoming: 7122.77 Mb/s outgoing: 8480.37 Mb/s after: incoming: 8619.24 Mb/s [ +21.0% ] outgoing: 9455.42 Mb/s [ +11.5% ] I kept these changes in a separate tree, separate from scheduler changes, because it's a mixed MM and scheduler topic" * 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: mm, sched: Allow uaccess in atomic with pagefault_disable() mm, sched: Drop voluntary schedule from might_fault() x86: uaccess s/might_sleep/might_fault/ tile: uaccess s/might_sleep/might_fault/ powerpc: uaccess s/might_sleep/might_fault/ mn10300: uaccess s/might_sleep/might_fault/ microblaze: uaccess s/might_sleep/might_fault/ m32r: uaccess s/might_sleep/might_fault/ frv: uaccess s/might_sleep/might_fault/ arm64: uaccess s/might_sleep/might_fault/ asm-generic: uaccess s/might_sleep/might_fault/
Diffstat (limited to 'arch')
-rw-r--r--arch/arm64/include/asm/uaccess.h4
-rw-r--r--arch/frv/include/asm/uaccess.h4
-rw-r--r--arch/m32r/include/asm/uaccess.h12
-rw-r--r--arch/microblaze/include/asm/uaccess.h6
-rw-r--r--arch/mn10300/include/asm/uaccess.h4
-rw-r--r--arch/powerpc/include/asm/uaccess.h16
-rw-r--r--arch/tile/include/asm/uaccess.h2
-rw-r--r--arch/x86/include/asm/uaccess_64.h2
8 files changed, 25 insertions, 25 deletions
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 008f8481da65..edb3d5c73a32 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,7 +166,7 @@ do { \
166 166
167#define get_user(x, ptr) \ 167#define get_user(x, ptr) \
168({ \ 168({ \
169 might_sleep(); \ 169 might_fault(); \
170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \ 170 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
171 __get_user((x), (ptr)) : \ 171 __get_user((x), (ptr)) : \
172 ((x) = 0, -EFAULT); \ 172 ((x) = 0, -EFAULT); \
@@ -227,7 +227,7 @@ do { \
227 227
228#define put_user(x, ptr) \ 228#define put_user(x, ptr) \
229({ \ 229({ \
230 might_sleep(); \ 230 might_fault(); \
231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \ 231 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
232 __put_user((x), (ptr)) : \ 232 __put_user((x), (ptr)) : \
233 -EFAULT; \ 233 -EFAULT; \
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 0b67ec5b4414..3ac9a59d65d4 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -280,14 +280,14 @@ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
280static inline unsigned long __must_check 280static inline unsigned long __must_check
281__copy_to_user(void __user *to, const void *from, unsigned long n) 281__copy_to_user(void __user *to, const void *from, unsigned long n)
282{ 282{
283 might_sleep(); 283 might_fault();
284 return __copy_to_user_inatomic(to, from, n); 284 return __copy_to_user_inatomic(to, from, n);
285} 285}
286 286
287static inline unsigned long 287static inline unsigned long
288__copy_from_user(void *to, const void __user *from, unsigned long n) 288__copy_from_user(void *to, const void __user *from, unsigned long n)
289{ 289{
290 might_sleep(); 290 might_fault();
291 return __copy_from_user_inatomic(to, from, n); 291 return __copy_from_user_inatomic(to, from, n);
292} 292}
293 293
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047bea200..84fe7ba53035 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
216({ \ 216({ \
217 long __gu_err = 0; \ 217 long __gu_err = 0; \
218 unsigned long __gu_val; \ 218 unsigned long __gu_val; \
219 might_sleep(); \ 219 might_fault(); \
220 __get_user_size(__gu_val,(ptr),(size),__gu_err); \ 220 __get_user_size(__gu_val,(ptr),(size),__gu_err); \
221 (x) = (__typeof__(*(ptr)))__gu_val; \ 221 (x) = (__typeof__(*(ptr)))__gu_val; \
222 __gu_err; \ 222 __gu_err; \
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
227 long __gu_err = -EFAULT; \ 227 long __gu_err = -EFAULT; \
228 unsigned long __gu_val = 0; \ 228 unsigned long __gu_val = 0; \
229 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 229 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
230 might_sleep(); \ 230 might_fault(); \
231 if (access_ok(VERIFY_READ,__gu_addr,size)) \ 231 if (access_ok(VERIFY_READ,__gu_addr,size)) \
232 __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ 232 __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
233 (x) = (__typeof__(*(ptr)))__gu_val; \ 233 (x) = (__typeof__(*(ptr)))__gu_val; \
@@ -295,7 +295,7 @@ do { \
295#define __put_user_nocheck(x,ptr,size) \ 295#define __put_user_nocheck(x,ptr,size) \
296({ \ 296({ \
297 long __pu_err; \ 297 long __pu_err; \
298 might_sleep(); \ 298 might_fault(); \
299 __put_user_size((x),(ptr),(size),__pu_err); \ 299 __put_user_size((x),(ptr),(size),__pu_err); \
300 __pu_err; \ 300 __pu_err; \
301}) 301})
@@ -305,7 +305,7 @@ do { \
305({ \ 305({ \
306 long __pu_err = -EFAULT; \ 306 long __pu_err = -EFAULT; \
307 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 307 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
308 might_sleep(); \ 308 might_fault(); \
309 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ 309 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
310 __put_user_size((x),__pu_addr,(size),__pu_err); \ 310 __put_user_size((x),__pu_addr,(size),__pu_err); \
311 __pu_err; \ 311 __pu_err; \
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
597 */ 597 */
598#define copy_to_user(to,from,n) \ 598#define copy_to_user(to,from,n) \
599({ \ 599({ \
600 might_sleep(); \ 600 might_fault(); \
601 __generic_copy_to_user((to),(from),(n)); \ 601 __generic_copy_to_user((to),(from),(n)); \
602}) 602})
603 603
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
638 */ 638 */
639#define copy_from_user(to,from,n) \ 639#define copy_from_user(to,from,n) \
640({ \ 640({ \
641 might_sleep(); \ 641 might_fault(); \
642 __generic_copy_from_user((to),(from),(n)); \ 642 __generic_copy_from_user((to),(from),(n)); \
643}) 643})
644 644
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 04e49553bdf9..0aa005703a0b 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -145,7 +145,7 @@ static inline unsigned long __must_check __clear_user(void __user *to,
145static inline unsigned long __must_check clear_user(void __user *to, 145static inline unsigned long __must_check clear_user(void __user *to,
146 unsigned long n) 146 unsigned long n)
147{ 147{
148 might_sleep(); 148 might_fault();
149 if (unlikely(!access_ok(VERIFY_WRITE, to, n))) 149 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
150 return n; 150 return n;
151 151
@@ -371,7 +371,7 @@ extern long __user_bad(void);
371static inline long copy_from_user(void *to, 371static inline long copy_from_user(void *to,
372 const void __user *from, unsigned long n) 372 const void __user *from, unsigned long n)
373{ 373{
374 might_sleep(); 374 might_fault();
375 if (access_ok(VERIFY_READ, from, n)) 375 if (access_ok(VERIFY_READ, from, n))
376 return __copy_from_user(to, from, n); 376 return __copy_from_user(to, from, n);
377 return n; 377 return n;
@@ -385,7 +385,7 @@ static inline long copy_from_user(void *to,
385static inline long copy_to_user(void __user *to, 385static inline long copy_to_user(void __user *to,
386 const void *from, unsigned long n) 386 const void *from, unsigned long n)
387{ 387{
388 might_sleep(); 388 might_fault();
389 if (access_ok(VERIFY_WRITE, to, n)) 389 if (access_ok(VERIFY_WRITE, to, n))
390 return __copy_to_user(to, from, n); 390 return __copy_to_user(to, from, n);
391 return n; 391 return n;
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index d7966e0f7698..537278746a15 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -471,13 +471,13 @@ extern unsigned long __generic_copy_from_user(void *, const void __user *,
471 471
472#define __copy_to_user(to, from, n) \ 472#define __copy_to_user(to, from, n) \
473({ \ 473({ \
474 might_sleep(); \ 474 might_fault(); \
475 __copy_to_user_inatomic((to), (from), (n)); \ 475 __copy_to_user_inatomic((to), (from), (n)); \
476}) 476})
477 477
478#define __copy_from_user(to, from, n) \ 478#define __copy_from_user(to, from, n) \
479({ \ 479({ \
480 might_sleep(); \ 480 might_fault(); \
481 __copy_from_user_inatomic((to), (from), (n)); \ 481 __copy_from_user_inatomic((to), (from), (n)); \
482}) 482})
483 483
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4db49590acf5..9485b43a7c00 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -178,7 +178,7 @@ do { \
178 long __pu_err; \ 178 long __pu_err; \
179 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 179 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
180 if (!is_kernel_addr((unsigned long)__pu_addr)) \ 180 if (!is_kernel_addr((unsigned long)__pu_addr)) \
181 might_sleep(); \ 181 might_fault(); \
182 __chk_user_ptr(ptr); \ 182 __chk_user_ptr(ptr); \
183 __put_user_size((x), __pu_addr, (size), __pu_err); \ 183 __put_user_size((x), __pu_addr, (size), __pu_err); \
184 __pu_err; \ 184 __pu_err; \
@@ -188,7 +188,7 @@ do { \
188({ \ 188({ \
189 long __pu_err = -EFAULT; \ 189 long __pu_err = -EFAULT; \
190 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 190 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
191 might_sleep(); \ 191 might_fault(); \
192 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ 192 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
193 __put_user_size((x), __pu_addr, (size), __pu_err); \ 193 __put_user_size((x), __pu_addr, (size), __pu_err); \
194 __pu_err; \ 194 __pu_err; \
@@ -268,7 +268,7 @@ do { \
268 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 268 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
269 __chk_user_ptr(ptr); \ 269 __chk_user_ptr(ptr); \
270 if (!is_kernel_addr((unsigned long)__gu_addr)) \ 270 if (!is_kernel_addr((unsigned long)__gu_addr)) \
271 might_sleep(); \ 271 might_fault(); \
272 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 272 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
273 (x) = (__typeof__(*(ptr)))__gu_val; \ 273 (x) = (__typeof__(*(ptr)))__gu_val; \
274 __gu_err; \ 274 __gu_err; \
@@ -282,7 +282,7 @@ do { \
282 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 282 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
283 __chk_user_ptr(ptr); \ 283 __chk_user_ptr(ptr); \
284 if (!is_kernel_addr((unsigned long)__gu_addr)) \ 284 if (!is_kernel_addr((unsigned long)__gu_addr)) \
285 might_sleep(); \ 285 might_fault(); \
286 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 286 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
287 (x) = (__typeof__(*(ptr)))__gu_val; \ 287 (x) = (__typeof__(*(ptr)))__gu_val; \
288 __gu_err; \ 288 __gu_err; \
@@ -294,7 +294,7 @@ do { \
294 long __gu_err = -EFAULT; \ 294 long __gu_err = -EFAULT; \
295 unsigned long __gu_val = 0; \ 295 unsigned long __gu_val = 0; \
296 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 296 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
297 might_sleep(); \ 297 might_fault(); \
298 if (access_ok(VERIFY_READ, __gu_addr, (size))) \ 298 if (access_ok(VERIFY_READ, __gu_addr, (size))) \
299 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 299 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
300 (x) = (__typeof__(*(ptr)))__gu_val; \ 300 (x) = (__typeof__(*(ptr)))__gu_val; \
@@ -419,14 +419,14 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
419static inline unsigned long __copy_from_user(void *to, 419static inline unsigned long __copy_from_user(void *to,
420 const void __user *from, unsigned long size) 420 const void __user *from, unsigned long size)
421{ 421{
422 might_sleep(); 422 might_fault();
423 return __copy_from_user_inatomic(to, from, size); 423 return __copy_from_user_inatomic(to, from, size);
424} 424}
425 425
426static inline unsigned long __copy_to_user(void __user *to, 426static inline unsigned long __copy_to_user(void __user *to,
427 const void *from, unsigned long size) 427 const void *from, unsigned long size)
428{ 428{
429 might_sleep(); 429 might_fault();
430 return __copy_to_user_inatomic(to, from, size); 430 return __copy_to_user_inatomic(to, from, size);
431} 431}
432 432
@@ -434,7 +434,7 @@ extern unsigned long __clear_user(void __user *addr, unsigned long size);
434 434
435static inline unsigned long clear_user(void __user *addr, unsigned long size) 435static inline unsigned long clear_user(void __user *addr, unsigned long size)
436{ 436{
437 might_sleep(); 437 might_fault();
438 if (likely(access_ok(VERIFY_WRITE, addr, size))) 438 if (likely(access_ok(VERIFY_WRITE, addr, size)))
439 return __clear_user(addr, size); 439 return __clear_user(addr, size);
440 if ((unsigned long)addr < TASK_SIZE) { 440 if ((unsigned long)addr < TASK_SIZE) {
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 8a082bc6bca5..e4d44bd7df27 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -442,7 +442,7 @@ extern unsigned long __copy_in_user_inatomic(
442static inline unsigned long __must_check 442static inline unsigned long __must_check
443__copy_in_user(void __user *to, const void __user *from, unsigned long n) 443__copy_in_user(void __user *to, const void __user *from, unsigned long n)
444{ 444{
445 might_sleep(); 445 might_fault();
446 return __copy_in_user_inatomic(to, from, n); 446 return __copy_in_user_inatomic(to, from, n);
447} 447}
448 448
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c457dc..4f7923dd0007 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -235,7 +235,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
235static inline int 235static inline int
236__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) 236__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
237{ 237{
238 might_sleep(); 238 might_fault();
239 return __copy_user_nocache(dst, src, size, 1); 239 return __copy_user_nocache(dst, src, size, 1);
240} 240}
241 241