author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:19:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:19:24 -0400
commit		e13053f50664d3d614bbc9b8c83abdad979ac7c9
tree		07ee41cd50ba26bd7ec92255184f80aff70a2e9a /arch/m32r/include
parent		2d722f6d5671794c0de0e29e3da75006ac086718
parent		662bbcb2747c2422cf98d3d97619509379eee466
Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull voluntary preemption fixes from Ingo Molnar:
 "This tree contains a speedup which is achieved through better
  might_sleep()/might_fault() preemption point annotations for uaccess
  functions, by Michael S. Tsirkin:

  1. The only reason uaccess routines might sleep is if they fault.
     Make this explicit for all architectures.

  2. A voluntary preemption point in uaccess functions means the
     compiler can't inline them efficiently; this breaks the assumption
     that they are very fast and small, which e.g. net code seems to
     make.  Remove this preemption point so behaviour matches what
     callers assume.

  3. Accesses (e.g. through socket ops) to kernel memory with
     KERNEL_DS, as net/sunrpc does, will never sleep.  Remove the
     unconditional might_sleep() from the might_fault() inline in
     kernel.h (used when PROVE_LOCKING is not set).

  4. Accesses with pagefault_disable() return EFAULT but won't cause
     the caller to sleep.  Check for that and thus avoid might_sleep()
     when PROVE_LOCKING is set.

  These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y
  kernels; here's a network bandwidth measurement between a virtual
  machine and the host:

     before:
        incoming: 7122.77 Mb/s
        outgoing: 8480.37 Mb/s

     after:
        incoming: 8619.24 Mb/s [ +21.0% ]
        outgoing: 9455.42 Mb/s [ +11.5% ]

  I kept these changes in a separate tree, separate from scheduler
  changes, because it's a mixed MM and scheduler topic"

* 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm, sched: Allow uaccess in atomic with pagefault_disable()
  mm, sched: Drop voluntary schedule from might_fault()
  x86: uaccess s/might_sleep/might_fault/
  tile: uaccess s/might_sleep/might_fault/
  powerpc: uaccess s/might_sleep/might_fault/
  mn10300: uaccess s/might_sleep/might_fault/
  microblaze: uaccess s/might_sleep/might_fault/
  m32r: uaccess s/might_sleep/might_fault/
  frv: uaccess s/might_sleep/might_fault/
  arm64: uaccess s/might_sleep/might_fault/
  asm-generic: uaccess s/might_sleep/might_fault/
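For context, a minimal C sketch of the PROVE_LOCKING might_fault() behaviour
that points 3 and 4 above describe. This is reconstructed from the commit
message, not copied from the tree; segment_eq()/get_fs()/KERNEL_DS and
in_atomic() are the 2013-era uaccess/preempt APIs, and the exact in-tree
body may differ:

/* Sketch only: a might_fault() honouring points 3 and 4 above. */
void might_fault(void)
{
	/*
	 * Point 3: accesses through KERNEL_DS (e.g. net/sunrpc using
	 * socket ops on kernel memory) can never fault, hence never
	 * sleep -- don't warn about them.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	/*
	 * Point 4: under pagefault_disable() a fault returns -EFAULT
	 * to the caller instead of sleeping; pagefault_disable()
	 * raises the preempt count, so in_atomic() catches it.
	 */
	if (in_atomic())
		return;

	might_sleep();
}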
Diffstat (limited to 'arch/m32r/include')
 arch/m32r/include/asm/uaccess.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047bea200..84fe7ba53035 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
 ({									\
 	long __gu_err = 0;						\
 	unsigned long __gu_val;						\
-	might_sleep();							\
+	might_fault();							\
 	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 	__gu_err;							\
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
 	long __gu_err = -EFAULT;					\
 	unsigned long __gu_val = 0;					\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	might_sleep();							\
+	might_fault();							\
 	if (access_ok(VERIFY_READ,__gu_addr,size))			\
 		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
@@ -295,7 +295,7 @@ do { \
 #define __put_user_nocheck(x,ptr,size)					\
 ({									\
 	long __pu_err;							\
-	might_sleep();							\
+	might_fault();							\
 	__put_user_size((x),(ptr),(size),__pu_err);			\
 	__pu_err;							\
 })
@@ -305,7 +305,7 @@ do { \
 ({									\
 	long __pu_err = -EFAULT;					\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	might_sleep();							\
+	might_fault();							\
 	if (access_ok(VERIFY_WRITE,__pu_addr,size))			\
 		__put_user_size((x),__pu_addr,(size),__pu_err);		\
 	__pu_err;							\
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
  */
 #define copy_to_user(to,from,n)					\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__generic_copy_to_user((to),(from),(n));		\
 })
 
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
  */
 #define copy_from_user(to,from,n)				\
 ({								\
-	might_sleep();						\
+	might_fault();						\
 	__generic_copy_from_user((to),(from),(n));		\
 })
 
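Why the s/might_sleep/might_fault/ substitutions above help: under
CONFIG_PREEMPT_VOLUNTARY, might_sleep() expands to a voluntary preemption
point (a real call into the scheduler), which keeps these small uaccess
macros from inlining well; after this series the !PROVE_LOCKING
might_fault() compiles to nothing. A hedged sketch of the relevant
2013-era kernel.h definitions (reconstructed from the commit description;
the exact in-tree macros may differ):

/* With CONFIG_PREEMPT_VOLUNTARY, might_sleep() is a preemption point: */
#define might_resched()	_cond_resched()
#define might_sleep()	do { might_resched(); } while (0)

/* After this series, without PROVE_LOCKING, might_fault() is empty, so
 * __get_user()/__put_user()/copy_*_user() stay small and inlinable: */
static inline void might_fault(void) { }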