author	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:19:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:19:24 -0400
commit	e13053f50664d3d614bbc9b8c83abdad979ac7c9 (patch)
tree	07ee41cd50ba26bd7ec92255184f80aff70a2e9a /arch/microblaze
parent	2d722f6d5671794c0de0e29e3da75006ac086718 (diff)
parent	662bbcb2747c2422cf98d3d97619509379eee466 (diff)
Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull voluntary preemption fixes from Ingo Molnar:
 "This tree contains a speedup which is achieved through better
  might_sleep()/might_fault() preemption point annotations for uaccess
  functions, by Michael S. Tsirkin:

  1. The only reason uaccess routines might sleep is if they fault.
     Make this explicit for all architectures.

  2. A voluntary preemption point in uaccess functions means the
     compiler can't inline them efficiently; this breaks the assumption
     that they are very fast and small that e.g. net code seems to make.
     Remove this preemption point so the behaviour matches what callers
     assume.

  3. Accesses (e.g. through socket ops) to kernel memory with KERNEL_DS,
     as net/sunrpc does, will never sleep.  Remove the unconditional
     might_sleep() in the might_fault() inline in kernel.h (used when
     PROVE_LOCKING is not set).

  4. Accesses with pagefault_disable() return EFAULT but won't cause the
     caller to sleep.  Check for that and thus avoid might_sleep() when
     PROVE_LOCKING is set.

  These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y
  kernels; here's a network bandwidth measurement between a virtual
  machine and the host:

   before:
     incoming: 7122.77 Mb/s
     outgoing: 8480.37 Mb/s

   after:
     incoming: 8619.24 Mb/s  [ +21.0% ]
     outgoing: 9455.42 Mb/s  [ +11.5% ]

  I kept these changes in a separate tree, separate from scheduler
  changes, because it's a mixed MM and scheduler topic"

* 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mm, sched: Allow uaccess in atomic with pagefault_disable()
  mm, sched: Drop voluntary schedule from might_fault()
  x86: uaccess s/might_sleep/might_fault/
  tile: uaccess s/might_sleep/might_fault/
  powerpc: uaccess s/might_sleep/might_fault/
  mn10300: uaccess s/might_sleep/might_fault/
  microblaze: uaccess s/might_sleep/might_fault/
  m32r: uaccess s/might_sleep/might_fault/
  frv: uaccess s/might_sleep/might_fault/
  arm64: uaccess s/might_sleep/might_fault/
  asm-generic: uaccess s/might_sleep/might_fault/
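For context, the effect of points 2-4 on the might_fault() annotation can be sketched roughly as below. This is an illustrative approximation of the behaviour the pull message describes, not the literal code merged here; the helpers segment_eq(), get_fs(), in_atomic() and __might_sleep() are era-appropriate kernel APIs, but the exact body in the merged patch may differ.

/*
 * Sketch only: approximates the post-merge might_fault() behaviour
 * described above, not the exact code of this merge.
 */
void might_fault(void)
{
	/*
	 * Point 3: KERNEL_DS accesses (e.g. net/sunrpc via socket ops)
	 * operate on kernel memory and never sleep on a fault, so the
	 * annotation must not warn for them.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	/*
	 * Point 4: with pagefaults disabled, a faulting uaccess returns
	 * an error to the caller instead of sleeping, so skip the check.
	 */
	if (in_atomic())
		return;

	/*
	 * Point 2: keep the "may sleep" debug annotation, but do not add
	 * a voluntary preemption point that defeats inlining of the fast
	 * uaccess paths.
	 */
	__might_sleep(__FILE__, __LINE__, 0);
}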
Diffstat (limited to 'arch/microblaze')
-rw-r--r--	arch/microblaze/include/asm/uaccess.h	6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 04e49553bdf9..0aa005703a0b 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -145,7 +145,7 @@ static inline unsigned long __must_check __clear_user(void __user *to,
 static inline unsigned long __must_check clear_user(void __user *to,
 							unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
 		return n;
 
@@ -371,7 +371,7 @@ extern long __user_bad(void);
 static inline long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_READ, from, n))
 		return __copy_from_user(to, from, n);
 	return n;
@@ -385,7 +385,7 @@ static inline long copy_from_user(void *to,
 static inline long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
 {
-	might_sleep();
+	might_fault();
 	if (access_ok(VERIFY_WRITE, to, n))
 		return __copy_to_user(to, from, n);
 	return n;
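The caller-side pattern that point 4 of the pull message legitimizes, calling these uaccess routines with pagefaults disabled and handling the short-copy return instead of sleeping, might look roughly like the following sketch. The helper name peek_user_word is hypothetical and not part of this merge; pagefault_disable()/pagefault_enable() and copy_from_user() are the standard kernel APIs.

#include <linux/uaccess.h>

/*
 * Hypothetical caller sketch: with pagefaults disabled, copy_from_user()
 * cannot sleep; it reports how many bytes it failed to copy, and after
 * this merge might_fault() no longer warns about this pattern.
 */
static int peek_user_word(const void __user *uaddr, unsigned long *val)
{
	unsigned long left;

	pagefault_disable();
	left = copy_from_user(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}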