about summary refs log tree commit diff stats
path: root/arch/x86/mm/gup.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-06-20 12:52:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-20 12:52:27 -0400
commit7f8189068726492950bf1a2dcfd9b51314560abf (patch)
tree0fa57e4f3d4739200147a67b236e7ec8326efed3 /arch/x86/mm/gup.c
parentfb20871a54961b82d35303b43452928186c1361d (diff)
x86: don't use 'access_ok()' as a range check in get_user_pages_fast()
It's really not right to use 'access_ok()', since that is meant for the normal "get_user()" and "copy_from/to_user()" accesses, which are done through the TLB, rather than through the page tables.

Why? access_ok() does both too few, and too many checks. Too many, because it is meant for regular kernel accesses that will not honor the 'user' bit in the page tables, and because it honors the USER_DS vs KERNEL_DS distinction that we shouldn't care about in GUP. And too few, because it doesn't do the 'canonical' check on the address on x86-64, since the TLB will do that for us.

So instead of using a function that isn't meant for this, and does something else and much more complicated, just do the real rules: we don't want the range to overflow, and on x86-64, we want it to be a canonical low address (on 32-bit, all addresses are canonical).

Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/mm/gup.c')
-rw-r--r-- arch/x86/mm/gup.c | 9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6340cef6798a..f97480941269 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -247,10 +247,15 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	start &= PAGE_MASK;
 	addr = start;
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
+
 	end = start + len;
-	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					(void __user *)start, len)))
+	if (end < start)
+		goto slow_irqon;
+
+#ifdef CONFIG_X86_64
+	if (end >> __VIRTUAL_MASK_SHIFT)
 		goto slow_irqon;
+#endif
 
 	/*
 	 * XXX: batch / limit 'nr', to avoid large irq off latency