author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 18:40:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 18:40:00 -0400
commit		9063c61fd5cbd6f42e95929aa0e02380c9e15656
tree		0783122dfdf16ce729ebf7b851b145f24e1edae0 /arch/x86
parent		2453d6ff6ffc5f0d496b7b14f509a26f99bf115e
x86, 64-bit: Clean up user address masking
The discussion about using "access_ok()" in get_user_pages_fast() (see
commit 7f8189068726492950bf1a2dcfd9b51314560abf: "x86: don't use
'access_ok()' as a range check in get_user_pages_fast()" for details and
end result) made us notice that x86-64 was really being very sloppy
about virtual address checking.

So be way more careful and straightforward about masking x86-64 virtual
addresses:

 - All the VIRTUAL_MASK* variants now cover half of the address space;
   it's not like we can use the full mask on a signed integer, and the
   larger mask just invites mistakes when applying it to either half of
   the 48-bit address space.

 - /proc/kcore's kc_offset_to_vaddr() becomes a lot more obvious when it
   transforms a file offset into a (kernel-half) virtual address.

 - Unify/simplify the 32-bit and 64-bit USER_DS definitions to be based
   on TASK_SIZE_MAX.

This cleanup and more careful/obvious user virtual address checking also
uncovered a buglet in the x86-64 implementation of strnlen_user(): it
would do an "access_ok()" check on the whole potential area, even if the
string itself was much shorter, and thus return an error even for valid
strings.  Our sloppy checking had hidden this.

So this fixes 'strnlen_user()' to do this properly, the same way we
already handle user strings in 'strncpy_from_user()': just check the
first byte, and then rely on fault handling for the rest.  That always
works, since we impose a guard page that cannot be mapped at the end of
the user address space (and even if we didn't, we'd have the address
space hole).

Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
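As a stand-alone illustration of the first point (ordinary user-space C,
not kernel code; it assumes __VIRTUAL_MASK is derived from the shift as
((1UL << __VIRTUAL_MASK_SHIFT) - 1) and 48-bit virtual addresses), here
is what the 47-bit mask and its complement look like:

#include <stdio.h>

#define __VIRTUAL_MASK_SHIFT	47
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

int main(void)
{
	/* 0x00007fffffffffff: covers only the lower (user) canonical half */
	printf("__VIRTUAL_MASK  = %#018lx\n", __VIRTUAL_MASK);
	/* 0xffff800000000000: the sign-extension bits of the kernel half */
	printf("~__VIRTUAL_MASK = %#018lx\n", ~__VIRTUAL_MASK);
	return 0;
}

With the mask covering only the lower half, its complement is exactly
the set of bits that sign extension sets in every kernel-half address,
which is what the kc_offset_to_vaddr() simplification below relies on.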
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/page_64_types.h	2
-rw-r--r--	arch/x86/include/asm/pgtable_64.h	5
-rw-r--r--	arch/x86/include/asm/uaccess.h		7
-rw-r--r--	arch/x86/lib/usercopy_64.c		2
4 files changed, 4 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8d382d3abf38..7639dbf5d223 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -41,7 +41,7 @@
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
 #define __PHYSICAL_MASK_SHIFT	46
-#define __VIRTUAL_MASK_SHIFT	48
+#define __VIRTUAL_MASK_SHIFT	47
 
 /*
  * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index abde308fdb0f..c57a30117149 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -165,10 +165,7 @@ extern void cleanup_highmap(void);
 
 /* fs/proc/kcore.c */
 #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define kc_offset_to_vaddr(o) \
-	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \
-	 ? ((o) | ~__VIRTUAL_MASK) \
-	 : (o))
+#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
 
 #define __HAVE_ARCH_PTE_SAME
 #endif /* !__ASSEMBLY__ */
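A stand-alone round-trip check of the simplified macros (ordinary
user-space C, not kernel code; __VIRTUAL_MASK is again assumed to be
((1UL << __VIRTUAL_MASK_SHIFT) - 1), and the kernel address used is an
arbitrary example): since every kernel-half address has all of the
~__VIRTUAL_MASK bits set, kc_vaddr_to_offset() strips exactly those bits
and kc_offset_to_vaddr() can OR them back unconditionally.

#include <assert.h>
#include <stdio.h>

#define __VIRTUAL_MASK_SHIFT	47
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

#define kc_vaddr_to_offset(v)	((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o)	((o) | ~__VIRTUAL_MASK)

int main(void)
{
	/* an arbitrary kernel-half (upper canonical) virtual address */
	unsigned long vaddr = 0xffffffff81000000UL;
	unsigned long off = kc_vaddr_to_offset(vaddr);

	/* the offset maps back to the original kernel virtual address */
	assert(kc_offset_to_vaddr(off) == vaddr);
	printf("vaddr %#lx -> offset %#lx -> vaddr %#lx\n",
	       vaddr, off, kc_offset_to_vaddr(off));
	return 0;
}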
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 512ee87062c2..20e6a795e160 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -25,12 +25,7 @@
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
 #define KERNEL_DS	MAKE_MM_SEG(-1UL)
-
-#ifdef CONFIG_X86_32
-# define USER_DS	MAKE_MM_SEG(PAGE_OFFSET)
-#else
-# define USER_DS	MAKE_MM_SEG(__VIRTUAL_MASK)
-#endif
+#define USER_DS	MAKE_MM_SEG(TASK_SIZE_MAX)
 
 #define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
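For context on the USER_DS change, a minimal sketch of the mm_segment_t
idea (ordinary user-space C, not kernel code; the TASK_SIZE_MAX value
below assumes the 64-bit definition of ((1UL << 47) - PAGE_SIZE) with 4K
pages): the segment's seg value is the limit that the uaccess range
checks compare against, and the unified USER_DS now carries
TASK_SIZE_MAX on both 32-bit and 64-bit instead of PAGE_OFFSET vs.
__VIRTUAL_MASK.

#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define PAGE_SIZE	4096UL
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)	/* assumed x86-64 value */

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

int main(void)
{
	/* USER_DS stops just below the user half; KERNEL_DS allows everything */
	printf("USER_DS limit   = %#lx\n", USER_DS.seg);
	printf("KERNEL_DS limit = %#lx\n", KERNEL_DS.seg);
	return 0;
}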
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ec13cb5f17ed..b7c2849ffb66 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(__strnlen_user);
 
 long strnlen_user(const char __user *s, long n)
 {
-	if (!access_ok(VERIFY_READ, s, n))
+	if (!access_ok(VERIFY_READ, s, 1))
 		return 0;
 	return __strnlen_user(s, n);
 }
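A stand-alone model of the buglet and the fix (ordinary user-space C,
not kernel code; access_ok() is approximated here by a hypothetical
range_ok() limit check and TASK_SIZE_MAX by its assumed 64-bit value): a
short, perfectly valid string placed just below the user address-space
limit fails the old whole-'n' check but passes the new first-byte check,
with fault handling and the guard page taking care of the actual walk.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)	/* assumed x86-64 value */

/* crude stand-in for the access_ok() range check */
static bool range_ok(unsigned long addr, unsigned long size)
{
	return size <= TASK_SIZE_MAX && addr <= TASK_SIZE_MAX - size;
}

int main(void)
{
	unsigned long s = TASK_SIZE_MAX - 16;	/* a valid, short string lives here */
	long n = 4096;				/* caller's maximum length */

	/* old check: the whole potential area had to fit -> spurious failure */
	printf("access_ok(s, n) -> %d\n", range_ok(s, n));
	/* new check: only the start has to be a user address -> accepted */
	printf("access_ok(s, 1) -> %d\n", range_ok(s, 1));
	return 0;
}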