 arch/x86/include/asm/processor.h | 9 +++++----
 arch/x86/kernel/ptrace.c         | 2 +-
 arch/x86/mm/fault.c              | 2 +-
 arch/x86/vdso/vma.c              | 4 ++--
 4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 72914d0315e9..c7a98f738210 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -861,6 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
  * User space process size: 3GB (default).
  */
 #define TASK_SIZE		PAGE_OFFSET
+#define TASK_SIZE_MAX		TASK_SIZE
 #define STACK_TOP		TASK_SIZE
 #define STACK_TOP_MAX		STACK_TOP
 
@@ -920,7 +921,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE64	((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
@@ -929,12 +930,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 					0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
-					IA32_PAGE_OFFSET : TASK_SIZE64)
+					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
-					IA32_PAGE_OFFSET : TASK_SIZE64)
+					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP		TASK_SIZE
-#define STACK_TOP_MAX		TASK_SIZE64
+#define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  { \
 	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
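
With this change the 64-bit limit keeps its value but gains a name that also exists on 32-bit, where TASK_SIZE_MAX simply aliases TASK_SIZE (i.e. PAGE_OFFSET). A minimal sketch of how a caller can now bound-check a user range without an #ifdef; user_addr_fits() is a hypothetical helper, not part of this patch:

/*
 * Hypothetical helper, not part of this patch: TASK_SIZE_MAX is now
 * defined for both configurations, so a fixed upper-bound check needs
 * no #ifdef. On 32-bit it equals TASK_SIZE (PAGE_OFFSET); on 64-bit it
 * is the constant (1UL << 47) - PAGE_SIZE, while TASK_SIZE itself still
 * varies with TIF_IA32 for compat tasks.
 */
static inline int user_addr_fits(unsigned long addr, unsigned long len)
{
	/* Overflow-safe: check len first, then the start address. */
	return len <= TASK_SIZE_MAX && addr <= TASK_SIZE_MAX - len;
}
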
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index d2f7cd5b2c83..fb2159a5c817 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -268,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
 	if (test_tsk_thread_flag(task, TIF_IA32))
 		return IA32_PAGE_OFFSET - 3;
 #endif
-	return TASK_SIZE64 - 7;
+	return TASK_SIZE_MAX - 7;
 }
 
 #endif /* CONFIG_X86_32 */
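
The subtraction accounts for the widest hardware watchpoint: 8 bytes for a 64-bit task (hence -7) and 4 bytes for an IA32 task (hence -3), so a breakpoint anchored below the returned limit cannot reach past the end of user space. A tiny standalone illustration of that arithmetic, assuming addresses at or above the limit are rejected (constants hard-coded; not kernel code):

#include <assert.h>

int main(void)
{
	unsigned long task_size_max = (1UL << 47) - 4096;	/* 64-bit user limit */
	unsigned long limit = task_size_max - 7;		/* debugreg_addr_limit() */
	unsigned long addr = limit - 1;				/* highest address below the limit */

	/* An 8-byte watchpoint anchored below the limit stays entirely
	 * within user space: its last byte is still below TASK_SIZE_MAX. */
	assert(addr + 7 < task_size_max);
	return 0;
}
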
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9c2dc5d79531..6fa9f175cba3 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -963,7 +963,7 @@ static int fault_in_kernel_space(unsigned long address)
 #ifdef CONFIG_X86_32
 	return address >= TASK_SIZE;
 #else
-	return address >= TASK_SIZE64;
+	return address >= TASK_SIZE_MAX;
 #endif
 }
 
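
Since the 32-bit headers now define TASK_SIZE_MAX as well (equal to TASK_SIZE there), this #ifdef could in principle collapse into a single comparison. A possible follow-up sketch, not part of this patch:

/* Possible follow-up, not part of this patch: with TASK_SIZE_MAX
 * defined on both 32-bit and 64-bit, the #ifdef above becomes
 * unnecessary and the check reads the same everywhere. */
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}
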
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 9c98cc6ba978..7133cdf9098b 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 	unsigned long addr, end;
 	unsigned offset;
 	end = (start + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE64)
-		end = TASK_SIZE64;
+	if (end >= TASK_SIZE_MAX)
+		end = TASK_SIZE_MAX;
 	end -= len;
 	/* This loses some more bits than a modulo, but is cheaper */
 	offset = get_random_int() & (PTRS_PER_PTE - 1);
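
For reference, the mask keeps only the low bits of the random value (PTRS_PER_PTE is 512 with 4K pages on x86-64), giving a page-granular offset confined to one PMD's worth of address space; compared with a modulo by the exact number of available pages this discards some randomness but avoids a division, which is what the in-line comment alludes to. A tiny standalone illustration of the mask (constants hard-coded; not kernel code):

#include <assert.h>

int main(void)
{
	const unsigned long page_shift   = 12;		/* 4K pages */
	const unsigned long ptrs_per_pte = 512;		/* one PMD covers 512 PTEs */
	unsigned int random = 0xdeadbeefu;		/* stand-in for get_random_int() */

	unsigned long offset = random & (ptrs_per_pte - 1);	/* low 9 bits only */
	unsigned long bytes  = offset << page_shift;		/* offset in bytes */

	/* The offset can never move the mapping by more than one PMD (2 MiB). */
	assert(bytes < (ptrs_per_pte << page_shift));
	return 0;
}
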