aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-07-25 18:34:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-25 18:34:18 -0400
commit0f657262d5f99ad86b9a63fb5dcd29036c2ed916 (patch)
tree54b83052c019bc1dff662cb1b38cbff59d901535 /arch/x86/kernel
parent425dbc6db34dbd679cab1a17135c5910b271a03d (diff)
parent55920d31f1e3fea06702c74271dd56c4fc9b70ca (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar: "Various x86 low level modifications: - preparatory work to support virtually mapped kernel stacks (Andy Lutomirski) - support for 64-bit __get_user() on 32-bit kernels (Benjamin LaHaise) - (involved) workaround for Knights Landing CPU erratum (Dave Hansen) - MPX enhancements (Dave Hansen) - mremap() extension to allow remapping of the special VDSO vma, for purposes of user level context save/restore (Dmitry Safonov) - hweight and entry code cleanups (Borislav Petkov) - bitops code generation optimizations and cleanups with modern GCC (H. Peter Anvin) - syscall entry code optimizations (Paolo Bonzini)" * 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits) x86/mm/cpa: Add missing comment in populate_pdg() x86/mm/cpa: Fix populate_pgd(): Stop trying to deallocate failed PUDs x86/syscalls: Add compat_sys_preadv64v2/compat_sys_pwritev64v2 x86/smp: Remove unnecessary initialization of thread_info::cpu x86/smp: Remove stack_smp_processor_id() x86/uaccess: Move thread_info::addr_limit to thread_struct x86/dumpstack: Rename thread_struct::sig_on_uaccess_error to sig_on_uaccess_err x86/uaccess: Move thread_info::uaccess_err and thread_info::sig_on_uaccess_err to thread_struct x86/dumpstack: When OOPSing, rewind the stack before do_exit() x86/mm/64: In vmalloc_fault(), use CR3 instead of current->active_mm x86/dumpstack/64: Handle faults when printing the "Stack: " part of an OOPS x86/dumpstack: Try harder to get a call trace on stack overflow x86/mm: Remove kernel_unmap_pages_in_pgd() and efi_cleanup_page_tables() x86/mm/cpa: In populate_pgd(), don't set the PGD entry until it's populated x86/mm/hotplug: Don't remove PGD entries in remove_pagetable() x86/mm: Use pte_none() to test for empty PTE x86/mm: Disallow running with 32-bit PTEs to work around erratum x86/mm: Ignore A/D bits in pte/pmd/pud_none() x86/mm: Move swap offset/type up in PTE to work around erratum x86/entry: Inline 
enter_from_user_mode() ...
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/asm-offsets.c4
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/rdrand.c4
-rw-r--r--arch/x86/kernel/dumpstack.c20
-rw-r--r--arch/x86/kernel/dumpstack_64.c12
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c2
-rw-r--r--arch/x86/kernel/signal_compat.c108
-rw-r--r--arch/x86/kernel/smpboot.c1
-rw-r--r--arch/x86/kernel/vm86_32.c5
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c3
10 files changed, 148 insertions, 13 deletions
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 674134e9f5e5..2bd5c6ff7ee7 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -31,7 +31,9 @@ void common(void) {
31 BLANK(); 31 BLANK();
32 OFFSET(TI_flags, thread_info, flags); 32 OFFSET(TI_flags, thread_info, flags);
33 OFFSET(TI_status, thread_info, status); 33 OFFSET(TI_status, thread_info, status);
34 OFFSET(TI_addr_limit, thread_info, addr_limit); 34
35 BLANK();
36 OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
35 37
36 BLANK(); 38 BLANK();
37 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); 39 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0fe6953f421c..d22a7b9c4f0e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1452,7 +1452,7 @@ void cpu_init(void)
1452 struct task_struct *me; 1452 struct task_struct *me;
1453 struct tss_struct *t; 1453 struct tss_struct *t;
1454 unsigned long v; 1454 unsigned long v;
1455 int cpu = stack_smp_processor_id(); 1455 int cpu = raw_smp_processor_id();
1456 int i; 1456 int i;
1457 1457
1458 wait_for_master_cpu(cpu); 1458 wait_for_master_cpu(cpu);
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index f6f50c4ceaec..cfa97ff67bda 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -39,9 +39,9 @@ __setup("nordrand", x86_rdrand_setup);
39 */ 39 */
40#define SANITY_CHECK_LOOPS 8 40#define SANITY_CHECK_LOOPS 8
41 41
42#ifdef CONFIG_ARCH_RANDOM
42void x86_init_rdrand(struct cpuinfo_x86 *c) 43void x86_init_rdrand(struct cpuinfo_x86 *c)
43{ 44{
44#ifdef CONFIG_ARCH_RANDOM
45 unsigned long tmp; 45 unsigned long tmp;
46 int i; 46 int i;
47 47
@@ -55,5 +55,5 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
55 return; 55 return;
56 } 56 }
57 } 57 }
58#endif
59} 58}
59#endif
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index ef8017ca5ba9..de8242d8bb61 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -87,7 +87,7 @@ static inline int valid_stack_ptr(struct task_struct *task,
87 else 87 else
88 return 0; 88 return 0;
89 } 89 }
90 return p > t && p < t + THREAD_SIZE - size; 90 return p >= t && p < t + THREAD_SIZE - size;
91} 91}
92 92
93unsigned long 93unsigned long
@@ -98,6 +98,14 @@ print_context_stack(struct task_struct *task,
98{ 98{
99 struct stack_frame *frame = (struct stack_frame *)bp; 99 struct stack_frame *frame = (struct stack_frame *)bp;
100 100
101 /*
102 * If we overflowed the stack into a guard page, jump back to the
103 * bottom of the usable stack.
104 */
105 if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
106 PAGE_SIZE)
107 stack = (unsigned long *)task_stack_page(task);
108
101 while (valid_stack_ptr(task, stack, sizeof(*stack), end)) { 109 while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
102 unsigned long addr; 110 unsigned long addr;
103 111
@@ -226,6 +234,8 @@ unsigned long oops_begin(void)
226EXPORT_SYMBOL_GPL(oops_begin); 234EXPORT_SYMBOL_GPL(oops_begin);
227NOKPROBE_SYMBOL(oops_begin); 235NOKPROBE_SYMBOL(oops_begin);
228 236
237void __noreturn rewind_stack_do_exit(int signr);
238
229void oops_end(unsigned long flags, struct pt_regs *regs, int signr) 239void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
230{ 240{
231 if (regs && kexec_should_crash(current)) 241 if (regs && kexec_should_crash(current))
@@ -247,7 +257,13 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
247 panic("Fatal exception in interrupt"); 257 panic("Fatal exception in interrupt");
248 if (panic_on_oops) 258 if (panic_on_oops)
249 panic("Fatal exception"); 259 panic("Fatal exception");
250 do_exit(signr); 260
261 /*
262 * We're not going to return, but we might be on an IST stack or
263 * have very little stack space left. Rewind the stack and kill
264 * the task.
265 */
266 rewind_stack_do_exit(signr);
251} 267}
252NOKPROBE_SYMBOL(oops_end); 268NOKPROBE_SYMBOL(oops_end);
253 269
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index d558a8a49016..2552a1eadfed 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -272,6 +272,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
272 272
273 stack = sp; 273 stack = sp;
274 for (i = 0; i < kstack_depth_to_print; i++) { 274 for (i = 0; i < kstack_depth_to_print; i++) {
275 unsigned long word;
276
275 if (stack >= irq_stack && stack <= irq_stack_end) { 277 if (stack >= irq_stack && stack <= irq_stack_end) {
276 if (stack == irq_stack_end) { 278 if (stack == irq_stack_end) {
277 stack = (unsigned long *) (irq_stack_end[-1]); 279 stack = (unsigned long *) (irq_stack_end[-1]);
@@ -281,12 +283,18 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
281 if (kstack_end(stack)) 283 if (kstack_end(stack))
282 break; 284 break;
283 } 285 }
286
287 if (probe_kernel_address(stack, word))
288 break;
289
284 if ((i % STACKSLOTS_PER_LINE) == 0) { 290 if ((i % STACKSLOTS_PER_LINE) == 0) {
285 if (i != 0) 291 if (i != 0)
286 pr_cont("\n"); 292 pr_cont("\n");
287 printk("%s %016lx", log_lvl, *stack++); 293 printk("%s %016lx", log_lvl, word);
288 } else 294 } else
289 pr_cont(" %016lx", *stack++); 295 pr_cont(" %016lx", word);
296
297 stack++;
290 touch_nmi_watchdog(); 298 touch_nmi_watchdog();
291 } 299 }
292 preempt_enable(); 300 preempt_enable();
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 64341aa485ae..d40ee8a38fed 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -42,3 +42,5 @@ EXPORT_SYMBOL(empty_zero_page);
42EXPORT_SYMBOL(___preempt_schedule); 42EXPORT_SYMBOL(___preempt_schedule);
43EXPORT_SYMBOL(___preempt_schedule_notrace); 43EXPORT_SYMBOL(___preempt_schedule_notrace);
44#endif 44#endif
45
46EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index dc3c0b1c816f..b44564bf86a8 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -1,11 +1,104 @@
1#include <linux/compat.h> 1#include <linux/compat.h>
2#include <linux/uaccess.h> 2#include <linux/uaccess.h>
3 3
4/*
 5 * The compat_siginfo_t structure and handling code is very easy
6 * to break in several ways. It must always be updated when new
7 * updates are made to the main siginfo_t, and
8 * copy_siginfo_to_user32() must be updated when the
9 * (arch-independent) copy_siginfo_to_user() is updated.
10 *
11 * It is also easy to put a new member in the compat_siginfo_t
12 * which has implicit alignment which can move internal structure
13 * alignment around breaking the ABI. This can happen if you,
14 * for instance, put a plain 64-bit value in there.
15 */
16static inline void signal_compat_build_tests(void)
17{
18 int _sifields_offset = offsetof(compat_siginfo_t, _sifields);
19
20 /*
21 * If adding a new si_code, there is probably new data in
22 * the siginfo. Make sure folks bumping the si_code
23 * limits also have to look at this code. Make sure any
24 * new fields are handled in copy_siginfo_to_user32()!
25 */
26 BUILD_BUG_ON(NSIGILL != 8);
27 BUILD_BUG_ON(NSIGFPE != 8);
28 BUILD_BUG_ON(NSIGSEGV != 4);
29 BUILD_BUG_ON(NSIGBUS != 5);
30 BUILD_BUG_ON(NSIGTRAP != 4);
31 BUILD_BUG_ON(NSIGCHLD != 6);
32 BUILD_BUG_ON(NSIGSYS != 1);
33
34 /* This is part of the ABI and can never change in size: */
35 BUILD_BUG_ON(sizeof(compat_siginfo_t) != 128);
36 /*
37 * The offsets of all the (unioned) si_fields are fixed
38 * in the ABI, of course. Make sure none of them ever
39 * move and are always at the beginning:
40 */
41 BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
42#define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
43
44 /*
45 * Ensure that the size of each si_field never changes.
46 * If it does, it is a sign that the
 47 * copy_siginfo_to_user32() code below needs to be updated
48 * along with the size in the CHECK_SI_SIZE().
49 *
50 * We repeat this check for both the generic and compat
51 * siginfos.
52 *
53 * Note: it is OK for these to grow as long as the whole
54 * structure stays within the padding size (checked
55 * above).
56 */
57#define CHECK_CSI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((compat_siginfo_t *)0)->_sifields.name))
58#define CHECK_SI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((siginfo_t *)0)->_sifields.name))
59
60 CHECK_CSI_OFFSET(_kill);
61 CHECK_CSI_SIZE (_kill, 2*sizeof(int));
62 CHECK_SI_SIZE (_kill, 2*sizeof(int));
63
64 CHECK_CSI_OFFSET(_timer);
65 CHECK_CSI_SIZE (_timer, 5*sizeof(int));
66 CHECK_SI_SIZE (_timer, 6*sizeof(int));
67
68 CHECK_CSI_OFFSET(_rt);
69 CHECK_CSI_SIZE (_rt, 3*sizeof(int));
70 CHECK_SI_SIZE (_rt, 4*sizeof(int));
71
72 CHECK_CSI_OFFSET(_sigchld);
73 CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
74 CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
75
76 CHECK_CSI_OFFSET(_sigchld_x32);
77 CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
78 /* no _sigchld_x32 in the generic siginfo_t */
79
80 CHECK_CSI_OFFSET(_sigfault);
81 CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
82 CHECK_SI_SIZE (_sigfault, 8*sizeof(int));
83
84 CHECK_CSI_OFFSET(_sigpoll);
85 CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
86 CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
87
88 CHECK_CSI_OFFSET(_sigsys);
89 CHECK_CSI_SIZE (_sigsys, 3*sizeof(int));
90 CHECK_SI_SIZE (_sigsys, 4*sizeof(int));
91
92 /* any new si_fields should be added here */
93}
94
4int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) 95int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
5{ 96{
6 int err = 0; 97 int err = 0;
7 bool ia32 = test_thread_flag(TIF_IA32); 98 bool ia32 = test_thread_flag(TIF_IA32);
8 99
100 signal_compat_build_tests();
101
9 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) 102 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
10 return -EFAULT; 103 return -EFAULT;
11 104
@@ -32,6 +125,21 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
32 &to->_sifields._pad[0]); 125 &to->_sifields._pad[0]);
33 switch (from->si_code >> 16) { 126 switch (from->si_code >> 16) {
34 case __SI_FAULT >> 16: 127 case __SI_FAULT >> 16:
128 if (from->si_signo == SIGBUS &&
129 (from->si_code == BUS_MCEERR_AR ||
130 from->si_code == BUS_MCEERR_AO))
131 put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
132
133 if (from->si_signo == SIGSEGV) {
134 if (from->si_code == SEGV_BNDERR) {
135 compat_uptr_t lower = (unsigned long)&to->si_lower;
136 compat_uptr_t upper = (unsigned long)&to->si_upper;
137 put_user_ex(lower, &to->si_lower);
138 put_user_ex(upper, &to->si_upper);
139 }
140 if (from->si_code == SEGV_PKUERR)
141 put_user_ex(from->si_pkey, &to->si_pkey);
142 }
35 break; 143 break;
36 case __SI_SYS >> 16: 144 case __SI_SYS >> 16:
37 put_user_ex(from->si_syscall, &to->si_syscall); 145 put_user_ex(from->si_syscall, &to->si_syscall);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2ed0ec1353f8..d0a51939c150 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1292,7 +1292,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1292 cpumask_copy(cpu_callin_mask, cpumask_of(0)); 1292 cpumask_copy(cpu_callin_mask, cpumask_of(0));
1293 mb(); 1293 mb();
1294 1294
1295 current_thread_info()->cpu = 0; /* needed? */
1296 for_each_possible_cpu(i) { 1295 for_each_possible_cpu(i) {
1297 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1296 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1298 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1297 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 3dce1ca0a653..01f30e56f99e 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -440,10 +440,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
440 440
441static inline int is_revectored(int nr, struct revectored_struct *bitmap) 441static inline int is_revectored(int nr, struct revectored_struct *bitmap)
442{ 442{
443 __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" 443 return test_bit(nr, bitmap->__map);
444 :"=r" (nr)
445 :"m" (*bitmap), "r" (nr));
446 return nr;
447} 444}
448 445
449#define val_byte(val, n) (((__u8 *)&val)[n]) 446#define val_byte(val, n) (((__u8 *)&val)[n])
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index cd05942bc918..f1aebfb49c36 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL(clear_page);
44 44
45EXPORT_SYMBOL(csum_partial); 45EXPORT_SYMBOL(csum_partial);
46 46
47EXPORT_SYMBOL(__sw_hweight32);
48EXPORT_SYMBOL(__sw_hweight64);
49
47/* 50/*
48 * Export string functions. We normally rely on gcc builtin for most of these, 51 * Export string functions. We normally rely on gcc builtin for most of these,
49 * but gcc sometimes decides not to inline them. 52 * but gcc sometimes decides not to inline them.