about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/apic/io_apic.c    2
-rw-r--r--  arch/x86/kernel/cpu/amd.c         4
-rw-r--r--  arch/x86/kernel/dumpstack.c      22
-rw-r--r--  arch/x86/kernel/dumpstack_32.c    4
-rw-r--r--  arch/x86/kernel/dumpstack_64.c    8
-rw-r--r--  arch/x86/kernel/espfix_64.c       2
-rw-r--r--  arch/x86/kernel/irq_32.c          2
-rw-r--r--  arch/x86/kernel/kprobes/core.c   12
-rw-r--r--  arch/x86/kernel/traps.c          20
9 files changed, 40 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 84e33ff5a6d5..446702ed99dc 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
 		mem += IOAPIC_RESOURCE_NAME_SIZE;
+		ioapics[i].iomem_res = &res[num];
 		num++;
-		ioapics[i].iomem_res = res;
 	}
 
 	ioapic_resources = res;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c343a54bed39..f5c69d8974e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 	u64 value;
 
 	/* re-enable TopologyExtensions if switched off by BIOS */
-	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
 		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
 			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 2bb25c3fe2e8..ef8017ca5ba9 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 {
-	struct task_struct *task;
 	unsigned long ret_addr;
 	int index;
 
 	if (addr != (unsigned long)return_to_handler)
 		return;
 
-	task = tinfo->task;
 	index = task->curr_ret_stack;
 
 	if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
 			const struct stacktrace_ops *ops,
-			struct thread_info *tinfo, int *graph)
+			struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
 			void *p, unsigned int size, void *end)
 {
-	void *t = tinfo;
+	void *t = task_stack_page(task);
 	if (end) {
 		if (p < end && p >= (end-THREAD_SIZE))
 			return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data,
 		unsigned long *end, int *graph)
 {
 	struct stack_frame *frame = (struct stack_frame *)bp;
 
-	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+	while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
 		unsigned long addr;
 
 		addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
 		} else {
 			ops->address(data, addr, 0);
 		}
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 	stack++;
 	}
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data,
 		unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 	struct stack_frame *frame = (struct stack_frame *)bp;
 	unsigned long *ret_addr = &frame->return_address;
 
-	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+	while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
 		unsigned long addr = *ret_addr;
 
 		if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 			break;
 		frame = frame->next_frame;
 		ret_addr = &frame->return_address;
-		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+		print_ftrace_graph_addr(addr, data, ops, task, graph);
 	}
 
 	return (unsigned long)frame;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 464ffd69b92e..fef917e79b9d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	bp = stack_frame(task, regs);
 
 	for (;;) {
-		struct thread_info *context;
 		void *end_stack;
 
 		end_stack = is_hardirq_stack(stack, cpu);
 		if (!end_stack)
 			end_stack = is_softirq_stack(stack, cpu);
 
-		context = task_thread_info(task);
-		bp = ops->walk_stack(context, stack, bp, ops, data,
+		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
 
 		/* Stop if not on irq stack */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5f1c6266eb30..d558a8a49016 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		const struct stacktrace_ops *ops, void *data)
 {
 	const unsigned cpu = get_cpu();
-	struct thread_info *tinfo;
 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned long dummy;
 	unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * current stack address. If the stacks consist of nested
 	 * exceptions
 	 */
-	tinfo = task_thread_info(task);
 	while (!done) {
 		unsigned long *stack_end;
 		enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = ops->walk_stack(tinfo, stack, bp, ops,
+			bp = ops->walk_stack(task, stack, bp, ops,
 					     data, stack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
 			if (ops->stack(data, "IRQ") < 0)
 				break;
-			bp = ops->walk_stack(tinfo, stack, bp,
+			bp = ops->walk_stack(task, stack, bp,
 					     ops, data, stack_end, &graph);
 			/*
 			 * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 4d38416e2a7f..04f89caef9c4 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 38da8f29a9c8..c627bf8d98ad 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curstk;
 	struct irq_stack *irqstk;
 	u32 *isp, *prev_esp;
 
-	curstk = current_stack();
 	irqstk = __this_cpu_read(softirq_stack);
 
 	/* build the stack frame on the softirq stack */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 38cf7a741250..7847e5c0e0b5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * normal page fault.
 		 */
 		regs->ip = (unsigned long)cur->addr;
+		/*
+		 * Trap flag (TF) has been set here because this fault
+		 * happened where the single stepping will be done.
+		 * So clear it by resetting the current kprobe:
+		 */
+		regs->flags &= ~X86_EFLAGS_TF;
+
+		/*
+		 * If the TF flag was set before the kprobe hit,
+		 * don't touch it:
+		 */
 		regs->flags |= kcb->kprobe_old_flags;
+
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d1590486204a..00f03d82e69a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
 		local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile. Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
 	BUG_ON((unsigned long)(current_top_of_stack() -
 			       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int