 arch/x86/Kconfig                  |  2
 arch/x86/Makefile                 |  2
 arch/x86/boot/compressed/Makefile |  2
 arch/x86/entry/entry_32.S         | 30
 arch/x86/entry/entry_64.S         | 11
 arch/x86/kernel/alternative.c     |  9
 arch/x86/kernel/setup.c           |  4
 arch/x86/kernel/unwind_frame.c    | 49
 arch/x86/mm/pat.c                 |  9
 9 files changed, 81 insertions(+), 37 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cd18994a9555..4ccfacc7232a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -360,7 +360,7 @@ config SMP
 	  Management" code will be disabled if you say Y here.
 
 	  See also <file:Documentation/x86/i386/IO-APIC.txt>,
-	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+	  <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
 	  <http://www.tldp.org/docs.html#howto>.
 
 	  If you don't know what to do here, say N.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5851411e60fb..bf240b920473 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
     # If '-Os' is enabled, disable it and print a warning.
     ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
       undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
       $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
     endif
 
   endif
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 44163e8c3868..2c860ad4fe06 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
 	for obj in $(filter %.o,$^); do \
-		readelf -S $$obj | grep -qF .rel.local && { \
+		${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
 		echo "error: $$obj has data relocations!" >&2; \
 		exit 1; \
 	} || true; \
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 50bc26949e9e..48ef7bb32c42 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,6 +252,23 @@ ENTRY(__switch_to_asm)
 END(__switch_to_asm)
 
 /*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because its an
+ * asmlinkage function so its argument has to be pushed on the stack. This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+	FRAME_BEGIN
+
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax
+
+	FRAME_END
+	ret
+ENDPROC(schedule_tail_wrapper)
+/*
  * A newly forked process directly context switches into this address.
  *
  * eax: prev task we switched from
@@ -259,24 +276,15 @@ END(__switch_to_asm)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN		/* help unwinder find end of stack */
-
-	/*
-	 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
-	 * on the stack.
-	 */
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
+	call	schedule_tail_wrapper
 
 	testl	%ebx, %ebx
 	jnz	1f		/* kernel threads are uncommon */
 
 2:
 	/* When we fork, we trace the syscall return in the child, too. */
-	leal	FRAME_OFFSET(%esp), %eax
+	movl	%esp, %eax
 	call	syscall_return_slowpath
-	FRAME_END
 	jmp	restore_all
 
 	/* kernel thread */
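
The entry_32.S comment above leans on the frame unwinder's convention that the last frame always sits at a fixed offset from the end of the task stack. A minimal userspace sketch of that convention, using assumed values for the stack size and the pt_regs word count (not the kernel's real numbers):

#include <stdio.h>
#include <stddef.h>

#define THREAD_SIZE    8192   /* assumed stack size for the sketch */
#define PT_REGS_WORDS  17     /* assumed size of struct pt_regs in words */

int main(void)
{
	unsigned long stack[THREAD_SIZE / sizeof(unsigned long)];
	unsigned long *stack_end = stack + THREAD_SIZE / sizeof(unsigned long);

	/* pt_regs sit at the very top of the task stack ... */
	unsigned long *regs = stack_end - PT_REGS_WORDS;

	/*
	 * ... and the final frame header (saved bp + return address) sits two
	 * words below it, which is what last_frame() in unwind_frame.c
	 * computes and what the schedule_tail_wrapper() change preserves.
	 */
	unsigned long *last_frame = regs - 2;

	printf("last frame is %zu bytes below the stack end\n",
	       (size_t)((char *)stack_end - (char *)last_frame));
	return 0;
}

In the patched code the argument push happens inside schedule_tail_wrapper(), below a regular FRAME_BEGIN frame header, so the words at that fixed offset keep looking like a normal frame.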
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..4a4c0834f965 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -406,19 +405,17 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN			/* help unwinder find end of stack */
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */
 
 	testq	%rbx, %rbx			/* from kernel_thread? */
 	jnz	1f				/* kernel threads are uncommon */
 
 2:
-	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
+	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
 	SWAPGS
-	FRAME_END
 	jmp	restore_regs_and_iret
 
 1:
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c5b8f760473c..32e14d137416 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		memcpy(insnbuf, replacement, a->replacementlen);
 		insnbuf_sz = a->replacementlen;
 
-		/* 0xe8 is a relative jump; fix the offset. */
-		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+		/*
+		 * 0xe8 is a relative jump; fix the offset.
+		 *
+		 * Instruction length is checked before the opcode to avoid
+		 * accessing uninitialized bytes for zero-length replacements.
+		 */
+		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
 			*(s32 *)(insnbuf + 1) += replacement - instr;
 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
 				*(s32 *)(insnbuf + 1),
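
The new comment's point is the evaluation order: && short-circuits, so checking a->replacementlen first means the opcode byte is never read when nothing was copied into insnbuf. A standalone sketch of the patched check (the function name, buffer sizes and offsets below are made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Re-target a 5-byte rel32 CALL (opcode 0xe8) copied from 'replacement'
 * so that it still reaches the same destination when run at 'instr'. */
static void fix_call_offset(uint8_t *insnbuf, size_t replacementlen,
			    const uint8_t *replacement, const uint8_t *instr)
{
	int32_t off;

	/* Length first: for replacementlen == 0 the opcode byte is never read. */
	if (replacementlen == 5 && insnbuf[0] == 0xe8) {
		memcpy(&off, insnbuf + 1, sizeof(off));
		off += (int32_t)(replacement - instr);
		memcpy(insnbuf + 1, &off, sizeof(off));
	}
}

int main(void)
{
	uint8_t code[64];
	uint8_t insnbuf[5] = { 0xe8, 0x10, 0x00, 0x00, 0x00 };
	int32_t off;

	/* Pretend the replacement lives 32 bytes past the patch site. */
	fix_call_offset(insnbuf, sizeof(insnbuf), code + 32, code);

	memcpy(&off, insnbuf + 1, sizeof(off));
	printf("patched rel32: %d\n", off);	/* 0x10 + 32 = 48 */
	return 0;
}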
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0b4d3c686b1e..f81823695014 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	x86_configure_nx();
 
-	simple_udelay_calibration();
-
 	parse_early_param();
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	init_hypervisor_platform();
 
+	simple_udelay_calibration();
+
 	x86_init.resources.probe_roms();
 
 	/* after parse_early_param, so could debug it */
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 82c6d7f1fd73..b9389d72b2f7 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
 	return (unsigned long *)task_pt_regs(state->task) - 2;
 }
 
+static bool is_last_frame(struct unwind_state *state)
+{
+	return state->bp == last_frame(state);
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
 	return last_frame(state) - GCC_REALIGN_WORDS;
 }
 
-static bool is_last_task_frame(struct unwind_state *state)
+static bool is_last_aligned_frame(struct unwind_state *state)
 {
 	unsigned long *last_bp = last_frame(state);
 	unsigned long *aligned_bp = last_aligned_frame(state);
 
 	/*
-	 * We have to check for the last task frame at two different locations
-	 * because gcc can occasionally decide to realign the stack pointer and
-	 * change the offset of the stack frame in the prologue of a function
-	 * called by head/entry code. Examples:
+	 * GCC can occasionally decide to realign the stack pointer and change
+	 * the offset of the stack frame in the prologue of a function called
+	 * by head/entry code. Examples:
 	 *
 	 * <start_secondary>:
 	 *	push   %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
 	 *	push   %rbp
 	 *	mov    %rsp,%rbp
 	 *
-	 * Note that after aligning the stack, it pushes a duplicate copy of
-	 * the return address before pushing the frame pointer.
+	 * After aligning the stack, it pushes a duplicate copy of the return
+	 * address before pushing the frame pointer.
+	 */
+	return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
+}
+
+static bool is_last_ftrace_frame(struct unwind_state *state)
+{
+	unsigned long *last_bp = last_frame(state);
+	unsigned long *last_ftrace_bp = last_bp - 3;
+
+	/*
+	 * When unwinding from an ftrace handler of a function called by entry
+	 * code, the stack layout of the last frame is:
+	 *
+	 *   bp
+	 *   parent ret addr
+	 *   bp
+	 *   function ret addr
+	 *   parent ret addr
+	 *   pt_regs
+	 *   -----------------
 	 */
-	return (state->bp == last_bp ||
-		(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
+	return (state->bp == last_ftrace_bp &&
+		*state->bp == *(state->bp + 2) &&
+		*(state->bp + 1) == *(state->bp + 4));
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+	return is_last_frame(state) || is_last_aligned_frame(state) ||
+	       is_last_ftrace_frame(state);
 }
 
 /*
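
A small userspace sketch of the duplicate-word test that is_last_ftrace_frame() performs against the layout drawn in that comment; the stack words are mocked up by hand and the helper mirrors, rather than reuses, the kernel code:

#include <stdbool.h>
#include <stdio.h>

/*
 * Mock of the ftrace-case check: 'bp' must point three words below the
 * normal last frame, and the saved bp / parent return address must each
 * appear twice, matching the layout in the comment above.
 */
static bool is_last_ftrace_frame(const unsigned long *bp,
				 const unsigned long *last_bp)
{
	const unsigned long *last_ftrace_bp = last_bp - 3;

	return bp == last_ftrace_bp &&
	       bp[0] == bp[2] &&	/* duplicated bp */
	       bp[1] == bp[4];		/* duplicated parent ret addr */
}

int main(void)
{
	/*
	 * Fake stack words in increasing-address order, mirroring the comment:
	 * bp, parent ret, bp, function ret, parent ret (pt_regs would follow).
	 */
	unsigned long stack[5] = { 0x1000, 0xc0de, 0x1000, 0xbeef, 0xc0de };
	unsigned long *last_bp = &stack[3];	/* where the non-ftrace last frame would sit */

	printf("%s\n", is_last_ftrace_frame(&stack[0], last_bp) ? "match" : "no match");
	return 0;
}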
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9b78685b66e6..83a59a67757a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -65,9 +65,11 @@ static int __init nopat(char *str)
 }
 early_param("nopat", nopat);
 
+static bool __read_mostly __pat_initialized = false;
+
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return __pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -225,13 +227,14 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	__pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
 
 static void pat_ap_init(u64 pat)
 {
-	if (!boot_cpu_has(X86_FEATURE_PAT)) {
+	if (!this_cpu_has(X86_FEATURE_PAT)) {
 		/*
 		 * If this happens we are on a secondary CPU, but switched to
 		 * PAT on the boot CPU. We have no way to undo PAT.
@@ -306,7 +309,7 @@
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
+	if (!__pat_enabled) {
 		init_cache_modes();
 		return;
 	}
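
Taken together, the pat.c hunks make pat_enabled() answer "has the PAT MSR actually been programmed?" instead of "was PAT merely not disabled?", while pat_init() keys off the raw request flag. A condensed standalone sketch of that requested-vs-initialized split (flag names and flow are simplified, not the kernel's exact code):

#include <stdbool.h>
#include <stdio.h>

static bool pat_requested = true;	/* cleared by the "nopat" boot option */
static bool pat_initialized;		/* set only once the PAT MSR is written */

/* What the rest of the kernel asks before relying on WC/UC- mappings. */
static bool pat_enabled(void)
{
	return pat_initialized;
}

static void pat_bsp_init(void)
{
	/* wrmsrl(MSR_IA32_CR_PAT, pat) would happen here */
	pat_initialized = true;
}

static void pat_init(void)
{
	if (!pat_requested)	/* keyed off the request flag, not pat_enabled() */
		return;
	pat_bsp_init();
}

int main(void)
{
	printf("before pat_init(): pat_enabled() = %d\n", pat_enabled());
	pat_init();
	printf("after pat_init():  pat_enabled() = %d\n", pat_enabled());
	return 0;
}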