about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-07-04 11:58:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-07-04 11:58:50 -0400
commitb1be9ead135939136b87d73004891a6bac35bb43 (patch)
treec6255a4fb19a528bd2e825c66fa5cd54feb038b5
parent22a093b2fb52fb656658a32adc80c24ddc200ca4 (diff)
parentb96fecbfa8c88b057e2bbf10021521c232bb3650 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar: "Two FPU rewrite related fixes. This addresses all known x86 regressions at this stage. Also some other misc fixes" * 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/fpu: Fix boot crash in the early FPU code x86/asm/entry/64: Update path names x86/fpu: Fix FPU related boot regression when CPUID masking BIOS feature is enabled x86/boot/setup: Clean up the e820_reserve_setup_data() code x86/kaslr: Fix typo in the KASLR_FLAG documentation
-rw-r--r--Documentation/x86/boot.txt2
-rw-r--r--Documentation/x86/entry_64.txt8
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/fpu/init.c7
-rw-r--r--arch/x86/kernel/setup.c7
5 files changed, 13 insertions, 13 deletions
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7c1f9fad6674..9da6f3512249 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -406,7 +406,7 @@ Protocol: 2.00+
406 - If 0, the protected-mode code is loaded at 0x10000. 406 - If 0, the protected-mode code is loaded at 0x10000.
407 - If 1, the protected-mode code is loaded at 0x100000. 407 - If 1, the protected-mode code is loaded at 0x100000.
408 408
409 Bit 1 (kernel internal): ALSR_FLAG 409 Bit 1 (kernel internal): KASLR_FLAG
410 - Used internally by the compressed kernel to communicate 410 - Used internally by the compressed kernel to communicate
411 KASLR status to kernel proper. 411 KASLR status to kernel proper.
412 If 1, KASLR enabled. 412 If 1, KASLR enabled.
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index 33884d156125..c1df8eba9dfd 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -1,14 +1,14 @@
1This file documents some of the kernel entries in 1This file documents some of the kernel entries in
2arch/x86/kernel/entry_64.S. A lot of this explanation is adapted from 2arch/x86/entry/entry_64.S. A lot of this explanation is adapted from
3an email from Ingo Molnar: 3an email from Ingo Molnar:
4 4
5http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu> 5http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu>
6 6
7The x86 architecture has quite a few different ways to jump into 7The x86 architecture has quite a few different ways to jump into
8kernel code. Most of these entry points are registered in 8kernel code. Most of these entry points are registered in
9arch/x86/kernel/traps.c and implemented in arch/x86/kernel/entry_64.S 9arch/x86/kernel/traps.c and implemented in arch/x86/entry/entry_64.S
10for 64-bit, arch/x86/kernel/entry_32.S for 32-bit and finally 10for 64-bit, arch/x86/entry/entry_32.S for 32-bit and finally
11arch/x86/ia32/ia32entry.S which implements the 32-bit compatibility 11arch/x86/entry/entry_64_compat.S which implements the 32-bit compatibility
12syscall entry points and thus provides for 32-bit processes the 12syscall entry points and thus provides for 32-bit processes the
13ability to execute syscalls when running on 64-bit kernels. 13ability to execute syscalls when running on 64-bit kernels.
14 14
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9fc5e3d9d9c8..922c5e0cea4c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -742,7 +742,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
742 cpu_detect(c); 742 cpu_detect(c);
743 get_cpu_vendor(c); 743 get_cpu_vendor(c);
744 get_cpu_cap(c); 744 get_cpu_cap(c);
745 fpu__init_system(c);
746 745
747 if (this_cpu->c_early_init) 746 if (this_cpu->c_early_init)
748 this_cpu->c_early_init(c); 747 this_cpu->c_early_init(c);
@@ -754,6 +753,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
754 this_cpu->c_bsp_init(c); 753 this_cpu->c_bsp_init(c);
755 754
756 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 755 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
756 fpu__init_system(c);
757} 757}
758 758
759void __init early_cpu_init(void) 759void __init early_cpu_init(void)
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index fc878fee6a51..32826791e675 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -95,11 +95,12 @@ static void __init fpu__init_system_mxcsr(void)
95 unsigned int mask = 0; 95 unsigned int mask = 0;
96 96
97 if (cpu_has_fxsr) { 97 if (cpu_has_fxsr) {
98 struct fxregs_state fx_tmp __aligned(32) = { }; 98 /* Static because GCC does not get 16-byte stack alignment right: */
99 static struct fxregs_state fxregs __initdata;
99 100
100 asm volatile("fxsave %0" : "+m" (fx_tmp)); 101 asm volatile("fxsave %0" : "+m" (fxregs));
101 102
102 mask = fx_tmp.mxcsr_mask; 103 mask = fxregs.mxcsr_mask;
103 104
104 /* 105 /*
105 * If zero then use the default features mask, 106 * If zero then use the default features mask,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d3b95b89e9b2..80f874bf999e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -461,19 +461,18 @@ static void __init e820_reserve_setup_data(void)
461{ 461{
462 struct setup_data *data; 462 struct setup_data *data;
463 u64 pa_data; 463 u64 pa_data;
464 int found = 0;
465 464
466 pa_data = boot_params.hdr.setup_data; 465 pa_data = boot_params.hdr.setup_data;
466 if (!pa_data)
467 return;
468
467 while (pa_data) { 469 while (pa_data) {
468 data = early_memremap(pa_data, sizeof(*data)); 470 data = early_memremap(pa_data, sizeof(*data));
469 e820_update_range(pa_data, sizeof(*data)+data->len, 471 e820_update_range(pa_data, sizeof(*data)+data->len,
470 E820_RAM, E820_RESERVED_KERN); 472 E820_RAM, E820_RESERVED_KERN);
471 found = 1;
472 pa_data = data->next; 473 pa_data = data->next;
473 early_memunmap(data, sizeof(*data)); 474 early_memunmap(data, sizeof(*data));
474 } 475 }
475 if (!found)
476 return;
477 476
478 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 477 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
479 memcpy(&e820_saved, &e820, sizeof(struct e820map)); 478 memcpy(&e820_saved, &e820, sizeof(struct e820map));