author     Linus Torvalds <torvalds@linux-foundation.org>   2016-12-23 19:54:46 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-12-23 19:54:46 -0500
commit     6ac3bb167fed0b3d02b4fd3daa0d819841d5f6f4 (patch)
tree       49f73d917f1cd7d848e9a48c9efffea8ab2f7142
parent     eb3e8d9de28a5385f75e5c42eba5fb5b0c7625be (diff)
parent     c280f7736ab26a601932b1ce017a3840dbedcfdc (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "There's a number of fixes:

   - a round of fixes for CPUID-less legacy CPUs
   - a number of microcode loader fixes
   - i8042 detection robustization fixes
   - stack dump/unwinder fixes
   - x86 SoC platform driver fixes
   - a GCC 7 warning fix
   - virtualization related fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  Revert "x86/unwind: Detect bad stack return address"
  x86/paravirt: Mark unused patch_default label
  x86/microcode/AMD: Reload proper initrd start address
  x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
  x86/platform/intel-mid: Switch MPU3050 driver to IIO
  x86/alternatives: Do not use sync_core() to serialize I$
  x86/topology: Document cpu_llc_id
  x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
  x86/asm: Rewrite sync_core() to use IRET-to-self
  x86/microcode/intel: Replace sync_core() with native_cpuid()
  Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
  x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
  x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
  x86/tools: Fix gcc-7 warning in relocs.c
  x86/unwind: Dump stack data on warnings
  x86/unwind: Adjust last frame check for aligned function stacks
  x86/init: Fix a couple of comment typos
  x86/init: Remove i8042_detect() from platform ops
  Input: i8042 - Trust firmware a bit more when probing on X86
  x86/init: Add i8042 state to the platform data
  ...
 Documentation/x86/topology.txt                   |  9
 arch/x86/boot/cpu.c                              |  6
 arch/x86/include/asm/processor.h                 | 80
 arch/x86/include/asm/unwind.h                    |  2
 arch/x86/include/asm/x86_init.h                  | 26
 arch/x86/kernel/acpi/boot.c                      |  7
 arch/x86/kernel/alternative.c                    | 15
 arch/x86/kernel/cpu/common.c                     |  7
 arch/x86/kernel/cpu/microcode/amd.c              | 56
 arch/x86/kernel/cpu/microcode/core.c             | 40
 arch/x86/kernel/cpu/microcode/intel.c            | 26
 arch/x86/kernel/cpu/mshyperv.c                   | 24
 arch/x86/kernel/paravirt_patch_32.c              |  2
 arch/x86/kernel/paravirt_patch_64.c              |  2
 arch/x86/kernel/platform-quirks.c                |  5
 arch/x86/kernel/unwind_frame.c                   | 56
 arch/x86/kernel/x86_init.c                       |  2
 arch/x86/platform/ce4100/ce4100.c                |  6
 arch/x86/platform/intel-mid/device_libs/Makefile |  2
 arch/x86/platform/intel-mid/intel-mid.c          |  7
 arch/x86/platform/intel-quark/imr_selftest.c     |  3
 arch/x86/tools/relocs.c                          |  3
 drivers/input/serio/i8042-x86ia64io.h            | 10
 23 files changed, 279 insertions, 117 deletions
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.txt
index 06afac252f5b..f3e9d7e9ed6c 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.txt
@@ -63,6 +63,15 @@ The topology of a system is described in the units of:
     The maximum possible number of packages in the system. Helpful for per
     package facilities to preallocate per package information.
 
+  - cpu_llc_id:
+
+    A per-CPU variable containing:
+    - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
+      Cache
+
+    - On AMD, the Node ID or Core Complex ID containing the Last Level
+      Cache. In general, it is a number identifying an LLC uniquely on the
+      system.
 
 * Cores:
 
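For reference, a minimal sketch (hypothetical helper, assuming the cpu_llc_id per-CPU variable declared in <asm/smp.h>) of how the newly documented field can be used to group CPUs that share a last-level cache:

    #include <linux/smp.h>	/* pulls in <asm/smp.h> and per_cpu() */

    /* Hypothetical helper: true if CPUs @a and @b sit behind the same LLC. */
    static bool cpus_share_llc(int a, int b)
    {
        return per_cpu(cpu_llc_id, a) == per_cpu(cpu_llc_id, b);
    }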
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 4224ede43b4e..26240dde081e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,12 +87,6 @@ int validate_cpu(void)
 		return -1;
 	}
 
-	if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
-	    !has_eflag(X86_EFLAGS_ID)) {
-		printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
-		return -1;
-	}
-
 	if (err_flags) {
 		puts("This kernel requires the following features "
 		     "not present on the CPU:\n");
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6aa741fbe1df..eaf100508c36 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -602,33 +602,69 @@ static __always_inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution and prefetching of modified code. */
+/*
+ * This function forces the icache and prefetched instruction stream to
+ * catch up with reality in two very specific cases:
+ *
+ * a) Text was modified using one virtual address and is about to be executed
+ *    from the same physical page at a different virtual address.
+ *
+ * b) Text was modified on a different CPU, may subsequently be
+ *    executed on this CPU, and you want to make sure the new version
+ *    gets executed. This generally means you're calling this in a IPI.
+ *
+ * If you're calling this for a different reason, you're probably doing
+ * it wrong.
+ */
 static inline void sync_core(void)
 {
-	int tmp;
-
-#ifdef CONFIG_M486
 	/*
-	 * Do a CPUID if available, otherwise do a jump. The jump
-	 * can conveniently enough be the jump around CPUID.
+	 * There are quite a few ways to do this. IRET-to-self is nice
+	 * because it works on every CPU, at any CPL (so it's compatible
+	 * with paravirtualization), and it never exits to a hypervisor.
+	 * The only down sides are that it's a bit slow (it seems to be
+	 * a bit more than 2x slower than the fastest options) and that
+	 * it unmasks NMIs. The "push %cs" is needed because, in
+	 * paravirtual environments, __KERNEL_CS may not be a valid CS
+	 * value when we do IRET directly.
+	 *
+	 * In case NMI unmasking or performance ever becomes a problem,
+	 * the next best option appears to be MOV-to-CR2 and an
+	 * unconditional jump. That sequence also works on all CPUs,
+	 * but it will fault at CPL3 (i.e. Xen PV and lguest).
+	 *
+	 * CPUID is the conventional way, but it's nasty: it doesn't
+	 * exist on some 486-like CPUs, and it usually exits to a
+	 * hypervisor.
+	 *
+	 * Like all of Linux's memory ordering operations, this is a
+	 * compiler barrier as well.
 	 */
-	asm volatile("cmpl %2,%1\n\t"
-		     "jl 1f\n\t"
-		     "cpuid\n"
-		     "1:"
-		     : "=a" (tmp)
-		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_32
+	asm volatile (
+		"pushfl\n\t"
+		"pushl %%cs\n\t"
+		"pushl $1f\n\t"
+		"iret\n\t"
+		"1:"
+		: "+r" (__sp) : : "memory");
 #else
-	/*
-	 * CPUID is a barrier to speculative execution.
-	 * Prefetched instructions are automatically
-	 * invalidated when modified.
-	 */
-	asm volatile("cpuid"
-		     : "=a" (tmp)
-		     : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+	unsigned int tmp;
+
+	asm volatile (
+		"mov %%ss, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq %%rsp\n\t"
+		"addq $8, (%%rsp)\n\t"
+		"pushfq\n\t"
+		"mov %%cs, %0\n\t"
+		"pushq %q0\n\t"
+		"pushq $1f\n\t"
+		"iretq\n\t"
+		"1:"
+		: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
 #endif
 }
 
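As a usage illustration of case (b) in the comment above, a sketch only (mirroring the pattern used by text_poke_bp() in arch/x86/kernel/alternative.c; do_sync_core() and patch_and_sync() are hypothetical names): patch the text on one CPU, then IPI every CPU so each one serializes its instruction stream via sync_core() before it can execute the new bytes.

    /* Runs on every CPU via IPI; forces that CPU to refetch modified text. */
    static void do_sync_core(void *unused)
    {
        sync_core();
    }

    /* Hypothetical: modify kernel text, then make all CPUs serialize. */
    static void patch_and_sync(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);			/* modify the text bytes */
        on_each_cpu(do_sync_core, NULL, 1);	/* wait for every CPU */
    }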
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index c5a7f3a930dd..6fa75b17aec3 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -12,7 +12,7 @@ struct unwind_state {
 	struct task_struct *task;
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
-	unsigned long *bp;
+	unsigned long *bp, *orig_sp;
 	struct pt_regs *regs;
 #else
 	unsigned long *sp;
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6ba793178441..7ba7e90a9ad6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -59,7 +59,7 @@ struct x86_init_irqs {
 
 /**
  * struct x86_init_oem - oem platform specific customizing functions
- * @arch_setup: platform specific architecure setup
+ * @arch_setup: platform specific architecture setup
  * @banner: print a platform specific banner
  */
 struct x86_init_oem {
@@ -165,8 +165,25 @@ struct x86_legacy_devices {
 };
 
 /**
+ * enum x86_legacy_i8042_state - i8042 keyboard controller state
+ * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on
+ *	given platform/subarch.
+ * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller
+ *	is absent.
+ * @X86_LEGACY_i8042_EXPECTED_PRESENT: the controller is likely to be
+ *	present, the i8042 driver should probe for controller existence.
+ */
+enum x86_legacy_i8042_state {
+	X86_LEGACY_I8042_PLATFORM_ABSENT,
+	X86_LEGACY_I8042_FIRMWARE_ABSENT,
+	X86_LEGACY_I8042_EXPECTED_PRESENT,
+};
+
+/**
  * struct x86_legacy_features - legacy x86 features
  *
+ * @i8042: indicated if we expect the device to have i8042 controller
+ *	present.
  * @rtc: this device has a CMOS real-time clock present
  * @reserve_bios_regions: boot code will search for the EBDA address and the
  *	start of the 640k - 1M BIOS region. If false, the platform must
@@ -175,6 +192,7 @@ struct x86_legacy_devices {
  *	documentation for further details.
  */
 struct x86_legacy_features {
+	enum x86_legacy_i8042_state i8042;
 	int rtc;
 	int reserve_bios_regions;
 	struct x86_legacy_devices devices;
@@ -188,15 +206,14 @@ struct x86_legacy_features {
 * @set_wallclock: set time back to HW clock
 * @is_untracked_pat_range exclude from PAT logic
 * @nmi_init enable NMI on cpus
-* @i8042_detect pre-detect if i8042 controller exists
 * @save_sched_clock_state: save state for sched_clock() on suspend
 * @restore_sched_clock_state: restore state for sched_clock() on resume
-* @apic_post_init: adjust apic if neeeded
+* @apic_post_init: adjust apic if needed
 * @legacy: legacy features
 * @set_legacy_features: override legacy features. Use of this callback
 *			is highly discouraged. You should only need
 *			this if your hardware platform requires further
-*			custom fine tuning far beyong what may be
+*			custom fine tuning far beyond what may be
 *			possible in x86_early_init_platform_quirks() by
 *			only using the current x86_hardware_subarch
 *			semantics.
@@ -210,7 +227,6 @@ struct x86_platform_ops {
 	bool (*is_untracked_pat_range)(u64 start, u64 end);
 	void (*nmi_init)(void);
 	unsigned char (*get_nmi_reason)(void);
-	int (*i8042_detect)(void);
 	void (*save_sched_clock_state)(void);
 	void (*restore_sched_clock_state)(void);
 	void (*apic_post_init)(void);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6f65b0eed384..64422f850e95 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -930,6 +930,13 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
 		x86_platform.legacy.devices.pnpbios = 0;
 	}
 
+	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+	    !(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
+	    x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
+		pr_debug("ACPI: i8042 controller is absent\n");
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
+	}
+
 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
 		pr_debug("ACPI: not registering RTC platform device\n");
 		x86_platform.legacy.rtc = 0;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 5cb272a7a5a3..c5b8f760473c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -337,7 +337,11 @@ done:
 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
 }
 
-static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+/*
+ * "noinline" to cause control flow change and thus invalidate I$ and
+ * cause refetch after modification.
+ */
+static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
 {
 	unsigned long flags;
 
@@ -346,7 +350,6 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
 
 	local_irq_save(flags);
 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
-	sync_core();
 	local_irq_restore(flags);
 
 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
@@ -359,9 +362,12 @@ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
  * This implies that asymmetric systems where APs have less capabilities than
  * the boot processor are not handled. Tough. Make sure you disable such
  * features by hand.
+ *
+ * Marked "noinline" to cause control flow change and thus insn cache
+ * to refetch changed I$ lines.
  */
-void __init_or_module apply_alternatives(struct alt_instr *start,
-					 struct alt_instr *end)
+void __init_or_module noinline apply_alternatives(struct alt_instr *start,
+						  struct alt_instr *end)
 {
 	struct alt_instr *a;
 	u8 *instr, *replacement;
@@ -667,7 +673,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
 	unsigned long flags;
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
-	sync_core();
 	local_irq_restore(flags);
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1f6b50a449ab..dc1697ca5191 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -667,13 +667,14 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 		c->x86_capability[CPUID_1_EDX] = edx;
 	}
 
+	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
+	if (c->cpuid_level >= 0x00000006)
+		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
+
 	/* Additional Intel-defined flags: level 0x00000007 */
 	if (c->cpuid_level >= 0x00000007) {
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
-
 		c->x86_capability[CPUID_7_0_EBX] = ebx;
-
-		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 		c->x86_capability[CPUID_7_ECX] = ecx;
 	}
 
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6f353bdb3a25..6a31e2691f3a 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -116,10 +116,11 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 
 /*
  * This scans the ucode blob for the proper container as we can have multiple
- * containers glued together.
+ * containers glued together. Returns the equivalence ID from the equivalence
+ * table or 0 if none found.
  */
-static struct container
-find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
+static u16
+find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
 {
 	struct container ret = { NULL, 0 };
 	u32 eax, ebx, ecx, edx;
@@ -138,7 +139,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 	if (header[0] != UCODE_MAGIC ||
 	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
 	    header[2] == 0) /* size */
-		return ret;
+		return eq_id;
 
 	eax = 0x00000001;
 	ecx = 0;
@@ -163,8 +164,9 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 		 * ucode update loop below
 		 */
 		left = ret.size - offset;
-		*ret_id = eq_id;
-		return ret;
+
+		*ret_cont = ret;
+		return eq_id;
 	}
 
 	/*
@@ -189,7 +191,7 @@ find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
 		ucode = data;
 	}
 
-	return ret;
+	return eq_id;
 }
 
 static int __apply_microcode_amd(struct microcode_amd *mc_amd)
@@ -214,17 +216,18 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd)
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
+ *
+ * Returns true if container found (sets @ret_cont), false otherwise.
 */
-static struct container
-apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
+static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
+				      struct container *ret_cont)
 {
-	struct container ret = { NULL, 0 };
 	u8 (*patch)[PATCH_MAX_SIZE];
+	u32 rev, *header, *new_rev;
+	struct container ret;
 	int offset, left;
-	u32 rev, *header;
-	u8 *data;
 	u16 eq_id = 0;
-	u32 *new_rev;
+	u8 *data;
 
 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
@@ -235,11 +238,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
 #endif
 
 	if (check_current_patch_level(&rev, true))
-		return (struct container){ NULL, 0 };
+		return false;
 
-	ret = find_proper_container(ucode, size, &eq_id);
+	eq_id = find_proper_container(ucode, size, &ret);
 	if (!eq_id)
-		return (struct container){ NULL, 0 };
+		return false;
 
 	this_equiv_id = eq_id;
 	header = (u32 *)ret.data;
@@ -273,7 +276,11 @@ apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
 		data += offset;
 		left -= offset;
 	}
-	return ret;
+
+	if (ret_cont)
+		*ret_cont = ret;
+
+	return true;
 }
 
 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -294,6 +301,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 void __init load_ucode_amd_bsp(unsigned int family)
 {
 	struct ucode_cpu_info *uci;
+	u32 eax, ebx, ecx, edx;
 	struct cpio_data cp;
 	const char *path;
 	bool use_pa;
@@ -315,9 +323,12 @@ void __init load_ucode_amd_bsp(unsigned int family)
 		return;
 
 	/* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
-	uci->cpu_sig.sig = cpuid_eax(1);
+	eax = 1;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+	uci->cpu_sig.sig = eax;
 
-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }
 
 #ifdef CONFIG_X86_32
@@ -349,7 +360,7 @@ void load_ucode_amd_ap(unsigned int family)
 	 * This would set amd_ucode_patch above so that the following APs can
 	 * use it directly instead of going down this path again.
 	 */
-	apply_microcode_early_amd(cp.data, cp.size, true);
+	apply_microcode_early_amd(cp.data, cp.size, true, NULL);
 }
 #else
 void load_ucode_amd_ap(unsigned int family)
@@ -387,8 +398,7 @@ reget:
 		}
 	}
 
-	cont = apply_microcode_early_amd(cp.data, cp.size, false);
-	if (!(cont.data && cont.size)) {
+	if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) {
 		cont.size = -1;
 		return;
 	}
@@ -443,7 +453,7 @@ int __init save_microcode_in_initrd_amd(unsigned int fam)
 		return -EINVAL;
 	}
 
-	cont = find_proper_container(cp.data, cp.size, &eq_id);
+	eq_id = find_proper_container(cp.data, cp.size, &cont);
 	if (!eq_id) {
 		cont.size = -1;
 		return -EINVAL;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 6996413c78c3..2af69d27da62 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -44,7 +44,7 @@
 #define DRIVER_VERSION "2.2"
 
 static struct microcode_ops *microcode_ops;
-static bool dis_ucode_ldr;
+static bool dis_ucode_ldr = true;
 
 LIST_HEAD(microcode_cache);
 
@@ -76,6 +76,7 @@ struct cpu_info_ctx {
 static bool __init check_loader_disabled_bsp(void)
 {
 	static const char *__dis_opt_str = "dis_ucode_ldr";
+	u32 a, b, c, d;
 
 #ifdef CONFIG_X86_32
 	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -88,8 +89,23 @@ static bool __init check_loader_disabled_bsp(void)
 	bool *res = &dis_ucode_ldr;
 #endif
 
-	if (cmdline_find_option_bool(cmdline, option))
-		*res = true;
+	if (!have_cpuid_p())
+		return *res;
+
+	a = 1;
+	c = 0;
+	native_cpuid(&a, &b, &c, &d);
+
+	/*
+	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
+	 * completely accurate as xen pv guests don't see that CPUID bit set but
+	 * that's good enough as they don't land on the BSP path anyway.
+	 */
+	if (c & BIT(31))
+		return *res;
+
+	if (cmdline_find_option_bool(cmdline, option) <= 0)
+		*res = false;
 
 	return *res;
 }
@@ -121,9 +137,6 @@ void __init load_ucode_bsp(void)
 	if (check_loader_disabled_bsp())
 		return;
 
-	if (!have_cpuid_p())
-		return;
-
 	vendor = x86_cpuid_vendor();
 	family = x86_cpuid_family();
 
@@ -157,9 +170,6 @@ void load_ucode_ap(void)
 	if (check_loader_disabled_ap())
 		return;
 
-	if (!have_cpuid_p())
-		return;
-
 	vendor = x86_cpuid_vendor();
 	family = x86_cpuid_family();
 
@@ -233,14 +243,12 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
 # endif
 
 	/*
-	 * Did we relocate the ramdisk?
-	 *
-	 * So we possibly relocate the ramdisk *after* applying microcode on the
-	 * BSP so we rely on use_pa (use physical addresses) - even if it is not
-	 * absolutely correct - to determine whether we've done the ramdisk
-	 * relocation already.
+	 * Fixup the start address: after reserve_initrd() runs, initrd_start
+	 * has the virtual address of the beginning of the initrd. It also
+	 * possibly relocates the ramdisk. In either case, initrd_start contains
+	 * the updated address so use that instead.
 	 */
-	if (!use_pa && relocated_ramdisk)
+	if (!use_pa && initrd_start)
 		start = initrd_start;
 
 	return find_cpio_data(path, (void *)start, size, NULL);
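The new early check keys off CPUID leaf 1, ECX bit 31, which is reserved for hypervisor use. A standalone userspace sketch of the same test (not kernel code; it uses the compiler-provided <cpuid.h> helper, which is an assumption outside this diff):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;		/* CPUID leaf 1 unavailable */

        /* Bit 31 of ECX is set when running under a hypervisor. */
        printf("hypervisor bit: %s\n", (ecx & (1u << 31)) ? "set" : "clear");
        return 0;
    }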
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 54d50c3694d8..b624b54912e1 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -368,6 +368,26 @@ next:
 	return patch;
 }
 
+static void cpuid_1(void)
+{
+	/*
+	 * According to the Intel SDM, Volume 3, 9.11.7:
+	 *
+	 *   CPUID returns a value in a model specific register in
+	 *   addition to its usual register return values. The
+	 *   semantics of CPUID cause it to deposit an update ID value
+	 *   in the 64-bit model-specific register at address 08BH
+	 *   (IA32_BIOS_SIGN_ID). If no update is present in the
+	 *   processor, the value in the MSR remains unmodified.
+	 *
+	 * Use native_cpuid -- this code runs very early and we don't
+	 * want to mess with paravirt.
+	 */
+	unsigned int eax = 1, ebx, ecx = 0, edx;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+}
+
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
@@ -393,7 +413,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
@@ -593,7 +613,7 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
@@ -805,7 +825,7 @@ static int apply_microcode_intel(int cpu)
 	wrmsrl(MSR_IA32_UCODE_REV, 0);
 
 	/* As documented in the SDM: Do a CPUID 1 here */
-	sync_core();
+	cpuid_1();
 
 	/* get the current revision from MSR 0x8B */
 	rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 6c044543545e..f37e02e41a77 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -30,6 +30,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -157,6 +158,26 @@ static unsigned char hv_get_nmi_reason(void)
 	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -182,6 +203,9 @@ static void __init ms_hyperv_init_platform(void)
 		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
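The handler above claims the unknown NMI for exactly one CPU with an atomic compare-and-swap. A generic sketch of that idiom (claim_event() is a hypothetical helper, not part of this diff):

    static atomic_t claimed_cpu = ATOMIC_INIT(-1);

    /* Returns true only on the first CPU to get here; later callers back off. */
    static bool claim_event(void)
    {
        return atomic_cmpxchg(&claimed_cpu, -1, raw_smp_processor_id()) == -1;
    }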
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index d33ef165b1f8..553acbbb4d32 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -68,7 +68,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 #endif
 
 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index f4fcf26c9fce..11aaf1eaa0e4 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -80,7 +80,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 #endif
 
 	default:
-patch_default:
+patch_default: __maybe_unused
 		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
 		break;
 
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 24a50301f150..91271122f0df 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -6,6 +6,7 @@
 
 void __init x86_early_init_platform_quirks(void)
 {
+	x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
 	x86_platform.legacy.rtc = 1;
 	x86_platform.legacy.reserve_bios_regions = 0;
 	x86_platform.legacy.devices.pnpbios = 1;
@@ -16,10 +17,14 @@ void __init x86_early_init_platform_quirks(void)
 		break;
 	case X86_SUBARCH_XEN:
 	case X86_SUBARCH_LGUEST:
+		x86_platform.legacy.devices.pnpbios = 0;
+		x86_platform.legacy.rtc = 0;
+		break;
 	case X86_SUBARCH_INTEL_MID:
 	case X86_SUBARCH_CE4100:
 		x86_platform.legacy.devices.pnpbios = 0;
 		x86_platform.legacy.rtc = 0;
+		x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
 		break;
 	}
 
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index ea7b7f9a3b9e..4443e499f279 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,37 @@
 
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
 
+static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+{
+	static bool dumped_before = false;
+	bool prev_zero, zero = false;
+	unsigned long word;
+
+	if (dumped_before)
+		return;
+
+	dumped_before = true;
+
+	printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+			state->stack_info.type, state->stack_info.next_sp,
+			state->stack_mask, state->graph_idx);
+
+	for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
+		word = READ_ONCE_NOCHECK(*sp);
+
+		prev_zero = zero;
+		zero = word == 0;
+
+		if (zero) {
+			if (!prev_zero)
+				printk_deferred("%p: %016x ...\n", sp, 0);
+			continue;
+		}
+
+		printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word);
+	}
+}
+
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
 	unsigned long addr;
@@ -20,15 +51,7 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
 	addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
 				     addr_p);
 
-	if (!__kernel_text_address(addr)) {
-		printk_deferred_once(KERN_WARNING
-			"WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
-			(void *)addr, addr_p, state->task->comm,
-			state->task->pid);
-		return 0;
-	}
-
-	return addr;
+	return __kernel_text_address(addr) ? addr : 0;
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
@@ -46,7 +69,14 @@ static bool is_last_task_frame(struct unwind_state *state)
 	unsigned long bp = (unsigned long)state->bp;
 	unsigned long regs = (unsigned long)task_pt_regs(state->task);
 
-	return bp == regs - FRAME_HEADER_SIZE;
+	/*
+	 * We have to check for the last task frame at two different locations
+	 * because gcc can occasionally decide to realign the stack pointer and
+	 * change the offset of the stack frame by a word in the prologue of a
+	 * function called by head/entry code.
+	 */
+	return bp == regs - FRAME_HEADER_SIZE ||
+	       bp == regs - FRAME_HEADER_SIZE - sizeof(long);
 }
 
 /*
@@ -67,6 +97,7 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
			       size_t len)
 {
 	struct stack_info *info = &state->stack_info;
+	enum stack_type orig_type = info->type;
 
 	/*
 	 * If addr isn't on the current stack, switch to the next one.
@@ -80,6 +111,9 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
			  &state->stack_mask))
 		return false;
 
+	if (!state->orig_sp || info->type != orig_type)
+		state->orig_sp = addr;
+
 	return true;
 }
 
@@ -178,11 +212,13 @@ bad_address:
			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
			state->regs, state->task->comm,
			state->task->pid, next_frame);
+		unwind_dump(state, (unsigned long *)state->regs);
 	} else {
 		printk_deferred_once(KERN_WARNING
			"WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
			state->bp, state->task->comm,
			state->task->pid, next_frame);
+		unwind_dump(state, state->bp);
 	}
 the_end:
 	state->stack_info.type = STACK_TYPE_UNKNOWN;
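For context, a sketch of how callers typically drive the frame-pointer unwinder being modified here (assuming the unwind_start()/unwind_done()/unwind_next_frame()/unwind_get_return_address() API from <asm/unwind.h>; dump_return_addresses() is a hypothetical consumer):

    static void dump_return_addresses(struct task_struct *task,
                                      struct pt_regs *regs,
                                      unsigned long *first_frame)
    {
        struct unwind_state state;
        unsigned long addr;

        for (unwind_start(&state, task, regs, first_frame);
             !unwind_done(&state);
             unwind_next_frame(&state)) {
            addr = unwind_get_return_address(&state);
            if (!addr)		/* non-text address: now returned silently as 0 */
                break;
            printk("  %pB\n", (void *)addr);
        }
    }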
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 0bd9f1287f39..11a93f005268 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -89,7 +89,6 @@ struct x86_cpuinit_ops x86_cpuinit = {
 };
 
 static void default_nmi_init(void) { };
-static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform __ro_after_init = {
 	.calibrate_cpu = native_calibrate_cpu,
@@ -100,7 +99,6 @@ struct x86_platform_ops x86_platform __ro_after_init = {
 	.is_untracked_pat_range = is_ISA_range,
 	.nmi_init = default_nmi_init,
 	.get_nmi_reason = default_get_nmi_reason,
-	.i8042_detect = default_i8042_detect,
 	.save_sched_clock_state = tsc_save_sched_clock_state,
 	.restore_sched_clock_state = tsc_restore_sched_clock_state,
 };
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 821cb41f00e6..ce4b06733c09 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -23,11 +23,6 @@
 #include <asm/io_apic.h>
 #include <asm/emergency-restart.h>
 
-static int ce4100_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * The CE4100 platform has an internal 8051 Microcontroller which is
  * responsible for signaling to the external Power Management Unit the
@@ -145,7 +140,6 @@ static void sdv_pci_init(void)
 void __init x86_ce4100_early_setup(void)
 {
 	x86_init.oem.arch_setup = sdv_arch_setup;
-	x86_platform.i8042_detect = ce4100_i8042_detect;
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = x86_init_noop;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index dd6cfa4ad3ac..61b5ed2b7d40 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -19,7 +19,7 @@ obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
+obj-$(subst m,y,$(CONFIG_MPU3050_I2C)) += platform_mpu3050.o
 obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
 obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
 # I2C GPIO Expanders
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 7850128f0026..12a272582cdc 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -161,12 +161,6 @@ out:
 	regulator_has_full_constraints();
 }
 
-/* MID systems don't have i8042 controller */
-static int intel_mid_i8042_detect(void)
-{
-	return 0;
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -197,7 +191,6 @@ void __init x86_intel_mid_early_setup(void)
 	x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
 
 	x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
-	x86_platform.i8042_detect = intel_mid_i8042_detect;
 	x86_init.timers.wallclock_init = intel_mid_rtc_init;
 	x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
 
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index f5bad40936ac..b8f562049cad 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -25,7 +25,8 @@
 * @fmt: format string.
 * ... variadic argument list.
 */
-static void __init imr_self_test_result(int res, const char *fmt, ...)
+static __printf(2, 3)
+void __init imr_self_test_result(int res, const char *fmt, ...)
 {
 	va_list vlist;
 
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8d929d..73eb7fd4aec4 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
 		die("Segment relocations found but --realmode not specified\n");
 
 	/* Order the relocations for more efficient processing */
-	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#else
+	sort_relocs(&relocs16);
 #endif
 
 	/* Print the relocations */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 73a4e68448fc..77551f522202 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -983,7 +983,11 @@ static int __init i8042_pnp_init(void)
 #if defined(__ia64__)
 		return -ENODEV;
 #else
-		pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
+		pr_info("PNP: No PS/2 controller found.\n");
+		if (x86_platform.legacy.i8042 !=
+				X86_LEGACY_I8042_EXPECTED_PRESENT)
+			return -ENODEV;
+		pr_info("Probing ports directly.\n");
 		return 0;
 #endif
 	}
@@ -1070,8 +1074,8 @@ static int __init i8042_platform_init(void)
 
 #ifdef CONFIG_X86
 	u8 a20_on = 0xdf;
-	/* Just return if pre-detection shows no i8042 controller exist */
-	if (!x86_platform.i8042_detect())
+	/* Just return if platform does not have i8042 controller */
+	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
 		return -ENODEV;
 #endif
 