author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-03 20:51:33 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-03 20:51:33 -0500
commit		b831ef2cad979912850e34f82415c0c5d59de8cb
tree		38bcd57fbb4dcce38a0ca926b1b765fd5e16cab2
parent		b02ac6b18cd4e2c76bf0a102c20c429b973f5f76
parent		dc34bdd2367fd31744ee3ba1de1b1cc0fa2ce193
Merge branch 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RAS changes from Ingo Molnar:
"The main system reliability related changes were from x86, but also
some generic RAS changes:
- AMD MCE error injection subsystem enhancements. (Aravind
Gopalakrishnan)
- Fix MCE and CPU hotplug interaction bug. (Ashok Raj)
- kcrash bootup robustness fix. (Baoquan He)
- kcrash cleanups. (Borislav Petkov)
- x86 microcode driver rework: simplify it by unmodularizing it and
other cleanups. (Borislav Petkov)"
* 'ras-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
x86/mce: Add a default case to the switch in __mcheck_cpu_ancient_init()
x86/mce: Add a Scalable MCA vendor flags bit
MAINTAINERS: Unify the microcode driver section
x86/microcode/intel: Move #ifdef DEBUG inside the function
x86/microcode/amd: Remove maintainers from comments
x86/microcode: Remove modularization leftovers
x86/microcode: Merge the early microcode loader
x86/microcode: Unmodularize the microcode driver
x86/mce: Fix thermal throttling reporting after kexec
kexec/crash: Say which char is the unrecognized
x86/setup/crash: Check memblock_reserve() retval
x86/setup/crash: Cleanup some more
x86/setup/crash: Remove alignment variable
x86/setup: Cleanup crashkernel reservation functions
x86/amd_nb, EDAC: Rename amd_get_node_id()
x86/setup: Do not reserve crashkernel high memory if low reservation failed
x86/microcode/amd: Do not overwrite final patch levels
x86/microcode/amd: Extract current patch level read to a function
x86/ras/mce_amd_inj: Inject bank 4 errors on the NBC
x86/ras/mce_amd_inj: Trigger deferred and thresholding errors interrupts
...
25 files changed, 1678 insertions(+), 1661 deletions(-)
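
The unmodularization work in this pull converts the microcode loader from a module into built-in code, so the old `dis_ucode_ldr` module parameter becomes a kernel command-line switch. A minimal sketch of the new `__setup()` hook, lifted from the core.c hunk at the end of this diff:

	/* Built-in drivers cannot take module parameters, so the loader is
	 * now disabled with "dis_ucode_ldr" on the kernel command line. */
	static bool dis_ucode_ldr;

	static int __init disable_loader(char *str)
	{
		dis_ucode_ldr = true;
		return 1;
	}
	__setup("dis_ucode_ldr", disable_loader);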
diff --git a/MAINTAINERS b/MAINTAINERS
index 6a0b9ca65407..dcc8ed6fccde 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -660,11 +660,6 @@ F:	drivers/gpu/drm/radeon/radeon_kfd.c
 F:	drivers/gpu/drm/radeon/radeon_kfd.h
 F:	include/uapi/linux/kfd_ioctl.h
 
-AMD MICROCODE UPDATE SUPPORT
-M:	Borislav Petkov <bp@alien8.de>
-S:	Maintained
-F:	arch/x86/kernel/cpu/microcode/amd*
-
 AMD XGBE DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
 L:	netdev@vger.kernel.org
@@ -5468,12 +5463,6 @@ W:	https://01.org/linux-acpi
 S:	Supported
 F:	drivers/platform/x86/intel_menlow.c
 
-INTEL IA32 MICROCODE UPDATE SUPPORT
-M:	Borislav Petkov <bp@alien8.de>
-S:	Maintained
-F:	arch/x86/kernel/cpu/microcode/core*
-F:	arch/x86/kernel/cpu/microcode/intel*
-
 INTEL I/OAT DMA DRIVER
 M:	Dave Jiang <dave.jiang@intel.com>
 R:	Dan Williams <dan.j.williams@intel.com>
@@ -11505,6 +11494,11 @@ L:	linux-edac@vger.kernel.org
 S:	Maintained
 F:	arch/x86/kernel/cpu/mcheck/*
 
+X86 MICROCODE UPDATE SUPPORT
+M:	Borislav Petkov <bp@alien8.de>
+S:	Maintained
+F:	arch/x86/kernel/cpu/microcode/*
+
 X86 VDSO
 M:	Andy Lutomirski <luto@amacapital.net>
 L:	linux-kernel@vger.kernel.org
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 96d058a87100..255ea22ccbec 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1123,8 +1123,10 @@ config X86_REBOOTFIXUPS
 	  Say N otherwise.
 
 config MICROCODE
-	tristate "CPU microcode loading support"
+	bool "CPU microcode loading support"
+	default y
 	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+	depends on BLK_DEV_INITRD
 	select FW_LOADER
 	---help---
 
@@ -1166,24 +1168,6 @@ config MICROCODE_OLD_INTERFACE
 	def_bool y
 	depends on MICROCODE
 
-config MICROCODE_INTEL_EARLY
-	bool
-
-config MICROCODE_AMD_EARLY
-	bool
-
-config MICROCODE_EARLY
-	bool "Early load microcode"
-	depends on MICROCODE=y && BLK_DEV_INITRD
-	select MICROCODE_INTEL_EARLY if MICROCODE_INTEL
-	select MICROCODE_AMD_EARLY if MICROCODE_AMD
-	default y
-	help
-	  This option provides functionality to read additional microcode data
-	  at the beginning of initrd image. The data tells kernel to load
-	  microcode to CPU's as early as possible. No functional change if no
-	  microcode data is glued to the initrd, therefore it's safe to say Y.
-
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
 	---help---
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 1a5da2e63aee..3c56ef1ae068 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -81,7 +81,7 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
 	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
-static inline u16 amd_get_node_id(struct pci_dev *pdev)
+static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
 {
 	struct pci_dev *misc;
 	int i;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 2dbc0bf2b9f3..2ea4527e462f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -123,19 +123,27 @@ struct mca_config {
 };
 
 struct mce_vendor_flags {
-	/*
-	 * overflow recovery cpuid bit indicates that overflow
-	 * conditions are not fatal
-	 */
-	__u64 overflow_recov : 1,
-
-	/*
-	 * SUCCOR stands for S/W UnCorrectable error COntainment
-	 * and Recovery. It indicates support for data poisoning
-	 * in HW and deferred error interrupts.
-	 */
-	      succor : 1,
-	      __reserved_0 : 62;
+	/*
+	 * Indicates that overflow conditions are not fatal, when set.
+	 */
+	__u64 overflow_recov	: 1,
+
+	/*
+	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
+	 * Recovery. It indicates support for data poisoning in HW and deferred
+	 * error interrupts.
+	 */
+	      succor		: 1,
+
+	/*
+	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
+	 * the register space for each MCA bank and also increases number of
+	 * banks. Also, to accommodate the new banks and registers, the MCA
+	 * register space is moved to a new MSR range.
+	 */
+	      smca		: 1,
+
+	      __reserved_0	: 61;
 };
 extern struct mce_vendor_flags mce_flags;
 
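
These vendor flag bits come from CPUID leaf 0x80000007, EBX. A hedged sketch of how they get populated (the helper name is hypothetical; in the actual patch the assignments sit inline in __mcheck_cpu_init_vendor(), in the mce.c hunk later in this diff):

	#include <linux/bits.h>

	/* Hypothetical helper: the real assignments are inline in
	 * __mcheck_cpu_init_vendor() in the mce.c hunk below. */
	static void set_amd_mce_flags(u32 ebx)
	{
		mce_flags.overflow_recov = !!(ebx & BIT(0));
		mce_flags.succor	 = !!(ebx & BIT(1));
		mce_flags.smca		 = !!(ebx & BIT(3));
	}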
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 9e6278c7140e..34e62b1dcfce 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -27,7 +27,6 @@ struct cpu_signature {
 struct device;
 
 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
-extern bool dis_ucode_ldr;
 
 struct microcode_ops {
 	enum ucode_state (*request_microcode_user) (int cpu,
@@ -55,6 +54,12 @@ struct ucode_cpu_info {
 };
 extern struct ucode_cpu_info ucode_cpu_info[];
 
+#ifdef CONFIG_MICROCODE
+int __init microcode_init(void);
+#else
+static inline int __init microcode_init(void)	{ return 0; };
+#endif
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern struct microcode_ops * __init init_intel_microcode(void);
 #else
@@ -75,7 +80,6 @@ static inline struct microcode_ops * __init init_amd_microcode(void)
 static inline void __exit exit_amd_microcode(void) {}
 #endif
 
-#ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 
 #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
@@ -150,22 +154,18 @@ static inline unsigned int x86_model(unsigned int sig)
 	return model;
 }
 
+#ifdef CONFIG_MICROCODE
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
-static inline void __init load_ucode_bsp(void) {}
-static inline void load_ucode_ap(void) {}
-static inline int __init save_microcode_in_initrd(void)
-{
-	return 0;
-}
-static inline void reload_early_microcode(void) {}
-static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
-{
-	return false;
-}
+static inline void __init load_ucode_bsp(void)			{ }
+static inline void load_ucode_ap(void)				{ }
+static inline int __init save_microcode_in_initrd(void)	{ return 0; }
+static inline void reload_early_microcode(void)		{ }
+static inline bool
+get_builtin_firmware(struct cpio_data *cd, const char *name)	{ return false; }
 #endif
 #endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index ac6d328977a6..adfc847a395e 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -64,7 +64,7 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s
 #define PATCH_MAX_SIZE PAGE_SIZE
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 
-#ifdef CONFIG_MICROCODE_AMD_EARLY
+#ifdef CONFIG_MICROCODE_AMD
 extern void __init load_ucode_amd_bsp(unsigned int family);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
@@ -76,4 +76,5 @@ static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
 
+extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
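
The newly exported check_current_patch_level() replaces bare MSR_AMD64_PATCH_LEVEL reads in all update paths. A sketch of the caller pattern (hypothetical wrapper; it mirrors the apply_microcode_amd() hunk further down in this diff):

	/* Hypothetical wrapper showing the new calling convention: the helper
	 * returns true when the CPU sits on a final patch level and the
	 * update must not proceed. early==false because the late-load path
	 * runs with paging enabled. */
	static int try_late_update(struct microcode_amd *mc_amd)
	{
		u32 rev;

		if (check_current_patch_level(&rev, false))
			return -1;

		if (rev >= mc_amd->hdr.patch_id)
			return 0;	/* already at least as new */

		return __apply_microcode_amd(mc_amd);
	}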
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 7991c606125d..8559b0102ea1 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -57,7 +57,7 @@ extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
 extern int microcode_sanity_check(void *mc, int print_err);
 extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
 
-#ifdef CONFIG_MICROCODE_INTEL_EARLY
+#ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
@@ -71,13 +71,9 @@ static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL;
 static inline void reload_ucode_intel(void) {}
 #endif
 
-#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+#ifdef CONFIG_HOTPLUG_CPU
 extern int save_mc_for_early(u8 *mc);
 #else
-static inline int save_mc_for_early(u8 *mc)
-{
-	return 0;
-}
+static inline int save_mc_for_early(u8 *mc) { return 0; }
 #endif
-
 #endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 76880ede9a35..03429da2fa80 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -2,7 +2,7 @@
 #define _UAPI_ASM_X86_MCE_H
 
 #include <linux/types.h>
-#include <asm/ioctls.h>
+#include <linux/ioctl.h>
 
 /* Fields are zero when not available */
 struct mce {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 9d014b82a124..c5b0d562dbf5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1586,6 +1586,8 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 		winchip_mcheck_init(c);
 		return 1;
 		break;
+	default:
+		return 0;
 	}
 
 	return 0;
@@ -1605,6 +1607,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_amd_feature_init(c);
 		mce_flags.overflow_recov = !!(ebx & BIT(0));
 		mce_flags.succor	 = !!(ebx & BIT(1));
+		mce_flags.smca		 = !!(ebx & BIT(3));
+
 		break;
 	}
 
@@ -2042,7 +2046,7 @@ int __init mcheck_init(void)
  * Disable machine checks on suspend and shutdown. We can't really handle
  * them later.
  */
-static int mce_disable_error_reporting(void)
+static void mce_disable_error_reporting(void)
 {
 	int i;
 
@@ -2052,17 +2056,32 @@ static int mce_disable_error_reporting(void)
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
 	}
-	return 0;
+	return;
+}
+
+static void vendor_disable_error_reporting(void)
+{
+	/*
+	 * Don't clear on Intel CPUs. Some of these MSRs are socket-wide.
+	 * Disabling them for just a single offlined CPU is bad, since it will
+	 * inhibit reporting for all shared resources on the socket like the
+	 * last level cache (LLC), the integrated memory controller (iMC), etc.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return;
+
+	mce_disable_error_reporting();
 }
 
 static int mce_syscore_suspend(void)
 {
-	return mce_disable_error_reporting();
+	vendor_disable_error_reporting();
+	return 0;
 }
 
 static void mce_syscore_shutdown(void)
 {
-	mce_disable_error_reporting();
+	vendor_disable_error_reporting();
 }
 
 /*
@@ -2342,19 +2361,14 @@ static void mce_device_remove(unsigned int cpu)
 static void mce_disable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
-	int i;
 
 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
-	for (i = 0; i < mca_cfg.banks; i++) {
-		struct mce_bank *b = &mce_banks[i];
 
-		if (b->init)
-			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
-	}
+	vendor_disable_error_reporting();
 }
 
 static void mce_reenable_cpu(void *h)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 1af51b1586d7..2c5aaf8c2e2f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -503,14 +503,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 		return;
 	}
 
-	/* Check whether a vector already exists */
-	if (h & APIC_VECTOR_MASK) {
-		printk(KERN_DEBUG
-		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
-		       cpu, (h & APIC_VECTOR_MASK));
-		return;
-	}
-
 	/* early Pentium M models use different method for enabling TM2 */
 	if (cpu_has(c, X86_FEATURE_TM2)) {
 		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
index 285c85427c32..220b1a508513 100644
--- a/arch/x86/kernel/cpu/microcode/Makefile
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -2,6 +2,3 @@ microcode-y := core.o
 obj-$(CONFIG_MICROCODE) += microcode.o
 microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
 microcode-$(CONFIG_MICROCODE_AMD) += amd.o
-obj-$(CONFIG_MICROCODE_EARLY) += core_early.o
-obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o
-obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 12829c3ced3c..2233f8a76615 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -1,5 +1,9 @@
 /*
  * AMD CPU Microcode Update Driver for Linux
+ *
+ * This driver allows to upgrade microcode on F10h AMD
+ * CPUs and later.
+ *
  * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
  *
  * Author: Peter Oruba <peter.oruba@amd.com>
@@ -7,34 +11,31 @@
  * Based on work by:
  * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
- * Maintainers:
- * Andreas Herrmann <herrmann.der.user@googlemail.com>
- * Borislav Petkov <bp@alien8.de>
+ * early loader:
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
  *
- * This driver allows to upgrade microcode on F10h AMD
- * CPUs and later.
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ * Fixes: Borislav Petkov <bp@suse.de>
  *
  * Licensed under the terms of the GNU General Public
  * License version 2. See file COPYING for details.
  */
+#define pr_fmt(fmt) "microcode: " fmt
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/earlycpio.h>
 #include <linux/firmware.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
+#include <linux/initrd.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/pci.h>
 
+#include <asm/microcode_amd.h>
 #include <asm/microcode.h>
 #include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/cpu.h>
 #include <asm/msr.h>
-#include <asm/microcode_amd.h>
-
-MODULE_DESCRIPTION("AMD Microcode Update Driver");
-MODULE_AUTHOR("Peter Oruba");
-MODULE_LICENSE("GPL v2");
 
 static struct equiv_cpu_entry *equiv_cpu_table;
 
@@ -47,6 +48,432 @@ struct ucode_patch {
 
 static LIST_HEAD(pcache);
 
+/*
+ * This points to the current valid container of microcode patches which we will
+ * save from the initrd before jettisoning its contents.
+ */
+static u8 *container;
+static size_t container_size;
+
+static u32 ucode_new_rev;
+u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u16 this_equiv_id;
+
+static struct cpio_data ucode_cpio;
+
+/*
+ * Microcode patch container file is prepended to the initrd in cpio format.
+ * See Documentation/x86/early-microcode.txt
+ */
+static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
+
+static struct cpio_data __init find_ucode_in_initrd(void)
+{
+	long offset = 0;
+	char *path;
+	void *start;
+	size_t size;
+
+#ifdef CONFIG_X86_32
+	struct boot_params *p;
+
+	/*
+	 * On 32-bit, early load occurs before paging is turned on so we need
+	 * to use physical addresses.
+	 */
+	p     = (struct boot_params *)__pa_nodebug(&boot_params);
+	path  = (char *)__pa_nodebug(ucode_path);
+	start = (void *)p->hdr.ramdisk_image;
+	size  = p->hdr.ramdisk_size;
+#else
+	path  = ucode_path;
+	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
+	size  = boot_params.hdr.ramdisk_size;
+#endif
+
+	return find_cpio_data(path, start, size, &offset);
+}
+
+static size_t compute_container_size(u8 *data, u32 total_size)
+{
+	size_t size = 0;
+	u32 *header = (u32 *)data;
+
+	if (header[0] != UCODE_MAGIC ||
+	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+	    header[2] == 0)                            /* size */
+		return size;
+
+	size = header[2] + CONTAINER_HDR_SZ;
+	total_size -= size;
+	data += size;
+
+	while (total_size) {
+		u16 patch_size;
+
+		header = (u32 *)data;
+
+		if (header[0] != UCODE_UCODE_TYPE)
+			break;
+
+		/*
+		 * Sanity-check patch size.
+		 */
+		patch_size = header[1];
+		if (patch_size > PATCH_MAX_SIZE)
+			break;
+
+		size	   += patch_size + SECTION_HDR_SIZE;
+		data	   += patch_size + SECTION_HDR_SIZE;
+		total_size -= patch_size + SECTION_HDR_SIZE;
+	}
+
+	return size;
+}
+
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
+static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
+{
+	struct equiv_cpu_entry *eq;
+	size_t *cont_sz;
+	u32 *header;
+	u8  *data, **cont;
+	u8 (*patch)[PATCH_MAX_SIZE];
+	u16 eq_id = 0;
+	int offset, left;
+	u32 rev, eax, ebx, ecx, edx;
+	u32 *new_rev;
+
+#ifdef CONFIG_X86_32
+	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+	cont_sz = (size_t *)__pa_nodebug(&container_size);
+	cont	= (u8 **)__pa_nodebug(&container);
+	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+#else
+	new_rev = &ucode_new_rev;
+	cont_sz = &container_size;
+	cont	= &container;
+	patch	= &amd_ucode_patch;
+#endif
+
+	data   = ucode;
+	left   = size;
+	header = (u32 *)data;
+
+	/* find equiv cpu table */
+	if (header[0] != UCODE_MAGIC ||
+	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+	    header[2] == 0)                            /* size */
+		return;
+
+	eax = 0x00000001;
+	ecx = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	while (left > 0) {
+		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+
+		*cont = data;
+
+		/* Advance past the container header */
+		offset = header[2] + CONTAINER_HDR_SZ;
+		data  += offset;
+		left  -= offset;
+
+		eq_id = find_equiv_id(eq, eax);
+		if (eq_id) {
+			this_equiv_id = eq_id;
+			*cont_sz = compute_container_size(*cont, left + offset);
+
+			/*
+			 * truncate how much we need to iterate over in the
+			 * ucode update loop below
+			 */
+			left = *cont_sz - offset;
+			break;
+		}
+
+		/*
+		 * support multiple container files appended together. if this
+		 * one does not have a matching equivalent cpu entry, we fast
+		 * forward to the next container file.
+		 */
+		while (left > 0) {
+			header = (u32 *)data;
+			if (header[0] == UCODE_MAGIC &&
+			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
+				break;
+
+			offset = header[1] + SECTION_HDR_SIZE;
+			data  += offset;
+			left  -= offset;
+		}
+
+		/* mark where the next microcode container file starts */
+		offset = data - (u8 *)ucode;
+		ucode  = data;
+	}
+
+	if (!eq_id) {
+		*cont = NULL;
+		*cont_sz = 0;
+		return;
+	}
+
+	if (check_current_patch_level(&rev, true))
+		return;
+
+	while (left > 0) {
+		struct microcode_amd *mc;
+
+		header = (u32 *)data;
+		if (header[0] != UCODE_UCODE_TYPE || /* type */
+		    header[1] == 0)                  /* size */
+			break;
+
+		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
+
+		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+
+			if (!__apply_microcode_amd(mc)) {
+				rev = mc->hdr.patch_id;
+				*new_rev = rev;
+
+				if (save_patch)
+					memcpy(patch, mc,
+					       min_t(u32, header[1], PATCH_MAX_SIZE));
+			}
+		}
+
+		offset = header[1] + SECTION_HDR_SIZE;
+		data  += offset;
+		left  -= offset;
+	}
+}
+
+static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
+					      unsigned int family)
+{
+#ifdef CONFIG_X86_64
+	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+
+	if (family >= 0x15)
+		snprintf(fw_name, sizeof(fw_name),
+			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
+
+	return get_builtin_firmware(cp, fw_name);
+#else
+	return false;
+#endif
+}
+
+void __init load_ucode_amd_bsp(unsigned int family)
+{
+	struct cpio_data cp;
+	void **data;
+	size_t *size;
+
+#ifdef CONFIG_X86_32
+	data = (void **)__pa_nodebug(&ucode_cpio.data);
+	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+#else
+	data = &ucode_cpio.data;
+	size = &ucode_cpio.size;
+#endif
+
+	cp = find_ucode_in_initrd();
+	if (!cp.data) {
+		if (!load_builtin_amd_microcode(&cp, family))
+			return;
+	}
+
+	*data = cp.data;
+	*size = cp.size;
+
+	apply_ucode_in_initrd(cp.data, cp.size, true);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * On 32-bit, since AP's early load occurs before paging is turned on, we
+ * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
+ * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
+ * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * which is used upon resume from suspend.
+ */
+void load_ucode_amd_ap(void)
+{
+	struct microcode_amd *mc;
+	size_t *usize;
+	void **ucode;
+
+	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
+		__apply_microcode_amd(mc);
+		return;
+	}
+
+	ucode = (void *)__pa_nodebug(&container);
+	usize = (size_t *)__pa_nodebug(&container_size);
+
+	if (!*ucode || !*usize)
+		return;
+
+	apply_ucode_in_initrd(*ucode, *usize, false);
+}
+
+static void __init collect_cpu_sig_on_bsp(void *arg)
+{
+	unsigned int cpu = smp_processor_id();
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
+	uci->cpu_sig.sig = cpuid_eax(0x00000001);
+}
+
+static void __init get_bsp_sig(void)
+{
+	unsigned int bsp = boot_cpu_data.cpu_index;
+	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+	if (!uci->cpu_sig.sig)
+		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
+#else
+void load_ucode_amd_ap(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct equiv_cpu_entry *eq;
+	struct microcode_amd *mc;
+	u32 rev, eax;
+	u16 eq_id;
+
+	/* Exit if called on the BSP. */
+	if (!cpu)
+		return;
+
+	if (!container)
+		return;
+
+	/*
+	 * 64-bit runs with paging enabled, thus early==false.
+	 */
+	if (check_current_patch_level(&rev, false))
+		return;
+
+	eax = cpuid_eax(0x00000001);
+	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+
+	eq_id = find_equiv_id(eq, eax);
+	if (!eq_id)
+		return;
+
+	if (eq_id == this_equiv_id) {
+		mc = (struct microcode_amd *)amd_ucode_patch;
+
+		if (mc && rev < mc->hdr.patch_id) {
+			if (!__apply_microcode_amd(mc))
+				ucode_new_rev = mc->hdr.patch_id;
+		}
+
+	} else {
+		if (!ucode_cpio.data)
+			return;
+
+		/*
+		 * AP has a different equivalence ID than BSP, looks like
+		 * mixed-steppings silicon so go through the ucode blob anew.
+		 */
+		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
+	}
+}
+#endif
+
+int __init save_microcode_in_initrd_amd(void)
+{
+	unsigned long cont;
+	int retval = 0;
+	enum ucode_state ret;
+	u8 *cont_va;
+	u32 eax;
+
+	if (!container)
+		return -EINVAL;
+
+#ifdef CONFIG_X86_32
+	get_bsp_sig();
+	cont	= (unsigned long)container;
+	cont_va	= __va(container);
+#else
+	/*
+	 * We need the physical address of the container for both bitness since
+	 * boot_params.hdr.ramdisk_image is a physical address.
+	 */
+	cont	= __pa(container);
+	cont_va	= container;
+#endif
+
+	/*
+	 * Take into account the fact that the ramdisk might get relocated and
+	 * therefore we need to recompute the container's position in virtual
+	 * memory space.
+	 */
+	if (relocated_ramdisk)
+		container = (u8 *)(__va(relocated_ramdisk) +
+			     (cont - boot_params.hdr.ramdisk_image));
+	else
+		container = cont_va;
+
+	if (ucode_new_rev)
+		pr_info("microcode: updated early to new patch_level=0x%08x\n",
+			ucode_new_rev);
+
+	eax   = cpuid_eax(0x00000001);
+	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
+	if (ret != UCODE_OK)
+		retval = -EINVAL;
+
+	/*
+	 * This will be freed any msec now, stash patches for the current
+	 * family and switch to patch cache for cpu hotplug, etc later.
+	 */
+	container = NULL;
+	container_size = 0;
+
+	return retval;
+}
+
+void reload_ucode_amd(void)
+{
+	struct microcode_amd *mc;
+	u32 rev;
+
+	/*
+	 * early==false because this is a syscore ->resume path and by
+	 * that time paging is long enabled.
+	 */
+	if (check_current_patch_level(&rev, false))
+		return;
+
+	mc = (struct microcode_amd *)amd_ucode_patch;
+
+	if (mc && rev < mc->hdr.patch_id) {
+		if (!__apply_microcode_amd(mc)) {
+			ucode_new_rev = mc->hdr.patch_id;
+			pr_info("microcode: reload patch_level=0x%08x\n",
+				ucode_new_rev);
+		}
+	}
+}
 static u16 __find_equiv_id(unsigned int cpu)
 {
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -177,6 +604,53 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 	return patch_size;
 }
 
+/*
+ * Those patch levels cannot be updated to newer ones and thus should be final.
+ */
+static u32 final_levels[] = {
+	0x01000098,
+	0x0100009f,
+	0x010000af,
+	0, /* T-101 terminator */
+};
+
+/*
+ * Check the current patch level on this CPU.
+ *
+ * @rev: Use it to return the patch level. It is set to 0 in the case of
+ * error.
+ *
+ * Returns:
+ *  - true: if update should stop
+ *  - false: otherwise
+ */
+bool check_current_patch_level(u32 *rev, bool early)
+{
+	u32 lvl, dummy, i;
+	bool ret = false;
+	u32 *levels;
+
+	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+
+	if (IS_ENABLED(CONFIG_X86_32) && early)
+		levels = (u32 *)__pa_nodebug(&final_levels);
+	else
+		levels = final_levels;
+
+	for (i = 0; levels[i]; i++) {
+		if (lvl == levels[i]) {
+			lvl = 0;
+			ret = true;
+			break;
+		}
+	}
+
+	if (rev)
+		*rev = lvl;
+
+	return ret;
+}
+
 int __apply_microcode_amd(struct microcode_amd *mc_amd)
 {
 	u32 rev, dummy;
@@ -197,7 +671,7 @@ int apply_microcode_amd(int cpu)
 	struct microcode_amd *mc_amd;
 	struct ucode_cpu_info *uci;
 	struct ucode_patch *p;
-	u32 rev, dummy;
+	u32 rev;
 
 	BUG_ON(raw_smp_processor_id() != cpu);
 
@@ -210,7 +684,8 @@ int apply_microcode_amd(int cpu)
 	mc_amd = p->data;
 	uci->mc = p->data;
 
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+	if (check_current_patch_level(&rev, false))
+		return -1;
 
 	/* need to apply patch? */
 	if (rev >= mc_amd->hdr.patch_id) {
@@ -387,7 +862,7 @@ enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t s
 	if (ret != UCODE_OK)
 		cleanup();
 
-#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
+#ifdef CONFIG_X86_32
 	/* save BSP's matching patch for early load */
 	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
 		struct ucode_patch *p = find_patch(cpu);
@@ -475,7 +950,7 @@ static struct microcode_ops microcode_amd_ops = {
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
-	struct cpuinfo_x86 *c = &cpu_data(0);
+	struct cpuinfo_x86 *c = &boot_cpu_data;
 
 	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
 		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
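
A recurring pattern in the merged early loader above: on 32-bit the BSP/AP load paths run before paging is enabled, so every global must be dereferenced through its physical address. A minimal sketch (hypothetical helper) isolating that pattern:

	/* Hypothetical helper showing the 32-bit physical-address access
	 * pattern used by load_ucode_amd_bsp()/load_ucode_amd_ap() above. */
	static void early_store_rev(u32 rev)
	{
	#ifdef CONFIG_X86_32
		/* Paging is off: convert the virtual address to physical. */
		u32 *p = (u32 *)__pa_nodebug(&ucode_new_rev);
	#else
		u32 *p = &ucode_new_rev;
	#endif
		*p = rev;
	}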
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
deleted file mode 100644
index e8a215a9a345..000000000000
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
- *
- * Author: Jacob Shin <jacob.shin@amd.com>
- * Fixes: Borislav Petkov <bp@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/earlycpio.h>
-#include <linux/initrd.h>
-
-#include <asm/cpu.h>
-#include <asm/setup.h>
-#include <asm/microcode_amd.h>
-
-/*
- * This points to the current valid container of microcode patches which we will
- * save from the initrd before jettisoning its contents.
- */
-static u8 *container;
-static size_t container_size;
-
-static u32 ucode_new_rev;
-u8 amd_ucode_patch[PATCH_MAX_SIZE];
-static u16 this_equiv_id;
-
-static struct cpio_data ucode_cpio;
-
-/*
- * Microcode patch container file is prepended to the initrd in cpio format.
- * See Documentation/x86/early-microcode.txt
- */
-static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
-
-static struct cpio_data __init find_ucode_in_initrd(void)
-{
-	long offset = 0;
-	char *path;
-	void *start;
-	size_t size;
-
-#ifdef CONFIG_X86_32
-	struct boot_params *p;
-
-	/*
-	 * On 32-bit, early load occurs before paging is turned on so we need
-	 * to use physical addresses.
-	 */
-	p     = (struct boot_params *)__pa_nodebug(&boot_params);
-	path  = (char *)__pa_nodebug(ucode_path);
-	start = (void *)p->hdr.ramdisk_image;
-	size  = p->hdr.ramdisk_size;
-#else
-	path  = ucode_path;
-	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
-	size  = boot_params.hdr.ramdisk_size;
-#endif
-
-	return find_cpio_data(path, start, size, &offset);
-}
-
-static size_t compute_container_size(u8 *data, u32 total_size)
-{
-	size_t size = 0;
-	u32 *header = (u32 *)data;
-
-	if (header[0] != UCODE_MAGIC ||
-	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-	    header[2] == 0)                            /* size */
-		return size;
-
-	size = header[2] + CONTAINER_HDR_SZ;
-	total_size -= size;
-	data += size;
-
-	while (total_size) {
-		u16 patch_size;
-
-		header = (u32 *)data;
-
-		if (header[0] != UCODE_UCODE_TYPE)
-			break;
-
-		/*
-		 * Sanity-check patch size.
-		 */
-		patch_size = header[1];
-		if (patch_size > PATCH_MAX_SIZE)
-			break;
-
-		size	   += patch_size + SECTION_HDR_SIZE;
-		data	   += patch_size + SECTION_HDR_SIZE;
-		total_size -= patch_size + SECTION_HDR_SIZE;
-	}
-
-	return size;
-}
-
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- */
-static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
-{
-	struct equiv_cpu_entry *eq;
-	size_t *cont_sz;
-	u32 *header;
-	u8  *data, **cont;
-	u8 (*patch)[PATCH_MAX_SIZE];
-	u16 eq_id = 0;
-	int offset, left;
-	u32 rev, eax, ebx, ecx, edx;
-	u32 *new_rev;
-
-#ifdef CONFIG_X86_32
-	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-	cont_sz = (size_t *)__pa_nodebug(&container_size);
-	cont	= (u8 **)__pa_nodebug(&container);
-	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
-#else
-	new_rev = &ucode_new_rev;
-	cont_sz = &container_size;
-	cont	= &container;
-	patch	= &amd_ucode_patch;
-#endif
-
-	data   = ucode;
-	left   = size;
-	header = (u32 *)data;
-
-	/* find equiv cpu table */
-	if (header[0] != UCODE_MAGIC ||
-	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
-	    header[2] == 0)                            /* size */
-		return;
-
-	eax = 0x00000001;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-
-	while (left > 0) {
-		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
-
-		*cont = data;
-
-		/* Advance past the container header */
-		offset = header[2] + CONTAINER_HDR_SZ;
-		data  += offset;
-		left  -= offset;
-
-		eq_id = find_equiv_id(eq, eax);
-		if (eq_id) {
-			this_equiv_id = eq_id;
-			*cont_sz = compute_container_size(*cont, left + offset);
-
-			/*
-			 * truncate how much we need to iterate over in the
-			 * ucode update loop below
-			 */
-			left = *cont_sz - offset;
-			break;
-		}
-
-		/*
-		 * support multiple container files appended together. if this
-		 * one does not have a matching equivalent cpu entry, we fast
-		 * forward to the next container file.
-		 */
-		while (left > 0) {
-			header = (u32 *)data;
-			if (header[0] == UCODE_MAGIC &&
-			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
-				break;
-
-			offset = header[1] + SECTION_HDR_SIZE;
-			data  += offset;
-			left  -= offset;
-		}
-
-		/* mark where the next microcode container file starts */
-		offset = data - (u8 *)ucode;
-		ucode  = data;
-	}
-
-	if (!eq_id) {
-		*cont = NULL;
-		*cont_sz = 0;
-		return;
-	}
-
-	/* find ucode and update if needed */
-
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-	while (left > 0) {
-		struct microcode_amd *mc;
-
-		header = (u32 *)data;
-		if (header[0] != UCODE_UCODE_TYPE || /* type */
-		    header[1] == 0)                  /* size */
-			break;
-
-		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
-
-		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
-
-			if (!__apply_microcode_amd(mc)) {
-				rev = mc->hdr.patch_id;
-				*new_rev = rev;
-
-				if (save_patch)
-					memcpy(patch, mc,
-					       min_t(u32, header[1], PATCH_MAX_SIZE));
-			}
-		}
-
-		offset = header[1] + SECTION_HDR_SIZE;
-		data  += offset;
-		left  -= offset;
-	}
-}
-
-static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
-					      unsigned int family)
-{
-#ifdef CONFIG_X86_64
-	char fw_name[36] = "amd-ucode/microcode_amd.bin";
-
-	if (family >= 0x15)
-		snprintf(fw_name, sizeof(fw_name),
-			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
-
-	return get_builtin_firmware(cp, fw_name);
-#else
-	return false;
-#endif
-}
-
-void __init load_ucode_amd_bsp(unsigned int family)
-{
-	struct cpio_data cp;
-	void **data;
-	size_t *size;
-
-#ifdef CONFIG_X86_32
-	data = (void **)__pa_nodebug(&ucode_cpio.data);
-	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
-#else
-	data = &ucode_cpio.data;
-	size = &ucode_cpio.size;
-#endif
-
-	cp = find_ucode_in_initrd();
-	if (!cp.data) {
-		if (!load_builtin_amd_microcode(&cp, family))
-			return;
-	}
-
-	*data = cp.data;
-	*size = cp.size;
-
-	apply_ucode_in_initrd(cp.data, cp.size, true);
-}
-
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
- * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(void)
-{
-	struct microcode_amd *mc;
-	size_t *usize;
-	void **ucode;
-
-	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-		__apply_microcode_amd(mc);
-		return;
-	}
-
-	ucode = (void *)__pa_nodebug(&container);
-	usize = (size_t *)__pa_nodebug(&container_size);
-
-	if (!*ucode || !*usize)
-		return;
-
-	apply_ucode_in_initrd(*ucode, *usize, false);
-}
-
-static void __init collect_cpu_sig_on_bsp(void *arg)
-{
-	unsigned int cpu = smp_processor_id();
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
-	uci->cpu_sig.sig = cpuid_eax(0x00000001);
-}
-
-static void __init get_bsp_sig(void)
-{
-	unsigned int bsp = boot_cpu_data.cpu_index;
-	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
-	if (!uci->cpu_sig.sig)
-		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
-}
-#else
-void load_ucode_amd_ap(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	struct equiv_cpu_entry *eq;
-	struct microcode_amd *mc;
-	u32 rev, eax;
-	u16 eq_id;
-
-	/* Exit if called on the BSP. */
-	if (!cpu)
-		return;
-
-	if (!container)
-		return;
-
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-	uci->cpu_sig.rev = rev;
-	uci->cpu_sig.sig = eax;
-
-	eax = cpuid_eax(0x00000001);
-	eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
-
-	eq_id = find_equiv_id(eq, eax);
-	if (!eq_id)
-		return;
-
-	if (eq_id == this_equiv_id) {
-		mc = (struct microcode_amd *)amd_ucode_patch;
-
-		if (mc && rev < mc->hdr.patch_id) {
-			if (!__apply_microcode_amd(mc))
-				ucode_new_rev = mc->hdr.patch_id;
-		}
-
-	} else {
-		if (!ucode_cpio.data)
-			return;
-
-		/*
-		 * AP has a different equivalence ID than BSP, looks like
-		 * mixed-steppings silicon so go through the ucode blob anew.
-		 */
-		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
-	}
-}
-#endif
-
-int __init save_microcode_in_initrd_amd(void)
-{
-	unsigned long cont;
-	int retval = 0;
-	enum ucode_state ret;
-	u8 *cont_va;
-	u32 eax;
-
-	if (!container)
-		return -EINVAL;
-
-#ifdef CONFIG_X86_32
-	get_bsp_sig();
-	cont	= (unsigned long)container;
-	cont_va	= __va(container);
-#else
-	/*
-	 * We need the physical address of the container for both bitness since
-	 * boot_params.hdr.ramdisk_image is a physical address.
-	 */
-	cont	= __pa(container);
-	cont_va	= container;
-#endif
-
-	/*
-	 * Take into account the fact that the ramdisk might get relocated and
-	 * therefore we need to recompute the container's position in virtual
-	 * memory space.
-	 */
-	if (relocated_ramdisk)
-		container = (u8 *)(__va(relocated_ramdisk) +
-			     (cont - boot_params.hdr.ramdisk_image));
-	else
-		container = cont_va;
-
-	if (ucode_new_rev)
-		pr_info("microcode: updated early to new patch_level=0x%08x\n",
-			ucode_new_rev);
-
-	eax   = cpuid_eax(0x00000001);
-	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-
-	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
-	if (ret != UCODE_OK)
-		retval = -EINVAL;
-
-	/*
-	 * This will be freed any msec now, stash patches for the current
-	 * family and switch to patch cache for cpu hotplug, etc later.
-	 */
-	container = NULL;
-	container_size = 0;
-
-	return retval;
-}
-
-void reload_ucode_amd(void)
-{
-	struct microcode_amd *mc;
-	u32 rev, eax;
-
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
-
-	mc = (struct microcode_amd *)amd_ucode_patch;
-
-	if (mc && rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("microcode: reload patch_level=0x%08x\n",
-				ucode_new_rev);
-		}
-	}
-}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 9e3f3c7dd5d7..7fc27f1cca58 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -5,6 +5,12 @@ | |||
5 | * 2006 Shaohua Li <shaohua.li@intel.com> | 5 | * 2006 Shaohua Li <shaohua.li@intel.com> |
6 | * 2013-2015 Borislav Petkov <bp@alien8.de> | 6 | * 2013-2015 Borislav Petkov <bp@alien8.de> |
7 | * | 7 | * |
8 | * X86 CPU microcode early update for Linux: | ||
9 | * | ||
10 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | ||
11 | * H Peter Anvin <hpa@zytor.com> | ||
12 | * (C) 2015 Borislav Petkov <bp@alien8.de> | ||
13 | * | ||
8 | * This driver allows updating the microcode on x86 processors. | 14 | * This driver allows updating the microcode on x86 processors. |
9 | * | 15 | * |
10 | * This program is free software; you can redistribute it and/or | 16 | * This program is free software; you can redistribute it and/or |
@@ -13,34 +19,39 @@ | |||
13 | * 2 of the License, or (at your option) any later version. | 19 | * 2 of the License, or (at your option) any later version. |
14 | */ | 20 | */ |
15 | 21 | ||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 22 | #define pr_fmt(fmt) "microcode: " fmt |
17 | 23 | ||
18 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/syscore_ops.h> | ||
19 | #include <linux/miscdevice.h> | 26 | #include <linux/miscdevice.h> |
20 | #include <linux/capability.h> | 27 | #include <linux/capability.h> |
28 | #include <linux/firmware.h> | ||
21 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
22 | #include <linux/module.h> | ||
23 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
24 | #include <linux/cpu.h> | 31 | #include <linux/cpu.h> |
25 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
26 | #include <linux/mm.h> | 33 | #include <linux/mm.h> |
27 | #include <linux/syscore_ops.h> | ||
28 | 34 | ||
29 | #include <asm/microcode.h> | 35 | #include <asm/microcode_intel.h> |
30 | #include <asm/processor.h> | ||
31 | #include <asm/cpu_device_id.h> | 36 | #include <asm/cpu_device_id.h> |
37 | #include <asm/microcode_amd.h> | ||
32 | #include <asm/perf_event.h> | 38 | #include <asm/perf_event.h> |
39 | #include <asm/microcode.h> | ||
40 | #include <asm/processor.h> | ||
41 | #include <asm/cmdline.h> | ||
33 | 42 | ||
34 | MODULE_DESCRIPTION("Microcode Update Driver"); | 43 | #define MICROCODE_VERSION "2.01" |
35 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | #define MICROCODE_VERSION "2.00" | ||
39 | 44 | ||
40 | static struct microcode_ops *microcode_ops; | 45 | static struct microcode_ops *microcode_ops; |
41 | 46 | ||
42 | bool dis_ucode_ldr; | 47 | static bool dis_ucode_ldr; |
43 | module_param(dis_ucode_ldr, bool, 0); | 48 | |
49 | static int __init disable_loader(char *str) | ||
50 | { | ||
51 | dis_ucode_ldr = true; | ||
52 | return 1; | ||
53 | } | ||
54 | __setup("dis_ucode_ldr", disable_loader); | ||
44 | 55 | ||
45 | /* | 56 | /* |
46 | * Synchronization. | 57 | * Synchronization. |
@@ -68,6 +79,150 @@ struct cpu_info_ctx { | |||
68 | int err; | 79 | int err; |
69 | }; | 80 | }; |
70 | 81 | ||
82 | static bool __init check_loader_disabled_bsp(void) | ||
83 | { | ||
84 | #ifdef CONFIG_X86_32 | ||
85 | const char *cmdline = (const char *)__pa_nodebug(boot_command_line); | ||
86 | const char *opt = "dis_ucode_ldr"; | ||
87 | const char *option = (const char *)__pa_nodebug(opt); | ||
88 | bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); | ||
89 | |||
90 | #else /* CONFIG_X86_64 */ | ||
91 | const char *cmdline = boot_command_line; | ||
92 | const char *option = "dis_ucode_ldr"; | ||
93 | bool *res = &dis_ucode_ldr; | ||
94 | #endif | ||
95 | |||
96 | if (cmdline_find_option_bool(cmdline, option)) | ||
97 | *res = true; | ||
98 | |||
99 | return *res; | ||
100 | } | ||
101 | |||
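check_loader_disabled_bsp() above uses cmdline_find_option_bool() to spot "dis_ucode_ldr" on the boot command line. A rough userspace model of such a boolean-option scan; this is a simplified sketch, not the kernel's actual parser (which also handles quoting and "opt=val" forms):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool find_option_bool(const char *cmdline, const char *opt)
{
    size_t len = strlen(opt);
    const char *p = cmdline;

    while ((p = strstr(p, opt)) != NULL) {
        /* Accept only whitespace-delimited, whole-word matches. */
        bool start_ok = (p == cmdline) || (p[-1] == ' ');
        bool end_ok   = (p[len] == '\0') || (p[len] == ' ');

        if (start_ok && end_ok)
            return true;
        p += len;
    }
    return false;
}

int main(void)
{
    const char *cl = "root=/dev/sda1 dis_ucode_ldr quiet";

    printf("%d\n", find_option_bool(cl, "dis_ucode_ldr")); /* 1 */
    printf("%d\n", find_option_bool(cl, "ucode"));          /* 0 */
    return 0;
}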
102 | extern struct builtin_fw __start_builtin_fw[]; | ||
103 | extern struct builtin_fw __end_builtin_fw[]; | ||
104 | |||
105 | bool get_builtin_firmware(struct cpio_data *cd, const char *name) | ||
106 | { | ||
107 | #ifdef CONFIG_FW_LOADER | ||
108 | struct builtin_fw *b_fw; | ||
109 | |||
110 | for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { | ||
111 | if (!strcmp(name, b_fw->name)) { | ||
112 | cd->size = b_fw->size; | ||
113 | cd->data = b_fw->data; | ||
114 | return true; | ||
115 | } | ||
116 | } | ||
117 | #endif | ||
118 | return false; | ||
119 | } | ||
120 | |||
121 | void __init load_ucode_bsp(void) | ||
122 | { | ||
123 | int vendor; | ||
124 | unsigned int family; | ||
125 | |||
126 | if (check_loader_disabled_bsp()) | ||
127 | return; | ||
128 | |||
129 | if (!have_cpuid_p()) | ||
130 | return; | ||
131 | |||
132 | vendor = x86_vendor(); | ||
133 | family = x86_family(); | ||
134 | |||
135 | switch (vendor) { | ||
136 | case X86_VENDOR_INTEL: | ||
137 | if (family >= 6) | ||
138 | load_ucode_intel_bsp(); | ||
139 | break; | ||
140 | case X86_VENDOR_AMD: | ||
141 | if (family >= 0x10) | ||
142 | load_ucode_amd_bsp(family); | ||
143 | break; | ||
144 | default: | ||
145 | break; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | static bool check_loader_disabled_ap(void) | ||
150 | { | ||
151 | #ifdef CONFIG_X86_32 | ||
152 | return *((bool *)__pa_nodebug(&dis_ucode_ldr)); | ||
153 | #else | ||
154 | return dis_ucode_ldr; | ||
155 | #endif | ||
156 | } | ||
157 | |||
158 | void load_ucode_ap(void) | ||
159 | { | ||
160 | int vendor, family; | ||
161 | |||
162 | if (check_loader_disabled_ap()) | ||
163 | return; | ||
164 | |||
165 | if (!have_cpuid_p()) | ||
166 | return; | ||
167 | |||
168 | vendor = x86_vendor(); | ||
169 | family = x86_family(); | ||
170 | |||
171 | switch (vendor) { | ||
172 | case X86_VENDOR_INTEL: | ||
173 | if (family >= 6) | ||
174 | load_ucode_intel_ap(); | ||
175 | break; | ||
176 | case X86_VENDOR_AMD: | ||
177 | if (family >= 0x10) | ||
178 | load_ucode_amd_ap(); | ||
179 | break; | ||
180 | default: | ||
181 | break; | ||
182 | } | ||
183 | } | ||
184 | |||
185 | int __init save_microcode_in_initrd(void) | ||
186 | { | ||
187 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
188 | |||
189 | switch (c->x86_vendor) { | ||
190 | case X86_VENDOR_INTEL: | ||
191 | if (c->x86 >= 6) | ||
192 | save_microcode_in_initrd_intel(); | ||
193 | break; | ||
194 | case X86_VENDOR_AMD: | ||
195 | if (c->x86 >= 0x10) | ||
196 | save_microcode_in_initrd_amd(); | ||
197 | break; | ||
198 | default: | ||
199 | break; | ||
200 | } | ||
201 | |||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | void reload_early_microcode(void) | ||
206 | { | ||
207 | int vendor, family; | ||
208 | |||
209 | vendor = x86_vendor(); | ||
210 | family = x86_family(); | ||
211 | |||
212 | switch (vendor) { | ||
213 | case X86_VENDOR_INTEL: | ||
214 | if (family >= 6) | ||
215 | reload_ucode_intel(); | ||
216 | break; | ||
217 | case X86_VENDOR_AMD: | ||
218 | if (family >= 0x10) | ||
219 | reload_ucode_amd(); | ||
220 | break; | ||
221 | default: | ||
222 | break; | ||
223 | } | ||
224 | } | ||
225 | |||
71 | static void collect_cpu_info_local(void *arg) | 226 | static void collect_cpu_info_local(void *arg) |
72 | { | 227 | { |
73 | struct cpu_info_ctx *ctx = arg; | 228 | struct cpu_info_ctx *ctx = arg; |
@@ -210,9 +365,6 @@ static void __exit microcode_dev_exit(void) | |||
210 | { | 365 | { |
211 | misc_deregister(µcode_dev); | 366 | misc_deregister(µcode_dev); |
212 | } | 367 | } |
213 | |||
214 | MODULE_ALIAS_MISCDEV(MICROCODE_MINOR); | ||
215 | MODULE_ALIAS("devname:cpu/microcode"); | ||
216 | #else | 368 | #else |
217 | #define microcode_dev_init() 0 | 369 | #define microcode_dev_init() 0 |
218 | #define microcode_dev_exit() do { } while (0) | 370 | #define microcode_dev_exit() do { } while (0) |
@@ -463,20 +615,6 @@ static struct notifier_block mc_cpu_notifier = { | |||
463 | .notifier_call = mc_cpu_callback, | 615 | .notifier_call = mc_cpu_callback, |
464 | }; | 616 | }; |
465 | 617 | ||
466 | #ifdef MODULE | ||
467 | /* Autoload on Intel and AMD systems */ | ||
468 | static const struct x86_cpu_id __initconst microcode_id[] = { | ||
469 | #ifdef CONFIG_MICROCODE_INTEL | ||
470 | { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, }, | ||
471 | #endif | ||
472 | #ifdef CONFIG_MICROCODE_AMD | ||
473 | { X86_VENDOR_AMD, X86_FAMILY_ANY, X86_MODEL_ANY, }, | ||
474 | #endif | ||
475 | {} | ||
476 | }; | ||
477 | MODULE_DEVICE_TABLE(x86cpu, microcode_id); | ||
478 | #endif | ||
479 | |||
480 | static struct attribute *cpu_root_microcode_attrs[] = { | 618 | static struct attribute *cpu_root_microcode_attrs[] = { |
481 | &dev_attr_reload.attr, | 619 | &dev_attr_reload.attr, |
482 | NULL | 620 | NULL |
@@ -487,9 +625,9 @@ static struct attribute_group cpu_root_microcode_group = { | |||
487 | .attrs = cpu_root_microcode_attrs, | 625 | .attrs = cpu_root_microcode_attrs, |
488 | }; | 626 | }; |
489 | 627 | ||
490 | static int __init microcode_init(void) | 628 | int __init microcode_init(void) |
491 | { | 629 | { |
492 | struct cpuinfo_x86 *c = &cpu_data(0); | 630 | struct cpuinfo_x86 *c = &boot_cpu_data; |
493 | int error; | 631 | int error; |
494 | 632 | ||
495 | if (paravirt_enabled() || dis_ucode_ldr) | 633 | if (paravirt_enabled() || dis_ucode_ldr) |
@@ -560,35 +698,3 @@ static int __init microcode_init(void) | |||
560 | return error; | 698 | return error; |
561 | 699 | ||
562 | } | 700 | } |
563 | module_init(microcode_init); | ||
564 | |||
565 | static void __exit microcode_exit(void) | ||
566 | { | ||
567 | struct cpuinfo_x86 *c = &cpu_data(0); | ||
568 | |||
569 | microcode_dev_exit(); | ||
570 | |||
571 | unregister_hotcpu_notifier(&mc_cpu_notifier); | ||
572 | unregister_syscore_ops(&mc_syscore_ops); | ||
573 | |||
574 | sysfs_remove_group(&cpu_subsys.dev_root->kobj, | ||
575 | &cpu_root_microcode_group); | ||
576 | |||
577 | get_online_cpus(); | ||
578 | mutex_lock(µcode_mutex); | ||
579 | |||
580 | subsys_interface_unregister(&mc_cpu_interface); | ||
581 | |||
582 | mutex_unlock(µcode_mutex); | ||
583 | put_online_cpus(); | ||
584 | |||
585 | platform_device_unregister(microcode_pdev); | ||
586 | |||
587 | microcode_ops = NULL; | ||
588 | |||
589 | if (c->x86_vendor == X86_VENDOR_AMD) | ||
590 | exit_amd_microcode(); | ||
591 | |||
592 | pr_info("Microcode Update Driver: v" MICROCODE_VERSION " removed.\n"); | ||
593 | } | ||
594 | module_exit(microcode_exit); | ||
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c deleted file mode 100644 index 8ebc421d6299..000000000000 --- a/arch/x86/kernel/cpu/microcode/core_early.c +++ /dev/null | |||
@@ -1,170 +0,0 @@ | |||
1 | /* | ||
2 | * X86 CPU microcode early update for Linux | ||
3 | * | ||
4 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | ||
5 | * H Peter Anvin <hpa@zytor.com> | ||
6 | * (C) 2015 Borislav Petkov <bp@alien8.de> | ||
7 | * | ||
8 | * This driver allows early microcode upgrades on Intel processors | ||
9 | * belonging to the IA-32 family - PentiumPro, Pentium II, | ||
10 | * Pentium III, Xeon, Pentium 4, etc. | ||
11 | * | ||
12 | * Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture | ||
13 | * Software Developer's Manual. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/firmware.h> | ||
22 | #include <asm/microcode.h> | ||
23 | #include <asm/microcode_intel.h> | ||
24 | #include <asm/microcode_amd.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/cmdline.h> | ||
27 | |||
28 | static bool __init check_loader_disabled_bsp(void) | ||
29 | { | ||
30 | #ifdef CONFIG_X86_32 | ||
31 | const char *cmdline = (const char *)__pa_nodebug(boot_command_line); | ||
32 | const char *opt = "dis_ucode_ldr"; | ||
33 | const char *option = (const char *)__pa_nodebug(opt); | ||
34 | bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); | ||
35 | |||
36 | #else /* CONFIG_X86_64 */ | ||
37 | const char *cmdline = boot_command_line; | ||
38 | const char *option = "dis_ucode_ldr"; | ||
39 | bool *res = &dis_ucode_ldr; | ||
40 | #endif | ||
41 | |||
42 | if (cmdline_find_option_bool(cmdline, option)) | ||
43 | *res = true; | ||
44 | |||
45 | return *res; | ||
46 | } | ||
47 | |||
48 | extern struct builtin_fw __start_builtin_fw[]; | ||
49 | extern struct builtin_fw __end_builtin_fw[]; | ||
50 | |||
51 | bool get_builtin_firmware(struct cpio_data *cd, const char *name) | ||
52 | { | ||
53 | #ifdef CONFIG_FW_LOADER | ||
54 | struct builtin_fw *b_fw; | ||
55 | |||
56 | for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { | ||
57 | if (!strcmp(name, b_fw->name)) { | ||
58 | cd->size = b_fw->size; | ||
59 | cd->data = b_fw->data; | ||
60 | return true; | ||
61 | } | ||
62 | } | ||
63 | #endif | ||
64 | return false; | ||
65 | } | ||
66 | |||
67 | void __init load_ucode_bsp(void) | ||
68 | { | ||
69 | int vendor; | ||
70 | unsigned int family; | ||
71 | |||
72 | if (check_loader_disabled_bsp()) | ||
73 | return; | ||
74 | |||
75 | if (!have_cpuid_p()) | ||
76 | return; | ||
77 | |||
78 | vendor = x86_vendor(); | ||
79 | family = x86_family(); | ||
80 | |||
81 | switch (vendor) { | ||
82 | case X86_VENDOR_INTEL: | ||
83 | if (family >= 6) | ||
84 | load_ucode_intel_bsp(); | ||
85 | break; | ||
86 | case X86_VENDOR_AMD: | ||
87 | if (family >= 0x10) | ||
88 | load_ucode_amd_bsp(family); | ||
89 | break; | ||
90 | default: | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | static bool check_loader_disabled_ap(void) | ||
96 | { | ||
97 | #ifdef CONFIG_X86_32 | ||
98 | return *((bool *)__pa_nodebug(&dis_ucode_ldr)); | ||
99 | #else | ||
100 | return dis_ucode_ldr; | ||
101 | #endif | ||
102 | } | ||
103 | |||
104 | void load_ucode_ap(void) | ||
105 | { | ||
106 | int vendor, family; | ||
107 | |||
108 | if (check_loader_disabled_ap()) | ||
109 | return; | ||
110 | |||
111 | if (!have_cpuid_p()) | ||
112 | return; | ||
113 | |||
114 | vendor = x86_vendor(); | ||
115 | family = x86_family(); | ||
116 | |||
117 | switch (vendor) { | ||
118 | case X86_VENDOR_INTEL: | ||
119 | if (family >= 6) | ||
120 | load_ucode_intel_ap(); | ||
121 | break; | ||
122 | case X86_VENDOR_AMD: | ||
123 | if (family >= 0x10) | ||
124 | load_ucode_amd_ap(); | ||
125 | break; | ||
126 | default: | ||
127 | break; | ||
128 | } | ||
129 | } | ||
130 | |||
131 | int __init save_microcode_in_initrd(void) | ||
132 | { | ||
133 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
134 | |||
135 | switch (c->x86_vendor) { | ||
136 | case X86_VENDOR_INTEL: | ||
137 | if (c->x86 >= 6) | ||
138 | save_microcode_in_initrd_intel(); | ||
139 | break; | ||
140 | case X86_VENDOR_AMD: | ||
141 | if (c->x86 >= 0x10) | ||
142 | save_microcode_in_initrd_amd(); | ||
143 | break; | ||
144 | default: | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | void reload_early_microcode(void) | ||
152 | { | ||
153 | int vendor, family; | ||
154 | |||
155 | vendor = x86_vendor(); | ||
156 | family = x86_family(); | ||
157 | |||
158 | switch (vendor) { | ||
159 | case X86_VENDOR_INTEL: | ||
160 | if (family >= 6) | ||
161 | reload_ucode_intel(); | ||
162 | break; | ||
163 | case X86_VENDOR_AMD: | ||
164 | if (family >= 0x10) | ||
165 | reload_ucode_amd(); | ||
166 | break; | ||
167 | default: | ||
168 | break; | ||
169 | } | ||
170 | } | ||
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 969dc17eb1b4..ce47402eb2f9 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -4,27 +4,804 @@ | |||
4 | * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk> | 4 | * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk> |
5 | * 2006 Shaohua Li <shaohua.li@intel.com> | 5 | * 2006 Shaohua Li <shaohua.li@intel.com> |
6 | * | 6 | * |
7 | * Intel CPU microcode early update for Linux | ||
8 | * | ||
9 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | ||
10 | * H Peter Anvin <hpa@zytor.com> | ||
11 | * | ||
7 | * This program is free software; you can redistribute it and/or | 12 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 13 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. | 15 | * 2 of the License, or (at your option) any later version. |
11 | */ | 16 | */ |
12 | 17 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | /* |
19 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | ||
20 | * printk calls into no_printk(). | ||
21 | * | ||
22 | *#define DEBUG | ||
23 | */ | ||
24 | #define pr_fmt(fmt) "microcode: " fmt | ||
14 | 25 | ||
26 | #include <linux/earlycpio.h> | ||
15 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
16 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
30 | #include <linux/initrd.h> | ||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/cpu.h> | ||
34 | #include <linux/mm.h> | ||
20 | 35 | ||
21 | #include <asm/microcode_intel.h> | 36 | #include <asm/microcode_intel.h> |
22 | #include <asm/processor.h> | 37 | #include <asm/processor.h> |
38 | #include <asm/tlbflush.h> | ||
39 | #include <asm/setup.h> | ||
23 | #include <asm/msr.h> | 40 | #include <asm/msr.h> |
24 | 41 | ||
25 | MODULE_DESCRIPTION("Microcode Update Driver"); | 42 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; |
26 | MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>"); | 43 | static struct mc_saved_data { |
27 | MODULE_LICENSE("GPL"); | 44 | unsigned int mc_saved_count; |
45 | struct microcode_intel **mc_saved; | ||
46 | } mc_saved_data; | ||
47 | |||
48 | static enum ucode_state | ||
49 | load_microcode_early(struct microcode_intel **saved, | ||
50 | unsigned int num_saved, struct ucode_cpu_info *uci) | ||
51 | { | ||
52 | struct microcode_intel *ucode_ptr, *new_mc = NULL; | ||
53 | struct microcode_header_intel *mc_hdr; | ||
54 | int new_rev, ret, i; | ||
55 | |||
56 | new_rev = uci->cpu_sig.rev; | ||
57 | |||
58 | for (i = 0; i < num_saved; i++) { | ||
59 | ucode_ptr = saved[i]; | ||
60 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; | ||
61 | |||
62 | ret = has_newer_microcode(ucode_ptr, | ||
63 | uci->cpu_sig.sig, | ||
64 | uci->cpu_sig.pf, | ||
65 | new_rev); | ||
66 | if (!ret) | ||
67 | continue; | ||
68 | |||
69 | new_rev = mc_hdr->rev; | ||
70 | new_mc = ucode_ptr; | ||
71 | } | ||
72 | |||
73 | if (!new_mc) | ||
74 | return UCODE_NFOUND; | ||
75 | |||
76 | uci->mc = (struct microcode_intel *)new_mc; | ||
77 | return UCODE_OK; | ||
78 | } | ||
79 | |||
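load_microcode_early() above scans the array of saved patches and keeps the one with the highest revision that still beats what the CPU is running. A simplified sketch of that selection loop; has_newer() below is a stand-in for has_newer_microcode(), which additionally matches signature and platform flags:

#include <stddef.h>
#include <stdio.h>

struct patch {
    unsigned int rev;
};

static int has_newer(const struct patch *p, unsigned int cur_rev)
{
    return p->rev > cur_rev;
}

static const struct patch *pick_newest(const struct patch *saved,
                                       size_t n, unsigned int cpu_rev)
{
    const struct patch *best = NULL;
    unsigned int new_rev = cpu_rev;

    for (size_t i = 0; i < n; i++) {
        if (!has_newer(&saved[i], new_rev))
            continue;
        new_rev = saved[i].rev;     /* raise the bar as we go */
        best = &saved[i];
    }
    return best;    /* NULL means nothing newer was found */
}

int main(void)
{
    struct patch saved[] = { {0x20}, {0x28}, {0x25} };
    const struct patch *p = pick_newest(saved, 3, 0x22);

    printf("%s rev=0x%x\n", p ? "found" : "none", p ? p->rev : 0);
    return 0;
}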
80 | static inline void | ||
81 | copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd, | ||
82 | unsigned long off, int num_saved) | ||
83 | { | ||
84 | int i; | ||
85 | |||
86 | for (i = 0; i < num_saved; i++) | ||
87 | mc_saved[i] = (struct microcode_intel *)(initrd[i] + off); | ||
88 | } | ||
89 | |||
90 | #ifdef CONFIG_X86_32 | ||
91 | static void | ||
92 | microcode_phys(struct microcode_intel **mc_saved_tmp, | ||
93 | struct mc_saved_data *mc_saved_data) | ||
94 | { | ||
95 | int i; | ||
96 | struct microcode_intel ***mc_saved; | ||
97 | |||
98 | mc_saved = (struct microcode_intel ***) | ||
99 | __pa_nodebug(&mc_saved_data->mc_saved); | ||
100 | for (i = 0; i < mc_saved_data->mc_saved_count; i++) { | ||
101 | struct microcode_intel *p; | ||
102 | |||
103 | p = *(struct microcode_intel **) | ||
104 | __pa_nodebug(mc_saved_data->mc_saved + i); | ||
105 | mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); | ||
106 | } | ||
107 | } | ||
108 | #endif | ||
109 | |||
110 | static enum ucode_state | ||
111 | load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, | ||
112 | unsigned long initrd_start, struct ucode_cpu_info *uci) | ||
113 | { | ||
114 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
115 | unsigned int count = mc_saved_data->mc_saved_count; | ||
116 | |||
117 | if (!mc_saved_data->mc_saved) { | ||
118 | copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count); | ||
119 | |||
120 | return load_microcode_early(mc_saved_tmp, count, uci); | ||
121 | } else { | ||
122 | #ifdef CONFIG_X86_32 | ||
123 | microcode_phys(mc_saved_tmp, mc_saved_data); | ||
124 | return load_microcode_early(mc_saved_tmp, count, uci); | ||
125 | #else | ||
126 | return load_microcode_early(mc_saved_data->mc_saved, | ||
127 | count, uci); | ||
128 | #endif | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * Given a CPU signature and a microcode patch, this function checks whether | ||
134 | * the microcode patch matches the CPU's family and model. | ||
135 | */ | ||
136 | static enum ucode_state | ||
137 | matching_model_microcode(struct microcode_header_intel *mc_header, | ||
138 | unsigned long sig) | ||
139 | { | ||
140 | unsigned int fam, model; | ||
141 | unsigned int fam_ucode, model_ucode; | ||
142 | struct extended_sigtable *ext_header; | ||
143 | unsigned long total_size = get_totalsize(mc_header); | ||
144 | unsigned long data_size = get_datasize(mc_header); | ||
145 | int ext_sigcount, i; | ||
146 | struct extended_signature *ext_sig; | ||
147 | |||
148 | fam = __x86_family(sig); | ||
149 | model = x86_model(sig); | ||
150 | |||
151 | fam_ucode = __x86_family(mc_header->sig); | ||
152 | model_ucode = x86_model(mc_header->sig); | ||
153 | |||
154 | if (fam == fam_ucode && model == model_ucode) | ||
155 | return UCODE_OK; | ||
156 | |||
157 | /* Look for ext. headers: */ | ||
158 | if (total_size <= data_size + MC_HEADER_SIZE) | ||
159 | return UCODE_NFOUND; | ||
160 | |||
161 | ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE; | ||
162 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | ||
163 | ext_sigcount = ext_header->count; | ||
164 | |||
165 | for (i = 0; i < ext_sigcount; i++) { | ||
166 | fam_ucode = __x86_family(ext_sig->sig); | ||
167 | model_ucode = x86_model(ext_sig->sig); | ||
168 | |||
169 | if (fam == fam_ucode && model == model_ucode) | ||
170 | return UCODE_OK; | ||
171 | |||
172 | ext_sig++; | ||
173 | } | ||
174 | return UCODE_NFOUND; | ||
175 | } | ||
176 | |||
177 | static int | ||
178 | save_microcode(struct mc_saved_data *mc_saved_data, | ||
179 | struct microcode_intel **mc_saved_src, | ||
180 | unsigned int mc_saved_count) | ||
181 | { | ||
182 | int i, j; | ||
183 | struct microcode_intel **saved_ptr; | ||
184 | int ret; | ||
185 | |||
186 | if (!mc_saved_count) | ||
187 | return -EINVAL; | ||
188 | |||
189 | /* | ||
190 | * Copy new microcode data. | ||
191 | */ | ||
192 | saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL); | ||
193 | if (!saved_ptr) | ||
194 | return -ENOMEM; | ||
195 | |||
196 | for (i = 0; i < mc_saved_count; i++) { | ||
197 | struct microcode_header_intel *mc_hdr; | ||
198 | struct microcode_intel *mc; | ||
199 | unsigned long size; | ||
200 | |||
201 | if (!mc_saved_src[i]) { | ||
202 | ret = -EINVAL; | ||
203 | goto err; | ||
204 | } | ||
205 | |||
206 | mc = mc_saved_src[i]; | ||
207 | mc_hdr = &mc->hdr; | ||
208 | size = get_totalsize(mc_hdr); | ||
209 | |||
210 | saved_ptr[i] = kmalloc(size, GFP_KERNEL); | ||
211 | if (!saved_ptr[i]) { | ||
212 | ret = -ENOMEM; | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | memcpy(saved_ptr[i], mc, size); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Point to newly saved microcode. | ||
221 | */ | ||
222 | mc_saved_data->mc_saved = saved_ptr; | ||
223 | mc_saved_data->mc_saved_count = mc_saved_count; | ||
224 | |||
225 | return 0; | ||
226 | |||
227 | err: | ||
228 | for (j = 0; j <= i; j++) | ||
229 | kfree(saved_ptr[j]); | ||
230 | kfree(saved_ptr); | ||
231 | |||
232 | return ret; | ||
233 | } | ||
234 | |||
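save_microcode() above deep-copies the patch blobs out of the initrd before that memory goes away, unwinding every allocation made so far if any step fails. A plain-C sketch of that allocate/copy/unwind pattern, with malloc/free standing in for kcalloc/kmalloc/kfree:

#include <stdlib.h>
#include <string.h>

static unsigned char **deep_copy(unsigned char **src, size_t *sizes,
                                 size_t n)
{
    unsigned char **dst = calloc(n, sizeof(*dst));
    size_t i;

    if (!dst)
        return NULL;

    for (i = 0; i < n; i++) {
        dst[i] = malloc(sizes[i]);
        if (!dst[i])
            goto err;
        memcpy(dst[i], src[i], sizes[i]);
    }
    return dst;

err:
    /* Free only what was already allocated, then the array itself. */
    while (i--)
        free(dst[i]);
    free(dst);
    return NULL;
}

int main(void)
{
    unsigned char a[4] = "abc", b[8] = "defghij";
    unsigned char *src[] = { a, b };
    size_t sizes[] = { sizeof(a), sizeof(b) };
    unsigned char **copy = deep_copy(src, sizes, 2);

    if (copy) {
        free(copy[0]);
        free(copy[1]);
        free(copy);
    }
    return 0;
}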
235 | /* | ||
236 | * A microcode patch in ucode_ptr is saved into mc_saved | ||
237 | * - if it has a matching signature and a newer revision than an existing | ||
238 | * patch in mc_saved. | ||
239 | * - or if it is a newly discovered microcode patch. | ||
240 | * | ||
241 | * The microcode patch should have a model matching the CPU's. | ||
242 | * | ||
243 | * Returns: The updated number @num_saved of saved microcode patches. | ||
244 | */ | ||
245 | static unsigned int _save_mc(struct microcode_intel **mc_saved, | ||
246 | u8 *ucode_ptr, unsigned int num_saved) | ||
247 | { | ||
248 | struct microcode_header_intel *mc_hdr, *mc_saved_hdr; | ||
249 | unsigned int sig, pf; | ||
250 | int found = 0, i; | ||
251 | |||
252 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; | ||
253 | |||
254 | for (i = 0; i < num_saved; i++) { | ||
255 | mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i]; | ||
256 | sig = mc_saved_hdr->sig; | ||
257 | pf = mc_saved_hdr->pf; | ||
258 | |||
259 | if (!find_matching_signature(ucode_ptr, sig, pf)) | ||
260 | continue; | ||
261 | |||
262 | found = 1; | ||
263 | |||
264 | if (mc_hdr->rev <= mc_saved_hdr->rev) | ||
265 | continue; | ||
266 | |||
267 | /* | ||
268 | * Found an older ucode saved earlier. Replace it with | ||
269 | * this newer one. | ||
270 | */ | ||
271 | mc_saved[i] = (struct microcode_intel *)ucode_ptr; | ||
272 | break; | ||
273 | } | ||
274 | |||
275 | /* Newly detected microcode, save it to memory. */ | ||
276 | if (i >= num_saved && !found) | ||
277 | mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr; | ||
278 | |||
279 | return num_saved; | ||
280 | } | ||
281 | |||
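_save_mc() above implements a small replace-or-append cache: a patch whose signature is already cached replaces the saved copy only if its revision is newer, while an unseen signature is appended. A simplified sketch with illustrative signature/revision values:

#include <stddef.h>
#include <stdio.h>

struct mc { unsigned int sig, rev; };

static size_t save_mc(struct mc *saved, size_t num, struct mc *new_mc)
{
    size_t i;
    int found = 0;

    for (i = 0; i < num; i++) {
        if (saved[i].sig != new_mc->sig)
            continue;
        found = 1;
        if (new_mc->rev > saved[i].rev)
            saved[i] = *new_mc;     /* replace the older copy */
        break;
    }
    if (i >= num && !found)
        saved[num++] = *new_mc;     /* newly seen signature */

    return num;
}

int main(void)
{
    struct mc cache[8] = { { 0x306a9, 0x12 } };
    size_t n = 1;

    struct mc newer = { 0x306a9, 0x1b };
    struct mc other = { 0x406e3, 0x9e };

    n = save_mc(cache, n, &newer);  /* replaces; n stays 1 */
    n = save_mc(cache, n, &other);  /* appends; n becomes 2 */
    printf("n=%zu rev0=0x%x\n", n, cache[0].rev);
    return 0;
}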
282 | /* | ||
283 | * Get microcode matching the BSP's model. Only CPUs with the same model as | ||
284 | * the BSP can stay in the platform. | ||
285 | */ | ||
286 | static enum ucode_state __init | ||
287 | get_matching_model_microcode(int cpu, unsigned long start, | ||
288 | void *data, size_t size, | ||
289 | struct mc_saved_data *mc_saved_data, | ||
290 | unsigned long *mc_saved_in_initrd, | ||
291 | struct ucode_cpu_info *uci) | ||
292 | { | ||
293 | u8 *ucode_ptr = data; | ||
294 | unsigned int leftover = size; | ||
295 | enum ucode_state state = UCODE_OK; | ||
296 | unsigned int mc_size; | ||
297 | struct microcode_header_intel *mc_header; | ||
298 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
299 | unsigned int mc_saved_count = mc_saved_data->mc_saved_count; | ||
300 | int i; | ||
301 | |||
302 | while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { | ||
303 | |||
304 | if (leftover < sizeof(*mc_header)) | ||
305 | break; | ||
306 | |||
307 | mc_header = (struct microcode_header_intel *)ucode_ptr; | ||
308 | |||
309 | mc_size = get_totalsize(mc_header); | ||
310 | if (!mc_size || mc_size > leftover || | ||
311 | microcode_sanity_check(ucode_ptr, 0) < 0) | ||
312 | break; | ||
313 | |||
314 | leftover -= mc_size; | ||
315 | |||
316 | /* | ||
317 | * Since APs with the same family and model as the BSP may boot in | ||
318 | * the platform, we need to find and save microcode patches | ||
319 | * with the same family and model as the BSP. | ||
320 | */ | ||
321 | if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != | ||
322 | UCODE_OK) { | ||
323 | ucode_ptr += mc_size; | ||
324 | continue; | ||
325 | } | ||
326 | |||
327 | mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count); | ||
328 | |||
329 | ucode_ptr += mc_size; | ||
330 | } | ||
331 | |||
332 | if (leftover) { | ||
333 | state = UCODE_ERROR; | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | if (mc_saved_count == 0) { | ||
338 | state = UCODE_NFOUND; | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | for (i = 0; i < mc_saved_count; i++) | ||
343 | mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start; | ||
344 | |||
345 | mc_saved_data->mc_saved_count = mc_saved_count; | ||
346 | out: | ||
347 | return state; | ||
348 | } | ||
349 | |||
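get_matching_model_microcode() above walks a blob that is simply a concatenation of patches, each declaring its own total size, stopping when the buffer is exhausted or a size field is implausible. A sketch of that walk under a simplified header layout (the real Intel header carries more fields and a separate sanity check):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint32_t total_size; };

static void walk(const uint8_t *buf, size_t size)
{
    size_t leftover = size;
    const uint8_t *p = buf;

    while (leftover >= sizeof(struct hdr)) {
        struct hdr h;
        memcpy(&h, p, sizeof(h));

        if (!h.total_size || h.total_size > leftover)
            break;                  /* truncated or corrupt */

        printf("patch of %u bytes\n", h.total_size);
        p += h.total_size;
        leftover -= h.total_size;
    }
}

int main(void)
{
    /* two fake back-to-back 8-byte "patches"; size fields are stored
     * little-endian, as on x86 */
    uint8_t blob[16] = { 8, 0, 0, 0, 0, 0, 0, 0,
                         8, 0, 0, 0, 0, 0, 0, 0 };
    walk(blob, sizeof(blob));
    return 0;
}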
350 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) | ||
351 | { | ||
352 | unsigned int val[2]; | ||
353 | unsigned int family, model; | ||
354 | struct cpu_signature csig; | ||
355 | unsigned int eax, ebx, ecx, edx; | ||
356 | |||
357 | csig.sig = 0; | ||
358 | csig.pf = 0; | ||
359 | csig.rev = 0; | ||
360 | |||
361 | memset(uci, 0, sizeof(*uci)); | ||
362 | |||
363 | eax = 0x00000001; | ||
364 | ecx = 0; | ||
365 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
366 | csig.sig = eax; | ||
367 | |||
368 | family = __x86_family(csig.sig); | ||
369 | model = x86_model(csig.sig); | ||
370 | |||
371 | if ((model >= 5) || (family > 6)) { | ||
372 | /* get processor flags from MSR 0x17 */ | ||
373 | native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); | ||
374 | csig.pf = 1 << ((val[1] >> 18) & 7); | ||
375 | } | ||
376 | native_wrmsr(MSR_IA32_UCODE_REV, 0, 0); | ||
377 | |||
378 | /* As documented in the SDM: Do a CPUID 1 here */ | ||
379 | sync_core(); | ||
380 | |||
381 | /* get the current revision from MSR 0x8B */ | ||
382 | native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | ||
383 | |||
384 | csig.rev = val[1]; | ||
385 | |||
386 | uci->cpu_sig = csig; | ||
387 | uci->valid = 1; | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
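The pf computation in collect_cpu_info_early() above turns the 3-bit platform ID (bits 52:50 of MSR_IA32_PLATFORM_ID, i.e. bits 20:18 of the MSR's high half) into a one-hot mask so that it can later be ANDed against a patch's platform-flags bitmask. A sketch with an example MSR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t msr_hi = 0x000c0000;   /* example high half of the MSR */

    /* platform id 0..7 becomes a single bit in the pf mask */
    uint32_t pf = 1u << ((msr_hi >> 18) & 7);

    printf("platform id=%u, pf mask=0x%x\n", (msr_hi >> 18) & 7, pf);
    return 0;
}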
392 | static void show_saved_mc(void) | ||
393 | { | ||
394 | #ifdef DEBUG | ||
395 | int i, j; | ||
396 | unsigned int sig, pf, rev, total_size, data_size, date; | ||
397 | struct ucode_cpu_info uci; | ||
398 | |||
399 | if (mc_saved_data.mc_saved_count == 0) { | ||
400 | pr_debug("no microcode data saved.\n"); | ||
401 | return; | ||
402 | } | ||
403 | pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count); | ||
404 | |||
405 | collect_cpu_info_early(&uci); | ||
406 | |||
407 | sig = uci.cpu_sig.sig; | ||
408 | pf = uci.cpu_sig.pf; | ||
409 | rev = uci.cpu_sig.rev; | ||
410 | pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev); | ||
411 | |||
412 | for (i = 0; i < mc_saved_data.mc_saved_count; i++) { | ||
413 | struct microcode_header_intel *mc_saved_header; | ||
414 | struct extended_sigtable *ext_header; | ||
415 | int ext_sigcount; | ||
416 | struct extended_signature *ext_sig; | ||
417 | |||
418 | mc_saved_header = (struct microcode_header_intel *) | ||
419 | mc_saved_data.mc_saved[i]; | ||
420 | sig = mc_saved_header->sig; | ||
421 | pf = mc_saved_header->pf; | ||
422 | rev = mc_saved_header->rev; | ||
423 | total_size = get_totalsize(mc_saved_header); | ||
424 | data_size = get_datasize(mc_saved_header); | ||
425 | date = mc_saved_header->date; | ||
426 | |||
427 | pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n", | ||
428 | i, sig, pf, rev, total_size, | ||
429 | date & 0xffff, | ||
430 | date >> 24, | ||
431 | (date >> 16) & 0xff); | ||
432 | |||
433 | /* Look for ext. headers: */ | ||
434 | if (total_size <= data_size + MC_HEADER_SIZE) | ||
435 | continue; | ||
436 | |||
437 | ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE; | ||
438 | ext_sigcount = ext_header->count; | ||
439 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | ||
440 | |||
441 | for (j = 0; j < ext_sigcount; j++) { | ||
442 | sig = ext_sig->sig; | ||
443 | pf = ext_sig->pf; | ||
444 | |||
445 | pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n", | ||
446 | j, sig, pf); | ||
447 | |||
448 | ext_sig++; | ||
449 | } | ||
450 | |||
451 | } | ||
452 | #endif | ||
453 | } | ||
454 | |||
455 | #ifdef CONFIG_HOTPLUG_CPU | ||
456 | static DEFINE_MUTEX(x86_cpu_microcode_mutex); | ||
457 | /* | ||
458 | * Save this mc into mc_saved_data so that it will be loaded early when a | ||
459 | * CPU is hot-added or resumes. | ||
460 | * | ||
461 | * Please make sure this mc is a valid microcode patch before calling | ||
462 | * this function. | ||
463 | */ | ||
464 | int save_mc_for_early(u8 *mc) | ||
465 | { | ||
466 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
467 | unsigned int mc_saved_count_init; | ||
468 | unsigned int mc_saved_count; | ||
469 | struct microcode_intel **mc_saved; | ||
470 | int ret = 0; | ||
471 | int i; | ||
472 | |||
473 | /* | ||
474 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in | ||
475 | * hotplug. | ||
476 | */ | ||
477 | mutex_lock(&x86_cpu_microcode_mutex); | ||
478 | |||
479 | mc_saved_count_init = mc_saved_data.mc_saved_count; | ||
480 | mc_saved_count = mc_saved_data.mc_saved_count; | ||
481 | mc_saved = mc_saved_data.mc_saved; | ||
482 | |||
483 | if (mc_saved && mc_saved_count) | ||
484 | memcpy(mc_saved_tmp, mc_saved, | ||
485 | mc_saved_count * sizeof(struct microcode_intel *)); | ||
486 | /* | ||
487 | * Save the microcode patch mc in the mc_saved_tmp structure if it's a newer | ||
488 | * version. | ||
489 | */ | ||
490 | mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count); | ||
491 | |||
492 | /* | ||
493 | * Save mc_saved_tmp in the global mc_saved_data. | ||
494 | */ | ||
495 | ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count); | ||
496 | if (ret) { | ||
497 | pr_err("Cannot save microcode patch.\n"); | ||
498 | goto out; | ||
499 | } | ||
500 | |||
501 | show_saved_mc(); | ||
502 | |||
503 | /* | ||
504 | * Free old saved microcode data. | ||
505 | */ | ||
506 | if (mc_saved) { | ||
507 | for (i = 0; i < mc_saved_count_init; i++) | ||
508 | kfree(mc_saved[i]); | ||
509 | kfree(mc_saved); | ||
510 | } | ||
511 | |||
512 | out: | ||
513 | mutex_unlock(&x86_cpu_microcode_mutex); | ||
514 | |||
515 | return ret; | ||
516 | } | ||
517 | EXPORT_SYMBOL_GPL(save_mc_for_early); | ||
518 | #endif | ||
519 | |||
520 | static bool __init load_builtin_intel_microcode(struct cpio_data *cp) | ||
521 | { | ||
522 | #ifdef CONFIG_X86_64 | ||
523 | unsigned int eax = 0x00000001, ebx, ecx = 0, edx; | ||
524 | unsigned int family, model, stepping; | ||
525 | char name[30]; | ||
526 | |||
527 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
528 | |||
529 | family = __x86_family(eax); | ||
530 | model = x86_model(eax); | ||
531 | stepping = eax & 0xf; | ||
532 | |||
533 | sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping); | ||
534 | |||
535 | return get_builtin_firmware(cp, name); | ||
536 | #else | ||
537 | return false; | ||
538 | #endif | ||
539 | } | ||
540 | |||
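load_builtin_intel_microcode() above derives the built-in firmware name "intel-ucode/<ff>-<mm>-<ss>" from the CPUID 1 signature. A standalone sketch of that name construction; the model decode here adds the extended model unconditionally, which is a simplification that holds for family 6 parts like the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t eax = 0x000306a9;      /* example CPUID 1 signature */
    unsigned int family   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
    unsigned int model    = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
    unsigned int stepping = eax & 0xf;
    char name[30];

    snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
             family, model, stepping);
    printf("%s\n", name);           /* intel-ucode/06-3a-09 */
    return 0;
}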
541 | static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin"; | ||
542 | static __init enum ucode_state | ||
543 | scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, | ||
544 | unsigned long start, unsigned long size, | ||
545 | struct ucode_cpu_info *uci) | ||
546 | { | ||
547 | struct cpio_data cd; | ||
548 | long offset = 0; | ||
549 | #ifdef CONFIG_X86_32 | ||
550 | char *p = (char *)__pa_nodebug(ucode_name); | ||
551 | #else | ||
552 | char *p = ucode_name; | ||
553 | #endif | ||
554 | |||
555 | cd.data = NULL; | ||
556 | cd.size = 0; | ||
557 | |||
558 | cd = find_cpio_data(p, (void *)start, size, &offset); | ||
559 | if (!cd.data) { | ||
560 | if (!load_builtin_intel_microcode(&cd)) | ||
561 | return UCODE_ERROR; | ||
562 | } | ||
563 | |||
564 | return get_matching_model_microcode(0, start, cd.data, cd.size, | ||
565 | mc_saved_data, initrd, uci); | ||
566 | } | ||
567 | |||
568 | /* | ||
569 | * Print ucode update info. | ||
570 | */ | ||
571 | static void | ||
572 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) | ||
573 | { | ||
574 | int cpu = smp_processor_id(); | ||
575 | |||
576 | pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n", | ||
577 | cpu, | ||
578 | uci->cpu_sig.rev, | ||
579 | date & 0xffff, | ||
580 | date >> 24, | ||
581 | (date >> 16) & 0xff); | ||
582 | } | ||
583 | |||
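print_ucode_info() above relies on the Intel microcode header storing its date as 0xMMDDYYYY in packed BCD, which is why the fields print as human-readable digits with %x conversions. A tiny sketch with an example date value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t date = 0x06122014;     /* packed BCD: June 12, 2014 */

    printf("%04x-%02x-%02x\n",
           date & 0xffff,           /* year  (low 16 bits) */
           date >> 24,              /* month (high byte)   */
           (date >> 16) & 0xff);    /* day   (next byte)   */
    return 0;                       /* prints 2014-06-12   */
}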
584 | #ifdef CONFIG_X86_32 | ||
585 | |||
586 | static int delay_ucode_info; | ||
587 | static int current_mc_date; | ||
588 | |||
589 | /* | ||
590 | * Print early updated ucode info after printk works. This is a delayed info dump. | ||
591 | */ | ||
592 | void show_ucode_info_early(void) | ||
593 | { | ||
594 | struct ucode_cpu_info uci; | ||
595 | |||
596 | if (delay_ucode_info) { | ||
597 | collect_cpu_info_early(&uci); | ||
598 | print_ucode_info(&uci, current_mc_date); | ||
599 | delay_ucode_info = 0; | ||
600 | } | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * At this point, we cannot call printk() yet. Keep the microcode patch number in | ||
605 | * mc_saved_data.mc_saved and delay printing microcode info in | ||
606 | * show_ucode_info_early() until printk() works. | ||
607 | */ | ||
608 | static void print_ucode(struct ucode_cpu_info *uci) | ||
609 | { | ||
610 | struct microcode_intel *mc_intel; | ||
611 | int *delay_ucode_info_p; | ||
612 | int *current_mc_date_p; | ||
613 | |||
614 | mc_intel = uci->mc; | ||
615 | if (mc_intel == NULL) | ||
616 | return; | ||
617 | |||
618 | delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); | ||
619 | current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date); | ||
620 | |||
621 | *delay_ucode_info_p = 1; | ||
622 | *current_mc_date_p = mc_intel->hdr.date; | ||
623 | } | ||
624 | #else | ||
625 | |||
626 | /* | ||
627 | * Flush the global TLB. We only do this on x86_64, where paging has been | ||
628 | * enabled already and PGE should be enabled as well. | ||
629 | */ | ||
630 | static inline void flush_tlb_early(void) | ||
631 | { | ||
632 | __native_flush_tlb_global_irq_disabled(); | ||
633 | } | ||
634 | |||
635 | static inline void print_ucode(struct ucode_cpu_info *uci) | ||
636 | { | ||
637 | struct microcode_intel *mc_intel; | ||
638 | |||
639 | mc_intel = uci->mc; | ||
640 | if (mc_intel == NULL) | ||
641 | return; | ||
642 | |||
643 | print_ucode_info(uci, mc_intel->hdr.date); | ||
644 | } | ||
645 | #endif | ||
646 | |||
647 | static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) | ||
648 | { | ||
649 | struct microcode_intel *mc_intel; | ||
650 | unsigned int val[2]; | ||
651 | |||
652 | mc_intel = uci->mc; | ||
653 | if (mc_intel == NULL) | ||
654 | return 0; | ||
655 | |||
656 | /* write microcode via MSR 0x79 */ | ||
657 | native_wrmsr(MSR_IA32_UCODE_WRITE, | ||
658 | (unsigned long) mc_intel->bits, | ||
659 | (unsigned long) mc_intel->bits >> 16 >> 16); | ||
660 | native_wrmsr(MSR_IA32_UCODE_REV, 0, 0); | ||
661 | |||
662 | /* As documented in the SDM: Do a CPUID 1 here */ | ||
663 | sync_core(); | ||
664 | |||
665 | /* get the current revision from MSR 0x8B */ | ||
666 | native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | ||
667 | if (val[1] != mc_intel->hdr.rev) | ||
668 | return -1; | ||
669 | |||
670 | #ifdef CONFIG_X86_64 | ||
671 | /* Flush the global TLB. This is a precaution. */ | ||
672 | flush_tlb_early(); | ||
673 | #endif | ||
674 | uci->cpu_sig.rev = val[1]; | ||
675 | |||
676 | if (early) | ||
677 | print_ucode(uci); | ||
678 | else | ||
679 | print_ucode_info(uci, mc_intel->hdr.date); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
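The wrmsr in apply_microcode_early() above splits the patch data address into low and high 32-bit halves with ">> 16 >> 16" rather than ">> 32": on 32-bit kernels, where unsigned long is 32 bits wide, a single 32-bit shift would be undefined behavior, while two 16-bit shifts are well defined and simply yield zero. A sketch of that idiom:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned long addr = 0x12345678ul;  /* example data address */

    uint32_t lo = (uint32_t)addr;
    /* Defined for 32-bit and 64-bit unsigned long alike;
     * "addr >> 32" would be UB when the type is 32 bits wide. */
    uint32_t hi = (uint32_t)(addr >> 16 >> 16);

    printf("lo=0x%08x hi=0x%08x\n", lo, hi);
    return 0;
}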
684 | /* | ||
685 | * This function converts microcode patch offsets previously stored in | ||
686 | * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data. | ||
687 | */ | ||
688 | int __init save_microcode_in_initrd_intel(void) | ||
689 | { | ||
690 | unsigned int count = mc_saved_data.mc_saved_count; | ||
691 | struct microcode_intel *mc_saved[MAX_UCODE_COUNT]; | ||
692 | int ret = 0; | ||
693 | |||
694 | if (count == 0) | ||
695 | return ret; | ||
696 | |||
697 | copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count); | ||
698 | ret = save_microcode(&mc_saved_data, mc_saved, count); | ||
699 | if (ret) | ||
700 | pr_err("Cannot save microcode patches from initrd.\n"); | ||
701 | |||
702 | show_saved_mc(); | ||
703 | |||
704 | return ret; | ||
705 | } | ||
706 | |||
707 | static void __init | ||
708 | _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data, | ||
709 | unsigned long *initrd, | ||
710 | unsigned long start, unsigned long size) | ||
711 | { | ||
712 | struct ucode_cpu_info uci; | ||
713 | enum ucode_state ret; | ||
714 | |||
715 | collect_cpu_info_early(&uci); | ||
716 | |||
717 | ret = scan_microcode(mc_saved_data, initrd, start, size, &uci); | ||
718 | if (ret != UCODE_OK) | ||
719 | return; | ||
720 | |||
721 | ret = load_microcode(mc_saved_data, initrd, start, &uci); | ||
722 | if (ret != UCODE_OK) | ||
723 | return; | ||
724 | |||
725 | apply_microcode_early(&uci, true); | ||
726 | } | ||
727 | |||
728 | void __init load_ucode_intel_bsp(void) | ||
729 | { | ||
730 | u64 start, size; | ||
731 | #ifdef CONFIG_X86_32 | ||
732 | struct boot_params *p; | ||
733 | |||
734 | p = (struct boot_params *)__pa_nodebug(&boot_params); | ||
735 | start = p->hdr.ramdisk_image; | ||
736 | size = p->hdr.ramdisk_size; | ||
737 | |||
738 | _load_ucode_intel_bsp( | ||
739 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), | ||
740 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), | ||
741 | start, size); | ||
742 | #else | ||
743 | start = boot_params.hdr.ramdisk_image + PAGE_OFFSET; | ||
744 | size = boot_params.hdr.ramdisk_size; | ||
745 | |||
746 | _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size); | ||
747 | #endif | ||
748 | } | ||
749 | |||
750 | void load_ucode_intel_ap(void) | ||
751 | { | ||
752 | struct mc_saved_data *mc_saved_data_p; | ||
753 | struct ucode_cpu_info uci; | ||
754 | unsigned long *mc_saved_in_initrd_p; | ||
755 | unsigned long initrd_start_addr; | ||
756 | enum ucode_state ret; | ||
757 | #ifdef CONFIG_X86_32 | ||
758 | unsigned long *initrd_start_p; | ||
759 | |||
760 | mc_saved_in_initrd_p = | ||
761 | (unsigned long *)__pa_nodebug(mc_saved_in_initrd); | ||
762 | mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); | ||
763 | initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); | ||
764 | initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p); | ||
765 | #else | ||
766 | mc_saved_data_p = &mc_saved_data; | ||
767 | mc_saved_in_initrd_p = mc_saved_in_initrd; | ||
768 | initrd_start_addr = initrd_start; | ||
769 | #endif | ||
770 | |||
771 | /* | ||
772 | * If there is no valid ucode previously saved in memory, there is no | ||
773 | * need to update the ucode on this AP. | ||
774 | */ | ||
775 | if (mc_saved_data_p->mc_saved_count == 0) | ||
776 | return; | ||
777 | |||
778 | collect_cpu_info_early(&uci); | ||
779 | ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, | ||
780 | initrd_start_addr, &uci); | ||
781 | |||
782 | if (ret != UCODE_OK) | ||
783 | return; | ||
784 | |||
785 | apply_microcode_early(&uci, true); | ||
786 | } | ||
787 | |||
788 | void reload_ucode_intel(void) | ||
789 | { | ||
790 | struct ucode_cpu_info uci; | ||
791 | enum ucode_state ret; | ||
792 | |||
793 | if (!mc_saved_data.mc_saved_count) | ||
794 | return; | ||
795 | |||
796 | collect_cpu_info_early(&uci); | ||
797 | |||
798 | ret = load_microcode_early(mc_saved_data.mc_saved, | ||
799 | mc_saved_data.mc_saved_count, &uci); | ||
800 | if (ret != UCODE_OK) | ||
801 | return; | ||
802 | |||
803 | apply_microcode_early(&uci, false); | ||
804 | } | ||
28 | 805 | ||
29 | static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) | 806 | static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) |
30 | { | 807 | { |
@@ -264,7 +1041,7 @@ static struct microcode_ops microcode_intel_ops = { | |||
264 | 1041 | ||
265 | struct microcode_ops * __init init_intel_microcode(void) | 1042 | struct microcode_ops * __init init_intel_microcode(void) |
266 | { | 1043 | { |
267 | struct cpuinfo_x86 *c = &cpu_data(0); | 1044 | struct cpuinfo_x86 *c = &boot_cpu_data; |
268 | 1045 | ||
269 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | 1046 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
270 | cpu_has(c, X86_FEATURE_IA64)) { | 1047 | cpu_has(c, X86_FEATURE_IA64)) { |
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c deleted file mode 100644 index 37ea89c11520..000000000000 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ /dev/null | |||
@@ -1,808 +0,0 @@ | |||
1 | /* | ||
2 | * Intel CPU microcode early update for Linux | ||
3 | * | ||
4 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | ||
5 | * H Peter Anvin <hpa@zytor.com> | ||
6 | * | ||
7 | * This allows early microcode upgrades on Intel processors | ||
8 | * belonging to the IA-32 family - PentiumPro, Pentium II, | ||
9 | * Pentium III, Xeon, Pentium 4, etc. | ||
10 | * | ||
11 | * Reference: Section 9.11 of Volume 3, IA-32 Intel Architecture | ||
12 | * Software Developer's Manual. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public License | ||
16 | * as published by the Free Software Foundation; either version | ||
17 | * 2 of the License, or (at your option) any later version. | ||
18 | */ | ||
19 | |||
20 | /* | ||
21 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | ||
22 | * printk calls into no_printk(). | ||
23 | * | ||
24 | *#define DEBUG | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/earlycpio.h> | ||
31 | #include <linux/initrd.h> | ||
32 | #include <linux/cpu.h> | ||
33 | #include <asm/msr.h> | ||
34 | #include <asm/microcode_intel.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/setup.h> | ||
38 | |||
39 | #undef pr_fmt | ||
40 | #define pr_fmt(fmt) "microcode: " fmt | ||
41 | |||
42 | static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT]; | ||
43 | static struct mc_saved_data { | ||
44 | unsigned int mc_saved_count; | ||
45 | struct microcode_intel **mc_saved; | ||
46 | } mc_saved_data; | ||
47 | |||
48 | static enum ucode_state | ||
49 | load_microcode_early(struct microcode_intel **saved, | ||
50 | unsigned int num_saved, struct ucode_cpu_info *uci) | ||
51 | { | ||
52 | struct microcode_intel *ucode_ptr, *new_mc = NULL; | ||
53 | struct microcode_header_intel *mc_hdr; | ||
54 | int new_rev, ret, i; | ||
55 | |||
56 | new_rev = uci->cpu_sig.rev; | ||
57 | |||
58 | for (i = 0; i < num_saved; i++) { | ||
59 | ucode_ptr = saved[i]; | ||
60 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; | ||
61 | |||
62 | ret = has_newer_microcode(ucode_ptr, | ||
63 | uci->cpu_sig.sig, | ||
64 | uci->cpu_sig.pf, | ||
65 | new_rev); | ||
66 | if (!ret) | ||
67 | continue; | ||
68 | |||
69 | new_rev = mc_hdr->rev; | ||
70 | new_mc = ucode_ptr; | ||
71 | } | ||
72 | |||
73 | if (!new_mc) | ||
74 | return UCODE_NFOUND; | ||
75 | |||
76 | uci->mc = (struct microcode_intel *)new_mc; | ||
77 | return UCODE_OK; | ||
78 | } | ||
79 | |||
80 | static inline void | ||
81 | copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd, | ||
82 | unsigned long off, int num_saved) | ||
83 | { | ||
84 | int i; | ||
85 | |||
86 | for (i = 0; i < num_saved; i++) | ||
87 | mc_saved[i] = (struct microcode_intel *)(initrd[i] + off); | ||
88 | } | ||
89 | |||
90 | #ifdef CONFIG_X86_32 | ||
91 | static void | ||
92 | microcode_phys(struct microcode_intel **mc_saved_tmp, | ||
93 | struct mc_saved_data *mc_saved_data) | ||
94 | { | ||
95 | int i; | ||
96 | struct microcode_intel ***mc_saved; | ||
97 | |||
98 | mc_saved = (struct microcode_intel ***) | ||
99 | __pa_nodebug(&mc_saved_data->mc_saved); | ||
100 | for (i = 0; i < mc_saved_data->mc_saved_count; i++) { | ||
101 | struct microcode_intel *p; | ||
102 | |||
103 | p = *(struct microcode_intel **) | ||
104 | __pa_nodebug(mc_saved_data->mc_saved + i); | ||
105 | mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); | ||
106 | } | ||
107 | } | ||
108 | #endif | ||
109 | |||
110 | static enum ucode_state | ||
111 | load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, | ||
112 | unsigned long initrd_start, struct ucode_cpu_info *uci) | ||
113 | { | ||
114 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
115 | unsigned int count = mc_saved_data->mc_saved_count; | ||
116 | |||
117 | if (!mc_saved_data->mc_saved) { | ||
118 | copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count); | ||
119 | |||
120 | return load_microcode_early(mc_saved_tmp, count, uci); | ||
121 | } else { | ||
122 | #ifdef CONFIG_X86_32 | ||
123 | microcode_phys(mc_saved_tmp, mc_saved_data); | ||
124 | return load_microcode_early(mc_saved_tmp, count, uci); | ||
125 | #else | ||
126 | return load_microcode_early(mc_saved_data->mc_saved, | ||
127 | count, uci); | ||
128 | #endif | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * Given a CPU signature and a microcode patch, this function checks whether | ||
134 | * the microcode patch matches the CPU's family and model. | ||
135 | */ | ||
136 | static enum ucode_state | ||
137 | matching_model_microcode(struct microcode_header_intel *mc_header, | ||
138 | unsigned long sig) | ||
139 | { | ||
140 | unsigned int fam, model; | ||
141 | unsigned int fam_ucode, model_ucode; | ||
142 | struct extended_sigtable *ext_header; | ||
143 | unsigned long total_size = get_totalsize(mc_header); | ||
144 | unsigned long data_size = get_datasize(mc_header); | ||
145 | int ext_sigcount, i; | ||
146 | struct extended_signature *ext_sig; | ||
147 | |||
148 | fam = __x86_family(sig); | ||
149 | model = x86_model(sig); | ||
150 | |||
151 | fam_ucode = __x86_family(mc_header->sig); | ||
152 | model_ucode = x86_model(mc_header->sig); | ||
153 | |||
154 | if (fam == fam_ucode && model == model_ucode) | ||
155 | return UCODE_OK; | ||
156 | |||
157 | /* Look for ext. headers: */ | ||
158 | if (total_size <= data_size + MC_HEADER_SIZE) | ||
159 | return UCODE_NFOUND; | ||
160 | |||
161 | ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE; | ||
162 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | ||
163 | ext_sigcount = ext_header->count; | ||
164 | |||
165 | for (i = 0; i < ext_sigcount; i++) { | ||
166 | fam_ucode = __x86_family(ext_sig->sig); | ||
167 | model_ucode = x86_model(ext_sig->sig); | ||
168 | |||
169 | if (fam == fam_ucode && model == model_ucode) | ||
170 | return UCODE_OK; | ||
171 | |||
172 | ext_sig++; | ||
173 | } | ||
174 | return UCODE_NFOUND; | ||
175 | } | ||
176 | |||
177 | static int | ||
178 | save_microcode(struct mc_saved_data *mc_saved_data, | ||
179 | struct microcode_intel **mc_saved_src, | ||
180 | unsigned int mc_saved_count) | ||
181 | { | ||
182 | int i, j; | ||
183 | struct microcode_intel **saved_ptr; | ||
184 | int ret; | ||
185 | |||
186 | if (!mc_saved_count) | ||
187 | return -EINVAL; | ||
188 | |||
189 | /* | ||
190 | * Copy new microcode data. | ||
191 | */ | ||
192 | saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL); | ||
193 | if (!saved_ptr) | ||
194 | return -ENOMEM; | ||
195 | |||
196 | for (i = 0; i < mc_saved_count; i++) { | ||
197 | struct microcode_header_intel *mc_hdr; | ||
198 | struct microcode_intel *mc; | ||
199 | unsigned long size; | ||
200 | |||
201 | if (!mc_saved_src[i]) { | ||
202 | ret = -EINVAL; | ||
203 | goto err; | ||
204 | } | ||
205 | |||
206 | mc = mc_saved_src[i]; | ||
207 | mc_hdr = &mc->hdr; | ||
208 | size = get_totalsize(mc_hdr); | ||
209 | |||
210 | saved_ptr[i] = kmalloc(size, GFP_KERNEL); | ||
211 | if (!saved_ptr[i]) { | ||
212 | ret = -ENOMEM; | ||
213 | goto err; | ||
214 | } | ||
215 | |||
216 | memcpy(saved_ptr[i], mc, size); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Point to newly saved microcode. | ||
221 | */ | ||
222 | mc_saved_data->mc_saved = saved_ptr; | ||
223 | mc_saved_data->mc_saved_count = mc_saved_count; | ||
224 | |||
225 | return 0; | ||
226 | |||
227 | err: | ||
228 | for (j = 0; j <= i; j++) | ||
229 | kfree(saved_ptr[j]); | ||
230 | kfree(saved_ptr); | ||
231 | |||
232 | return ret; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * A microcode patch in ucode_ptr is saved into mc_saved | ||
237 | * - if it has a matching signature and a newer revision than an existing | ||
238 | * patch in mc_saved. | ||
239 | * - or if it is a newly discovered microcode patch. | ||
240 | * | ||
241 | * The microcode patch should have a model matching the CPU's. | ||
242 | * | ||
243 | * Returns: The updated number @num_saved of saved microcode patches. | ||
244 | */ | ||
245 | static unsigned int _save_mc(struct microcode_intel **mc_saved, | ||
246 | u8 *ucode_ptr, unsigned int num_saved) | ||
247 | { | ||
248 | struct microcode_header_intel *mc_hdr, *mc_saved_hdr; | ||
249 | unsigned int sig, pf; | ||
250 | int found = 0, i; | ||
251 | |||
252 | mc_hdr = (struct microcode_header_intel *)ucode_ptr; | ||
253 | |||
254 | for (i = 0; i < num_saved; i++) { | ||
255 | mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i]; | ||
256 | sig = mc_saved_hdr->sig; | ||
257 | pf = mc_saved_hdr->pf; | ||
258 | |||
259 | if (!find_matching_signature(ucode_ptr, sig, pf)) | ||
260 | continue; | ||
261 | |||
262 | found = 1; | ||
263 | |||
264 | if (mc_hdr->rev <= mc_saved_hdr->rev) | ||
265 | continue; | ||
266 | |||
267 | /* | ||
268 | * Found an older ucode saved earlier. Replace it with | ||
269 | * this newer one. | ||
270 | */ | ||
271 | mc_saved[i] = (struct microcode_intel *)ucode_ptr; | ||
272 | break; | ||
273 | } | ||
274 | |||
275 | /* Newly detected microcode, save it to memory. */ | ||
276 | if (i >= num_saved && !found) | ||
277 | mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr; | ||
278 | |||
279 | return num_saved; | ||
280 | } | ||
281 | |||
282 | /* | ||
283 | * Get microcode matching the BSP's model. Only CPUs with the same model as | ||
284 | * the BSP can stay in the platform. | ||
285 | */ | ||
286 | static enum ucode_state __init | ||
287 | get_matching_model_microcode(int cpu, unsigned long start, | ||
288 | void *data, size_t size, | ||
289 | struct mc_saved_data *mc_saved_data, | ||
290 | unsigned long *mc_saved_in_initrd, | ||
291 | struct ucode_cpu_info *uci) | ||
292 | { | ||
293 | u8 *ucode_ptr = data; | ||
294 | unsigned int leftover = size; | ||
295 | enum ucode_state state = UCODE_OK; | ||
296 | unsigned int mc_size; | ||
297 | struct microcode_header_intel *mc_header; | ||
298 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
299 | unsigned int mc_saved_count = mc_saved_data->mc_saved_count; | ||
300 | int i; | ||
301 | |||
302 | while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { | ||
303 | |||
304 | if (leftover < sizeof(*mc_header)) | ||
305 | break; | ||
306 | |||
307 | mc_header = (struct microcode_header_intel *)ucode_ptr; | ||
308 | |||
309 | mc_size = get_totalsize(mc_header); | ||
310 | if (!mc_size || mc_size > leftover || | ||
311 | microcode_sanity_check(ucode_ptr, 0) < 0) | ||
312 | break; | ||
313 | |||
314 | leftover -= mc_size; | ||
315 | |||
316 | /* | ||
317 | * Since APs with the same family and model as the BSP may boot in | ||
318 | * the platform, we need to find and save microcode patches | ||
319 | * with the same family and model as the BSP. | ||
320 | */ | ||
321 | if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != | ||
322 | UCODE_OK) { | ||
323 | ucode_ptr += mc_size; | ||
324 | continue; | ||
325 | } | ||
326 | |||
327 | mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count); | ||
328 | |||
329 | ucode_ptr += mc_size; | ||
330 | } | ||
331 | |||
332 | if (leftover) { | ||
333 | state = UCODE_ERROR; | ||
334 | goto out; | ||
335 | } | ||
336 | |||
337 | if (mc_saved_count == 0) { | ||
338 | state = UCODE_NFOUND; | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | for (i = 0; i < mc_saved_count; i++) | ||
343 | mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start; | ||
344 | |||
345 | mc_saved_data->mc_saved_count = mc_saved_count; | ||
346 | out: | ||
347 | return state; | ||
348 | } | ||
349 | |||
350 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) | ||
351 | { | ||
352 | unsigned int val[2]; | ||
353 | unsigned int family, model; | ||
354 | struct cpu_signature csig; | ||
355 | unsigned int eax, ebx, ecx, edx; | ||
356 | |||
357 | csig.sig = 0; | ||
358 | csig.pf = 0; | ||
359 | csig.rev = 0; | ||
360 | |||
361 | memset(uci, 0, sizeof(*uci)); | ||
362 | |||
363 | eax = 0x00000001; | ||
364 | ecx = 0; | ||
365 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
366 | csig.sig = eax; | ||
367 | |||
368 | family = __x86_family(csig.sig); | ||
369 | model = x86_model(csig.sig); | ||
370 | |||
371 | if ((model >= 5) || (family > 6)) { | ||
372 | /* get processor flags from MSR 0x17 */ | ||
373 | native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); | ||
374 | csig.pf = 1 << ((val[1] >> 18) & 7); | ||
375 | } | ||
376 | native_wrmsr(MSR_IA32_UCODE_REV, 0, 0); | ||
377 | |||
378 | /* As documented in the SDM: Do a CPUID 1 here */ | ||
379 | sync_core(); | ||
380 | |||
381 | /* get the current revision from MSR 0x8B */ | ||
382 | native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | ||
383 | |||
384 | csig.rev = val[1]; | ||
385 | |||
386 | uci->cpu_sig = csig; | ||
387 | uci->valid = 1; | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
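A note on the platform flags computed above: MSR_IA32_PLATFORM_ID carries a 3-bit platform number in bits 52:50, i.e. bits 20:18 of the high dword read into val[1], and the loader one-hot encodes it because Intel patch headers carry a pf bitmap rather than a single value. A sketch of the resulting match rule, under that reading of the MSR layout:

#include <stdbool.h>
#include <stdint.h>

/* One-hot platform mask from the high dword of MSR_IA32_PLATFORM_ID. */
static inline uint32_t pf_mask(uint32_t platform_id_hi)
{
	return 1u << ((platform_id_hi >> 18) & 7);
}

/* A patch applies if the CPUID(1) signatures are identical and the
 * patch's pf bitmap intersects the CPU's one-hot mask. */
static inline bool patch_applies(uint32_t cpu_sig, uint32_t cpu_pf,
				 uint32_t mc_sig, uint32_t mc_pf)
{
	return cpu_sig == mc_sig && (cpu_pf & mc_pf);
}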
392 | #ifdef DEBUG | ||
393 | static void show_saved_mc(void) | ||
394 | { | ||
395 | int i, j; | ||
396 | unsigned int sig, pf, rev, total_size, data_size, date; | ||
397 | struct ucode_cpu_info uci; | ||
398 | |||
399 | if (mc_saved_data.mc_saved_count == 0) { | ||
400 | pr_debug("no microcode data saved.\n"); | ||
401 | return; | ||
402 | } | ||
403 | pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count); | ||
404 | |||
405 | collect_cpu_info_early(&uci); | ||
406 | |||
407 | sig = uci.cpu_sig.sig; | ||
408 | pf = uci.cpu_sig.pf; | ||
409 | rev = uci.cpu_sig.rev; | ||
410 | pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev); | ||
411 | |||
412 | for (i = 0; i < mc_saved_data.mc_saved_count; i++) { | ||
413 | struct microcode_header_intel *mc_saved_header; | ||
414 | struct extended_sigtable *ext_header; | ||
415 | int ext_sigcount; | ||
416 | struct extended_signature *ext_sig; | ||
417 | |||
418 | mc_saved_header = (struct microcode_header_intel *) | ||
419 | mc_saved_data.mc_saved[i]; | ||
420 | sig = mc_saved_header->sig; | ||
421 | pf = mc_saved_header->pf; | ||
422 | rev = mc_saved_header->rev; | ||
423 | total_size = get_totalsize(mc_saved_header); | ||
424 | data_size = get_datasize(mc_saved_header); | ||
425 | date = mc_saved_header->date; | ||
426 | |||
427 | pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, toal size=0x%x, date = %04x-%02x-%02x\n", | ||
428 | i, sig, pf, rev, total_size, | ||
429 | date & 0xffff, | ||
430 | date >> 24, | ||
431 | (date >> 16) & 0xff); | ||
432 | |||
433 | /* Look for ext. headers: */ | ||
434 | if (total_size <= data_size + MC_HEADER_SIZE) | ||
435 | continue; | ||
436 | |||
437 | ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE; | ||
438 | ext_sigcount = ext_header->count; | ||
439 | ext_sig = (void *)ext_header + EXT_HEADER_SIZE; | ||
440 | |||
441 | for (j = 0; j < ext_sigcount; j++) { | ||
442 | sig = ext_sig->sig; | ||
443 | pf = ext_sig->pf; | ||
444 | |||
445 | pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n", | ||
446 | j, sig, pf); | ||
447 | |||
448 | ext_sig++; | ||
449 | } | ||
450 | |||
451 | } | ||
452 | } | ||
453 | #else | ||
454 | static inline void show_saved_mc(void) | ||
455 | { | ||
456 | } | ||
457 | #endif | ||
458 | |||
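The date field printed by show_saved_mc() above, and by print_ucode_info() further down, is BCD-packed as 0xMMDDYYYY: month in the top byte, day in bits 23:16, year in the low word. That is why the code decodes it with hex conversions rather than decimal arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Decode an Intel microcode header date (0xMMDDYYYY, BCD fields). */
static void print_mc_date(uint32_t date)
{
	printf("%04x-%02x-%02x\n",
	       date & 0xffff,		/* year  */
	       date >> 24,		/* month */
	       (date >> 16) & 0xff);	/* day   */
}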
459 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) | ||
460 | static DEFINE_MUTEX(x86_cpu_microcode_mutex); | ||
461 | /* | ||
462 | * Save this mc into mc_saved_data so it will be loaded early when a CPU is | ||
463 | * hot added or resumes. | ||
464 | * | ||
465 | * Please make sure this mc is a valid microcode patch before calling | ||
466 | * this function. | ||
467 | */ | ||
468 | int save_mc_for_early(u8 *mc) | ||
469 | { | ||
470 | struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; | ||
471 | unsigned int mc_saved_count_init; | ||
472 | unsigned int mc_saved_count; | ||
473 | struct microcode_intel **mc_saved; | ||
474 | int ret = 0; | ||
475 | int i; | ||
476 | |||
477 | /* | ||
478 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in | ||
479 | * hotplug. | ||
480 | */ | ||
481 | mutex_lock(&x86_cpu_microcode_mutex); | ||
482 | |||
483 | mc_saved_count_init = mc_saved_data.mc_saved_count; | ||
484 | mc_saved_count = mc_saved_data.mc_saved_count; | ||
485 | mc_saved = mc_saved_data.mc_saved; | ||
486 | |||
487 | if (mc_saved && mc_saved_count) | ||
488 | memcpy(mc_saved_tmp, mc_saved, | ||
489 | mc_saved_count * sizeof(struct microcode_intel *)); | ||
490 | /* | ||
491 | * Save the microcode patch mc in the mc_saved_tmp structure if it's a newer | ||
492 | * version. | ||
493 | */ | ||
494 | mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count); | ||
495 | |||
496 | /* | ||
497 | * Save mc_saved_tmp in the global mc_saved_data. | ||
498 | */ | ||
499 | ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count); | ||
500 | if (ret) { | ||
501 | pr_err("Cannot save microcode patch.\n"); | ||
502 | goto out; | ||
503 | } | ||
504 | |||
505 | show_saved_mc(); | ||
506 | |||
507 | /* | ||
508 | * Free old saved microcode data. | ||
509 | */ | ||
510 | if (mc_saved) { | ||
511 | for (i = 0; i < mc_saved_count_init; i++) | ||
512 | kfree(mc_saved[i]); | ||
513 | kfree(mc_saved); | ||
514 | } | ||
515 | |||
516 | out: | ||
517 | mutex_unlock(&x86_cpu_microcode_mutex); | ||
518 | |||
519 | return ret; | ||
520 | } | ||
521 | EXPORT_SYMBOL_GPL(save_mc_for_early); | ||
522 | #endif | ||
523 | |||
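save_mc_for_early() above delegates the actual merge to _save_mc(), which lies outside this hunk. A condensed sketch of the merge rule as implied by the callers here — a saved patch for the same sig/pf is replaced only by a newer revision, anything else is appended; the real helper also walks extended signature tables, elided in this sketch:

#include <stdint.h>

struct mc_patch {
	uint32_t sig, pf, rev;
};

/* Merge one patch into tmp[]; the caller guarantees free slots
 * (MAX_UCODE_COUNT). Returns the updated patch count. */
static unsigned int save_patch(struct mc_patch *tmp[],
			       struct mc_patch *mc, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct mc_patch *saved = tmp[i];

		if (saved->sig == mc->sig && (saved->pf & mc->pf)) {
			if (mc->rev > saved->rev)
				tmp[i] = mc;	/* newer revision wins */
			return count;
		}
	}

	tmp[count] = mc;			/* new sig/pf: append */
	return count + 1;
}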
524 | static bool __init load_builtin_intel_microcode(struct cpio_data *cp) | ||
525 | { | ||
526 | #ifdef CONFIG_X86_64 | ||
527 | unsigned int eax = 0x00000001, ebx, ecx = 0, edx; | ||
528 | unsigned int family, model, stepping; | ||
529 | char name[30]; | ||
530 | |||
531 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
532 | |||
533 | family = __x86_family(eax); | ||
534 | model = x86_model(eax); | ||
535 | stepping = eax & 0xf; | ||
536 | |||
537 | sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping); | ||
538 | |||
539 | return get_builtin_firmware(cp, name); | ||
540 | #else | ||
541 | return false; | ||
542 | #endif | ||
543 | } | ||
544 | |||
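load_builtin_intel_microcode() above derives the firmware blob name from CPUID(1).EAX. A stand-alone version of the same family/model/stepping decode, following the standard x86 signature layout that __x86_family() and x86_model() implement:

#include <stdio.h>
#include <stdint.h>

static void ucode_fw_name(uint32_t eax, char *buf, size_t len)
{
	unsigned int family   = (eax >> 8) & 0xf;
	unsigned int model    = (eax >> 4) & 0xf;
	unsigned int stepping = eax & 0xf;

	if (family == 0xf)
		family += (eax >> 20) & 0xff;		/* extended family */
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;	/* extended model */

	snprintf(buf, len, "intel-ucode/%02x-%02x-%02x",
		 family, model, stepping);
}

For a signature of 0x306c3 (Haswell), this yields "intel-ucode/06-3c-03", the same naming scheme the intel-ucode firmware tree uses.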
545 | static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin"; | ||
546 | static __init enum ucode_state | ||
547 | scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd, | ||
548 | unsigned long start, unsigned long size, | ||
549 | struct ucode_cpu_info *uci) | ||
550 | { | ||
551 | struct cpio_data cd; | ||
552 | long offset = 0; | ||
553 | #ifdef CONFIG_X86_32 | ||
554 | char *p = (char *)__pa_nodebug(ucode_name); | ||
555 | #else | ||
556 | char *p = ucode_name; | ||
557 | #endif | ||
558 | |||
559 | cd.data = NULL; | ||
560 | cd.size = 0; | ||
561 | |||
562 | cd = find_cpio_data(p, (void *)start, size, &offset); | ||
563 | if (!cd.data) { | ||
564 | if (!load_builtin_intel_microcode(&cd)) | ||
565 | return UCODE_ERROR; | ||
566 | } | ||
567 | |||
568 | return get_matching_model_microcode(0, start, cd.data, cd.size, | ||
569 | mc_saved_data, initrd, uci); | ||
570 | } | ||
571 | |||
572 | /* | ||
573 | * Print ucode update info. | ||
574 | */ | ||
575 | static void | ||
576 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) | ||
577 | { | ||
578 | int cpu = smp_processor_id(); | ||
579 | |||
580 | pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n", | ||
581 | cpu, | ||
582 | uci->cpu_sig.rev, | ||
583 | date & 0xffff, | ||
584 | date >> 24, | ||
585 | (date >> 16) & 0xff); | ||
586 | } | ||
587 | |||
588 | #ifdef CONFIG_X86_32 | ||
589 | |||
590 | static int delay_ucode_info; | ||
591 | static int current_mc_date; | ||
592 | |||
593 | /* | ||
594 | * Print the early-updated ucode info once printk() works. This is a delayed info dump. | ||
595 | */ | ||
596 | void show_ucode_info_early(void) | ||
597 | { | ||
598 | struct ucode_cpu_info uci; | ||
599 | |||
600 | if (delay_ucode_info) { | ||
601 | collect_cpu_info_early(&uci); | ||
602 | print_ucode_info(&uci, current_mc_date); | ||
603 | delay_ucode_info = 0; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /* | ||
608 | * At this point, we cannot call printk() yet. Keep the microcode patch in | ||
609 | * mc_saved_data.mc_saved and delay printing microcode info in | ||
610 | * show_ucode_info_early() until printk() works. | ||
611 | */ | ||
612 | static void print_ucode(struct ucode_cpu_info *uci) | ||
613 | { | ||
614 | struct microcode_intel *mc_intel; | ||
615 | int *delay_ucode_info_p; | ||
616 | int *current_mc_date_p; | ||
617 | |||
618 | mc_intel = uci->mc; | ||
619 | if (mc_intel == NULL) | ||
620 | return; | ||
621 | |||
622 | delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); | ||
623 | current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); | ||
624 | |||
625 | *delay_ucode_info_p = 1; | ||
626 | *current_mc_date_p = mc_intel->hdr.date; | ||
627 | } | ||
628 | #else | ||
629 | |||
630 | /* | ||
631 | * Flush the global TLB. We only do this on x86_64, where paging has already | ||
632 | * been enabled and PGE should be enabled as well. | ||
633 | */ | ||
634 | static inline void flush_tlb_early(void) | ||
635 | { | ||
636 | __native_flush_tlb_global_irq_disabled(); | ||
637 | } | ||
638 | |||
639 | static inline void print_ucode(struct ucode_cpu_info *uci) | ||
640 | { | ||
641 | struct microcode_intel *mc_intel; | ||
642 | |||
643 | mc_intel = uci->mc; | ||
644 | if (mc_intel == NULL) | ||
645 | return; | ||
646 | |||
647 | print_ucode_info(uci, mc_intel->hdr.date); | ||
648 | } | ||
649 | #endif | ||
650 | |||
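The 32-bit print_ucode() variant above runs before paging is enabled, so it can neither call printk() nor touch globals through their link-time virtual addresses — hence the __pa_nodebug() rebasing of every pointer it dereferences. A toy model of that rebasing, with PAGE_OFFSET_DEMO as an illustrative stand-in for the real PAGE_OFFSET:

/* Rebase a virtual symbol address to the physical alias that code
 * running with paging disabled must use. Illustrative only. */
#define PAGE_OFFSET_DEMO 0xc0000000UL

static inline void *pa_demo(void *vaddr)
{
	return (void *)((unsigned long)vaddr - PAGE_OFFSET_DEMO);
}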
651 | static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) | ||
652 | { | ||
653 | struct microcode_intel *mc_intel; | ||
654 | unsigned int val[2]; | ||
655 | |||
656 | mc_intel = uci->mc; | ||
657 | if (mc_intel == NULL) | ||
658 | return 0; | ||
659 | |||
660 | /* write microcode via MSR 0x79 */ | ||
661 | native_wrmsr(MSR_IA32_UCODE_WRITE, | ||
662 | (unsigned long) mc_intel->bits, | ||
663 | (unsigned long) mc_intel->bits >> 16 >> 16); | ||
664 | native_wrmsr(MSR_IA32_UCODE_REV, 0, 0); | ||
665 | |||
666 | /* As documented in the SDM: Do a CPUID 1 here */ | ||
667 | sync_core(); | ||
668 | |||
669 | /* get the current revision from MSR 0x8B */ | ||
670 | native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); | ||
671 | if (val[1] != mc_intel->hdr.rev) | ||
672 | return -1; | ||
673 | |||
674 | #ifdef CONFIG_X86_64 | ||
675 | /* Flush the global TLB. This is a precaution. */ | ||
676 | flush_tlb_early(); | ||
677 | #endif | ||
678 | uci->cpu_sig.rev = val[1]; | ||
679 | |||
680 | if (early) | ||
681 | print_ucode(uci); | ||
682 | else | ||
683 | print_ucode_info(uci, mc_intel->hdr.date); | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | |||
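One detail in apply_microcode_early() above: MSR_IA32_UCODE_WRITE takes the 64-bit linear address of the patch data split across EAX and EDX, and the '>> 16 >> 16' exists because shifting an unsigned long by 32 is undefined where long is 32 bits wide. The double shift folds to zero on 32-bit and yields the high dword on 64-bit:

#include <stdint.h>

static inline void split_msr_operand(unsigned long addr,
				     uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;
	*hi = (uint32_t)(addr >> 16 >> 16);	/* safe on 32- and 64-bit */
}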
688 | /* | ||
689 | * This function converts microcode patch offsets previously stored in | ||
690 | * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data. | ||
691 | */ | ||
692 | int __init save_microcode_in_initrd_intel(void) | ||
693 | { | ||
694 | unsigned int count = mc_saved_data.mc_saved_count; | ||
695 | struct microcode_intel *mc_saved[MAX_UCODE_COUNT]; | ||
696 | int ret = 0; | ||
697 | |||
698 | if (count == 0) | ||
699 | return ret; | ||
700 | |||
701 | copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count); | ||
702 | ret = save_microcode(&mc_saved_data, mc_saved, count); | ||
703 | if (ret) | ||
704 | pr_err("Cannot save microcode patches from initrd.\n"); | ||
705 | |||
706 | show_saved_mc(); | ||
707 | |||
708 | return ret; | ||
709 | } | ||
710 | |||
711 | static void __init | ||
712 | _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data, | ||
713 | unsigned long *initrd, | ||
714 | unsigned long start, unsigned long size) | ||
715 | { | ||
716 | struct ucode_cpu_info uci; | ||
717 | enum ucode_state ret; | ||
718 | |||
719 | collect_cpu_info_early(&uci); | ||
720 | |||
721 | ret = scan_microcode(mc_saved_data, initrd, start, size, &uci); | ||
722 | if (ret != UCODE_OK) | ||
723 | return; | ||
724 | |||
725 | ret = load_microcode(mc_saved_data, initrd, start, &uci); | ||
726 | if (ret != UCODE_OK) | ||
727 | return; | ||
728 | |||
729 | apply_microcode_early(&uci, true); | ||
730 | } | ||
731 | |||
732 | void __init load_ucode_intel_bsp(void) | ||
733 | { | ||
734 | u64 start, size; | ||
735 | #ifdef CONFIG_X86_32 | ||
736 | struct boot_params *p; | ||
737 | |||
738 | p = (struct boot_params *)__pa_nodebug(&boot_params); | ||
739 | start = p->hdr.ramdisk_image; | ||
740 | size = p->hdr.ramdisk_size; | ||
741 | |||
742 | _load_ucode_intel_bsp( | ||
743 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), | ||
744 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), | ||
745 | start, size); | ||
746 | #else | ||
747 | start = boot_params.hdr.ramdisk_image + PAGE_OFFSET; | ||
748 | size = boot_params.hdr.ramdisk_size; | ||
749 | |||
750 | _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size); | ||
751 | #endif | ||
752 | } | ||
753 | |||
754 | void load_ucode_intel_ap(void) | ||
755 | { | ||
756 | struct mc_saved_data *mc_saved_data_p; | ||
757 | struct ucode_cpu_info uci; | ||
758 | unsigned long *mc_saved_in_initrd_p; | ||
759 | unsigned long initrd_start_addr; | ||
760 | enum ucode_state ret; | ||
761 | #ifdef CONFIG_X86_32 | ||
762 | unsigned long *initrd_start_p; | ||
763 | |||
764 | mc_saved_in_initrd_p = | ||
765 | (unsigned long *)__pa_nodebug(mc_saved_in_initrd); | ||
766 | mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); | ||
767 | initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); | ||
768 | initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p); | ||
769 | #else | ||
770 | mc_saved_data_p = &mc_saved_data; | ||
771 | mc_saved_in_initrd_p = mc_saved_in_initrd; | ||
772 | initrd_start_addr = initrd_start; | ||
773 | #endif | ||
774 | |||
775 | /* | ||
776 | * If there is no valid ucode previously saved in memory, no need to | ||
777 | * update ucode on this AP. | ||
778 | */ | ||
779 | if (mc_saved_data_p->mc_saved_count == 0) | ||
780 | return; | ||
781 | |||
782 | collect_cpu_info_early(&uci); | ||
783 | ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p, | ||
784 | initrd_start_addr, &uci); | ||
785 | |||
786 | if (ret != UCODE_OK) | ||
787 | return; | ||
788 | |||
789 | apply_microcode_early(&uci, true); | ||
790 | } | ||
791 | |||
792 | void reload_ucode_intel(void) | ||
793 | { | ||
794 | struct ucode_cpu_info uci; | ||
795 | enum ucode_state ret; | ||
796 | |||
797 | if (!mc_saved_data.mc_saved_count) | ||
798 | return; | ||
799 | |||
800 | collect_cpu_info_early(&uci); | ||
801 | |||
802 | ret = load_microcode_early(mc_saved_data.mc_saved, | ||
803 | mc_saved_data.mc_saved_count, &uci); | ||
804 | if (ret != UCODE_OK) | ||
805 | return; | ||
806 | |||
807 | apply_microcode_early(&uci, false); | ||
808 | } | ||
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c index 1883d252ff7d..b96896bcbdaf 100644 --- a/arch/x86/kernel/cpu/microcode/intel_lib.c +++ b/arch/x86/kernel/cpu/microcode/intel_lib.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | ||
29 | 28 | ||
30 | #include <asm/microcode_intel.h> | 29 | #include <asm/microcode_intel.h> |
31 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 0e2d96ffd158..6bc9ae24b6d2 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -152,7 +152,7 @@ ENTRY(startup_32) | |||
152 | movl %eax, pa(olpc_ofw_pgd) | 152 | movl %eax, pa(olpc_ofw_pgd) |
153 | #endif | 153 | #endif |
154 | 154 | ||
155 | #ifdef CONFIG_MICROCODE_EARLY | 155 | #ifdef CONFIG_MICROCODE |
156 | /* Early load ucode on BSP. */ | 156 | /* Early load ucode on BSP. */ |
157 | call load_ucode_bsp | 157 | call load_ucode_bsp |
158 | #endif | 158 | #endif |
@@ -311,12 +311,11 @@ ENTRY(startup_32_smp) | |||
311 | movl %eax,%ss | 311 | movl %eax,%ss |
312 | leal -__PAGE_OFFSET(%ecx),%esp | 312 | leal -__PAGE_OFFSET(%ecx),%esp |
313 | 313 | ||
314 | #ifdef CONFIG_MICROCODE_EARLY | 314 | #ifdef CONFIG_MICROCODE |
315 | /* Early load ucode on AP. */ | 315 | /* Early load ucode on AP. */ |
316 | call load_ucode_ap | 316 | call load_ucode_ap |
317 | #endif | 317 | #endif |
318 | 318 | ||
319 | |||
320 | default_entry: | 319 | default_entry: |
321 | #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ | 320 | #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ |
322 | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ | 321 | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 7a83b7874b40..a1e4da98c8f0 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -111,6 +111,7 @@ | |||
111 | #include <asm/mce.h> | 111 | #include <asm/mce.h> |
112 | #include <asm/alternative.h> | 112 | #include <asm/alternative.h> |
113 | #include <asm/prom.h> | 113 | #include <asm/prom.h> |
114 | #include <asm/microcode.h> | ||
114 | 115 | ||
115 | /* | 116 | /* |
116 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB | 117 | * max_low_pfn_mapped: highest direct mapped pfn under 4GB |
@@ -480,34 +481,34 @@ static void __init memblock_x86_reserve_range_setup_data(void) | |||
480 | 481 | ||
481 | #ifdef CONFIG_KEXEC_CORE | 482 | #ifdef CONFIG_KEXEC_CORE |
482 | 483 | ||
484 | /* 16M alignment for crash kernel regions */ | ||
485 | #define CRASH_ALIGN (16 << 20) | ||
486 | |||
483 | /* | 487 | /* |
484 | * Keep the crash kernel below this limit. On 32-bit, earlier kernels | 488 | * Keep the crash kernel below this limit. On 32-bit, earlier kernels
485 | * would limit the kernel to the low 512 MiB due to mapping restrictions. | 489 | * would limit the kernel to the low 512 MiB due to mapping restrictions. |
486 | * On 64-bit, old kexec-tools need it to be under 896 MiB. | 490 | * On 64-bit, old kexec-tools need it to be under 896 MiB.
487 | */ | 491 | */ |
488 | #ifdef CONFIG_X86_32 | 492 | #ifdef CONFIG_X86_32 |
489 | # define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) | 493 | # define CRASH_ADDR_LOW_MAX (512 << 20) |
490 | # define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) | 494 | # define CRASH_ADDR_HIGH_MAX (512 << 20) |
491 | #else | 495 | #else |
492 | # define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) | 496 | # define CRASH_ADDR_LOW_MAX (896UL << 20) |
493 | # define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM | 497 | # define CRASH_ADDR_HIGH_MAX MAXMEM |
494 | #endif | 498 | #endif |
495 | 499 | ||
496 | static void __init reserve_crashkernel_low(void) | 500 | static int __init reserve_crashkernel_low(void) |
497 | { | 501 | { |
498 | #ifdef CONFIG_X86_64 | 502 | #ifdef CONFIG_X86_64 |
499 | const unsigned long long alignment = 16<<20; /* 16M */ | 503 | unsigned long long base, low_base = 0, low_size = 0; |
500 | unsigned long long low_base = 0, low_size = 0; | ||
501 | unsigned long total_low_mem; | 504 | unsigned long total_low_mem; |
502 | unsigned long long base; | ||
503 | bool auto_set = false; | ||
504 | int ret; | 505 | int ret; |
505 | 506 | ||
506 | total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); | 507 | total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT)); |
508 | |||
507 | /* crashkernel=Y,low */ | 509 | /* crashkernel=Y,low */ |
508 | ret = parse_crashkernel_low(boot_command_line, total_low_mem, | 510 | ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); |
509 | &low_size, &base); | 511 | if (ret) { |
510 | if (ret != 0) { | ||
511 | /* | 512 | /* |
512 | * two parts from lib/swiotlb.c: | 513 | * two parts from lib/swiotlb.c: |
513 | * -swiotlb size: user-specified with swiotlb= or default. | 514 | * -swiotlb size: user-specified with swiotlb= or default. |
@@ -517,52 +518,52 @@ static void __init reserve_crashkernel_low(void) | |||
517 | * make sure we allocate enough extra low memory so that we | 518 | * make sure we allocate enough extra low memory so that we |
518 | * don't run out of DMA buffers for 32-bit devices. | 519 | * don't run out of DMA buffers for 32-bit devices. |
519 | */ | 520 | */ |
520 | low_size = max(swiotlb_size_or_default() + (8UL<<20), 256UL<<20); | 521 | low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20); |
521 | auto_set = true; | ||
522 | } else { | 522 | } else { |
523 | /* passed with crashkernel=0,low ? */ | 523 | /* passed with crashkernel=0,low ? */ |
524 | if (!low_size) | 524 | if (!low_size) |
525 | return; | 525 | return 0; |
526 | } | 526 | } |
527 | 527 | ||
528 | low_base = memblock_find_in_range(low_size, (1ULL<<32), | 528 | low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN); |
529 | low_size, alignment); | ||
530 | |||
531 | if (!low_base) { | 529 | if (!low_base) { |
532 | if (!auto_set) | 530 | pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n", |
533 | pr_info("crashkernel low reservation failed - No suitable area found.\n"); | 531 | (unsigned long)(low_size >> 20)); |
532 | return -ENOMEM; | ||
533 | } | ||
534 | 534 | ||
535 | return; | 535 | ret = memblock_reserve(low_base, low_size); |
536 | if (ret) { | ||
537 | pr_err("%s: Error reserving crashkernel low memblock.\n", __func__); | ||
538 | return ret; | ||
536 | } | 539 | } |
537 | 540 | ||
538 | memblock_reserve(low_base, low_size); | ||
539 | pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", | 541 | pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", |
540 | (unsigned long)(low_size >> 20), | 542 | (unsigned long)(low_size >> 20), |
541 | (unsigned long)(low_base >> 20), | 543 | (unsigned long)(low_base >> 20), |
542 | (unsigned long)(total_low_mem >> 20)); | 544 | (unsigned long)(total_low_mem >> 20)); |
545 | |||
543 | crashk_low_res.start = low_base; | 546 | crashk_low_res.start = low_base; |
544 | crashk_low_res.end = low_base + low_size - 1; | 547 | crashk_low_res.end = low_base + low_size - 1; |
545 | insert_resource(&iomem_resource, &crashk_low_res); | 548 | insert_resource(&iomem_resource, &crashk_low_res); |
546 | #endif | 549 | #endif |
550 | return 0; | ||
547 | } | 551 | } |
548 | 552 | ||
549 | static void __init reserve_crashkernel(void) | 553 | static void __init reserve_crashkernel(void) |
550 | { | 554 | { |
551 | const unsigned long long alignment = 16<<20; /* 16M */ | 555 | unsigned long long crash_size, crash_base, total_mem; |
552 | unsigned long long total_mem; | ||
553 | unsigned long long crash_size, crash_base; | ||
554 | bool high = false; | 556 | bool high = false; |
555 | int ret; | 557 | int ret; |
556 | 558 | ||
557 | total_mem = memblock_phys_mem_size(); | 559 | total_mem = memblock_phys_mem_size(); |
558 | 560 | ||
559 | /* crashkernel=XM */ | 561 | /* crashkernel=XM */ |
560 | ret = parse_crashkernel(boot_command_line, total_mem, | 562 | ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); |
561 | &crash_size, &crash_base); | ||
562 | if (ret != 0 || crash_size <= 0) { | 563 | if (ret != 0 || crash_size <= 0) { |
563 | /* crashkernel=X,high */ | 564 | /* crashkernel=X,high */ |
564 | ret = parse_crashkernel_high(boot_command_line, total_mem, | 565 | ret = parse_crashkernel_high(boot_command_line, total_mem, |
565 | &crash_size, &crash_base); | 566 | &crash_size, &crash_base); |
566 | if (ret != 0 || crash_size <= 0) | 567 | if (ret != 0 || crash_size <= 0) |
567 | return; | 568 | return; |
568 | high = true; | 569 | high = true; |
@@ -573,11 +574,10 @@ static void __init reserve_crashkernel(void) | |||
573 | /* | 574 | /* |
574 | * kexec wants the bzImage below CRASH_KERNEL_ADDR_MAX | 575 | * kexec wants the bzImage below CRASH_KERNEL_ADDR_MAX
575 | */ | 576 | */ |
576 | crash_base = memblock_find_in_range(alignment, | 577 | crash_base = memblock_find_in_range(CRASH_ALIGN, |
577 | high ? CRASH_KERNEL_ADDR_HIGH_MAX : | 578 | high ? CRASH_ADDR_HIGH_MAX |
578 | CRASH_KERNEL_ADDR_LOW_MAX, | 579 | : CRASH_ADDR_LOW_MAX, |
579 | crash_size, alignment); | 580 | crash_size, CRASH_ALIGN); |
580 | |||
581 | if (!crash_base) { | 581 | if (!crash_base) { |
582 | pr_info("crashkernel reservation failed - No suitable area found.\n"); | 582 | pr_info("crashkernel reservation failed - No suitable area found.\n"); |
583 | return; | 583 | return; |
@@ -587,26 +587,32 @@ static void __init reserve_crashkernel(void) | |||
587 | unsigned long long start; | 587 | unsigned long long start; |
588 | 588 | ||
589 | start = memblock_find_in_range(crash_base, | 589 | start = memblock_find_in_range(crash_base, |
590 | crash_base + crash_size, crash_size, 1<<20); | 590 | crash_base + crash_size, |
591 | crash_size, 1 << 20); | ||
591 | if (start != crash_base) { | 592 | if (start != crash_base) { |
592 | pr_info("crashkernel reservation failed - memory is in use.\n"); | 593 | pr_info("crashkernel reservation failed - memory is in use.\n"); |
593 | return; | 594 | return; |
594 | } | 595 | } |
595 | } | 596 | } |
596 | memblock_reserve(crash_base, crash_size); | 597 | ret = memblock_reserve(crash_base, crash_size); |
598 | if (ret) { | ||
599 | pr_err("%s: Error reserving crashkernel memblock.\n", __func__); | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) { | ||
604 | memblock_free(crash_base, crash_size); | ||
605 | return; | ||
606 | } | ||
597 | 607 | ||
598 | printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " | 608 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", |
599 | "for crashkernel (System RAM: %ldMB)\n", | 609 | (unsigned long)(crash_size >> 20), |
600 | (unsigned long)(crash_size >> 20), | 610 | (unsigned long)(crash_base >> 20), |
601 | (unsigned long)(crash_base >> 20), | 611 | (unsigned long)(total_mem >> 20)); |
602 | (unsigned long)(total_mem >> 20)); | ||
603 | 612 | ||
604 | crashk_res.start = crash_base; | 613 | crashk_res.start = crash_base; |
605 | crashk_res.end = crash_base + crash_size - 1; | 614 | crashk_res.end = crash_base + crash_size - 1; |
606 | insert_resource(&iomem_resource, &crashk_res); | 615 | insert_resource(&iomem_resource, &crashk_res); |
607 | |||
608 | if (crash_base >= (1ULL<<32)) | ||
609 | reserve_crashkernel_low(); | ||
610 | } | 616 | } |
611 | #else | 617 | #else |
612 | static void __init reserve_crashkernel(void) | 618 | static void __init reserve_crashkernel(void) |
@@ -1244,6 +1250,8 @@ void __init setup_arch(char **cmdline_p) | |||
1244 | if (efi_enabled(EFI_BOOT)) | 1250 | if (efi_enabled(EFI_BOOT)) |
1245 | efi_apply_memmap_quirks(); | 1251 | efi_apply_memmap_quirks(); |
1246 | #endif | 1252 | #endif |
1253 | |||
1254 | microcode_init(); | ||
1247 | } | 1255 | } |
1248 | 1256 | ||
1249 | #ifdef CONFIG_X86_32 | 1257 | #ifdef CONFIG_X86_32 |
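The net effect of the setup.c changes above: both reservations now check memblock_reserve()'s return value, and a high (above 4 GiB) crashkernel region is rolled back when its mandatory low companion for 32-bit DMA cannot be reserved, instead of being left half-configured. A toy model of that control flow, with reserve() and release() as hypothetical stand-ins for the memblock calls:

#include <stdint.h>
#include <stdio.h>

static void release(const char *what) { printf("released %s\n", what); }

static int reserve(const char *what, int ok)
{
	printf("%s: %s\n", what, ok ? "reserved" : "failed");
	return ok ? 0 : -1;
}

static int reserve_crashkernel_model(uint64_t crash_base,
				     int high_ok, int low_ok)
{
	if (reserve("crashkernel", high_ok))
		return -1;

	/* A region above 4 GiB needs a low companion for 32-bit DMA. */
	if (crash_base >= (1ULL << 32) && reserve("crashkernel,low", low_ok)) {
		release("crashkernel");	/* undo; boot continues without kdump */
		return -1;
	}

	return 0;
}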
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 1d8a83df153a..1f37cb2b56a9 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -693,14 +693,12 @@ void free_initmem(void) | |||
693 | #ifdef CONFIG_BLK_DEV_INITRD | 693 | #ifdef CONFIG_BLK_DEV_INITRD |
694 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 694 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
695 | { | 695 | { |
696 | #ifdef CONFIG_MICROCODE_EARLY | ||
697 | /* | 696 | /* |
698 | * Remember, initrd memory may contain microcode or other useful things. | 697 | * Remember, initrd memory may contain microcode or other useful things. |
699 | * Before we lose initrd mem, we need to find a place to hold them | 698 | * Before we lose initrd mem, we need to find a place to hold them |
700 | * now that normal virtual memory is enabled. | 699 | * now that normal virtual memory is enabled. |
701 | */ | 700 | */ |
702 | save_microcode_in_initrd(); | 701 | save_microcode_in_initrd(); |
703 | #endif | ||
704 | 702 | ||
705 | /* | 703 | /* |
706 | * end may not be aligned, and we cannot align it, | 704 | * end may not be aligned, and we cannot align it,
diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig index 10fea5fc821e..df280da34825 100644 --- a/arch/x86/ras/Kconfig +++ b/arch/x86/ras/Kconfig | |||
@@ -1,11 +1,9 @@ | |||
1 | config AMD_MCE_INJ | 1 | config AMD_MCE_INJ |
2 | tristate "Simple MCE injection interface for AMD processors" | 2 | tristate "Simple MCE injection interface for AMD processors" |
3 | depends on RAS && EDAC_DECODE_MCE && DEBUG_FS | 3 | depends on RAS && EDAC_DECODE_MCE && DEBUG_FS && AMD_NB |
4 | default n | 4 | default n |
5 | help | 5 | help |
6 | This is a simple debugfs interface to inject MCEs and test different | 6 | This is a simple debugfs interface to inject MCEs and test different |
7 | aspects of the MCE handling code. | 7 | aspects of the MCE handling code. |
8 | 8 | ||
9 | WARNING: Do not even assume this interface is staying stable! | 9 | WARNING: Do not even assume this interface is staying stable! |
10 | |||
11 | |||
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c index 17e35b5bf779..55d38cfa46c2 100644 --- a/arch/x86/ras/mce_amd_inj.c +++ b/arch/x86/ras/mce_amd_inj.c | |||
@@ -17,7 +17,11 @@ | |||
17 | #include <linux/cpu.h> | 17 | #include <linux/cpu.h> |
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
20 | #include <linux/pci.h> | ||
21 | |||
20 | #include <asm/mce.h> | 22 | #include <asm/mce.h> |
23 | #include <asm/amd_nb.h> | ||
24 | #include <asm/irq_vectors.h> | ||
21 | 25 | ||
22 | #include "../kernel/cpu/mcheck/mce-internal.h" | 26 | #include "../kernel/cpu/mcheck/mce-internal.h" |
23 | 27 | ||
@@ -30,16 +34,21 @@ static struct dentry *dfs_inj; | |||
30 | static u8 n_banks; | 34 | static u8 n_banks; |
31 | 35 | ||
32 | #define MAX_FLAG_OPT_SIZE 3 | 36 | #define MAX_FLAG_OPT_SIZE 3 |
37 | #define NBCFG 0x44 | ||
33 | 38 | ||
34 | enum injection_type { | 39 | enum injection_type { |
35 | SW_INJ = 0, /* SW injection, simply decode the error */ | 40 | SW_INJ = 0, /* SW injection, simply decode the error */ |
36 | HW_INJ, /* Trigger a #MC */ | 41 | HW_INJ, /* Trigger a #MC */ |
42 | DFR_INT_INJ, /* Trigger Deferred error interrupt */ | ||
43 | THR_INT_INJ, /* Trigger threshold interrupt */ | ||
37 | N_INJ_TYPES, | 44 | N_INJ_TYPES, |
38 | }; | 45 | }; |
39 | 46 | ||
40 | static const char * const flags_options[] = { | 47 | static const char * const flags_options[] = { |
41 | [SW_INJ] = "sw", | 48 | [SW_INJ] = "sw", |
42 | [HW_INJ] = "hw", | 49 | [HW_INJ] = "hw", |
50 | [DFR_INT_INJ] = "df", | ||
51 | [THR_INT_INJ] = "th", | ||
43 | NULL | 52 | NULL |
44 | }; | 53 | }; |
45 | 54 | ||
@@ -129,12 +138,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, | |||
129 | { | 138 | { |
130 | char buf[MAX_FLAG_OPT_SIZE], *__buf; | 139 | char buf[MAX_FLAG_OPT_SIZE], *__buf; |
131 | int err; | 140 | int err; |
132 | size_t ret; | ||
133 | 141 | ||
134 | if (cnt > MAX_FLAG_OPT_SIZE) | 142 | if (cnt > MAX_FLAG_OPT_SIZE) |
135 | cnt = MAX_FLAG_OPT_SIZE; | 143 | return -EINVAL; |
136 | |||
137 | ret = cnt; | ||
138 | 144 | ||
139 | if (copy_from_user(&buf, ubuf, cnt)) | 145 | if (copy_from_user(&buf, ubuf, cnt)) |
140 | return -EFAULT; | 146 | return -EFAULT; |
@@ -150,9 +156,9 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, | |||
150 | return err; | 156 | return err; |
151 | } | 157 | } |
152 | 158 | ||
153 | *ppos += ret; | 159 | *ppos += cnt; |
154 | 160 | ||
155 | return ret; | 161 | return cnt; |
156 | } | 162 | } |
157 | 163 | ||
158 | static const struct file_operations flags_fops = { | 164 | static const struct file_operations flags_fops = { |
@@ -185,6 +191,55 @@ static void trigger_mce(void *info) | |||
185 | asm volatile("int $18"); | 191 | asm volatile("int $18"); |
186 | } | 192 | } |
187 | 193 | ||
194 | static void trigger_dfr_int(void *info) | ||
195 | { | ||
196 | asm volatile("int %0" :: "i" (DEFERRED_ERROR_VECTOR)); | ||
197 | } | ||
198 | |||
199 | static void trigger_thr_int(void *info) | ||
200 | { | ||
201 | asm volatile("int %0" :: "i" (THRESHOLD_APIC_VECTOR)); | ||
202 | } | ||
203 | |||
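Both triggers above use the "i" constraint so the vector number is baked into the INT instruction as an immediate; INT has no register form, so a non-constant vector would not assemble. A stand-alone equivalent — the vector value here is purely illustrative, and actually executing a software interrupt like this outside the kernel just raises #GP:

#define DEMO_VECTOR 0xf9	/* illustrative, not a real vector assignment */

static void __attribute__((unused)) trigger_demo(void *info)
{
	(void)info;
	asm volatile("int %0" :: "i" (DEMO_VECTOR));
}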
204 | static u32 get_nbc_for_node(int node_id) | ||
205 | { | ||
206 | struct cpuinfo_x86 *c = &boot_cpu_data; | ||
207 | u32 cores_per_node; | ||
208 | |||
209 | cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket(); | ||
210 | |||
211 | return cores_per_node * node_id; | ||
212 | } | ||
213 | |||
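get_nbc_for_node() above computes the node base core as cores-per-node times the node id, with cores-per-node derived from boot_cpu_data.x86_max_cores and amd_get_nodes_per_socket(). A worked example with hypothetical topology numbers:

#include <stdio.h>

static unsigned int nbc_for_node(unsigned int node_id,
				 unsigned int cores_per_socket,
				 unsigned int nodes_per_socket)
{
	return (cores_per_socket / nodes_per_socket) * node_id;
}

int main(void)
{
	/* 16 cores and 2 nodes per socket: 8 cores per node,
	 * so node 1's base core is core 8. */
	printf("%u\n", nbc_for_node(1, 16, 2));
	return 0;
}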
214 | static void toggle_nb_mca_mst_cpu(u16 nid) | ||
215 | { | ||
216 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | ||
217 | u32 val; | ||
218 | int err; | ||
219 | |||
220 | if (!F3) | ||
221 | return; | ||
222 | |||
223 | err = pci_read_config_dword(F3, NBCFG, &val); | ||
224 | if (err) { | ||
225 | pr_err("%s: Error reading F%dx%03x.\n", | ||
226 | __func__, PCI_FUNC(F3->devfn), NBCFG); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | if (val & BIT(27)) | ||
231 | return; | ||
232 | |||
233 | pr_err("%s: Set D18F3x44[NbMcaToMstCpuEn] which BIOS hasn't done.\n", | ||
234 | __func__); | ||
235 | |||
236 | val |= BIT(27); | ||
237 | err = pci_write_config_dword(F3, NBCFG, val); | ||
238 | if (err) | ||
239 | pr_err("%s: Error writing F%dx%03x.\n", | ||
240 | __func__, PCI_FUNC(F3->devfn), NBCFG); | ||
241 | } | ||
242 | |||
188 | static void do_inject(void) | 243 | static void do_inject(void) |
189 | { | 244 | { |
190 | u64 mcg_status = 0; | 245 | u64 mcg_status = 0; |
@@ -205,6 +260,26 @@ static void do_inject(void) | |||
205 | if (!(i_mce.status & MCI_STATUS_PCC)) | 260 | if (!(i_mce.status & MCI_STATUS_PCC)) |
206 | mcg_status |= MCG_STATUS_RIPV; | 261 | mcg_status |= MCG_STATUS_RIPV; |
207 | 262 | ||
263 | /* | ||
264 | * Ensure necessary status bits for deferred errors: | ||
265 | * - MCx_STATUS[Deferred]: make sure it is a deferred error | ||
266 | * - MCx_STATUS[UC] cleared: deferred errors are _not_ UC | ||
267 | */ | ||
268 | if (inj_type == DFR_INT_INJ) { | ||
269 | i_mce.status |= MCI_STATUS_DEFERRED; | ||
270 | i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * For multi node CPUs, logging and reporting of bank 4 errors happens | ||
275 | * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for | ||
276 | * Fam10h and later BKDGs. | ||
277 | */ | ||
278 | if (static_cpu_has(X86_FEATURE_AMD_DCM) && b == 4) { | ||
279 | toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu)); | ||
280 | cpu = get_nbc_for_node(amd_get_nb_id(cpu)); | ||
281 | } | ||
282 | |||
208 | get_online_cpus(); | 283 | get_online_cpus(); |
209 | if (!cpu_online(cpu)) | 284 | if (!cpu_online(cpu)) |
210 | goto err; | 285 | goto err; |
@@ -225,7 +300,16 @@ static void do_inject(void) | |||
225 | 300 | ||
226 | toggle_hw_mce_inject(cpu, false); | 301 | toggle_hw_mce_inject(cpu, false); |
227 | 302 | ||
228 | smp_call_function_single(cpu, trigger_mce, NULL, 0); | 303 | switch (inj_type) { |
304 | case DFR_INT_INJ: | ||
305 | smp_call_function_single(cpu, trigger_dfr_int, NULL, 0); | ||
306 | break; | ||
307 | case THR_INT_INJ: | ||
308 | smp_call_function_single(cpu, trigger_thr_int, NULL, 0); | ||
309 | break; | ||
310 | default: | ||
311 | smp_call_function_single(cpu, trigger_mce, NULL, 0); | ||
312 | } | ||
229 | 313 | ||
230 | err: | 314 | err: |
231 | put_online_cpus(); | 315 | put_online_cpus(); |
@@ -290,6 +374,11 @@ static const char readme_msg[] = | |||
290 | "\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" | 374 | "\t handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n" |
291 | "\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" | 375 | "\t is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n" |
292 | "\t before injecting.\n" | 376 | "\t before injecting.\n" |
377 | "\t - \"df\": Trigger APIC interrupt for Deferred error. Causes deferred \n" | ||
378 | "\t error APIC interrupt handler to handle the error if the feature is \n" | ||
379 | "\t is present in hardware. \n" | ||
380 | "\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n" | ||
381 | "\t APIC interrupt handler to handle the error. \n" | ||
293 | "\n"; | 382 | "\n"; |
294 | 383 | ||
295 | static ssize_t | 384 | static ssize_t |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ca03a736b106..9eee13ef83a5 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2785,7 +2785,7 @@ static int init_one_instance(struct pci_dev *F2) | |||
2785 | struct mem_ctl_info *mci = NULL; | 2785 | struct mem_ctl_info *mci = NULL; |
2786 | struct edac_mc_layer layers[2]; | 2786 | struct edac_mc_layer layers[2]; |
2787 | int err = 0, ret; | 2787 | int err = 0, ret; |
2788 | u16 nid = amd_get_node_id(F2); | 2788 | u16 nid = amd_pci_dev_to_node_id(F2); |
2789 | 2789 | ||
2790 | ret = -ENOMEM; | 2790 | ret = -ENOMEM; |
2791 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | 2791 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); |
@@ -2875,7 +2875,7 @@ err_ret: | |||
2875 | static int probe_one_instance(struct pci_dev *pdev, | 2875 | static int probe_one_instance(struct pci_dev *pdev, |
2876 | const struct pci_device_id *mc_type) | 2876 | const struct pci_device_id *mc_type) |
2877 | { | 2877 | { |
2878 | u16 nid = amd_get_node_id(pdev); | 2878 | u16 nid = amd_pci_dev_to_node_id(pdev); |
2879 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | 2879 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; |
2880 | struct ecc_settings *s; | 2880 | struct ecc_settings *s; |
2881 | int ret = 0; | 2881 | int ret = 0; |
@@ -2925,7 +2925,7 @@ static void remove_one_instance(struct pci_dev *pdev) | |||
2925 | { | 2925 | { |
2926 | struct mem_ctl_info *mci; | 2926 | struct mem_ctl_info *mci; |
2927 | struct amd64_pvt *pvt; | 2927 | struct amd64_pvt *pvt; |
2928 | u16 nid = amd_get_node_id(pdev); | 2928 | u16 nid = amd_pci_dev_to_node_id(pdev); |
2929 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; | 2929 | struct pci_dev *F3 = node_to_amd_nb(nid)->misc; |
2930 | struct ecc_settings *s = ecc_stngs[nid]; | 2930 | struct ecc_settings *s = ecc_stngs[nid]; |
2931 | 2931 | ||
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 201b45327804..bd9f8a03cefa 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -1149,7 +1149,7 @@ static int __init parse_crashkernel_simple(char *cmdline, | |||
1149 | if (*cur == '@') | 1149 | if (*cur == '@') |
1150 | *crash_base = memparse(cur+1, &cur); | 1150 | *crash_base = memparse(cur+1, &cur); |
1151 | else if (*cur != ' ' && *cur != '\0') { | 1151 | else if (*cur != ' ' && *cur != '\0') { |
1152 | pr_warn("crashkernel: unrecognized char\n"); | 1152 | pr_warn("crashkernel: unrecognized char: %c\n", *cur); |
1153 | return -EINVAL; | 1153 | return -EINVAL; |
1154 | } | 1154 | } |
1155 | 1155 | ||
@@ -1186,12 +1186,12 @@ static int __init parse_crashkernel_suffix(char *cmdline, | |||
1186 | 1186 | ||
1187 | /* check with suffix */ | 1187 | /* check with suffix */ |
1188 | if (strncmp(cur, suffix, strlen(suffix))) { | 1188 | if (strncmp(cur, suffix, strlen(suffix))) { |
1189 | pr_warn("crashkernel: unrecognized char\n"); | 1189 | pr_warn("crashkernel: unrecognized char: %c\n", *cur); |
1190 | return -EINVAL; | 1190 | return -EINVAL; |
1191 | } | 1191 | } |
1192 | cur += strlen(suffix); | 1192 | cur += strlen(suffix); |
1193 | if (*cur != ' ' && *cur != '\0') { | 1193 | if (*cur != ' ' && *cur != '\0') { |
1194 | pr_warn("crashkernel: unrecognized char\n"); | 1194 | pr_warn("crashkernel: unrecognized char: %c\n", *cur); |
1195 | return -EINVAL; | 1195 | return -EINVAL; |
1196 | } | 1196 | } |
1197 | 1197 | ||
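For context on the messages patched above: parse_crashkernel_simple() accepts forms such as "crashkernel=128M@16M" and now reports which character it rejected. A toy re-implementation of that parse path, with memparse_toy() as a simplified stand-in for the kernel's memparse():

#include <stdio.h>
#include <stdlib.h>

static unsigned long long memparse_toy(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10; (*end)++; break;
	}

	return v;
}

/* Parse "<size>[@<base>]", e.g. the value of crashkernel=128M@16M. */
static int parse_simple(const char *cmdline,
			unsigned long long *size, unsigned long long *base)
{
	char *cur;

	*size = memparse_toy(cmdline, &cur);

	if (*cur == '@')
		*base = memparse_toy(cur + 1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		fprintf(stderr, "crashkernel: unrecognized char: %c\n", *cur);
		return -1;
	}

	return 0;
}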