about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-20 18:30:51 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-20 18:30:51 -0500
commita25a1d6c24ab50ee9dd26869912df5724d7ef48e (patch)
treec176f2208fbd1fdb98d5c95ea9205329269b053d
parent280d7a1edef214eefb1cb34915c73767355dd1b3 (diff)
parentf26483eaedec39b09b1f2bdfc3f0d18f86764327 (diff)
Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 microcode updates from Ingo Molnar:
 "The main changes are further simplification and unification of the
  code between the AMD and Intel microcode loaders, plus other
  simplifications - by Borislav Petkov"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode/AMD: Remove struct cont_desc.eq_id
  x86/microcode/AMD: Remove AP scanning optimization
  x86/microcode/AMD: Simplify saving from initrd
  x86/microcode/AMD: Unify load_ucode_amd_ap()
  x86/microcode/AMD: Check patch level only on the BSP
  x86/microcode: Remove local vendor variable
  x86/microcode/AMD: Use find_microcode_in_initrd()
  x86/microcode/AMD: Get rid of global this_equiv_id
  x86/microcode: Decrease CPUID use
  x86/microcode/AMD: Rework container parsing
  x86/microcode/AMD: Extend the container struct
  x86/microcode/AMD: Shorten function parameter's name
  x86/microcode/AMD: Clean up find_equiv_id()
  x86/microcode: Convert to bare minimum MSR accessors
  x86/MSR: Carve out bare minimum accessors
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/microcode.h9
-rw-r--r--arch/x86/include/asm/microcode_amd.h2
-rw-r--r--arch/x86/include/asm/msr.h51
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c501
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c81
6 files changed, 249 insertions, 397 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 0c5fbc68e82d..eff8e36aaf72 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -195,7 +195,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
195 195
196static inline void native_apic_msr_eoi_write(u32 reg, u32 v) 196static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
197{ 197{
198 wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0); 198 __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
199} 199}
200 200
201static inline u32 native_apic_msr_read(u32 reg) 201static inline u32 native_apic_msr_read(u32 reg)
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 2266f864b747..daadeeea00b1 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -7,18 +7,17 @@
7 7
8#define native_rdmsr(msr, val1, val2) \ 8#define native_rdmsr(msr, val1, val2) \
9do { \ 9do { \
10 u64 __val = native_read_msr((msr)); \ 10 u64 __val = __rdmsr((msr)); \
11 (void)((val1) = (u32)__val); \ 11 (void)((val1) = (u32)__val); \
12 (void)((val2) = (u32)(__val >> 32)); \ 12 (void)((val2) = (u32)(__val >> 32)); \
13} while (0) 13} while (0)
14 14
15#define native_wrmsr(msr, low, high) \ 15#define native_wrmsr(msr, low, high) \
16 native_write_msr(msr, low, high) 16 __wrmsr(msr, low, high)
17 17
18#define native_wrmsrl(msr, val) \ 18#define native_wrmsrl(msr, val) \
19 native_write_msr((msr), \ 19 __wrmsr((msr), (u32)((u64)(val)), \
20 (u32)((u64)(val)), \ 20 (u32)((u64)(val) >> 32))
21 (u32)((u64)(val) >> 32))
22 21
23struct ucode_patch { 22struct ucode_patch {
24 struct list_head plist; 23 struct list_head plist;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 3e3e20be829a..3d57009e168b 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -54,6 +54,4 @@ static inline int __init
54save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } 54save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
55void reload_ucode_amd(void) {} 55void reload_ucode_amd(void) {}
56#endif 56#endif
57
58extern bool check_current_patch_level(u32 *rev, bool early);
59#endif /* _ASM_X86_MICROCODE_AMD_H */ 57#endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index db0b90c3b03e..898dba2e2e2c 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -80,7 +80,14 @@ static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
80static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} 80static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
81#endif 81#endif
82 82
83static inline unsigned long long native_read_msr(unsigned int msr) 83/*
84 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
85 * accessors and should not have any tracing or other functionality piggybacking
86 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
87 * think of extending them - you will be slapped with a stinking trout or a frozen
88 * shark will reach you, wherever you are! You've been warned.
89 */
90static inline unsigned long long notrace __rdmsr(unsigned int msr)
84{ 91{
85 DECLARE_ARGS(val, low, high); 92 DECLARE_ARGS(val, low, high);
86 93
@@ -88,11 +95,30 @@ static inline unsigned long long native_read_msr(unsigned int msr)
88 "2:\n" 95 "2:\n"
89 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe) 96 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
90 : EAX_EDX_RET(val, low, high) : "c" (msr)); 97 : EAX_EDX_RET(val, low, high) : "c" (msr));
91 if (msr_tracepoint_active(__tracepoint_read_msr)) 98
92 do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
93 return EAX_EDX_VAL(val, low, high); 99 return EAX_EDX_VAL(val, low, high);
94} 100}
95 101
102static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
103{
104 asm volatile("1: wrmsr\n"
105 "2:\n"
106 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
107 : : "c" (msr), "a"(low), "d" (high) : "memory");
108}
109
110static inline unsigned long long native_read_msr(unsigned int msr)
111{
112 unsigned long long val;
113
114 val = __rdmsr(msr);
115
116 if (msr_tracepoint_active(__tracepoint_read_msr))
117 do_trace_read_msr(msr, val, 0);
118
119 return val;
120}
121
96static inline unsigned long long native_read_msr_safe(unsigned int msr, 122static inline unsigned long long native_read_msr_safe(unsigned int msr,
97 int *err) 123 int *err)
98{ 124{
@@ -116,29 +142,14 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
116 142
117/* Can be uninlined because referenced by paravirt */ 143/* Can be uninlined because referenced by paravirt */
118static inline void notrace 144static inline void notrace
119__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
120{
121 asm volatile("1: wrmsr\n"
122 "2:\n"
123 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
124 : : "c" (msr), "a"(low), "d" (high) : "memory");
125}
126
127/* Can be uninlined because referenced by paravirt */
128static inline void notrace
129native_write_msr(unsigned int msr, u32 low, u32 high) 145native_write_msr(unsigned int msr, u32 low, u32 high)
130{ 146{
131 __native_write_msr_notrace(msr, low, high); 147 __wrmsr(msr, low, high);
148
132 if (msr_tracepoint_active(__tracepoint_write_msr)) 149 if (msr_tracepoint_active(__tracepoint_write_msr))
133 do_trace_write_msr(msr, ((u64)high << 32 | low), 0); 150 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
134} 151}
135 152
136static inline void
137wrmsr_notrace(unsigned int msr, u32 low, u32 high)
138{
139 __native_write_msr_notrace(msr, low, high);
140}
141
142/* Can be uninlined because referenced by paravirt */ 153/* Can be uninlined because referenced by paravirt */
143static inline int notrace 154static inline int notrace
144native_write_msr_safe(unsigned int msr, u32 low, u32 high) 155native_write_msr_safe(unsigned int msr, u32 low, u32 high)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 079e81733a58..7889ae492af0 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -42,16 +42,19 @@ static struct equiv_cpu_entry *equiv_cpu_table;
42 42
43/* 43/*
44 * This points to the current valid container of microcode patches which we will 44 * This points to the current valid container of microcode patches which we will
45 * save from the initrd/builtin before jettisoning its contents. 45 * save from the initrd/builtin before jettisoning its contents. @mc is the
46 * microcode patch we found to match.
46 */ 47 */
47struct container { 48struct cont_desc {
48 u8 *data; 49 struct microcode_amd *mc;
49 size_t size; 50 u32 cpuid_1_eax;
50} cont; 51 u32 psize;
52 u8 *data;
53 size_t size;
54};
51 55
52static u32 ucode_new_rev; 56static u32 ucode_new_rev;
53static u8 amd_ucode_patch[PATCH_MAX_SIZE]; 57static u8 amd_ucode_patch[PATCH_MAX_SIZE];
54static u16 this_equiv_id;
55 58
56/* 59/*
57 * Microcode patch container file is prepended to the initrd in cpio 60 * Microcode patch container file is prepended to the initrd in cpio
@@ -60,57 +63,13 @@ static u16 this_equiv_id;
60static const char 63static const char
61ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; 64ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
62 65
63static size_t compute_container_size(u8 *data, u32 total_size) 66static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
64{ 67{
65 size_t size = 0; 68 for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
66 u32 *header = (u32 *)data; 69 if (sig == equiv_table->installed_cpu)
67 70 return equiv_table->equiv_cpu;
68 if (header[0] != UCODE_MAGIC ||
69 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
70 header[2] == 0) /* size */
71 return size;
72
73 size = header[2] + CONTAINER_HDR_SZ;
74 total_size -= size;
75 data += size;
76
77 while (total_size) {
78 u16 patch_size;
79
80 header = (u32 *)data;
81
82 if (header[0] != UCODE_UCODE_TYPE)
83 break;
84
85 /*
86 * Sanity-check patch size.
87 */
88 patch_size = header[1];
89 if (patch_size > PATCH_MAX_SIZE)
90 break;
91
92 size += patch_size + SECTION_HDR_SIZE;
93 data += patch_size + SECTION_HDR_SIZE;
94 total_size -= patch_size + SECTION_HDR_SIZE;
95 } 71 }
96 72
97 return size;
98}
99
100static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
101 unsigned int sig)
102{
103 int i = 0;
104
105 if (!equiv_cpu_table)
106 return 0;
107
108 while (equiv_cpu_table[i].installed_cpu != 0) {
109 if (sig == equiv_cpu_table[i].installed_cpu)
110 return equiv_cpu_table[i].equiv_cpu;
111
112 i++;
113 }
114 return 0; 73 return 0;
115} 74}
116 75
@@ -118,91 +77,109 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
118 * This scans the ucode blob for the proper container as we can have multiple 77 * This scans the ucode blob for the proper container as we can have multiple
119 * containers glued together. Returns the equivalence ID from the equivalence 78 * containers glued together. Returns the equivalence ID from the equivalence
120 * table or 0 if none found. 79 * table or 0 if none found.
80 * Returns the amount of bytes consumed while scanning. @desc contains all the
81 * data we're going to use in later stages of the application.
121 */ 82 */
122static u16 83static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
123find_proper_container(u8 *ucode, size_t size, struct container *ret_cont)
124{ 84{
125 struct container ret = { NULL, 0 };
126 u32 eax, ebx, ecx, edx;
127 struct equiv_cpu_entry *eq; 85 struct equiv_cpu_entry *eq;
128 int offset, left; 86 ssize_t orig_size = size;
129 u16 eq_id = 0; 87 u32 *hdr = (u32 *)ucode;
130 u32 *header; 88 u16 eq_id;
131 u8 *data; 89 u8 *buf;
132 90
133 data = ucode; 91 /* Am I looking at an equivalence table header? */
134 left = size; 92 if (hdr[0] != UCODE_MAGIC ||
135 header = (u32 *)data; 93 hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
94 hdr[2] == 0)
95 return CONTAINER_HDR_SZ;
136 96
97 buf = ucode;
137 98
138 /* find equiv cpu table */ 99 eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
139 if (header[0] != UCODE_MAGIC ||
140 header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
141 header[2] == 0) /* size */
142 return eq_id;
143 100
144 eax = 0x00000001; 101 /* Find the equivalence ID of our CPU in this table: */
145 ecx = 0; 102 eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
146 native_cpuid(&eax, &ebx, &ecx, &edx);
147 103
148 while (left > 0) { 104 buf += hdr[2] + CONTAINER_HDR_SZ;
149 eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ); 105 size -= hdr[2] + CONTAINER_HDR_SZ;
106
107 /*
108 * Scan through the rest of the container to find where it ends. We do
109 * some basic sanity-checking too.
110 */
111 while (size > 0) {
112 struct microcode_amd *mc;
113 u32 patch_size;
150 114
151 ret.data = data; 115 hdr = (u32 *)buf;
152 116
153 /* Advance past the container header */ 117 if (hdr[0] != UCODE_UCODE_TYPE)
154 offset = header[2] + CONTAINER_HDR_SZ; 118 break;
155 data += offset;
156 left -= offset;
157 119
158 eq_id = find_equiv_id(eq, eax); 120 /* Sanity-check patch size. */
159 if (eq_id) { 121 patch_size = hdr[1];
160 ret.size = compute_container_size(ret.data, left + offset); 122 if (patch_size > PATCH_MAX_SIZE)
123 break;
161 124
162 /* 125 /* Skip patch section header: */
163 * truncate how much we need to iterate over in the 126 buf += SECTION_HDR_SIZE;
164 * ucode update loop below 127 size -= SECTION_HDR_SIZE;
165 */
166 left = ret.size - offset;
167 128
168 *ret_cont = ret; 129 mc = (struct microcode_amd *)buf;
169 return eq_id; 130 if (eq_id == mc->hdr.processor_rev_id) {
131 desc->psize = patch_size;
132 desc->mc = mc;
170 } 133 }
171 134
172 /* 135 buf += patch_size;
173 * support multiple container files appended together. if this 136 size -= patch_size;
174 * one does not have a matching equivalent cpu entry, we fast 137 }
175 * forward to the next container file.
176 */
177 while (left > 0) {
178 header = (u32 *)data;
179
180 if (header[0] == UCODE_MAGIC &&
181 header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
182 break;
183
184 offset = header[1] + SECTION_HDR_SIZE;
185 data += offset;
186 left -= offset;
187 }
188 138
189 /* mark where the next microcode container file starts */ 139 /*
190 offset = data - (u8 *)ucode; 140 * If we have found a patch (desc->mc), it means we're looking at the
191 ucode = data; 141 * container which has a patch for this CPU so return 0 to mean, @ucode
142 * already points to the proper container. Otherwise, we return the size
143 * we scanned so that we can advance to the next container in the
144 * buffer.
145 */
146 if (desc->mc) {
147 desc->data = ucode;
148 desc->size = orig_size - size;
149
150 return 0;
192 } 151 }
193 152
194 return eq_id; 153 return orig_size - size;
195} 154}
196 155
197static int __apply_microcode_amd(struct microcode_amd *mc_amd) 156/*
157 * Scan the ucode blob for the proper container as we can have multiple
158 * containers glued together.
159 */
160static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
161{
162 ssize_t rem = size;
163
164 while (rem >= 0) {
165 ssize_t s = parse_container(ucode, rem, desc);
166 if (!s)
167 return;
168
169 ucode += s;
170 rem -= s;
171 }
172}
173
174static int __apply_microcode_amd(struct microcode_amd *mc)
198{ 175{
199 u32 rev, dummy; 176 u32 rev, dummy;
200 177
201 native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); 178 native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
202 179
203 /* verify patch application was successful */ 180 /* verify patch application was successful */
204 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); 181 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
205 if (rev != mc_amd->hdr.patch_id) 182 if (rev != mc->hdr.patch_id)
206 return -1; 183 return -1;
207 184
208 return 0; 185 return 0;
@@ -217,17 +194,16 @@ static int __apply_microcode_amd(struct microcode_amd *mc_amd)
217 * load_microcode_amd() to save equivalent cpu table and microcode patches in 194 * load_microcode_amd() to save equivalent cpu table and microcode patches in
218 * kernel heap memory. 195 * kernel heap memory.
219 * 196 *
220 * Returns true if container found (sets @ret_cont), false otherwise. 197 * Returns true if container found (sets @desc), false otherwise.
221 */ 198 */
222static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch, 199static bool
223 struct container *ret_cont) 200apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
224{ 201{
202 struct cont_desc desc = { 0 };
225 u8 (*patch)[PATCH_MAX_SIZE]; 203 u8 (*patch)[PATCH_MAX_SIZE];
226 u32 rev, *header, *new_rev; 204 struct microcode_amd *mc;
227 struct container ret; 205 u32 rev, dummy, *new_rev;
228 int offset, left; 206 bool ret = false;
229 u16 eq_id = 0;
230 u8 *data;
231 207
232#ifdef CONFIG_X86_32 208#ifdef CONFIG_X86_32
233 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); 209 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
@@ -237,50 +213,27 @@ static bool apply_microcode_early_amd(void *ucode, size_t size, bool save_patch,
237 patch = &amd_ucode_patch; 213 patch = &amd_ucode_patch;
238#endif 214#endif
239 215
240 if (check_current_patch_level(&rev, true)) 216 desc.cpuid_1_eax = cpuid_1_eax;
241 return false;
242
243 eq_id = find_proper_container(ucode, size, &ret);
244 if (!eq_id)
245 return false;
246
247 this_equiv_id = eq_id;
248 header = (u32 *)ret.data;
249
250 /* We're pointing to an equiv table, skip over it. */
251 data = ret.data + header[2] + CONTAINER_HDR_SZ;
252 left = ret.size - (header[2] + CONTAINER_HDR_SZ);
253
254 while (left > 0) {
255 struct microcode_amd *mc;
256
257 header = (u32 *)data;
258 if (header[0] != UCODE_UCODE_TYPE || /* type */
259 header[1] == 0) /* size */
260 break;
261 217
262 mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE); 218 scan_containers(ucode, size, &desc);
263 219
264 if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) { 220 mc = desc.mc;
221 if (!mc)
222 return ret;
265 223
266 if (!__apply_microcode_amd(mc)) { 224 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
267 rev = mc->hdr.patch_id; 225 if (rev >= mc->hdr.patch_id)
268 *new_rev = rev; 226 return ret;
269 227
270 if (save_patch) 228 if (!__apply_microcode_amd(mc)) {
271 memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE)); 229 *new_rev = mc->hdr.patch_id;
272 } 230 ret = true;
273 }
274 231
275 offset = header[1] + SECTION_HDR_SIZE; 232 if (save_patch)
276 data += offset; 233 memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
277 left -= offset;
278 } 234 }
279 235
280 if (ret_cont) 236 return ret;
281 *ret_cont = ret;
282
283 return true;
284} 237}
285 238
286static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) 239static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
@@ -298,10 +251,9 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
298#endif 251#endif
299} 252}
300 253
301void __init load_ucode_amd_bsp(unsigned int family) 254void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
302{ 255{
303 struct ucode_cpu_info *uci; 256 struct ucode_cpu_info *uci;
304 u32 eax, ebx, ecx, edx;
305 struct cpio_data cp; 257 struct cpio_data cp;
306 const char *path; 258 const char *path;
307 bool use_pa; 259 bool use_pa;
@@ -316,184 +268,95 @@ void __init load_ucode_amd_bsp(unsigned int family)
316 use_pa = false; 268 use_pa = false;
317 } 269 }
318 270
319 if (!get_builtin_microcode(&cp, family)) 271 if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
320 cp = find_microcode_in_initrd(path, use_pa); 272 cp = find_microcode_in_initrd(path, use_pa);
321 273
322 if (!(cp.data && cp.size)) 274 /* Needed in load_microcode_amd() */
323 return; 275 uci->cpu_sig.sig = cpuid_1_eax;
324
325 /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
326 eax = 1;
327 ecx = 0;
328 native_cpuid(&eax, &ebx, &ecx, &edx);
329 uci->cpu_sig.sig = eax;
330 276
331 apply_microcode_early_amd(cp.data, cp.size, true, NULL); 277 *ret = cp;
332} 278}
333 279
334#ifdef CONFIG_X86_32 280void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
335/*
336 * On 32-bit, since AP's early load occurs before paging is turned on, we
337 * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
338 * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
339 * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
340 * which is used upon resume from suspend.
341 */
342void load_ucode_amd_ap(unsigned int family)
343{ 281{
344 struct microcode_amd *mc; 282 struct cpio_data cp = { };
345 struct cpio_data cp;
346
347 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
348 if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
349 __apply_microcode_amd(mc);
350 return;
351 }
352
353 if (!get_builtin_microcode(&cp, family))
354 cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
355 283
284 __load_ucode_amd(cpuid_1_eax, &cp);
356 if (!(cp.data && cp.size)) 285 if (!(cp.data && cp.size))
357 return; 286 return;
358 287
359 /* 288 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
360 * This would set amd_ucode_patch above so that the following APs can
361 * use it directly instead of going down this path again.
362 */
363 apply_microcode_early_amd(cp.data, cp.size, true, NULL);
364} 289}
365#else 290
366void load_ucode_amd_ap(unsigned int family) 291void load_ucode_amd_ap(unsigned int cpuid_1_eax)
367{ 292{
368 struct equiv_cpu_entry *eq;
369 struct microcode_amd *mc; 293 struct microcode_amd *mc;
370 u32 rev, eax; 294 struct cpio_data cp;
371 u16 eq_id; 295 u32 *new_rev, rev, dummy;
372
373 /* 64-bit runs with paging enabled, thus early==false. */
374 if (check_current_patch_level(&rev, false))
375 return;
376
377 /* First AP hasn't cached it yet, go through the blob. */
378 if (!cont.data) {
379 struct cpio_data cp = { NULL, 0, "" };
380 296
381 if (cont.size == -1) 297 if (IS_ENABLED(CONFIG_X86_32)) {
382 return; 298 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
299 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
300 } else {
301 mc = (struct microcode_amd *)amd_ucode_patch;
302 new_rev = &ucode_new_rev;
303 }
383 304
384reget: 305 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
385 if (!get_builtin_microcode(&cp, family)) {
386#ifdef CONFIG_BLK_DEV_INITRD
387 if (!initrd_gone)
388 cp = find_cpio_data(ucode_path, (void *)initrd_start,
389 initrd_end - initrd_start, NULL);
390#endif
391 if (!(cp.data && cp.size)) {
392 /*
393 * Mark it so that other APs do not scan again
394 * for no real reason and slow down boot
395 * needlessly.
396 */
397 cont.size = -1;
398 return;
399 }
400 }
401 306
402 if (!apply_microcode_early_amd(cp.data, cp.size, false, &cont)) { 307 /* Check whether we have saved a new patch already: */
403 cont.size = -1; 308 if (*new_rev && rev < mc->hdr.patch_id) {
309 if (!__apply_microcode_amd(mc)) {
310 *new_rev = mc->hdr.patch_id;
404 return; 311 return;
405 } 312 }
406 } 313 }
407 314
408 eax = cpuid_eax(0x00000001); 315 __load_ucode_amd(cpuid_1_eax, &cp);
409 eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ); 316 if (!(cp.data && cp.size))
410
411 eq_id = find_equiv_id(eq, eax);
412 if (!eq_id)
413 return; 317 return;
414 318
415 if (eq_id == this_equiv_id) { 319 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
416 mc = (struct microcode_amd *)amd_ucode_patch;
417
418 if (mc && rev < mc->hdr.patch_id) {
419 if (!__apply_microcode_amd(mc))
420 ucode_new_rev = mc->hdr.patch_id;
421 }
422
423 } else {
424
425 /*
426 * AP has a different equivalence ID than BSP, looks like
427 * mixed-steppings silicon so go through the ucode blob anew.
428 */
429 goto reget;
430 }
431} 320}
432#endif /* CONFIG_X86_32 */
433 321
434static enum ucode_state 322static enum ucode_state
435load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); 323load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
436 324
437int __init save_microcode_in_initrd_amd(unsigned int fam) 325int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
438{ 326{
327 struct cont_desc desc = { 0 };
439 enum ucode_state ret; 328 enum ucode_state ret;
440 int retval = 0; 329 struct cpio_data cp;
441 u16 eq_id;
442
443 if (!cont.data) {
444 if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
445 struct cpio_data cp = { NULL, 0, "" };
446
447#ifdef CONFIG_BLK_DEV_INITRD
448 cp = find_cpio_data(ucode_path, (void *)initrd_start,
449 initrd_end - initrd_start, NULL);
450#endif
451 330
452 if (!(cp.data && cp.size)) { 331 cp = find_microcode_in_initrd(ucode_path, false);
453 cont.size = -1; 332 if (!(cp.data && cp.size))
454 return -EINVAL; 333 return -EINVAL;
455 }
456 334
457 eq_id = find_proper_container(cp.data, cp.size, &cont); 335 desc.cpuid_1_eax = cpuid_1_eax;
458 if (!eq_id) {
459 cont.size = -1;
460 return -EINVAL;
461 }
462 336
463 } else 337 scan_containers(cp.data, cp.size, &desc);
464 return -EINVAL; 338 if (!desc.mc)
465 } 339 return -EINVAL;
466 340
467 ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size); 341 ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
342 desc.data, desc.size);
468 if (ret != UCODE_OK) 343 if (ret != UCODE_OK)
469 retval = -EINVAL; 344 return -EINVAL;
470
471 /*
472 * This will be freed any msec now, stash patches for the current
473 * family and switch to patch cache for cpu hotplug, etc later.
474 */
475 cont.data = NULL;
476 cont.size = 0;
477 345
478 return retval; 346 return 0;
479} 347}
480 348
481void reload_ucode_amd(void) 349void reload_ucode_amd(void)
482{ 350{
483 struct microcode_amd *mc; 351 struct microcode_amd *mc;
484 u32 rev; 352 u32 rev, dummy;
485
486 /*
487 * early==false because this is a syscore ->resume path and by
488 * that time paging is long enabled.
489 */
490 if (check_current_patch_level(&rev, false))
491 return;
492 353
493 mc = (struct microcode_amd *)amd_ucode_patch; 354 mc = (struct microcode_amd *)amd_ucode_patch;
494 if (!mc) 355 if (!mc)
495 return; 356 return;
496 357
358 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
359
497 if (rev < mc->hdr.patch_id) { 360 if (rev < mc->hdr.patch_id) {
498 if (!__apply_microcode_amd(mc)) { 361 if (!__apply_microcode_amd(mc)) {
499 ucode_new_rev = mc->hdr.patch_id; 362 ucode_new_rev = mc->hdr.patch_id;
@@ -631,60 +494,13 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
631 return patch_size; 494 return patch_size;
632} 495}
633 496
634/*
635 * Those patch levels cannot be updated to newer ones and thus should be final.
636 */
637static u32 final_levels[] = {
638 0x01000098,
639 0x0100009f,
640 0x010000af,
641 0, /* T-101 terminator */
642};
643
644/*
645 * Check the current patch level on this CPU.
646 *
647 * @rev: Use it to return the patch level. It is set to 0 in the case of
648 * error.
649 *
650 * Returns:
651 * - true: if update should stop
652 * - false: otherwise
653 */
654bool check_current_patch_level(u32 *rev, bool early)
655{
656 u32 lvl, dummy, i;
657 bool ret = false;
658 u32 *levels;
659
660 native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
661
662 if (IS_ENABLED(CONFIG_X86_32) && early)
663 levels = (u32 *)__pa_nodebug(&final_levels);
664 else
665 levels = final_levels;
666
667 for (i = 0; levels[i]; i++) {
668 if (lvl == levels[i]) {
669 lvl = 0;
670 ret = true;
671 break;
672 }
673 }
674
675 if (rev)
676 *rev = lvl;
677
678 return ret;
679}
680
681static int apply_microcode_amd(int cpu) 497static int apply_microcode_amd(int cpu)
682{ 498{
683 struct cpuinfo_x86 *c = &cpu_data(cpu); 499 struct cpuinfo_x86 *c = &cpu_data(cpu);
684 struct microcode_amd *mc_amd; 500 struct microcode_amd *mc_amd;
685 struct ucode_cpu_info *uci; 501 struct ucode_cpu_info *uci;
686 struct ucode_patch *p; 502 struct ucode_patch *p;
687 u32 rev; 503 u32 rev, dummy;
688 504
689 BUG_ON(raw_smp_processor_id() != cpu); 505 BUG_ON(raw_smp_processor_id() != cpu);
690 506
@@ -697,8 +513,7 @@ static int apply_microcode_amd(int cpu)
697 mc_amd = p->data; 513 mc_amd = p->data;
698 uci->mc = p->data; 514 uci->mc = p->data;
699 515
700 if (check_current_patch_level(&rev, false)) 516 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
701 return -1;
702 517
703 /* need to apply patch? */ 518 /* need to apply patch? */
704 if (rev >= mc_amd->hdr.patch_id) { 519 if (rev >= mc_amd->hdr.patch_id) {
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 73102d932760..b4a4cd39b358 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -66,19 +66,50 @@ static DEFINE_MUTEX(microcode_mutex);
66 66
67struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; 67struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
68 68
69/*
70 * Operations that are run on a target cpu:
71 */
72
73struct cpu_info_ctx { 69struct cpu_info_ctx {
74 struct cpu_signature *cpu_sig; 70 struct cpu_signature *cpu_sig;
75 int err; 71 int err;
76}; 72};
77 73
74/*
75 * Those patch levels cannot be updated to newer ones and thus should be final.
76 */
77static u32 final_levels[] = {
78 0x01000098,
79 0x0100009f,
80 0x010000af,
81 0, /* T-101 terminator */
82};
83
84/*
85 * Check the current patch level on this CPU.
86 *
87 * Returns:
88 * - true: if update should stop
89 * - false: otherwise
90 */
91static bool amd_check_current_patch_level(void)
92{
93 u32 lvl, dummy, i;
94 u32 *levels;
95
96 native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
97
98 if (IS_ENABLED(CONFIG_X86_32))
99 levels = (u32 *)__pa_nodebug(&final_levels);
100 else
101 levels = final_levels;
102
103 for (i = 0; levels[i]; i++) {
104 if (lvl == levels[i])
105 return true;
106 }
107 return false;
108}
109
78static bool __init check_loader_disabled_bsp(void) 110static bool __init check_loader_disabled_bsp(void)
79{ 111{
80 static const char *__dis_opt_str = "dis_ucode_ldr"; 112 static const char *__dis_opt_str = "dis_ucode_ldr";
81 u32 a, b, c, d;
82 113
83#ifdef CONFIG_X86_32 114#ifdef CONFIG_X86_32
84 const char *cmdline = (const char *)__pa_nodebug(boot_command_line); 115 const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -94,18 +125,19 @@ static bool __init check_loader_disabled_bsp(void)
94 if (!have_cpuid_p()) 125 if (!have_cpuid_p())
95 return *res; 126 return *res;
96 127
97 a = 1;
98 c = 0;
99 native_cpuid(&a, &b, &c, &d);
100
101 /* 128 /*
102 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not 129 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
103 * completely accurate as xen pv guests don't see that CPUID bit set but 130 * completely accurate as xen pv guests don't see that CPUID bit set but
104 * that's good enough as they don't land on the BSP path anyway. 131 * that's good enough as they don't land on the BSP path anyway.
105 */ 132 */
106 if (c & BIT(31)) 133 if (native_cpuid_ecx(1) & BIT(31))
107 return *res; 134 return *res;
108 135
136 if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
137 if (amd_check_current_patch_level())
138 return *res;
139 }
140
109 if (cmdline_find_option_bool(cmdline, option) <= 0) 141 if (cmdline_find_option_bool(cmdline, option) <= 0)
110 *res = false; 142 *res = false;
111 143
@@ -133,23 +165,21 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
133 165
134void __init load_ucode_bsp(void) 166void __init load_ucode_bsp(void)
135{ 167{
136 int vendor; 168 unsigned int cpuid_1_eax;
137 unsigned int family;
138 169
139 if (check_loader_disabled_bsp()) 170 if (check_loader_disabled_bsp())
140 return; 171 return;
141 172
142 vendor = x86_cpuid_vendor(); 173 cpuid_1_eax = native_cpuid_eax(1);
143 family = x86_cpuid_family();
144 174
145 switch (vendor) { 175 switch (x86_cpuid_vendor()) {
146 case X86_VENDOR_INTEL: 176 case X86_VENDOR_INTEL:
147 if (family >= 6) 177 if (x86_family(cpuid_1_eax) >= 6)
148 load_ucode_intel_bsp(); 178 load_ucode_intel_bsp();
149 break; 179 break;
150 case X86_VENDOR_AMD: 180 case X86_VENDOR_AMD:
151 if (family >= 0x10) 181 if (x86_family(cpuid_1_eax) >= 0x10)
152 load_ucode_amd_bsp(family); 182 load_ucode_amd_bsp(cpuid_1_eax);
153 break; 183 break;
154 default: 184 default:
155 break; 185 break;
@@ -167,22 +197,21 @@ static bool check_loader_disabled_ap(void)
167 197
168void load_ucode_ap(void) 198void load_ucode_ap(void)
169{ 199{
170 int vendor, family; 200 unsigned int cpuid_1_eax;
171 201
172 if (check_loader_disabled_ap()) 202 if (check_loader_disabled_ap())
173 return; 203 return;
174 204
175 vendor = x86_cpuid_vendor(); 205 cpuid_1_eax = native_cpuid_eax(1);
176 family = x86_cpuid_family();
177 206
178 switch (vendor) { 207 switch (x86_cpuid_vendor()) {
179 case X86_VENDOR_INTEL: 208 case X86_VENDOR_INTEL:
180 if (family >= 6) 209 if (x86_family(cpuid_1_eax) >= 6)
181 load_ucode_intel_ap(); 210 load_ucode_intel_ap();
182 break; 211 break;
183 case X86_VENDOR_AMD: 212 case X86_VENDOR_AMD:
184 if (family >= 0x10) 213 if (x86_family(cpuid_1_eax) >= 0x10)
185 load_ucode_amd_ap(family); 214 load_ucode_amd_ap(cpuid_1_eax);
186 break; 215 break;
187 default: 216 default:
188 break; 217 break;
@@ -201,7 +230,7 @@ static int __init save_microcode_in_initrd(void)
201 break; 230 break;
202 case X86_VENDOR_AMD: 231 case X86_VENDOR_AMD:
203 if (c->x86 >= 0x10) 232 if (c->x86 >= 0x10)
204 ret = save_microcode_in_initrd_amd(c->x86); 233 return save_microcode_in_initrd_amd(cpuid_eax(1));
205 break; 234 break;
206 default: 235 default:
207 break; 236 break;