author	Borislav Petkov <bp@suse.de>	2017-01-20 15:29:52 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2017-01-23 04:02:50 -0500
commit	e71bb4ec073901ad50bfa86fed74fce7ac3210fe (patch)
tree	bf20740e523caec9453880749bb726a9dd029f80 /arch/x86/kernel/cpu/microcode/amd.c
parent	f3ad136d6ef966c8ba9090770c2bfe7e85f18471 (diff)
x86/microcode/AMD: Unify load_ucode_amd_ap()
Use a single version for both 32-bit and 64-bit by adding a helper which
does the actual container finding and parsing, and which can be used on
any CPU - BSP or AP. This streamlines the paths further.
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-14-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
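To make the shape of the change easier to see, here is a minimal,
self-contained sketch of the pattern this patch introduces: one common
helper performs the container lookup, and both the BSP and AP entry
points become thin callers of it. The struct and function names below
deliberately mirror the diff, but the bodies are simplified stand-ins,
not the kernel implementation - the real AP path additionally caches the
parsed container in a cont_desc and handles the 32-bit pre-paging case
via __pa_nodebug().

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct cpio_data. */
struct cpio_data {
	void	*data;
	size_t	size;
};

/*
 * Common helper: locate the microcode container (builtin blob or initrd
 * in the real kernel; faked with a static buffer here) and hand it back
 * to the caller, whether that caller is the BSP or an AP.
 */
static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	static char blob[16];	/* pretend microcode container */
	struct cpio_data cp = { blob, sizeof(blob) };

	(void)cpuid_1_eax;	/* the real code selects a per-family path */

	*ret = cp;
}

/* BSP entry point: fetch the container and apply it. */
static void load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { 0 };

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	printf("BSP: applying %zu-byte container\n", cp.size);
}

/* AP entry point: the same lookup, through the same helper. */
static void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { 0 };

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	printf("AP: applying %zu-byte container\n", cp.size);
}

int main(void)
{
	unsigned int cpuid_1_eax = 0x00800f12;	/* example CPUID(1).EAX value */

	load_ucode_amd_bsp(cpuid_1_eax);
	load_ucode_amd_ap(cpuid_1_eax);
	return 0;
}

Note that the validity check on the returned cpio_data stays in the
callers rather than in the helper: in the actual patch the AP path needs
to react to an empty result by marking the container descriptor
(desc->size = -1) so later APs skip the scan, while the BSP path simply
returns.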
Diffstat (limited to 'arch/x86/kernel/cpu/microcode/amd.c')
-rw-r--r--	arch/x86/kernel/cpu/microcode/amd.c	81
1 file changed, 31 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 61743476c25b..fe9e865480af 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -261,7 +261,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
 	struct ucode_cpu_info *uci;
 	struct cpio_data cp;
@@ -281,89 +281,71 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
 		cp = find_microcode_in_initrd(path, use_pa);
 
-	if (!(cp.data && cp.size))
-		return;
-
 	/* Needed in load_microcode_amd() */
 	uci->cpu_sig.sig = cpuid_1_eax;
 
-	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+	*ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-	struct microcode_amd *mc;
-	struct cpio_data cp;
+	struct cpio_data cp = { };
 
-	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-		__apply_microcode_amd(mc);
-		return;
-	}
-
-	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
-		cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+	__load_ucode_amd(cpuid_1_eax, &cp);
 
 	if (!(cp.data && cp.size))
 		return;
 
-	/*
-	 * This would set amd_ucode_patch above so that the following APs can
-	 * use it directly instead of going down this path again.
-	 */
 	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
 }
-#else
+
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
 	struct equiv_cpu_entry *eq;
 	struct microcode_amd *mc;
+	struct cont_desc *desc;
 	u16 eq_id;
 
+	if (IS_ENABLED(CONFIG_X86_32)) {
+		mc   = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+		desc = (struct cont_desc *)__pa_nodebug(&cont);
+	} else {
+		mc   = (struct microcode_amd *)amd_ucode_patch;
+		desc = &cont;
+	}
+
 	/* First AP hasn't cached it yet, go through the blob. */
-	if (!cont.data) {
-		struct cpio_data cp;
+	if (!desc->data) {
+		struct cpio_data cp = { };
 
-		if (cont.size == -1)
+		if (desc->size == -1)
 			return;
 
 reget:
-		if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
-			cp = find_microcode_in_initrd(ucode_path, false);
-
-			if (!(cp.data && cp.size)) {
-				/*
-				 * Mark it so that other APs do not scan again
-				 * for no real reason and slow down boot
-				 * needlessly.
-				 */
-				cont.size = -1;
-				return;
-			}
+		__load_ucode_amd(cpuid_1_eax, &cp);
+		if (!(cp.data && cp.size)) {
+			/*
+			 * Mark it so that other APs do not scan again for no
+			 * real reason and slow down boot needlessly.
+			 */
+			desc->size = -1;
+			return;
 		}
 
-		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
-			cont.data = NULL;
-			cont.size = -1;
+		if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
+			desc->data = NULL;
+			desc->size = -1;
 			return;
 		}
 	}
 
-	eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
+	eq = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
 
 	eq_id = find_equiv_id(eq, cpuid_1_eax);
 	if (!eq_id)
 		return;
 
-	if (eq_id == cont.eq_id) {
+	if (eq_id == desc->eq_id) {
 		u32 rev, dummy;
 
 		native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -384,7 +366,6 @@ reget:
 		goto reget;
 	}
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);