aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/cpu/microcode/intel.c
diff options
context:
space:
mode:
authorBorislav Petkov <bp@suse.de>2017-06-14 10:06:26 -0400
committerIngo Molnar <mingo@kernel.org>2017-06-20 06:54:25 -0400
commitbd20733045d5db55515442d828376a3a71d6be48 (patch)
tree5a87a5b4ce370035f9656b52f922f367961b3b62 /arch/x86/kernel/cpu/microcode/intel.c
parenta3d98c9358e3be28282ff60dcca01dbc9f402f30 (diff)
x86/microcode/intel: Save pointer to ucode patch for early AP loading
Normally, when the initrd is gone, we can't search it for microcode blobs to apply anymore. For that we need to stash away the patch in our own storage. And save_microcode_in_initrd_intel() looks like the proper place to do that from. So in order for early loading to work, invalidate the intel_ucode_patch pointer to the patch *before* scanning the initrd one last time. If the scanning code finds a microcode patch, it will assign that pointer again, this time with our own storage's address. This way, early microcode application during resume-from-RAM works too, even after the initrd is long gone. Tested-by: Dominik Brodowski <linux@dominikbrodowski.net> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170614140626.4462-2-bp@alien8.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/cpu/microcode/intel.c')
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c25
1 file changed, 21 insertions, 4 deletions
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f522415bf9e5..d525a0bd7d28 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -166,7 +166,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
166static void save_microcode_patch(void *data, unsigned int size) 166static void save_microcode_patch(void *data, unsigned int size)
167{ 167{
168 struct microcode_header_intel *mc_hdr, *mc_saved_hdr; 168 struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
169 struct ucode_patch *iter, *tmp, *p; 169 struct ucode_patch *iter, *tmp, *p = NULL;
170 bool prev_found = false; 170 bool prev_found = false;
171 unsigned int sig, pf; 171 unsigned int sig, pf;
172 172
@@ -202,6 +202,18 @@ static void save_microcode_patch(void *data, unsigned int size)
202 else 202 else
203 list_add_tail(&p->plist, &microcode_cache); 203 list_add_tail(&p->plist, &microcode_cache);
204 } 204 }
205
206 /*
207 * Save for early loading. On 32-bit, that needs to be a physical
208 * address as the APs are running from physical addresses, before
209 * paging has been enabled.
210 */
211 if (p) {
212 if (IS_ENABLED(CONFIG_X86_32))
213 intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
214 else
215 intel_ucode_patch = p->data;
216 }
205} 217}
206 218
207static int microcode_sanity_check(void *mc, int print_err) 219static int microcode_sanity_check(void *mc, int print_err)
@@ -607,6 +619,14 @@ int __init save_microcode_in_initrd_intel(void)
607 struct ucode_cpu_info uci; 619 struct ucode_cpu_info uci;
608 struct cpio_data cp; 620 struct cpio_data cp;
609 621
622 /*
623 * initrd is going away, clear patch ptr. We will scan the microcode one
624 * last time before jettisoning and save a patch, if found. Then we will
625 * update that pointer too, with a stable patch address to use when
626 * resuming the cores.
627 */
628 intel_ucode_patch = NULL;
629
610 if (!load_builtin_intel_microcode(&cp)) 630 if (!load_builtin_intel_microcode(&cp))
611 cp = find_microcode_in_initrd(ucode_path, false); 631 cp = find_microcode_in_initrd(ucode_path, false);
612 632
@@ -619,9 +639,6 @@ int __init save_microcode_in_initrd_intel(void)
619 639
620 show_saved_mc(); 640 show_saved_mc();
621 641
622 /* initrd is going away, clear patch ptr. */
623 intel_ucode_patch = NULL;
624
625 return 0; 642 return 0;
626} 643}
627 644