Diffstat (limited to 'arch/x86/kernel/cpu/microcode')
 arch/x86/kernel/cpu/microcode/amd.c   |  34
 arch/x86/kernel/cpu/microcode/core.c  | 178
 arch/x86/kernel/cpu/microcode/intel.c |  52
 3 files changed, 199 insertions(+), 65 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a998e1a7d46f..48179928ff38 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 		return -EINVAL;
 
 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret != UCODE_OK)
+	if (ret > UCODE_UPDATED)
 		return -EINVAL;
 
 	return 0;
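
The "ret > UCODE_UPDATED" range check only makes sense given the ordering of
the ucode_state values. For reference, the enum as this series leaves it in
arch/x86/include/asm/microcode.h reads roughly as follows (a sketch quoted
for context, not part of the diff):

	enum ucode_state {
		UCODE_OK	= 0,
		UCODE_NEW,
		UCODE_UPDATED,
		UCODE_NFOUND,
		UCODE_ERROR,
	};

So "ret > UCODE_UPDATED" selects exactly the failure states, UCODE_NFOUND and
UCODE_ERROR, while "already current", "new patch cached" and "just updated"
all pass.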
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
+	struct ucode_patch *p;
 	enum ucode_state ret;
 
 	/* free old equiv table */
 	free_equiv_cpu_table();
 
 	ret = __load_microcode_amd(family, data, size);
-
-	if (ret != UCODE_OK)
+	if (ret != UCODE_OK) {
 		cleanup();
+		return ret;
+	}
 
-#ifdef CONFIG_X86_32
-	/* save BSP's matching patch for early load */
-	if (save) {
-		struct ucode_patch *p = find_patch(0);
-		if (p) {
-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-							       PATCH_MAX_SIZE));
-		}
+	p = find_patch(0);
+	if (!p) {
+		return ret;
+	} else {
+		if (boot_cpu_data.microcode == p->patch_id)
+			return ret;
+
+		ret = UCODE_NEW;
 	}
-#endif
+
+	/* save BSP's matching patch for early load */
+	if (!save)
+		return ret;
+
+	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
 	return ret;
 }
 
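Condensed, the rewritten load_microcode_amd() now distinguishes "nothing to
do" from "a genuinely new patch is cached". A summary of the new decision
ladder (an overview of the hunk above, not additional kernel code):

	/*
	 *  __load_microcode_amd() failed   -> cleanup(), return the error
	 *  no patch cached for the BSP     -> return UCODE_OK (nothing to do)
	 *  cached patch_id == running rev  -> return UCODE_OK (nothing newer)
	 *  otherwise                       -> ret = UCODE_NEW; if @save, also
	 *                                     stash the patch in amd_ucode_patch
	 *                                     for the early-load path
	 */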
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index aa1b9a422f2b..10c4fc2c91f8 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -22,13 +22,16 @@
 #define pr_fmt(fmt) "microcode: " fmt
 
 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);
 
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 
 struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
 	return ret;
 }
 
-struct apply_microcode_ctx {
-	enum ucode_state err;
-};
-
 static void apply_microcode_local(void *arg)
 {
-	struct apply_microcode_ctx *ctx = arg;
+	enum ucode_state *err = arg;
 
-	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+	*err = microcode_ops->apply_microcode(smp_processor_id());
 }
 
 static int apply_microcode_on_target(int cpu)
 {
-	struct apply_microcode_ctx ctx = { .err = 0 };
+	enum ucode_state err;
 	int ret;
 
-	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-	if (!ret)
-		ret = ctx.err;
-
+	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+	if (!ret) {
+		if (err == UCODE_ERROR)
+			ret = 1;
+	}
 	return ret;
 }
 
@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device *microcode_pdev;
 
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	enum ucode_state ustate;
+	if (num_online_cpus() == num_present_cpus())
+		return 0;
 
-	if (!uci->valid)
-		return UCODE_OK;
+	pr_err("Not all CPUs online, aborting microcode update.\n");
+
+	return -EINVAL;
+}
+
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-	if (ustate != UCODE_OK)
-		return ustate;
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				all_cpus - atomic_read(t));
+			return 1;
+		}
 
-	return apply_microcode_on_target(cpu);
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 */
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+		ret = 1;
+	} else {
+		return ret;
+	}
+
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus_in, 0);
+	atomic_set(&late_cpus_out, 0);
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret > 0)
+		microcode_check();
+
+	return ret;
 }
 
 static ssize_t reload_store(struct device *dev,
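
Two details in the hunk above are worth spelling out. First, update_lock is a
spinlock rather than a mutex because __reload_late() runs in stop_machine()
context with interrupts disabled, where sleeping locks are not allowed.
Second, the rendezvous is a two-barrier protocol: every CPU checks in, the
updates then run one at a time under the lock, and nobody is released until
every CPU has checked out. The standalone userspace sketch below models that
protocol with C11 threads and atomics (NCPUS, the thread ids and the printf
are stand-ins for the kernel machinery; an illustration of the algorithm, not
kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

#define NCPUS 4				/* stand-in for num_online_cpus() */

static atomic_int late_in, late_out;
static mtx_t update_lock;

/* check in, then spin until all participants have arrived */
static void wait_for_all(atomic_int *t)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NCPUS)
		thrd_yield();		/* the kernel ndelay()s and enforces a timeout */
}

static int reload_late(void *arg)
{
	int cpu = *(int *)arg;

	wait_for_all(&late_in);		/* phase 1: everybody arrives */

	mtx_lock(&update_lock);		/* phase 2: update one at a time */
	printf("cpu %d: applying update\n", cpu);
	mtx_unlock(&update_lock);

	wait_for_all(&late_out);	/* phase 3: leave only when all are done */
	return 0;
}

int main(void)
{
	thrd_t tid[NCPUS];
	int id[NCPUS];

	mtx_init(&update_lock, mtx_plain);
	for (int i = 0; i < NCPUS; i++) {
		id[i] = i;
		thrd_create(&tid[i], reload_late, &id[i]);
	}
	for (int i = 0; i < NCPUS; i++)
		thrd_join(tid[i], NULL);
	mtx_destroy(&update_lock);
	return 0;
}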
@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
 			    const char *buf, size_t size)
 {
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	ssize_t ret = 0;
-	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret)
@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
 	if (val != 1)
 		return size;
 
-	get_online_cpus();
-	mutex_lock(&microcode_mutex);
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
+	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+	if (tmp_ret != UCODE_NEW)
+		return size;
 
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
+	get_online_cpus();
 
-	if (!ret && do_callback)
-		microcode_check();
+	ret = check_online_cpus();
+	if (ret)
+		goto put;
 
+	mutex_lock(&microcode_mutex);
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);
+
+put:
 	put_online_cpus();
 
-	if (!ret)
+	if (ret >= 0)
 		ret = size;
 
 	return ret;
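
reload_store() is the handler behind the late-load sysfs knob. With this
series applied, writing 1 to it fetches the firmware image once on the boot
CPU, bails out early unless the image actually contains something newer
(UCODE_NEW), and only then stops the machine to update everybody:

	# echo 1 > /sys/devices/system/cpu/microcode/reload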
@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 	}
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 923054a6b760..32b8e5724f96 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (!mc)
 		return 0;
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
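Both apply paths now read the running revision before the expensive WRMSR.
The check works across hyperthreads because HT siblings share one microcode
engine: once one sibling loads a patch, the other sibling's revision MSR
already reports the new version. For context, the protocol behind
intel_get_microcode_revision() looks roughly like this (a sketch of the
helper in arch/x86/include/asm/microcode_intel.h, quoted from memory; the
MSR/CPUID sequence is the one documented in the SDM):

	static inline u32 intel_get_microcode_revision(void)
	{
		u32 rev, dummy;

		/* zero the revision MSR (MSR_IA32_UCODE_REV, 0x8B) ... */
		native_wrmsrl(MSR_IA32_UCODE_REV, 0);

		/* ... execute CPUID(1), which latches the running revision ... */
		native_cpuid_eax(1);

		/* ... and read it back: the revision lands in EDX */
		native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);

		return rev;
	}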
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 static enum ucode_state apply_microcode_intel(int cpu)
 {
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
-	struct ucode_cpu_info *uci;
-	struct cpuinfo_x86 *c;
 	static int prev_rev;
 	u32 rev;
 
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 		return UCODE_ERROR;
 
-	uci = ucode_cpu_info + cpu;
-	mc = uci->mc;
+	/* Look for a newer patch in our cache: */
+	mc = find_patch(uci);
 	if (!mc) {
-		/* Look for a newer patch in our cache: */
-		mc = find_patch(uci);
+		mc = uci->mc;
 		if (!mc)
 			return UCODE_NFOUND;
 	}
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		c->microcode = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 	}
 
-	c = &cpu_data(cpu);
-
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
 
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	unsigned int leftover = size;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
+	enum ucode_state ret = UCODE_OK;
 
 	while (leftover) {
 		struct microcode_header_intel mc_header;
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			new_mc = mc;
 			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
+			ret = UCODE_NEW;
 		}
 
 		ucode_ptr += mc_size;
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);
 
-	return UCODE_OK;
+	return ret;
 }
 
 static int get_ucode_fw(void *to, const void *from, size_t n)
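
Taken together, the three files implement one pipeline. Tracing a late load
end to end (an overview of the series, not code from the tree):

	/*
	 * echo 1 > /sys/devices/system/cpu/microcode/reload
	 *   -> reload_store()
	 *        request_microcode_fw(bsp)   parse the image; returns UCODE_NEW
	 *                                    only if the cache got a newer patch
	 *        check_online_cpus()         refuse unless every present CPU is
	 *                                    online (HT siblings included)
	 *        microcode_reload_late()
	 *          stop_machine_cpuslocked(__reload_late)
	 *            __wait_for_cpus(late_cpus_in)   all CPUs rendezvous
	 *            apply under update_lock         one CPU at a time
	 *            __wait_for_cpus(late_cpus_out)  all CPUs rendezvous again
	 *        microcode_check()           re-examine CPUID features if updated
	 */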