-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c  118
1 file changed, 92 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 5dd157d48606..70ecbc8099c9 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -22,13 +22,16 @@
 #define pr_fmt(fmt) "microcode: " fmt
 
 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);
 
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 
 struct cpu_info_ctx {
@@ -486,6 +494,19 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device *microcode_pdev;
 
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
 static int check_online_cpus(void)
 {
 	if (num_online_cpus() == num_present_cpus())
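
A quick sanity check on the numbers above (my arithmetic, not part of the patch): SPINUNIT is 100 ns and the per-CPU wait budget in __reload_late() below starts at NSEC_PER_SEC, so the rendezvous loop can poll at most 1,000,000,000 / 100 = 10,000,000 times, i.e. it busy-waits for roughly one second (plus ndelay() and loop overhead) before giving up and printing how many CPUs never arrived.
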
@@ -496,23 +517,85 @@ static int check_online_cpus(void)
 	return -EINVAL;
 }
 
-static enum ucode_state reload_for_cpu(int cpu)
+static atomic_t late_cpus;
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	unsigned int timeout = NSEC_PER_SEC;
+	int all_cpus = num_online_cpus();
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
 
-	if (!uci->valid)
-		return UCODE_OK;
+	atomic_dec(&late_cpus);
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 * */
+	while (atomic_read(&late_cpus)) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				atomic_read(&late_cpus));
+			return -1;
+		}
+
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		ret = -1;
+	} else if (err == UCODE_UPDATED) {
+		ret = 1;
+	}
 
-	return apply_microcode_on_target(cpu);
+	atomic_inc(&late_cpus);
+
+	while (atomic_read(&late_cpus) != all_cpus)
+		cpu_relax();
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus, num_online_cpus());
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret < 0)
+		return ret;
+	else if (ret > 0)
+		microcode_check();
+
+	return ret;
 }
 
 static ssize_t reload_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t size)
 {
-	int cpu, bsp = boot_cpu_data.cpu_index;
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	ssize_t ret = 0;
 
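
To see the rendezvous protocol of __reload_late() in isolation, here is a minimal user-space sketch (my own illustration, not kernel code) built from C11 atomics and pthreads: the counter late_cpus is counted down as threads arrive and back up as they finish, so no thread returns before every one of them has applied its (fake) update, mirroring the two barriers around the spinlock-serialized apply_microcode_local() call. The names NCPUS, fake_apply_microcode() and reload_one() are invented for the example; the real code additionally runs under stop_machine_cpuslocked(), bounds the first wait with a timeout and touches the NMI watchdog while spinning.

/* Minimal user-space model of the late_cpus rendezvous (illustrative only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4				/* stand-in for num_online_cpus() */

static atomic_int late_cpus = NCPUS;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void fake_apply_microcode(long cpu)
{
	/* Stands in for apply_microcode_local(); serialized like the patch. */
	pthread_mutex_lock(&update_lock);
	printf("cpu %ld: applying update\n", cpu);
	pthread_mutex_unlock(&update_lock);
}

static void *reload_one(void *arg)
{
	long cpu = (long)arg;

	/* Phase 1: announce arrival, then wait until everyone has arrived. */
	atomic_fetch_sub(&late_cpus, 1);
	while (atomic_load(&late_cpus))
		;	/* the kernel adds a timeout and touch_nmi_watchdog() here */

	/* Phase 2: apply the update, one "CPU" at a time. */
	fake_apply_microcode(cpu);

	/* Phase 3: wait until everyone is done before leaving the rendezvous. */
	atomic_fetch_add(&late_cpus, 1);
	while (atomic_load(&late_cpus) != NCPUS)
		;
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, reload_one, (void *)i);
	for (long i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

It builds with any C11 compiler and -pthread; the point is only the ordering guarantee, not fidelity to the kernel environment.
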
@@ -534,30 +617,13 @@ static ssize_t reload_store(struct device *dev,
 		goto put;
 
 	mutex_lock(&microcode_mutex);
-
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
-
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
-
-	if (!ret && do_callback)
-		microcode_check();
-
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);
 
 put:
 	put_online_cpus();
 
-	if (!ret)
+	if (ret >= 0)
 		ret = size;
 
 	return ret;
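
One consequence worth spelling out for the last hunk: per the comment above __reload_late(), the late-load path now reports a negative value on error, 0 when no update was done and 1 when microcode was actually updated, and microcode_reload_late() passes that result through from stop_machine_cpuslocked(). That is why the final check in reload_store() changes from !ret to ret >= 0: a write of 1 to /sys/devices/system/cpu/microcode/reload still returns size in both the updated and the nothing-to-do case, and an error otherwise.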