author    Junichi Nomura <j-nomura@ce.jp.nec.com>    2017-01-09 06:41:47 -0500
committer Thomas Gleixner <tglx@linutronix.de>       2017-01-09 17:11:15 -0500
commit    2e86222c67bb5d942da68e8415749b32db208534 (patch)
tree      ec9a02c499a33cad7db57d0c5e7e951e6d7dec48 /arch/x86/kernel/cpu/microcode/intel.c
parent    9fcf5ba2ef908af916e9002891fbbca20ce4dc98 (diff)
x86/microcode/intel: Use correct buffer size for saving microcode data
In generic_load_microcode(), curr_mc_size is the size of the last
allocated buffer and, because of the performance "optimization" of
vmalloc'ing a new buffer only when the current one is too small,
curr_mc_size ends up being the size of the biggest buffer seen so far.

However, the patch we end up saving is the one which matches our CPU,
and its size is not curr_mc_size but the mc_size of the iteration in
which it was found. So save that mc_size into a separate variable and
use it when storing the previously found microcode buffer.

Without this fix, we could get an oops like this:

  BUG: unable to handle kernel paging request at ffffc9000e30f000
  IP: __memcpy+0x12/0x20
  ...
  Call Trace:
   ? kmemdup+0x43/0x60
   __alloc_microcode_buf+0x44/0x70
   save_microcode_patch+0xd4/0x150
   generic_load_microcode+0x1b8/0x260
   request_microcode_user+0x15/0x20
   microcode_write+0x91/0x100
   __vfs_write+0x34/0x120
   vfs_write+0xc1/0x130
   SyS_write+0x56/0xc0
   do_syscall_64+0x6c/0x160
   entry_SYSCALL64_slow_path+0x25/0x25

Fixes: 06b8534cb728 ("x86/microcode: Rework microcode loading")
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/4f33cbfd-44f2-9bed-3b66-7446cd14256f@ce.jp.nec.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
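[Editor's sketch, not part of the commit] The bookkeeping mistake can be
illustrated with a minimal standalone C program; the patch sizes, the
"matching" index and the malloc()/memcpy() stand-ins below are made up
and only mirror the shape of the kernel code, they are not the kernel
implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t sizes[] = { 4096, 16384, 2048 };	/* patch sizes found while scanning */
	int matching = 2;			/* pretend the smallest one matches this CPU */
	void *new_mc = NULL;
	size_t curr_mc_size = 0, new_mc_size = 0;

	for (int i = 0; i < 3; i++) {
		size_t mc_size = sizes[i];
		void *mc = malloc(mc_size);	/* stands in for vmalloc() */

		if (!mc)
			return 1;
		memset(mc, i, mc_size);

		/* curr_mc_size only ever grows -> size of the biggest patch seen */
		if (mc_size > curr_mc_size)
			curr_mc_size = mc_size;

		if (i == matching) {
			free(new_mc);
			new_mc = mc;
			new_mc_size = mc_size;	/* remember *this* patch's size */
		} else {
			free(mc);
		}
	}

	/*
	 * Buggy version: memcpy(copy, new_mc, curr_mc_size) would read 16384
	 * bytes out of a 2048-byte allocation -- the same out-of-bounds access
	 * that shows up as the vmalloc-area oops in the commit message.
	 */
	void *copy = malloc(new_mc_size);
	if (copy)
		memcpy(copy, new_mc, new_mc_size);

	printf("saved %zu bytes (largest patch seen: %zu)\n",
	       new_mc_size, curr_mc_size);

	free(copy);
	free(new_mc);
	return 0;
}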
Diffstat (limited to 'arch/x86/kernel/cpu/microcode/intel.c')
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 943486589757..3f329b74e040 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -823,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
 	int new_rev = uci->cpu_sig.rev;
 	unsigned int leftover = size;
-	unsigned int curr_mc_size = 0;
+	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
 
 	while (leftover) {
@@ -864,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			vfree(new_mc);
 			new_rev = mc_header.rev;
 			new_mc = mc;
+			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
 		}
 
@@ -889,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	 * permanent memory. So it will be loaded early when a CPU is hot added
 	 * or resumes.
 	 */
-	save_mc_for_early(new_mc, curr_mc_size);
+	save_mc_for_early(new_mc, new_mc_size);
 
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);