Diffstat:
 MAINTAINERS                           | 12 +-----------
 arch/x86/events/amd/power.c           |  2 +-
 arch/x86/kernel/cpu/microcode/core.c  |  2 +-
 arch/x86/kernel/cpu/microcode/intel.c | 20 ++++++++++++++++--
 arch/x86/kernel/ftrace_64.S           |  2 +-
 arch/x86/mm/fault.c                   | 22 +++++++++-------------
 arch/x86/mm/tlb.c                     | 34 +++++++++++++++++++++++++++++-----
 7 files changed, 60 insertions(+), 34 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index cbd1ed6bc915..845fc25812f1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6617,16 +6617,6 @@ L: linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/i2c-stub.c
 
-i386 BOOT CODE
-M:	"H. Peter Anvin" <hpa@zytor.com>
-S:	Maintained
-F:	arch/x86/boot/
-
-i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-M:	"H. Peter Anvin" <hpa@zytor.com>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
-S:	Maintained
-
 IA64 (Itanium) PLATFORM
 M:	Tony Luck <tony.luck@intel.com>
 M:	Fenghua Yu <fenghua.yu@intel.com>
@@ -14866,7 +14856,7 @@ F:	net/x25/
 X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:	Thomas Gleixner <tglx@linutronix.de>
 M:	Ingo Molnar <mingo@redhat.com>
-M:	"H. Peter Anvin" <hpa@zytor.com>
+R:	"H. Peter Anvin" <hpa@zytor.com>
 M:	x86@kernel.org
 L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index a6eee5ac4f58..2aefacf5c5b2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
 	int ret;
 
 	if (!x86_match_cpu(cpu_match))
-		return 0;
+		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
 		return -ENODEV;
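
A note on the power.c hunk above: returning 0 from a module initcall reports success, so a CPU mismatch would have left the module loaded with no PMU registered; returning -ENODEV makes modprobe fail cleanly on unsupported hardware. A minimal sketch of that initcall convention, using a hypothetical probe helper (hardware_present) rather than the real amd_power code beyond what the hunk shows:

    #include <linux/module.h>
    #include <linux/errno.h>

    /* Hypothetical stand-in for the x86_match_cpu()/boot_cpu_has() probes. */
    static bool hardware_present(void)
    {
        return false;
    }

    static int __init example_pmu_init(void)
    {
        /*
         * Returning 0 on mismatch would report success and keep the
         * module resident; -ENODEV makes module load fail cleanly.
         */
        if (!hardware_present())
            return -ENODEV;

        return 0;
    }
    module_init(example_pmu_init);

    MODULE_LICENSE("GPL");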
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c4fa4a85d4cb..e4fc595cd6ea 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
 		break;
 	case X86_VENDOR_AMD:
 		if (c->x86 >= 0x10)
-			return save_microcode_in_initrd_amd(cpuid_eax(1));
+			ret = save_microcode_in_initrd_amd(cpuid_eax(1));
 		break;
 	default:
 		break;
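
In the core.c hunk, the AMD branch stops returning straight out of the switch and instead stores the result in ret, so whatever bookkeeping follows the switch in save_microcode_in_initrd() still runs before the function returns. A runnable sketch of the pattern with made-up names (the epilogue shown is illustrative, not the actual kernel code):

    #include <stdio.h>

    enum vendor { VENDOR_OTHER, VENDOR_AMD };

    /* Hypothetical stand-in for save_microcode_in_initrd_amd(). */
    static int save_vendor_microcode(void)
    {
        return 0;
    }

    static int save_microcode(enum vendor v)
    {
        int ret = -1;

        switch (v) {
        case VENDOR_AMD:
            /* Capture the result instead of returning directly... */
            ret = save_vendor_microcode();
            break;
        default:
            break;
        }

        /* ...so this common epilogue still runs for every vendor. */
        puts("epilogue: release the initrd reference");

        return ret;
    }

    int main(void)
    {
        return save_microcode(VENDOR_AMD);
    }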
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index d9e460fc7a3b..f7c55b0e753a 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 /* Current microcode patch used in early patching on the APs. */
 static struct microcode_intel *intel_ucode_patch;
 
+/* last level cache size per core */
+static int llc_size_per_core;
+
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
 					unsigned int s2, unsigned int p2)
 {
@@ -912,12 +915,14 @@ static bool is_blacklisted(unsigned int cpu)
 
 	/*
 	 * Late loading on model 79 with microcode revision less than 0x0b000021
-	 * may result in a system hang. This behavior is documented in item
-	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+	 * and LLC size per core bigger than 2.5MB may result in a system hang.
+	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
+	 * Processor E7-8800/4800 v4 Product Family).
 	 */
 	if (c->x86 == 6 &&
 	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
 	    c->x86_mask == 0x01 &&
+	    llc_size_per_core > 2621440 &&
 	    c->microcode < 0x0b000021) {
 		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
 		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
@@ -975,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
 	.apply_microcode = apply_microcode_intel,
 };
 
+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+	u64 llc_size = c->x86_cache_size * 1024;
+
+	do_div(llc_size, c->x86_max_cores);
+
+	return (int)llc_size;
+}
+
 struct microcode_ops * __init init_intel_microcode(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -985,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
 		return NULL;
 	}
 
+	llc_size_per_core = calc_llc_size_per_core(c);
+
 	return &microcode_intel_ops;
 }
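
Two details of the intel.c change worth spelling out: the 2621440 threshold is 2.5 MB expressed in bytes (2.5 * 1024 * 1024), matching the erratum's wording, and calc_llc_size_per_core() converts c->x86_cache_size from KB to bytes before dividing by c->x86_max_cores. A standalone sketch of the same arithmetic, with made-up example values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cache_size_kb = 40960; /* example: 40 MB shared LLC */
        unsigned int cores = 16;        /* example core count */

        /* KB -> bytes, then the per-core share, as the kernel helper does */
        uint64_t llc_size_per_core = cache_size_kb * 1024 / cores;

        /* erratum BDF90 applies above 2.5 MB per core */
        printf("%llu bytes per core, blacklisted: %s\n",
               (unsigned long long)llc_size_per_core,
               llc_size_per_core > 2621440 ? "yes" : "no");
        return 0;
    }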
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index ef61f540cf0a..91b2cff4b79a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -295,7 +295,7 @@ trace:
 	restore_mcount_regs
 
 	jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b3e40773dce0..800de815519c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_ref))
 		return -1;
 
-	if (pgd_none(*pgd)) {
-		set_pgd(pgd, *pgd_ref);
-		arch_flush_lazy_mmu_mode();
-	} else if (CONFIG_PGTABLE_LEVELS > 4) {
-		/*
-		 * With folded p4d, pgd_none() is always false, so the pgd may
-		 * point to an empty page table entry and pgd_page_vaddr()
-		 * will return garbage.
-		 *
-		 * We will do the correct sanity check on the p4d level.
-		 */
-		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+	if (CONFIG_PGTABLE_LEVELS > 4) {
+		if (pgd_none(*pgd)) {
+			set_pgd(pgd, *pgd_ref);
+			arch_flush_lazy_mmu_mode();
+		} else {
+			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
 	}
 
 	/* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (p4d_none(*p4d_ref))
 		return -1;
 
-	if (p4d_none(*p4d)) {
+	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
 		set_p4d(p4d, *p4d_ref);
 		arch_flush_lazy_mmu_mode();
 	} else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * Below here mismatches are bugs because these lower tables
 	 * are shared:
 	 */
+	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
 	pud = pud_offset(p4d, address);
 	pud_ref = pud_offset(p4d_ref, address);
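
The fault.c restructuring hinges on a property of folded page tables: with CONFIG_PGTABLE_LEVELS <= 4 the p4d level is folded into the pgd, pgd_none() is compile-time false, and the pgd entry is a pass-through, so both the sync and the BUG_ON sanity check are only meaningful under CONFIG_PGTABLE_LEVELS > 4. The p4d-level code below the hunk handles the 4-level case, and the new BUILD_BUG_ON records that fewer than 4 levels is impossible here. A toy userspace model of the dead-branch problem (simplified stand-ins, not kernel APIs):

    #include <stdio.h>

    #define PGTABLE_LEVELS 4        /* set to 5 to model 5-level paging */

    /* With a folded p4d, pgd_none() is constant false by construction. */
    static int pgd_none(unsigned long entry)
    {
        if (PGTABLE_LEVELS <= 4)
            return 0;
        return entry == 0;
    }

    int main(void)
    {
        unsigned long pgd = 0;      /* an entry that is "really" empty */

        if (PGTABLE_LEVELS > 4) {
            /* only here do the sync and the sanity check mean anything */
            if (pgd_none(pgd))
                puts("sync pgd from the reference table");
            else
                puts("sanity-check the populated pgd");
        } else {
            /* the old code could sanity-check garbage here */
            puts("skip the pgd; the real check happens at the p4d level");
        }
        return 0;
    }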
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a1561957dccb..5bfe61a5e8e3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	local_irq_restore(flags);
 }
 
+static void sync_current_stack_to_mm(struct mm_struct *mm)
+{
+	unsigned long sp = current_stack_pointer;
+	pgd_t *pgd = pgd_offset(mm, sp);
+
+	if (CONFIG_PGTABLE_LEVELS > 4) {
+		if (unlikely(pgd_none(*pgd))) {
+			pgd_t *pgd_ref = pgd_offset_k(sp);
+
+			set_pgd(pgd, *pgd_ref);
+		}
+	} else {
+		/*
+		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
+		 * the p4d.  This compiles to approximately the same code as
+		 * the 5-level case.
+		 */
+		p4d_t *p4d = p4d_offset(pgd, sp);
+
+		if (unlikely(p4d_none(*p4d))) {
+			pgd_t *pgd_ref = pgd_offset_k(sp);
+			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
+
+			set_p4d(p4d, *p4d_ref);
+		}
+	}
+}
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
@@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 * mapped in the new pgd, we'll double-fault.  Forcibly
 		 * map it.
 		 */
-		unsigned int index = pgd_index(current_stack_pointer);
-		pgd_t *pgd = next->pgd + index;
-
-		if (unlikely(pgd_none(*pgd)))
-			set_pgd(pgd, init_mm.pgd[index]);
+		sync_current_stack_to_mm(next);
 	}
 
 	/* Stop remote flushes for the previous mm */
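
The tlb.c hunk replaces the open-coded stack sync, which only knew about the top (pgd) level, with sync_current_stack_to_mm(), which picks the level that actually holds the entry: the pgd with 5-level tables, the p4d when the pgd is folded. The motivation is unchanged from the comment in the hunk: with vmapped stacks the current stack lives in the vmalloc area, may not yet be mapped in the next mm's tables, and faulting on the stack itself would double-fault, so the entry is copied in eagerly before the switch. A toy model of that copy-on-switch idea (made-up structures, not the kernel's):

    #include <stdio.h>

    #define TOP_ENTRIES 8

    /* kernel reference table vs. a process copy with gaps */
    static unsigned long init_top[TOP_ENTRIES] = { 11, 12, 13, 14, 15, 16, 17, 18 };
    static unsigned long next_top[TOP_ENTRIES]; /* 0 == not yet populated */

    static unsigned int top_index(unsigned long addr)
    {
        return (addr >> 10) % TOP_ENTRIES;      /* toy pgd_index() */
    }

    /* eagerly copy the entry covering the stack before switching to it */
    static void sync_stack_entry(unsigned long stack_ptr)
    {
        unsigned int i = top_index(stack_ptr);

        if (next_top[i] == 0)                   /* toy pgd_none() */
            next_top[i] = init_top[i];
    }

    int main(void)
    {
        unsigned long sp = 0x3400;              /* pretend stack pointer */

        sync_stack_entry(sp);
        printf("entry %u now %lu\n", top_index(sp), next_top[top_index(sp)]);
        return 0;
    }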