author    Linus Torvalds <torvalds@linux-foundation.org>  2017-02-11 13:31:46 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-02-11 13:31:46 -0500
commit    1ce42845f987e92eabfc6e026d44d826c25c74a5 (patch)
tree      d62cdad8adc590faad42bc656e31b2a8a786a77c
parent    fdb0ee7c65781464168e2943a3fd6f1e66a397c9 (diff)
parent    146fbb766934dc003fcbf755b519acef683576bf (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Last minute x86 fixes:

   - Fix a softlockup detector warning and long delays if using ptdump
     with KASAN enabled.

   - Two more TSC-adjust fixes for interesting firmware interactions.

   - Two commits to fix an AMD CPU topology enumeration bug that caused
     a measurable gaming performance regression"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/ptdump: Fix soft lockup in page table walker
  x86/tsc: Make the TSC ADJUST sanitizing work for tsc_reliable
  x86/tsc: Avoid the large time jump when sanitizing TSC ADJUST
  x86/CPU/AMD: Fix Zen SMT topology
  x86/CPU/AMD: Bring back Compute Unit ID
-rw-r--r--  arch/x86/include/asm/processor.h   1
-rw-r--r--  arch/x86/kernel/cpu/amd.c         16
-rw-r--r--  arch/x86/kernel/cpu/common.c       1
-rw-r--r--  arch/x86/kernel/smpboot.c         12
-rw-r--r--  arch/x86/kernel/tsc.c              5
-rw-r--r--  arch/x86/kernel/tsc_sync.c        16
-rw-r--r--  arch/x86/mm/dump_pagetables.c      2
7 files changed, 38 insertions, 15 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1be64da0384e..e6cfe7ba2d65 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+	__u8			cu_id;
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1d3167269a67..2b4cf04239b6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
 	/* get information required for multi-node processors */
 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 eax, ebx, ecx, edx;
 
-		node_id = cpuid_ecx(0x8000001e) & 7;
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id = ecx & 0xff;
+		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+		if (c->x86 == 0x15)
+			c->cu_id = ebx & 0xff;
+
+		if (c->x86 >= 0x17) {
+			c->cpu_core_id = ebx & 0xff;
+
+			if (smp_num_siblings > 1)
+				c->x86_max_cores /= smp_num_siblings;
+		}
 
 		/*
 		 * We may have multiple LLCs if L3 caches exist, so check if we
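
For readers who want to see what the amd.c change actually decodes: CPUID leaf 0x8000001e returns the node id in ECX[7:0], a sibling count (minus one) in EBX[15:8], and the compute-unit/core id in EBX[7:0], which is exactly what the new amd_get_topology() code above pulls apart. The following is a minimal userspace sketch, not part of the patch, that reads the same leaf with GCC's <cpuid.h> helper; it assumes an AMD CPU that implements the TOPOEXT leaf and only reports values for whichever CPU the process happens to run on (pin it with taskset to inspect a particular core).

/*
 * Hedged userspace sketch: decode CPUID leaf 0x8000001e the same way the
 * amd_get_topology() change above does. Build with gcc/clang on x86.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x8000001e, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x8000001e not supported\n");
		return 1;
	}

	/* Same field extraction as the patched kernel code */
	printf("node_id              : %u\n", ecx & 0xff);
	printf("smp_num_siblings     : %u\n", ((ebx >> 8) & 0xff) + 1);
	printf("core/cu id (EBX[7:0]): %u\n", ebx & 0xff);

	return 0;
}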
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bab7a8a4293..ede03e849a8b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 46732dc3b73c..99b920d0e516 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
-		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-		    c->cpu_core_id == o->cpu_core_id)
-			return topology_sane(c, o, "smt");
+		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+			if (c->cpu_core_id == o->cpu_core_id)
+				return topology_sane(c, o, "smt");
+
+			if ((c->cu_id != 0xff) &&
+			    (o->cu_id != 0xff) &&
+			    (c->cu_id == o->cu_id))
+				return topology_sane(c, o, "smt");
+		}
 
 	} else if (c->phys_proc_id == o->phys_proc_id &&
 		   c->cpu_core_id == o->cpu_core_id) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e41af597aed8..37e7cf544e51 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1356,6 +1356,9 @@ void __init tsc_init(void)
 		(unsigned long)cpu_khz / 1000,
 		(unsigned long)cpu_khz % 1000);
 
+	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
+	tsc_store_and_check_tsc_adjust(true);
+
 	/*
 	 * Secondary CPUs do not run through tsc_init(), so set up
 	 * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1389,6 @@ void __init tsc_init(void)
 
 	if (unsynchronized_tsc())
 		mark_tsc_unstable("TSCs unsynchronized");
-	else
-		tsc_store_and_check_tsc_adjust(true);
 
 	check_system_tsc_reliable();
 
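
The TSC changes in this pull all revolve around the IA32_TSC_ADJUST MSR (architectural MSR 0x3b), which firmware can leave at a bogus value: tsc_store_and_check_tsc_adjust() now runs before cyc2ns is initialized, and (in tsc_sync.c below) even when the TSC is marked reliable. As a hedged, read-only illustration of what is being sanitized, the sketch below dumps that MSR through the msr driver's /dev/cpu/N/msr interface; it assumes the msr module is loaded and root privileges, and is a diagnostic, not the kernel's sanitizing logic.

/*
 * Hedged sketch: read IA32_TSC_ADJUST (MSR 0x3b) for one CPU via the msr
 * driver. Assumes 'modprobe msr' has been run and the program runs as root.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define MSR_IA32_TSC_ADJUST 0x3b

int main(int argc, char **argv)
{
	char path[64];
	uint64_t val;
	int cpu = (argc > 1) ? atoi(argv[1]) : 0;
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The msr character device uses the MSR index as the file offset */
	if (pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}

	printf("CPU %d: IA32_TSC_ADJUST = %lld\n", cpu, (long long)val);
	close(fd);
	return 0;
}

A nonzero value on a freshly booted CPU is the kind of firmware leftovers these patches detect and repair (the sketch only reads the register).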
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index d0db011051a5..728f75378475 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
 	if (unsynchronized_tsc())
 		return;
 
-	if (tsc_clocksource_reliable) {
-		if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-			pr_info(
-			"Skipped synchronization checks as TSC is reliable.\n");
-		return;
-	}
-
 	/*
 	 * Set the maximum number of test runs to
 	 *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
 	int cpus = 2;
 
 	/* Also aborts if there is no TSC. */
-	if (unsynchronized_tsc() || tsc_clocksource_reliable)
+	if (unsynchronized_tsc())
 		return;
 
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
+	 *
+	 * The test is also skipped when the TSC is marked reliable. This
+	 * is true for SoCs which have no fallback clocksource. On these
+	 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+	 * register might have been wreckaged by the BIOS..
 	 */
-	if (tsc_store_and_check_tsc_adjust(false)) {
+	if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
 		atomic_inc(&skip_test);
 		return;
 	}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9c49adaa1f..8aa6bea1cd6c 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 		} else
 			note_page(m, &st, __pgprot(0), 1);
 
+		cond_resched();
 		start++;
 	}
 