Diffstat (limited to 'arch/x86')
 arch/x86/kernel/cpu/intel.c           | 22 +++++++++++-----------
 arch/x86/kernel/cpu/intel_cacheinfo.c | 12 ++++++++++++
 2 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
 
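
Note on the block moved above: when X86_FEATURE_XTOPOLOGY (CPUID leaf 0xb) is absent, intel_num_cpu_cores() falls back to CPUID leaf 0x4, whose EAX[31:26] field reports the maximum cores per package minus one. A minimal userspace sketch of that probe, assuming a GCC/clang toolchain for <cpuid.h>; the helper name count_cores_leaf4 is illustrative and this is not the kernel's code:

/*
 * Sketch of the legacy CPUID-based core count used when
 * X86_FEATURE_XTOPOLOGY is absent: CPUID leaf 0x4, subleaf 0,
 * EAX[31:26] holds "maximum cores per package" minus one.
 * Illustrative only; the kernel's intel_num_cpu_cores() runs in a
 * different context and checks cpuid_level rather than leaf 0 directly.
 */
#include <stdio.h>
#include <cpuid.h>

static unsigned int count_cores_leaf4(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x4 is only valid when the max basic leaf is >= 4. */
	if (__get_cpuid_max(0, NULL) < 4)
		return 1;

	__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);

	/* EAX[4:0] == 0 means no cache levels are described; assume one core. */
	if (!(eax & 0x1f))
		return 1;

	return (eax >> 26) + 1;
}

int main(void)
{
	printf("max cores per package (leaf 0x4): %u\n", count_cores_leaf4());
	return 0;
}
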
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4, which in
+	 * turn means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
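
The new cpu_llc_id fallback rests on the inference spelled out in the comment: with cpuid_level < 4 there is no deterministic cache leaf, so any siblings advertised by CPUID leaf 0x1 (the HTT bit in EDX[28], logical-processor count in EBX[23:16]) can only be SMT threads, and SMT threads share every cache level. A hedged userspace sketch of that inference, again assuming <cpuid.h>; illustrative only, not the kernel's code path:

/*
 * Sketch of the reasoning behind the cpu_llc_id fallback: when the
 * max basic CPUID leaf is < 4 there is no deterministic cache info
 * (leaf 0x4), so any sibling reported by leaf 0x1 can only be an SMT
 * thread, and SMT threads share all cache levels.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int max_leaf = __get_cpuid_max(0, NULL);

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);

	unsigned int htt  = (edx >> 28) & 1;    /* CPUID.1:EDX[28], HTT flag  */
	unsigned int nlog = (ebx >> 16) & 0xff; /* CPUID.1:EBX[23:16]         */

	if (max_leaf < 4 && htt && nlog > 1)
		printf("only SMT possible: %u logical CPUs share the LLC\n",
		       nlog);
	else
		printf("leaf 0x4 available or no HTT; use deterministic cache info\n");
	return 0;
}
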