Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')

 arch/x86/kernel/cpu/intel_cacheinfo.c | 148 ++++++++++++---------------
 1 file changed, 64 insertions(+), 84 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 12cd823c8d03..9ecf81f9b90f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -149,8 +149,7 @@ union _cpuid4_leaf_ecx {
 };
 
 struct amd_l3_cache {
-        struct pci_dev *dev;
-        bool can_disable;
+        struct amd_northbridge *nb;
         unsigned indices;
         u8 subcaches[4];
 };
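
For reference, assembled from the new-side lines of this hunk, the L3 descriptor after the change reads:

        struct amd_l3_cache {
                struct amd_northbridge *nb;
                unsigned indices;
                u8 subcaches[4];
        };

The raw pci_dev pointer becomes an amd_northbridge descriptor, and the per-cache can_disable flag is dropped in favor of the global AMD_NB_L3_INDEX_DISABLE feature check introduced further down.
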
@@ -311,14 +310,12 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static struct amd_l3_cache **__cpuinitdata l3_caches;
-
 static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
         unsigned int sc0, sc1, sc2, sc3;
         u32 val = 0;
 
-        pci_read_config_dword(l3->dev, 0x1C4, &val);
+        pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
 
         /* calculate subcache sizes */
         l3->subcaches[0] = sc0 = !(val & BIT(0));
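
Config-space accesses now go through the misc PCI device cached in the northbridge descriptor instead of a pci_dev looked up per node. A minimal before/after sketch, using only helpers that appear elsewhere in this diff:

        /* before: resolve and store the per-node PCI device directly */
        struct pci_dev *misc = node_to_k8_nb_misc(node);
        pci_read_config_dword(misc, 0x1C4, &val);

        /* after: take the descriptor from the amd_nb framework */
        struct amd_northbridge *nb = node_to_amd_nb(node);
        pci_read_config_dword(nb->misc, 0x1C4, &val);
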
@@ -327,49 +324,17 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
         l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
 
         l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
-{
-        struct amd_l3_cache *l3;
-        struct pci_dev *dev = node_to_k8_nb_misc(node);
-
-        l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
-        if (!l3) {
-                printk(KERN_WARNING "Error allocating L3 struct\n");
-                return NULL;
-        }
-
-        l3->dev = dev;
-
-        amd_calc_l3_indices(l3);
-
-        return l3;
-}
-
-static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
-                                           int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+                                        int index)
 {
+        static struct amd_l3_cache *__cpuinitdata l3_caches;
         int node;
 
-        if (boot_cpu_data.x86 != 0x10)
-                return;
-
-        if (index < 3)
-                return;
-
-        /* see errata #382 and #388 */
-        if (boot_cpu_data.x86_model < 0x8)
-                return;
-
-        if ((boot_cpu_data.x86_model == 0x8 ||
-             boot_cpu_data.x86_model == 0x9)
-            &&
-             boot_cpu_data.x86_mask < 0x1)
-                return;
-
-        /* not in virtualized environments */
-        if (k8_northbridges.num == 0)
+        /* only for L3, and not in virtualized environments */
+        if (index < 3 || amd_nb_num() == 0)
                 return;
 
         /*
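
Two things to note in this hunk. First, the new side keeps the old nested-max() assignment (it appears above as context) and adds the max3() form right after it, so l3->indices is assigned twice and the max3() result wins; the duplicate looks like a merge leftover rather than an intentional change. Second, as a worked example of the index calculation, assuming the bits tested in the 0x1C4 value are per-subcache disable bits and none are set:

        /*
         * sc0 = !(val & BIT(0))                     = 1
         * sc1 = !(val & BIT(4))                     = 1
         * sc2 = !(val & BIT(8))  + !(val & BIT(9))  = 2
         * sc3 = !(val & BIT(12)) + !(val & BIT(13)) = 2
         *
         * indices = (max(max3(1, 1, 2), 2) << 10) - 1
         *         = (2 << 10) - 1 = 2047
         */
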
@@ -377,7 +342,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
          * never freed but this is done only on shutdown so it doesn't matter.
          */
         if (!l3_caches) {
-                int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+                int size = amd_nb_num() * sizeof(struct amd_l3_cache);
 
                 l3_caches = kzalloc(size, GFP_ATOMIC);
                 if (!l3_caches)
@@ -386,14 +351,12 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 
         node = amd_get_nb_id(smp_processor_id());
 
-        if (!l3_caches[node]) {
-                l3_caches[node] = amd_init_l3_cache(node);
-                l3_caches[node]->can_disable = true;
+        if (!l3_caches[node].nb) {
+                l3_caches[node].nb = node_to_amd_nb(node);
+                amd_calc_l3_indices(&l3_caches[node]);
         }
 
-        WARN_ON(!l3_caches[node]);
-
-        this_leaf->l3 = l3_caches[node];
+        this_leaf->l3 = &l3_caches[node];
 }
 
 /*
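
Pieced together from the new-side lines of the three hunks above, the consolidated function reads roughly as follows. The two structurally forced lines that fall between the hunks (the kzalloc-failure return and its closing brace) are restored; one comment line that also falls between hunks is left elided:

        static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
                                                int index)
        {
                static struct amd_l3_cache *__cpuinitdata l3_caches;
                int node;

                /* only for L3, and not in virtualized environments */
                if (index < 3 || amd_nb_num() == 0)
                        return;

                /*
                 * ...
                 * never freed but this is done only on shutdown so it doesn't matter.
                 */
                if (!l3_caches) {
                        int size = amd_nb_num() * sizeof(struct amd_l3_cache);

                        l3_caches = kzalloc(size, GFP_ATOMIC);
                        if (!l3_caches)
                                return;
                }

                node = amd_get_nb_id(smp_processor_id());

                if (!l3_caches[node].nb) {
                        l3_caches[node].nb = node_to_amd_nb(node);
                        amd_calc_l3_indices(&l3_caches[node]);
                }

                this_leaf->l3 = &l3_caches[node];
        }

Note that l3_caches is now a function-local static array of structs rather than a file-scope array of pointers, so the per-node descriptors no longer need individual allocations, and the model/stepping errata checks move out of this function entirely.
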
@@ -407,7 +370,7 @@ int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
 {
         unsigned int reg = 0;
 
-        pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+        pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
 
         /* check whether this slot is activated already */
         if (reg & (3UL << 30))
@@ -421,7 +384,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
         int index;
 
-        if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+        if (!this_leaf->l3 ||
+            !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                 return -EINVAL;
 
         index = amd_get_l3_disable_slot(this_leaf->l3, slot);
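
The per-L3 can_disable bool is replaced by a query of a global northbridge capability. amd_nb_has_feature() and AMD_NB_L3_INDEX_DISABLE are provided by the amd_nb code, which is not part of this diff; presumably the check is a simple flags test along these lines (a sketch only, not the actual definition):

        static inline bool amd_nb_has_feature(int feature)
        {
                /* assumed: a feature bitmask kept in the global NB info */
                return ((amd_northbridges.flags & feature) == feature);
        }
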
@@ -456,7 +420,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
                 if (!l3->subcaches[i])
                         continue;
 
-                pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+                pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
 
                 /*
                  * We need to WBINVD on a core on the node containing the L3
@@ -466,7 +430,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
                 wbinvd_on_cpu(cpu);
 
                 reg |= BIT(31);
-                pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+                pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
         }
 }
 
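
Apart from the l3->dev to l3->nb->misc switch, the disable sequence in these two hunks is unchanged; condensed from the lines above, it is:

        pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);  /* stage index */
        wbinvd_on_cpu(cpu);               /* flush the L3 on that node */
        reg |= BIT(31);
        pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);  /* commit disable */
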
@@ -523,7 +487,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;
 
-        if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+        if (!this_leaf->l3 ||
+            !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                 return -EINVAL;
 
         cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -544,7 +509,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)                                       \
 static ssize_t                                                          \
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,              \
-                            const char *buf, size_t count)              \
+                           const char *buf, size_t count)               \
 {                                                                       \
         return store_cache_disable(this_leaf, buf, count, slot);        \
 }

(The only change in this hunk is the whitespace alignment of the continuation line.)
@@ -557,10 +522,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                 show_cache_disable_1, store_cache_disable_1);
 
 #else   /* CONFIG_AMD_NB */
-static void __cpuinit
-amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
-{
-};
+#define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
 
 static int
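
With CONFIG_AMD_NB disabled, the empty stub function is replaced by an empty macro, so the call site in cpuid4_cache_lookup_regs() (next hunk) now compiles away entirely instead of emitting a call to a do-nothing function:

        /* with CONFIG_AMD_NB unset, this call: */
        amd_init_l3_cache(this_leaf, index);
        /* preprocesses to just the trailing semicolon, an empty statement */
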
@@ -574,7 +536,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 
         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                 amd_cpuid4(index, &eax, &ebx, &ecx);
-                amd_check_l3_disable(this_leaf, index);
+                amd_init_l3_cache(this_leaf, index);
         } else {
                 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
         }
@@ -982,30 +944,48 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-#define DEFAULT_SYSFS_CACHE_ATTRS       \
-        &type.attr,                     \
-        &level.attr,                    \
-        &coherency_line_size.attr,      \
-        &physical_line_partition.attr,  \
-        &ways_of_associativity.attr,    \
-        &number_of_sets.attr,           \
-        &size.attr,                     \
-        &shared_cpu_map.attr,           \
-        &shared_cpu_list.attr
-
 static struct attribute *default_attrs[] = {
-        DEFAULT_SYSFS_CACHE_ATTRS,
+        &type.attr,
+        &level.attr,
+        &coherency_line_size.attr,
+        &physical_line_partition.attr,
+        &ways_of_associativity.attr,
+        &number_of_sets.attr,
+        &size.attr,
+        &shared_cpu_map.attr,
+        &shared_cpu_list.attr,
         NULL
 };
 
-static struct attribute *default_l3_attrs[] = {
-        DEFAULT_SYSFS_CACHE_ATTRS,
 #ifdef CONFIG_AMD_NB
-        &cache_disable_0.attr,
-        &cache_disable_1.attr,
+static struct attribute ** __cpuinit amd_l3_attrs(void)
+{
+        static struct attribute **attrs;
+        int n;
+
+        if (attrs)
+                return attrs;
+
+        n = sizeof (default_attrs) / sizeof (struct attribute *);
+
+        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+                n += 2;
+
+        attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+        if (attrs == NULL)
+                return attrs = default_attrs;
+
+        for (n = 0; default_attrs[n]; n++)
+                attrs[n] = default_attrs[n];
+
+        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+                attrs[n++] = &cache_disable_0.attr;
+                attrs[n++] = &cache_disable_1.attr;
+        }
+
+        return attrs;
+}
 #endif
-        NULL
-};
 
 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
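
A note on the sizing arithmetic in amd_l3_attrs(): the sizeof-based count includes the NULL terminator of default_attrs, so the numbers work out as follows:

        /*
         * 9 attributes + NULL terminator           -> n = 10
         * feature present: + cache_disable_{0,1}   -> n = 12
         *
         * the copy loop fills slots 0..8, the two extra attributes land
         * in slots 9 and 10, and slot 11 stays NULL from kzalloc(),
         * keeping the list terminated.
         */

On allocation failure the function falls back to the plain default_attrs, and the result is cached in a static pointer so the array is built at most once.
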
@@ -1116,11 +1096,11 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
                 this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-                if (this_leaf->l3 && this_leaf->l3->can_disable)
-                        ktype_cache.default_attrs = default_l3_attrs;
-                else
-                        ktype_cache.default_attrs = default_attrs;
-
+                ktype_cache.default_attrs = default_attrs;
+#ifdef CONFIG_AMD_NB
+                if (this_leaf->l3)
+                        ktype_cache.default_attrs = amd_l3_attrs();
+#endif
                 retval = kobject_init_and_add(&(this_object->kobj),
                                               &ktype_cache,
                                               per_cpu(ici_cache_kobject, cpu),
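
Net effect on sysfs: the cache_disable_0 and cache_disable_1 files now appear under the L3 directory (typically /sys/devices/system/cpu/cpuN/cache/index3/) only when the northbridge actually supports index disabling, instead of being selected via the now-removed static default_l3_attrs array.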