author		Mark Langsdorf <mark.langsdorf@amd.com>	2008-07-18 17:03:52 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-07-28 10:17:43 -0400
commit		8cb22bcb1f3ef70d4d48092e9b057175ad9ec78d
tree		9f878b4eac616d505dc03e13598f52e0fe9054c5	/arch/x86/kernel/cpu/intel_cacheinfo.c
parent		026e2c05ef58ef413e2d52696f125d5ea1aa8bce
x86: L3 cache index disable for 2.6.26
New versions of AMD processors have support to disable parts of their L3 caches if too many MCEs are generated by the L3 cache.

This patch provides a sysfs interface under the cache hierarchy to display which cache indices are disabled (if any) and to allow monitoring applications to disable a cache index.

This patch does not set an automatic policy to disable the L3 cache. Policy decisions would need to be made by a RAS handler. This patch merely makes it easier to see which indices are currently disabled.

Signed-off-by: Mark Langsdorf <mark.langsdorf@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
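[Editor's note: as a usage illustration, here is a minimal userspace sketch of how a monitoring application might drive the new attribute. The sysfs path is an assumption based on the standard x86 cacheinfo layout (cpuN/cache/indexM, with index3 being the L3); the "<entry> <value>" hex write format mirrors the sscanf(buf, "%x %x", ...) in store_cache_disable() below.]

#include <stdio.h>

int main(void)
{
	/* Assumed path; index3 is the L3 cache on these parts. */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cache/index3/cache_disable";
	char line[128];
	FILE *f;

	/* Dump the current state of both disable slots for cpu0's L3. */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Disable cache index 0x7ff (a made-up example index) via disable
	 * slot 0; both fields are parsed as hex by the kernel side. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "0 7ff\n");
	fclose(f);
	return 0;
}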
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	87
1 file changed, 81 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff517f0b8cc4..d6ea50e270e0 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -16,6 +16,7 @@
 
 #include <asm/processor.h>
 #include <asm/smp.h>
+#include <asm/k8.h>
 
 #define LVL_1_INST	1
 #define LVL_1_DATA	2
@@ -130,6 +131,7 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ebx ebx;
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
+	unsigned long can_disable;
 	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
 };
 
@@ -251,6 +253,13 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		(ebx->split.ways_of_associativity + 1) - 1;
 }
 
+static void __cpuinit amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+{
+	if (index < 3)
+		return;
+	this_leaf->can_disable = 1;
+}
+
 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
@@ -258,9 +267,12 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
 	union _cpuid4_leaf_ecx ecx;
 	unsigned edx;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-	else
+		if (boot_cpu_data.x86 >= 0x10)
+			amd_check_l3_disable(index, this_leaf);
+
+	} else
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */
@@ -637,6 +649,61 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
 	}
 }
 
+#define to_object(k) container_of(k, struct _index_kobject, kobj)
+#define to_attr(a) container_of(a, struct _cache_attr, attr)
+
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
+{
+	struct pci_dev *dev;
+	if (this_leaf->can_disable) {
+		int i;
+		ssize_t ret = 0;
+		int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+		dev = k8_northbridges[node];
+
+		for (i = 0; i < 2; i++) {
+			unsigned int reg;
+			pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
+			ret += sprintf(buf, "%sEntry: %d\n", buf, i);
+			ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
+				buf,
+				reg & 0x80000000 ? "Disabled" : "Allowed",
+				reg & 0x40000000 ? "Disabled" : "Allowed");
+			ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n", buf,
+				(reg & 0x30000) >> 16, reg & 0xfff);
+
+		}
+		return ret;
+	}
+	return sprintf(buf, "Feature not enabled\n");
+}
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count)
+{
+	struct pci_dev *dev;
+	if (this_leaf->can_disable) {
+		/* write the MSR value */
+		unsigned int ret;
+		unsigned int index, val;
+		int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+		dev = k8_northbridges[node];
+
+		if (strlen(buf) > 15)
+			return -EINVAL;
+		ret = sscanf(buf, "%x %x", &index, &val);
+		if (ret != 2)
+			return -EINVAL;
+		if (index > 1)
+			return -EINVAL;
+		val |= 0xc0000000;
+		pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
+		wbinvd();
+		pci_write_config_dword(dev, 0x1BC + index * 4, val);
+		return 1;
+	}
+	return 0;
+}
+
 struct _cache_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct _cpuid4_info *, char *);
@@ -657,6 +724,8 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
+static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
+
 static struct attribute * default_attrs[] = {
 	&type.attr,
 	&level.attr,
@@ -667,12 +736,10 @@ static struct attribute * default_attrs[] = {
 	&size.attr,
 	&shared_cpu_map.attr,
 	&shared_cpu_list.attr,
+	&cache_disable.attr,
 	NULL
 };
 
-#define to_object(k) container_of(k, struct _index_kobject, kobj)
-#define to_attr(a) container_of(a, struct _cache_attr, attr)
-
 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 {
 	struct _cache_attr *fattr = to_attr(attr);
@@ -689,7 +756,15 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 static ssize_t store(struct kobject * kobj, struct attribute * attr,
 		     const char * buf, size_t count)
 {
-	return 0;
+	struct _cache_attr *fattr = to_attr(attr);
+	struct _index_kobject *this_leaf = to_object(kobj);
+	ssize_t ret;
+
+	ret = fattr->store ?
+		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
+			buf, count) :
+		0;
+	return ret;
 }
 
 static struct sysfs_ops sysfs_ops = {
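[Editor's note: for readers following the register manipulation in show_cache_disable()/store_cache_disable(), here is a small standalone sketch that decodes a raw L3 index disable register value using the same bit positions the show routine prints. The field labels are taken from the strings in the patch, not from official BKDG documentation, and the sample value is made up.]

#include <stdio.h>

/*
 * Decode one L3 cache index disable register the way
 * show_cache_disable() above interprets it:
 *   bit 31     - reads disabled ("Reads" in the sysfs output)
 *   bit 30     - new allocations disabled ("New Entries")
 *   bits 17:16 - subcache select
 *   bits 11:0  - the disabled cache index
 */
static void decode_l3_disable_reg(int entry, unsigned int reg)
{
	printf("Entry: %d\n", entry);
	printf("Reads: %s\tNew Entries: %s\n",
	       reg & 0x80000000 ? "Disabled" : "Allowed",
	       reg & 0x40000000 ? "Disabled" : "Allowed");
	printf("SubCache: %x\tIndex: %x\n",
	       (reg & 0x30000) >> 16, reg & 0xfff);
}

int main(void)
{
	/* Made-up sample: reads and new entries disabled for index 0x7ff
	 * in subcache 3, as store_cache_disable() would leave the register
	 * (it ORs in 0xc0000000 before the final config-space write). */
	decode_l3_disable_reg(0, 0xc00307ff);
	return 0;
}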