author     Ingo Molnar <mingo@elte.hu>   2008-07-28 10:20:08 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-07-28 10:22:07 -0400
commit     cdcf772ed163651cacac8098b4974aba7f9e1c73 (patch)
tree       ce5da1bbb1a34d5c1a8bc435bd6f3ff8c917c986 /arch/x86/kernel/cpu/intel_cacheinfo.c
parent     a24e8d36f5fc047dac9af6200322ed393f2e3175 (diff)

x86 l3 cache index disable for 2.6.26 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>

Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')

 arch/x86/kernel/cpu/intel_cacheinfo.c | 39
 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a0c6c6ffed46..535d662716de 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -1,8 +1,8 @@
 /*
  * Routines to indentify caches on Intel CPU.
  *
  * Changes:
  * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
  *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
  */
@@ -136,9 +136,9 @@ struct _cpuid4_info {
 };
 
 static struct pci_device_id k8_nb_id[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
 	{}
 };
 
 unsigned short num_cache_leaves;
@@ -190,9 +190,10 @@ static unsigned short assocs[] __cpuinitdata = {
 static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
 static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
 
-static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+static void __cpuinit
+amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		union _cpuid4_leaf_ebx *ebx,
 		union _cpuid4_leaf_ecx *ecx)
 {
 	unsigned dummy;
 	unsigned line_size, lines_per_tag, assoc, size_in_kb;
@@ -264,7 +265,7 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 {
 	if (index < 3)
 		return;
 	this_leaf->can_disable = 1;
 }
 
 static int
@@ -474,7 +475,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
 #define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -511,7 +512,7 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
 		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
 	}
 }
@@ -593,7 +594,7 @@ struct _index_kobject {
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
 #define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val) \
 static ssize_t show_##file_name \
@@ -675,7 +676,7 @@ static struct pci_dev *get_k8_northbridge(int node)
 		if (!dev)
 			break;
 	}
 	return dev;
 }
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
@@ -736,7 +737,7 @@ store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
 		return -EINVAL;
 	}
 
 	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
 	wbinvd();
 	pci_write_config_dword(dev, 0x1BC + index * 4, val);
@@ -789,7 +790,7 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
 	ret = fattr->show ?
 		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
 			buf) :
 		0;
 	return ret;
 }
 
@@ -800,9 +801,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 	struct _index_kobject *this_leaf = to_object(kobj);
 	ssize_t ret;
 
 	ret = fattr->store ?
 		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
 			buf, count) :
 		0;
 	return ret;
 }
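
For context, a minimal sketch of the mechanism the store_cache_disable() hunk above touches: disabling an L3 cache index on AMD K8 by writing the per-index disable register in the northbridge's PCI config space. The register offset (0x1BC + index * 4), the 0x40000000 bit and the wbinvd() flush are taken straight from the diff; the helper name and the surrounding scaffolding are illustrative assumptions, not part of the commit.

/*
 * Illustrative sketch only: mirrors the write sequence visible in the
 * store_cache_disable() hunk above.  The helper name is hypothetical.
 */
#include <linux/pci.h>	/* pci_write_config_dword() */
/* wbinvd() is provided by the x86 asm headers (exact header varies by kernel version). */

static void k8_l3_write_index_disable(struct pci_dev *nb, int index,
				      unsigned int val)
{
	/* Write the value with bit 30 cleared, flush all caches with
	 * wbinvd(), then write the caller's final value, as in the
	 * patched store_cache_disable(). */
	pci_write_config_dword(nb, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(nb, 0x1BC + index * 4, val);
}

Here nb would be the K8 northbridge device found via the k8_nb_id[] PCI IDs earlier in the file, and index the cache-index sysfs attribute being stored.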