author		Borislav Petkov <borislav.petkov@amd.com>	2010-02-18 13:37:14 -0500
committer	H. Peter Anvin <hpa@zytor.com>			2010-02-19 00:59:07 -0500
commit		cb19060abfdecac0d1eb2d2f0e7d6b7a3f8bc4f4 (patch)
tree		994491932034c4b6be2a1c08d4098899c80aff8e /arch
parent		f619b3d8427eb57f0134dab75b0d217325c72411 (diff)
x86, cacheinfo: Enable L3 CID only on AMD
Final stage linking can fail with

  arch/x86/built-in.o: In function `store_cache_disable':
  intel_cacheinfo.c:(.text+0xc509): undefined reference to `amd_get_nb_id'
  arch/x86/built-in.o: In function `show_cache_disable':
  intel_cacheinfo.c:(.text+0xc7d3): undefined reference to `amd_get_nb_id'

when CONFIG_CPU_SUP_AMD is not enabled because the amd_get_nb_id
helper is defined in AMD-specific code but also used in generic code
(intel_cacheinfo.c). Reorganize the L3 cache index disable code under
CONFIG_CPU_SUP_AMD since it is AMD-only anyway.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <20100218184210.GF20473@aftab>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
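The fix follows the usual pattern for optional, vendor-specific helpers: the real implementation is compiled only under #ifdef CONFIG_CPU_SUP_AMD, and the #else branch supplies an empty stub so generic callers compile and link either way. Below is a minimal standalone sketch of that pattern; the names and the hard-coded config define are illustrative only, not taken from the kernel sources.

#include <stdio.h>

/* Stand-in for the Kconfig option; in the kernel it comes from .config. */
#define CONFIG_CPU_SUP_AMD 1

#ifdef CONFIG_CPU_SUP_AMD
/* Vendor-specific implementation, built only when the option is enabled. */
static void vendor_check_l3_disable(int index)
{
	printf("AMD-specific L3 handling for index %d\n", index);
}
#else
/* Empty stub so generic code still compiles and links without the option. */
static void vendor_check_l3_disable(int index)
{
	(void)index;
}
#endif

int main(void)
{
	/* The generic caller does not need to know which variant was built. */
	vendor_check_l3_disable(0);
	return 0;
}

Removing the define selects the stub, which is the same mechanism that keeps intel_cacheinfo.c linking when CPU_SUP_AMD is disabled.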
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c | 186
1 file changed, 98 insertions(+), 88 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index be5f5c28ddfb..d440123c556f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -293,6 +293,13 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		(ebx->split.ways_of_associativity + 1) - 1;
 }
 
+struct _cache_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct _cpuid4_info *, char *);
+	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
+};
+
+#ifdef CONFIG_CPU_SUP_AMD
 static unsigned int __cpuinit amd_calc_l3_indices(void)
 {
 	/*
@@ -303,7 +310,7 @@ static unsigned int __cpuinit amd_calc_l3_indices(void)
 	int node = cpu_to_node(cpu);
 	struct pci_dev *dev = node_to_k8_nb_misc(node);
 	unsigned int sc0, sc1, sc2, sc3;
-	u32 val;
+	u32 val = 0;
 
 	pci_read_config_dword(dev, 0x1C4, &val);
 
@@ -335,6 +342,94 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 	this_leaf->l3_indices = amd_calc_l3_indices();
 }
 
+static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
+				  unsigned int index)
+{
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = amd_get_nb_id(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned int reg = 0;
+
+	if (!this_leaf->can_disable)
+		return -EINVAL;
+
+	if (!dev)
+		return -EINVAL;
+
+	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+
+#define SHOW_CACHE_DISABLE(index)					\
+static ssize_t								\
+show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
+{									\
+	return show_cache_disable(this_leaf, buf, index);		\
+}
+SHOW_CACHE_DISABLE(0)
+SHOW_CACHE_DISABLE(1)
+
+static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
+	const char *buf, size_t count, unsigned int index)
+{
+	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+	int node = amd_get_nb_id(cpu);
+	struct pci_dev *dev = node_to_k8_nb_misc(node);
+	unsigned long val = 0;
+
+#define SUBCACHE_MASK	(3UL << 20)
+#define SUBCACHE_INDEX	0xfff
+
+	if (!this_leaf->can_disable)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!dev)
+		return -EINVAL;
+
+	if (strict_strtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	/* do not allow writes outside of allowed bits */
+	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
+	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
+		return -EINVAL;
+
+	val |= BIT(30);
+	pci_write_config_dword(dev, 0x1BC + index * 4, val);
+	/*
+	 * We need to WBINVD on a core on the node containing the L3 cache which
+	 * indices we disable therefore a simple wbinvd() is not sufficient.
+	 */
+	wbinvd_on_cpu(cpu);
+	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
+	return count;
+}
+
+#define STORE_CACHE_DISABLE(index)					\
+static ssize_t								\
+store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
+			    const char *buf, size_t count)		\
+{									\
+	return store_cache_disable(this_leaf, buf, count, index);	\
+}
+STORE_CACHE_DISABLE(0)
+STORE_CACHE_DISABLE(1)
+
+static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
+		show_cache_disable_0, store_cache_disable_0);
+static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
+		show_cache_disable_1, store_cache_disable_1);
+
+#else	/* CONFIG_CPU_SUP_AMD */
+static void __cpuinit
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
+{
+};
+#endif /* CONFIG_CPU_SUP_AMD */
+
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
 			   struct _cpuid4_info_regs *this_leaf)
@@ -740,88 +835,6 @@ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
 #define to_object(k)	container_of(k, struct _index_kobject, kobj)
 #define to_attr(a)	container_of(a, struct _cache_attr, attr)
 
-static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
-				  unsigned int index)
-{
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = amd_get_nb_id(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-	unsigned int reg = 0;
-
-	if (!this_leaf->can_disable)
-		return -EINVAL;
-
-	if (!dev)
-		return -EINVAL;
-
-	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
-	return sprintf(buf, "0x%08x\n", reg);
-}
-
-#define SHOW_CACHE_DISABLE(index)					\
-static ssize_t								\
-show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
-{									\
-	return show_cache_disable(this_leaf, buf, index);		\
-}
-SHOW_CACHE_DISABLE(0)
-SHOW_CACHE_DISABLE(1)
-
-static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
-	const char *buf, size_t count, unsigned int index)
-{
-	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-	int node = amd_get_nb_id(cpu);
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-	unsigned long val = 0;
-
-#define SUBCACHE_MASK	(3UL << 20)
-#define SUBCACHE_INDEX	0xfff
-
-	if (!this_leaf->can_disable)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	if (!dev)
-		return -EINVAL;
-
-	if (strict_strtoul(buf, 10, &val) < 0)
-		return -EINVAL;
-
-	/* do not allow writes outside of allowed bits */
-	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
-	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
-		return -EINVAL;
-
-	val |= BIT(30);
-	pci_write_config_dword(dev, 0x1BC + index * 4, val);
-	/*
-	 * We need to WBINVD on a core on the node containing the L3 cache which
-	 * indices we disable therefore a simple wbinvd() is not sufficient.
-	 */
-	wbinvd_on_cpu(cpu);
-	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
-	return count;
-}
-
-#define STORE_CACHE_DISABLE(index)					\
-static ssize_t								\
-store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
-			    const char *buf, size_t count)		\
-{									\
-	return store_cache_disable(this_leaf, buf, count, index);	\
-}
-STORE_CACHE_DISABLE(0)
-STORE_CACHE_DISABLE(1)
-
-struct _cache_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct _cpuid4_info *, char *);
-	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
-};
-
 #define define_one_ro(_name) \
 static struct _cache_attr _name = \
 	__ATTR(_name, 0444, show_##_name, NULL)
@@ -836,11 +849,6 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
-		show_cache_disable_0, store_cache_disable_0);
-static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
-		show_cache_disable_1, store_cache_disable_1);
-
 #define DEFAULT_SYSFS_CACHE_ATTRS	\
 	&type.attr,			\
 	&level.attr,			\
@@ -859,8 +867,10 @@ static struct attribute *default_attrs[] = {
 
 static struct attribute *default_l3_attrs[] = {
 	DEFAULT_SYSFS_CACHE_ATTRS,
+#ifdef CONFIG_CPU_SUP_AMD
 	&cache_disable_0.attr,
 	&cache_disable_1.attr,
+#endif
 	NULL
 };
 