author		Thomas Gleixner <tglx@linutronix.de>	2011-07-24 05:46:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-09-12 13:28:33 -0400
commit		b7d11a768b061c307aaaa6242f83da2d2388c756 (patch)
tree		059eac92f029bbee30ee3a309ce2f8fdc5f6b2df /arch
parent		05b217b021e003d60471eb419d0ceed84d06c5db (diff)
x86: cache_info: Kill the moronic shadow struct
Commit f9b90566c ("x86: reduce stack usage in init_intel_cacheinfo")
introduced a shadow structure to reduce the stack usage on large
machines instead of making the smaller structure embedded into the
large one. That's definitely a candidate for the bad taste award.

Move the small struct into the large one and get rid of the ugly type
casts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Hans Rosenfeld <hans.rosenfeld@amd.com>
Cc: Borislav Petkov <borislav.petkov@amd.com>
Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Mike Travis <travis@sgi.com>
Link: http://lkml.kernel.org/r/20110723212626.625651773@linutronix.de
Signed-off-by: Ingo Molnar <mingo@elte.hu>
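[Editor's note] To make the pattern concrete, here is a minimal standalone
sketch of the before/after (illustrative types and helper names, not the
kernel's): the old layout kept a shadow struct that duplicated the leading
fields of the full struct and reused the lookup helper via a pointer cast;
the new layout embeds the small struct as a "base" member, so the helper
takes &info->base with no cast.

/* Standalone sketch; compile with any C compiler. */
#include <stdio.h>

/* Before: a shadow struct duplicating the leading fields by hand. */
struct regs_shadow {
        unsigned int eax, ebx, ecx;
        unsigned long size;
};

struct info_old {
        unsigned int eax, ebx, ecx;     /* must stay in sync with regs_shadow */
        unsigned long size;
        unsigned long shared_map;
};

/* After: the small struct is embedded as the first member. */
struct info_new {
        struct regs_shadow base;
        unsigned long shared_map;
};

static void lookup_regs(struct regs_shadow *r)
{
        r->size = 1024;                 /* stand-in for the real CPUID lookup */
}

int main(void)
{
        struct info_old o = { 0 };
        struct info_new n = { { 0 }, 0 };

        /* Old style: works only while the two layouts happen to match. */
        lookup_regs((struct regs_shadow *)&o);

        /* New style: pass the embedded member; the compiler checks the type. */
        lookup_regs(&n.base);

        printf("%lu %lu\n", o.size, n.base.size);
        return 0;
}

The embedded form leaves a single source of truth for the register fields,
so there is no duplicated layout to drift out of sync.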
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	60
1 file changed, 22 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index aa27c38fa98d..311322bb712a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -157,22 +157,17 @@ struct amd_l3_cache {
         u8      subcaches[4];
 };
 
-struct _cpuid4_info {
+struct _cpuid4_info_regs {
         union _cpuid4_leaf_eax eax;
         union _cpuid4_leaf_ebx ebx;
         union _cpuid4_leaf_ecx ecx;
         unsigned long size;
         struct amd_l3_cache *l3;
-        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
-/* subset of above _cpuid4_info w/o shared_cpu_map */
-struct _cpuid4_info_regs {
-        union _cpuid4_leaf_eax eax;
-        union _cpuid4_leaf_ebx ebx;
-        union _cpuid4_leaf_ecx ecx;
-        unsigned long size;
-        struct amd_l3_cache *l3;
+struct _cpuid4_info {
+        struct _cpuid4_info_regs base;
+        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
 };
 
 unsigned short num_cache_leaves;
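[Editor's note] The cast this hunk eliminates (see cpuid4_cache_lookup()
below) only worked while the leading members of both structs matched
exactly, and even then it leaned on the compiler being lenient about
aliasing. Had the shadow struct been kept, the honest way to police it
would have been compile-time offset checks; a hedged standalone sketch
(illustrative types, not code from this patch):

#include <assert.h>
#include <stddef.h>

struct small { int a; long b; };
struct big   { int a; long b; long extra; };

/* Layout assertions the (struct small *)&big cast would require;
 * embedding struct small as a member makes them unnecessary. */
static_assert(offsetof(struct big, a) == offsetof(struct small, a), "a drifted");
static_assert(offsetof(struct big, b) == offsetof(struct small, b), "b drifted");

int main(void) { return 0; }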
@@ -387,11 +382,10 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
         int index;
 
-        if (!this_leaf->l3 ||
-            !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+        if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                 return -EINVAL;
 
-        index = amd_get_l3_disable_slot(this_leaf->l3, slot);
+        index = amd_get_l3_disable_slot(this_leaf->base.l3, slot);
         if (index >= 0)
                 return sprintf(buf, "%d\n", index);
 
@@ -480,8 +474,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;
 
-        if (!this_leaf->l3 ||
-            !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+        if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                 return -EINVAL;
 
         cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -489,7 +482,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
         if (strict_strtoul(buf, 10, &val) < 0)
                 return -EINVAL;
 
-        err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
+        err = amd_set_l3_disable_slot(this_leaf->base.l3, cpu, slot, val);
         if (err) {
                 if (err == -EEXIST)
                         printk(KERN_WARNING "L3 disable slot %d in use!\n",
@@ -518,7 +511,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 static ssize_t
 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
 {
-        if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+        if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                 return -EINVAL;
 
         return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
@@ -533,7 +526,7 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
         if (!capable(CAP_SYS_ADMIN))
                 return -EPERM;
 
-        if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+        if (!this_leaf->base.l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                 return -EINVAL;
 
         if (strict_strtoul(buf, 16, &val) < 0)
@@ -769,7 +762,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                 return;
         }
         this_leaf = CPUID4_INFO_IDX(cpu, index);
-        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
+        num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
 
         if (num_threads_sharing == 1)
                 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
@@ -824,24 +817,15 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
         per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
-static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
-{
-        struct _cpuid4_info_regs *leaf_regs =
-                (struct _cpuid4_info_regs *)this_leaf;
-
-        return cpuid4_cache_lookup_regs(index, leaf_regs);
-}
-
 static void __cpuinit get_cpu_leaves(void *_retval)
 {
         int j, *retval = _retval, cpu = smp_processor_id();
 
         /* Do cpuid and store the results */
         for (j = 0; j < num_cache_leaves; j++) {
-                struct _cpuid4_info *this_leaf;
-                this_leaf = CPUID4_INFO_IDX(cpu, j);
-                *retval = cpuid4_cache_lookup(j, this_leaf);
+                struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
+
+                *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
                 if (unlikely(*retval < 0)) {
                         int i;
 
@@ -899,16 +883,16 @@ static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
         return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
 }
 
-show_one_plus(level, eax.split.level, 0);
-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
+show_one_plus(level, base.eax.split.level, 0);
+show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
+show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
+show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
+show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
 
 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
                          unsigned int cpu)
 {
-        return sprintf(buf, "%luK\n", this_leaf->size / 1024);
+        return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
 }
 
 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -945,7 +929,7 @@ static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
                          unsigned int cpu)
 {
-        switch (this_leaf->eax.split.type) {
+        switch (this_leaf->base.eax.split.type) {
         case CACHE_TYPE_DATA:
                 return sprintf(buf, "Data\n");
         case CACHE_TYPE_INST:
@@ -1134,7 +1118,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
         ktype_cache.default_attrs = default_attrs;
 #ifdef CONFIG_AMD_NB
-        if (this_leaf->l3)
+        if (this_leaf->base.l3)
                 ktype_cache.default_attrs = amd_l3_attrs();
 #endif
         retval = kobject_init_and_add(&(this_object->kobj),