Diffstat (limited to 'arch/i386/kernel/cpu/intel_cacheinfo.c')
 arch/i386/kernel/cpu/intel_cacheinfo.c | 123
 1 file changed, 105 insertions(+), 18 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7e6..e9f0b928b0a9 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,6 +4,7 @@
  * Changes:
  * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
  * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ * Andi Kleen		: CPUID4 emulation on AMD.
  */
 
 #include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
 	cpumask_t shared_cpu_map;
 };
 
-static unsigned short			num_cache_leaves;
+unsigned short			num_cache_leaves;
+
+/* AMD doesn't have CPUID4. Emulate it here to report the same
+   information to the user. This makes some assumptions about the machine:
+   no L3, L2 not shared, no SMT etc., which is currently true on AMD CPUs.
+
+   In theory the TLBs could be reported as fake type (they are in "dummy").
+   Maybe later */
+union l1_cache {
+	struct {
+		unsigned line_size : 8;
+		unsigned lines_per_tag : 8;
+		unsigned assoc : 8;
+		unsigned size_in_kb : 8;
+	};
+	unsigned val;
+};
+
+union l2_cache {
+	struct {
+		unsigned line_size : 8;
+		unsigned lines_per_tag : 4;
+		unsigned assoc : 4;
+		unsigned size_in_kb : 16;
+	};
+	unsigned val;
+};
+
+static const unsigned short assocs[] = {
+	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
+	[8] = 16,
+	[0xf] = 0xffff	/* fully associative */
+};
+static const unsigned char levels[] = { 1, 1, 2 };
+static const unsigned char types[] = { 1, 2, 3 };
+
+static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+				 union _cpuid4_leaf_ebx *ebx,
+				 union _cpuid4_leaf_ecx *ecx)
+{
+	unsigned dummy;
+	unsigned line_size, lines_per_tag, assoc, size_in_kb;
+	union l1_cache l1i, l1d;
+	union l2_cache l2;
+
+	eax->full = 0;
+	ebx->full = 0;
+	ecx->full = 0;
+
+	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
+	cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
+
+	if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
+		return;
+
+	eax->split.is_self_initializing = 1;
+	eax->split.type = types[leaf];
+	eax->split.level = levels[leaf];
+	eax->split.num_threads_sharing = 0;
+	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+	if (leaf <= 1) {
+		union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
+		assoc = l1->assoc;
+		line_size = l1->line_size;
+		lines_per_tag = l1->lines_per_tag;
+		size_in_kb = l1->size_in_kb;
+	} else {
+		assoc = l2.assoc;
+		line_size = l2.line_size;
+		lines_per_tag = l2.lines_per_tag;
+		/* cpu_data has errata corrections for K7 applied */
+		size_in_kb = current_cpu_data.x86_cache_size;
+	}
+
+	if (assoc == 0xf)
+		eax->split.is_fully_associative = 1;
+	ebx->split.coherency_line_size = line_size - 1;
+	ebx->split.ways_of_associativity = assocs[assoc] - 1;
+	ebx->split.physical_line_partition = lines_per_tag - 1;
+	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+		(ebx->split.ways_of_associativity + 1) - 1;
+}
 
 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
-	unsigned int		eax, ebx, ecx, edx;
-	union _cpuid4_leaf_eax	cache_eax;
+	union _cpuid4_leaf_eax	eax;
+	union _cpuid4_leaf_ebx	ebx;
+	union _cpuid4_leaf_ecx	ecx;
+	unsigned		edx;
 
-	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
-	cache_eax.full = eax;
-	if (cache_eax.split.type == CACHE_TYPE_NULL)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		amd_cpuid4(index, &eax, &ebx, &ecx);
+	else
+		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+	if (eax.split.type == CACHE_TYPE_NULL)
 		return -EIO; /* better error ? */
 
-	this_leaf->eax.full = eax;
-	this_leaf->ebx.full = ebx;
-	this_leaf->ecx.full = ecx;
-	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
-		(this_leaf->ebx.split.coherency_line_size + 1) *
-		(this_leaf->ebx.split.physical_line_partition + 1) *
-		(this_leaf->ebx.split.ways_of_associativity + 1);
+	this_leaf->eax = eax;
+	this_leaf->ebx = ebx;
+	this_leaf->ecx = ecx;
+	this_leaf->size = (ecx.split.number_of_sets + 1) *
+		(ebx.split.coherency_line_size + 1) *
+		(ebx.split.physical_line_partition + 1) *
+		(ebx.split.ways_of_associativity + 1);
 	return 0;
 }
 
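Reviewer note: the l1_cache and l2_cache unions above mirror AMD's extended CPUID leaves, 0x80000005 (L1D descriptor in ECX, L1I in EDX) and 0x80000006 (unified L2 in ECX). As a sanity check on those bit layouts, the same decode can be done from user space; the sketch below uses GCC's <cpuid.h> and is purely illustrative, not part of the patch.

/*
 * Stand-alone sketch (not part of the patch): decode the same AMD
 * extended leaves that amd_cpuid4() reads. Field positions follow the
 * l1_cache/l2_cache unions in the patch.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned eax, ebx, ecx, edx;

	/* Leaf 0x80000005: L1 data cache in ECX, L1 instruction cache in EDX */
	if (!__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("L1D: %u KB, assoc %u, line %u bytes\n",
	       ecx >> 24, (ecx >> 16) & 0xff, ecx & 0xff);
	printf("L1I: %u KB, assoc %u, line %u bytes\n",
	       edx >> 24, (edx >> 16) & 0xff, edx & 0xff);

	/* Leaf 0x80000006: unified L2 in ECX; assoc is a 4-bit encoded value */
	if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("L2: %u KB, assoc code 0x%x, line %u bytes\n",
	       ecx >> 16, (ecx >> 12) & 0xf, ecx & 0xff);
	return 0;
}

Note that the L2 associativity field is an encoded value rather than a way count, which is exactly why the patch needs the assocs[] translation table (with 0xf mapping to "fully associative").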
@@ -174,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
 #endif
 
@@ -296,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
 	if (new_l2) {
 		l2 = new_l2;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 		cpu_llc_id[cpu] = l2_id;
 #endif
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
 		cpu_llc_id[cpu] = l3_id;
 #endif
 	}
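Reviewer note: these hunks (and the one above) narrow the guard from CONFIG_SMP to CONFIG_X86_HT, presumably because cpu_llc_id is part of the i386 HT/multi-core topology bookkeeping that only exists when CONFIG_X86_HT is set; under the old guard, an SMP-but-no-HT build would reference a symbol that was never defined. Schematically (the exact type and header are assumptions, not taken from this patch):

/* Sketch only: cpu_llc_id exists solely under CONFIG_X86_HT, so every
 * reference must sit behind the same guard. Type and location assumed. */
#ifdef CONFIG_X86_HT
extern u8 cpu_llc_id[NR_CPUS];	/* last-level-cache ID per CPU */
#endif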
@@ -642,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	return;
 }
 
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -660,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block cacheinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
 {
 	.notifier_call = cacheinfo_cpu_callback,
 };
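Reviewer note: the final two hunks tag the hotplug callback __cpuinit and its notifier_block __cpuinitdata, so that on kernels built without CPU hotplug support both can be discarded along with the other init sections instead of staying resident. A minimal sketch of the same pattern under the 2.6-era notifier API this file uses (every name except the kernel API is invented for illustration):

/*
 * Minimal sketch of a __cpuinit-annotated hotplug notifier,
 * following the pattern in the patch above.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* set up per-CPU state for the CPU that just came up */
		printk(KERN_INFO "example: cpu %u online\n", cpu);
		break;
	case CPU_DEAD:
		/* tear the per-CPU state back down */
		printk(KERN_INFO "example: cpu %u dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}
device_initcall(example_init);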