Diffstat (limited to 'arch/i386/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c  |  77
1 file changed, 54 insertions(+), 23 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ce61921369e5..9df87b03612c 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
+	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
+#ifdef CONFIG_SMP
+	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+#endif
 
 	if (c->cpuid_level > 3) {
 		static int is_initialized;
@@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				break;
 			    case 2:
 				new_l2 = this_leaf.size/1024;
+				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+				index_msb = get_count_order(num_threads_sharing);
+				l2_id = c->apicid >> index_msb;
 				break;
 			    case 3:
 				new_l3 = this_leaf.size/1024;
+				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
+				index_msb = get_count_order(num_threads_sharing);
+				l3_id = c->apicid >> index_msb;
 				break;
 			    default:
 				break;
@@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			}
 		}
 	}
-	if (c->cpuid_level > 1) {
+	/*
+	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+	 * trace cache
+	 */
+	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2 call */
 		int i, j, n;
 		int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
+		int only_trace = 0;
+
+		if (num_cache_leaves != 0 && c->x86 == 15)
+			only_trace = 1;
 
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
@@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			while (cache_table[k].descriptor != 0)
 			{
 				if (cache_table[k].descriptor == des) {
+					if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+						break;
 					switch (cache_table[k].cache_type) {
 					case LVL_1_INST:
 						l1i += cache_table[k].size;
@@ -266,34 +286,45 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				}
 			}
 		}
+	}
 
-		if (new_l1d)
-			l1d = new_l1d;
+	if (new_l1d)
+		l1d = new_l1d;
 
-		if (new_l1i)
-			l1i = new_l1i;
+	if (new_l1i)
+		l1i = new_l1i;
 
-		if (new_l2)
-			l2 = new_l2;
+	if (new_l2) {
+		l2 = new_l2;
+#ifdef CONFIG_SMP
+		cpu_llc_id[cpu] = l2_id;
+#endif
+	}
 
-		if (new_l3)
-			l3 = new_l3;
+	if (new_l3) {
+		l3 = new_l3;
+#ifdef CONFIG_SMP
+		cpu_llc_id[cpu] = l3_id;
+#endif
+	}
 
-		if ( trace )
-			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-		else if ( l1i )
-			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-		if ( l1d )
-			printk(", L1 D cache: %dK\n", l1d);
-		else
-			printk("\n");
-		if ( l2 )
-			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-		if ( l3 )
-			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+	if (trace)
+		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if ( l1i )
+		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
 
-		c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
-	}
+	if (l1d)
+		printk(", L1 D cache: %dK\n", l1d);
+	else
+		printk("\n");
+
+	if (l2)
+		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+	if (l3)
+		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
 }
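
For reference, below is a minimal userspace sketch (not part of the patch) of the cache-sharing computation that the hunks above add: CPUID leaf 4 reports, per cache level, how many hardware threads share that cache (EAX bits 25:14, stored minus one), and shifting the APIC ID right by ceil(log2(count)) yields an ID that is identical exactly for the threads sharing the cache; the patch stores that value in cpu_llc_id[] so the scheduler can see which CPUs share the last-level cache. get_count_order() here is a userspace stand-in for the kernel helper of the same name, and the EAX/APIC-ID values are made-up examples.

/*
 * Standalone illustration of the sharing-ID derivation used by
 * init_intel_cacheinfo() above. Not kernel code; hypothetical values.
 */
#include <stdio.h>

/* Userspace stand-in for the kernel's get_count_order():
 * smallest 'order' with (1 << order) >= count, i.e. ceil(log2(count)). */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1U << order) < count)
		order++;
	return order;
}

int main(void)
{
	/*
	 * Hypothetical EAX value from CPUID(4) for an L3 leaf:
	 * bits 25:14 hold "threads sharing this cache" minus one;
	 * here they encode 3, so four logical CPUs share the L3.
	 */
	unsigned int eax = 3U << 14;
	unsigned int apicid = 6;	/* example APIC ID of this CPU */

	unsigned int num_threads_sharing = 1 + ((eax >> 14) & 0xfff);
	unsigned int index_msb = get_count_order(num_threads_sharing);
	unsigned int llc_id = apicid >> index_msb;

	printf("threads sharing: %u, id = apicid %u >> %u = %u\n",
	       num_threads_sharing, apicid, index_msb, llc_id);
	return 0;
}

With these example values, APIC IDs 4 through 7 all shift down to ID 1, which is how the kernel ends up grouping those four logical CPUs under one shared last-level cache.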