-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c |  69
1 files changed, 41 insertions, 28 deletions
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 7e7fd4e67dd0..9df87b03612c 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -225,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			}
 		}
 	}
-	if (c->cpuid_level > 1) {
+	/*
+	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
+	 * trace cache
+	 */
+	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
 		/* supports eax=2 call */
 		int i, j, n;
 		int regs[4];
 		unsigned char *dp = (unsigned char *)regs;
+		int only_trace = 0;
+
+		if (num_cache_leaves != 0 && c->x86 == 15)
+			only_trace = 1;
 
 		/* Number of times to iterate */
 		n = cpuid_eax(2) & 0xFF;
@@ -251,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 			while (cache_table[k].descriptor != 0)
 			{
 				if (cache_table[k].descriptor == des) {
+					if (only_trace && cache_table[k].cache_type != LVL_TRACE)
+						break;
 					switch (cache_table[k].cache_type) {
 					case LVL_1_INST:
 						l1i += cache_table[k].size;
@@ -276,43 +286,46 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 				}
 			}
 		}
+	}
 
 	if (new_l1d)
 		l1d = new_l1d;
 
 	if (new_l1i)
 		l1i = new_l1i;
 
 	if (new_l2) {
 		l2 = new_l2;
 #ifdef CONFIG_SMP
 		cpu_llc_id[cpu] = l2_id;
 #endif
 	}
 
 	if (new_l3) {
 		l3 = new_l3;
 #ifdef CONFIG_SMP
 		cpu_llc_id[cpu] = l3_id;
 #endif
-		}
-
-		if ( trace )
-			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
-		else if ( l1i )
-			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-		if ( l1d )
-			printk(", L1 D cache: %dK\n", l1d);
-		else
-			printk("\n");
-		if ( l2 )
-			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-		if ( l3 )
-			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
-		c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 	}
 
+	if (trace)
+		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
+	else if ( l1i )
+		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
+
+	if (l1d)
+		printk(", L1 D cache: %dK\n", l1d);
+	else
+		printk("\n");
+
+	if (l2)
+		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
+
+	if (l3)
+		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
+
 	return l2;
 }
 
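To make the new condition easier to follow, here is a small standalone sketch of the gating logic this patch introduces: the CPUID leaf-2 descriptor scan now runs only when leaf 4 reported no cache leaves, or on family 15 (P4), where leaf 2 is still consulted but only for the trace-cache descriptor. This is illustration only, not kernel code; struct fake_cpu and use_leaf2 are made-up names standing in for struct cpuinfo_x86 and the in-function logic.

#include <stdio.h>

/* Minimal stand-in for the fields of struct cpuinfo_x86 used here. */
struct fake_cpu {
	int cpuid_level;
	int x86;		/* CPU family */
};

/*
 * Returns 1 if the leaf-2 descriptor path should run at all; sets
 * *only_trace when leaf 2 should contribute nothing but the trace-cache
 * descriptor (family 15 with valid leaf-4 data).
 */
static int use_leaf2(const struct fake_cpu *c, int num_cache_leaves,
		     int *only_trace)
{
	*only_trace = 0;
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		if (num_cache_leaves != 0 && c->x86 == 15)
			*only_trace = 1;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct fake_cpu p4   = { .cpuid_level = 5,  .x86 = 15 };
	struct fake_cpu core = { .cpuid_level = 10, .x86 = 6  };
	int only_trace;

	printf("P4, 4 leaves:   leaf2=%d", use_leaf2(&p4, 4, &only_trace));
	printf(" only_trace=%d\n", only_trace);

	printf("Core, 4 leaves: leaf2=%d", use_leaf2(&core, 4, &only_trace));
	printf(" only_trace=%d\n", only_trace);

	printf("Core, 0 leaves: leaf2=%d", use_leaf2(&core, 0, &only_trace));
	printf(" only_trace=%d\n", only_trace);
	return 0;
}

The design choice mirrored here is that leaf 4 gives per-cache-level sizes and sharing information, so duplicating those sums from the coarse leaf-2 descriptors is unnecessary; the P4 exception exists because its trace cache is reported only through a leaf-2 descriptor (LVL_TRACE in the patch).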