about | summary | refs | log | tree | commit | diff | stats
path: root/arch/i386/kernel/cpu/intel_cacheinfo.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/i386/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 46 ++++++++++++++++++++++++++++++--------------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 4dc42a189ae5..fbfd374aa336 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
-	struct _cpuid4_info *this_leaf;
+	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
-#ifdef CONFIG_X86_HT
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-#endif
+	int index_msb, i;
+	struct cpuinfo_x86 *c = cpu_data;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
 		cpu_set(cpu, this_leaf->shared_cpu_map);
-#ifdef CONFIG_X86_HT
-	else if (num_threads_sharing == smp_num_siblings)
-		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
-	else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
-		this_leaf->shared_cpu_map = cpu_core_map[cpu];
-	else
-		printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
-				"any known set of CPUs\n");
-#endif
+	else {
+		index_msb = get_count_order(num_threads_sharing);
+
+		for_each_online_cpu(i) {
+			if (c[i].apicid >> index_msb ==
+			    c[cpu].apicid >> index_msb) {
+				cpu_set(i, this_leaf->shared_cpu_map);
+				if (i != cpu && cpuid4_info[i]) {
+					sibling_leaf = CPUID4_INFO_IDX(i, index);
+					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+				}
+			}
+		}
+	}
+}
+static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+	struct _cpuid4_info *this_leaf, *sibling_leaf;
+	int sibling;
+
+	this_leaf = CPUID4_INFO_IDX(cpu, index);
+	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
+		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+	}
 }
 #else
 static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
+static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
 #endif
320 336
321static void free_cache_attributes(unsigned int cpu) 337static void free_cache_attributes(unsigned int cpu)
@@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	for (i = 0; i < num_cache_leaves; i++)
+	for (i = 0; i < num_cache_leaves; i++) {
+		cache_remove_shared_cpu_map(cpu, i);
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+	}
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
 	return;