Diffstat (limited to 'arch/i386/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c | 87
1 file changed, 51 insertions(+), 36 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9e0d5f83cb9f..4dc42a189ae5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
  *
  * Changes:
  * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  */
 
 #include <linux/init.h>
@@ -10,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/compiler.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include <asm/processor.h>
 #include <asm/smp.h>
@@ -28,7 +30,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __devinitdata =
+static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
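Aside: cache_table[] is the cpuid(2) descriptor table; cpuid(2) packs one-byte cache descriptors into EAX/EBX/ECX/EDX and each byte is matched against rows like the two above. A minimal user-space sketch of the lookup idiom, assuming the struct layout from this file; the LVL_* numbering and the truncated table are illustrative, not from the patch:

#include <stdio.h>

struct _cache_table { unsigned char descriptor; char cache_type; short size; };

enum { LVL_1_INST = 1, LVL_1_DATA, LVL_2, LVL_3, LVL_TRACE };

static struct _cache_table cache_table[] = {
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x00, 0, 0 }			/* zero descriptor terminates the table */
};

static short lookup_descriptor(unsigned char des)
{
	struct _cache_table *tbl;

	for (tbl = cache_table; tbl->descriptor != 0; tbl++)
		if (tbl->descriptor == des)
			return tbl->size;	/* cache size in KB */
	return 0;				/* unknown descriptor */
}

int main(void)
{
	printf("descriptor 0x06 -> %dK\n", lookup_descriptor(0x06));
	return 0;
}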
@@ -117,10 +119,9 @@ struct _cpuid4_info {
 	cpumask_t shared_cpu_map;
 };
 
-#define MAX_CACHE_LEAVES		4
 static unsigned short			num_cache_leaves;
 
-static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	unsigned int		eax, ebx, ecx, edx;
 	union _cpuid4_leaf_eax	cache_eax;
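Aside: cpuid4_cache_lookup(), whose section annotation changes above, decodes one cpuid(4) subleaf into a cache description. A hedged user-space sketch of that decoding, following the Intel SDM field layout; it assumes GCC/clang's <cpuid.h> on an Intel CPU, whereas the kernel version extracts the same fields through the union _cpuid4_leaf_* bitfields:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* subleaf 0: the first cache level reported by leaf 4 */
	__get_cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);

	/* SDM: size = ways * partitions * line size * sets, each field +1 */
	unsigned int ways       = ((ebx >> 22) & 0x3ff) + 1;
	unsigned int partitions = ((ebx >> 12) & 0x3ff) + 1;
	unsigned int line_size  = ( ebx        & 0xfff) + 1;
	unsigned int sets       = ecx + 1;

	printf("L%u %s cache: %lu bytes\n",
	       (eax >> 5) & 0x7,			/* cache level, EAX[7:5] */
	       ((eax & 0x1f) == 1) ? "data" :
	       ((eax & 0x1f) == 2) ? "inst" : "unified",	/* type, EAX[4:0] */
	       (unsigned long)ways * partitions * line_size * sets);
	return 0;
}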
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
 	union _cpuid4_leaf_eax	cache_eax;
-	int			i;
-	int			retval;
+	int			i = -1;
 
-	retval = MAX_CACHE_LEAVES;
-	/* Do cpuid(4) loop to find out num_cache_leaves */
-	for (i = 0; i < MAX_CACHE_LEAVES; i++) {
+	do {
+		++i;
+		/* Do cpuid(4) loop to find out num_cache_leaves */
 		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
 		cache_eax.full = eax;
-		if (cache_eax.split.type == CACHE_TYPE_NULL) {
-			retval = i;
-			break;
-		}
-	}
-	return retval;
+	} while (cache_eax.split.type != CACHE_TYPE_NULL);
+	return i;
 }
 
-unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
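Aside: the new do-while drops the arbitrary MAX_CACHE_LEAVES cap and simply probes cpuid(4) subleaves until the type field reads CACHE_TYPE_NULL (0). The same enumeration idiom from user space, assuming GCC/clang's <cpuid.h> on an Intel CPU (leaf 4 is Intel-specific):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i = -1;

	do {
		++i;
		/* query subleaf i of leaf 4; EAX[4:0] is the cache type */
		__get_cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
	} while ((eax & 0x1f) != 0);	/* 0 == CACHE_TYPE_NULL */

	printf("num_cache_leaves = %d\n", i);
	return 0;
}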
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	if ( l3 )
 		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
 
-	/*
-	 * This assumes the L3 cache is shared; it typically lives in
-	 * the northbridge.  The L1 caches are included by the L2
-	 * cache, and so should not be included for the purpose of
-	 * SMP switching weights.
-	 */
-	c->x86_cache_size = l2 ? l2 : (l1i+l1d);
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 	}
 
 	return l2;
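Aside: the deleted comment argued for excluding the (shared, northbridge-resident) L3 from SMP switching weights; the replacement instead reports the largest cache present, falling back from L3 to L2 to L1. A toy illustration of the new fallback chain, with hypothetical sizes that are not from the patch:

#include <stdio.h>

int main(void)
{
	/* hypothetical sizes in KB: no L3, 2 MB L2, 32K+32K L1 */
	unsigned int l1i = 32, l1d = 32, l2 = 2048, l3 = 0;

	/* the new expression from the patch */
	unsigned int size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));

	printf("x86_cache_size = %uK\n", size);	/* prints 2048K */
	return 0;
}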
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
 
 #ifdef CONFIG_SMP
-static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long num_threads_sharing;
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu)
 	cpuid4_info[cpu] = NULL;
 }
 
-static int __devinit detect_cache_attributes(unsigned int cpu)
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long		j;
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 	free_cache_attributes(cpu);
 }
 
-static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 {
 
 	if (num_cache_leaves == 0)
@@ -542,7 +532,7 @@ err_out:
 }
 
 /* Add/Remove cache interface for CPU device */
-static int __devinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
 	return retval;
 }
 
-static int __devexit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
-	return 0;
+	return;
+}
+
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
-static struct sysdev_driver cache_sysdev_driver = {
-	.add = cache_add_dev,
-	.remove = __devexit_p(cache_remove_dev),
+static struct notifier_block cacheinfo_cpu_notifier =
+{
+	.notifier_call = cacheinfo_cpu_callback,
 };
 
-/* Register/Unregister the cpu_cache driver */
-static int __devinit cache_register_driver(void)
+static int __cpuinit cache_sysfs_init(void)
 {
+	int i;
+
 	if (num_cache_leaves == 0)
 		return 0;
 
-	return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver);
+	register_cpu_notifier(&cacheinfo_cpu_notifier);
+
+	for_each_online_cpu(i) {
+		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
+			(void *)(long)i);
+	}
+
+	return 0;
 }
 
-device_initcall(cache_register_driver);
+device_initcall(cache_sysfs_init);
 
 #endif
-
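Aside: the core of the patch is the 2.6-era CPU hotplug notifier pattern: a notifier_block replaces the sysdev driver, the callback adds or removes the per-CPU sysfs cache directories on CPU_ONLINE/CPU_DEAD, and cache_sysfs_init() replays CPU_ONLINE for CPUs already up so boot-time CPUs get covered too. A stripped-down sketch of the same pattern as a standalone module; the module name and messages are illustrative, and this notifier API was later removed in favor of cpuhp_setup_state():

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* called by the hotplug core as CPUs come and go */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu %u came online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "example: cpu %u went offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_cpu_notifier(&example_cpu_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");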