author		Ashok Raj <ashok.raj@intel.com>		2005-10-30 17:59:50 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 20:37:14 -0500
commit		1aa1a9f98ffd06e288be4d85ed814c6cdbccce82 (patch)
tree		3b387855f4e8415cfd83bf54a2900f2a43127b73 /arch/i386/kernel
parent		ad74557a49d1dea428fb0ad60e75a5aa37610e1d (diff)
[PATCH] create and destroy cache sysfs entries based on cpu notifiers
cpu cache sysfs entries should be populated only when a cpu is online and
removed when it is logically offlined.  Without this, the entries are not
removed when a cpu is offlined, and they do not appear when we boot with
maxcpus=1 and then kick the rest of the cpus via echo 1 to the sysfs online
file.

- Changed __devinit to __cpuinit for consistency.
- Changed sysdev_driver_register to register_cpu_notifier.

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Dave Jones <davej@codemonkey.org.uk>
Cc: Zwane Mwaikambo <zwane@holomorphy.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
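
For context, a minimal, self-contained sketch of the CPU hotplug notifier
pattern this patch adopts (not part of the patch itself; the example_* names
are hypothetical): a notifier_block is registered with register_cpu_notifier(),
and its callback is invoked for CPU_ONLINE and CPU_DEAD events, which is where
per-cpu state such as sysfs entries gets created and torn down.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Hypothetical example; mirrors the shape of cacheinfo_cpu_callback below. */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* cpu just came online: create its per-cpu sysfs entries here */
		printk(KERN_INFO "example: cpu %u online\n", cpu);
		break;
	case CPU_DEAD:
		/* cpu has gone away: remove the entries created above */
		printk(KERN_INFO "example: cpu %u dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_cpu_notifier_init(void)
{
	/* From this point on the callback runs for every hotplug event. */
	register_cpu_notifier(&example_cpu_notifier);
	return 0;
}
device_initcall(example_cpu_notifier_init);

Booting with maxcpus=1 and onlining a cpu later still works with this scheme,
since the init routine can walk the already-online cpus once at registration
time, exactly as cache_sysfs_init() does in the diff below.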
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--	arch/i386/kernel/cpu/intel_cacheinfo.c	60
1 file changed, 43 insertions(+), 17 deletions(-)
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 7cc84a4a6df9..f0839334881c 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  */
 
 #include <linux/init.h>
@@ -28,7 +29,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __devinitdata =
+static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
@@ -119,7 +120,7 @@ struct _cpuid4_info {
 
 static unsigned short			num_cache_leaves;
 
-static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	unsigned int		eax, ebx, ecx, edx;
 	union _cpuid4_leaf_eax	cache_eax;
@@ -154,7 +155,7 @@ static int __init find_num_cache_leaves(void)
 	return i;
 }
 
-unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
@@ -289,7 +290,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
 
 #ifdef CONFIG_SMP
-static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long num_threads_sharing;
@@ -322,7 +323,7 @@ static void free_cache_attributes(unsigned int cpu)
 	cpuid4_info[cpu] = NULL;
 }
 
-static int __devinit detect_cache_attributes(unsigned int cpu)
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long		j;
@@ -499,7 +500,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 	free_cache_attributes(cpu);
 }
 
-static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 {
 
 	if (num_cache_leaves == 0)
@@ -530,7 +531,7 @@ err_out:
 }
 
 /* Add/Remove cache interface for CPU device */
-static int __devinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -567,7 +568,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
 	return retval;
 }
 
-static int __devexit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -576,24 +577,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
-	return 0;
+	return;
+}
+
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
-static struct sysdev_driver cache_sysdev_driver = {
-	.add = cache_add_dev,
-	.remove = __devexit_p(cache_remove_dev),
-};
+static struct notifier_block cacheinfo_cpu_notifier =
+{
+	.notifier_call = cacheinfo_cpu_callback,
+};
 
-/* Register/Unregister the cpu_cache driver */
-static int __devinit cache_register_driver(void)
+static int __cpuinit cache_sysfs_init(void)
 {
+	int i;
+
 	if (num_cache_leaves == 0)
 		return 0;
 
-	return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver);
+	register_cpu_notifier(&cacheinfo_cpu_notifier);
+
+	for_each_online_cpu(i) {
+		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
+			(void *)(long)i);
+	}
+
+	return 0;
 }
 
-device_initcall(cache_register_driver);
+device_initcall(cache_sysfs_init);
 
 #endif
-