author     Tejun Heo <tj@kernel.org>    2009-10-29 09:34:14 -0400
committer  Tejun Heo <tj@kernel.org>    2009-10-29 09:34:14 -0400
commit     0fe1e009541e925adc1748a605d8b66188e4b2ab
tree       e3c7238bcb865f14a288ffe6e9d37ea7dea1ec2a
parent     c6e22f9e3e99cc221fe01a0cacf94a9da8a59c31
percpu: make percpu symbols in x86 unique
This patch updates percpu related symbols in x86 such that percpu symbols
are unique and don't clash with local symbols. This serves two purposes:
decreasing the possibility of global percpu symbol collisions, and allowing
the per_cpu__ prefix to be dropped from percpu symbols.

* arch/x86/kernel/cpu/common.c: rename local variable to avoid collision

* arch/x86/kvm/svm.c: s/svm_data/sd/ for local variables to avoid collision

* arch/x86/kernel/cpu/cpu_debug.c:
  s/cpu_arr/cpud_arr/
  s/priv_arr/cpud_priv_arr/
  s/cpu_priv_count/cpud_priv_count/

* arch/x86/kernel/cpu/intel_cacheinfo.c:
  s/cpuid4_info/ici_cpuid4_info/
  s/cache_kobject/ici_cache_kobject/
  s/index_kobject/ici_index_kobject/

* arch/x86/kernel/ds.c: s/cpu_context/cpu_ds_context/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars which
cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: (kvm) Avi Kivity <avi@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: x86@kernel.org
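As a side note on the mechanics: once the per_cpu__ prefix is gone, per_cpu()
refers to its first argument as a plain C identifier, so a local variable that
shares the name of a percpu variable becomes a genuine conflict. A minimal
sketch of the hazard (illustrative only; the variable and function names here
are hypothetical, not from this patch):

/* Sketch: why percpu symbols need unique names once the per_cpu__
 * prefix is dropped. Hypothetical example, not code from this patch. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, ici_leaf_count);     /* renamed, collision-proof */

static int count_for(int cpu)
{
        /* A local may still be called leaf_count without interfering with
         * the percpu variable, because the percpu symbol carries the ici_
         * prefix; before the rename both would have competed for the same
         * identifier. */
        int leaf_count = per_cpu(ici_leaf_count, cpu);
        return leaf_count;
}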
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 54
 1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 804c40e2bc3e..f5ccb4fa5a5d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -512,8 +512,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -526,7 +526,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
         if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
                 struct cpuinfo_x86 *d;
                 for_each_online_cpu(i) {
-                        if (!per_cpu(cpuid4_info, i))
+                        if (!per_cpu(ici_cpuid4_info, i))
                                 continue;
                         d = &cpu_data(i);
                         this_leaf = CPUID4_INFO_IDX(i, index);
@@ -548,7 +548,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                     c->apicid >> index_msb) {
                         cpumask_set_cpu(i,
                                 to_cpumask(this_leaf->shared_cpu_map));
-                        if (i != cpu && per_cpu(cpuid4_info, i)) {
+                        if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                 sibling_leaf =
                                         CPUID4_INFO_IDX(i, index);
                                 cpumask_set_cpu(cpu, to_cpumask(
@@ -587,8 +587,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
         for (i = 0; i < num_cache_leaves; i++)
                 cache_remove_shared_cpu_map(cpu, i);
 
-        kfree(per_cpu(cpuid4_info, cpu));
-        per_cpu(cpuid4_info, cpu) = NULL;
+        kfree(per_cpu(ici_cpuid4_info, cpu));
+        per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -627,15 +627,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         if (num_cache_leaves == 0)
                 return -ENOENT;
 
-        per_cpu(cpuid4_info, cpu) = kzalloc(
+        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
             sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return -ENOMEM;
 
         smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
         if (retval) {
-                kfree(per_cpu(cpuid4_info, cpu));
-                per_cpu(cpuid4_info, cpu) = NULL;
+                kfree(per_cpu(ici_cpuid4_info, cpu));
+                per_cpu(ici_cpuid4_info, cpu) = NULL;
         }
 
         return retval;
@@ -647,7 +647,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
         struct kobject kobj;
@@ -656,8 +656,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val) \
 static ssize_t show_##file_name \
@@ -876,10 +876,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-        kfree(per_cpu(cache_kobject, cpu));
-        kfree(per_cpu(index_kobject, cpu));
-        per_cpu(cache_kobject, cpu) = NULL;
-        per_cpu(index_kobject, cpu) = NULL;
+        kfree(per_cpu(ici_cache_kobject, cpu));
+        kfree(per_cpu(ici_index_kobject, cpu));
+        per_cpu(ici_cache_kobject, cpu) = NULL;
+        per_cpu(ici_index_kobject, cpu) = NULL;
         free_cache_attributes(cpu);
 }
 
@@ -895,14 +895,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                 return err;
 
         /* Allocate all required memory */
-        per_cpu(cache_kobject, cpu) =
+        per_cpu(ici_cache_kobject, cpu) =
                 kzalloc(sizeof(struct kobject), GFP_KERNEL);
-        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                 goto err_out;
 
-        per_cpu(index_kobject, cpu) = kzalloc(
+        per_cpu(ici_index_kobject, cpu) = kzalloc(
             sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                 goto err_out;
 
         return 0;
@@ -926,7 +926,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         if (unlikely(retval < 0))
                 return retval;
 
-        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                       &ktype_percpu_entry,
                                       &sys_dev->kobj, "%s", "cache");
         if (retval < 0) {
@@ -940,12 +940,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                 this_object->index = i;
                 retval = kobject_init_and_add(&(this_object->kobj),
                                               &ktype_cache,
-                                              per_cpu(cache_kobject, cpu),
+                                              per_cpu(ici_cache_kobject, cpu),
                                               "index%1lu", i);
                 if (unlikely(retval)) {
                         for (j = 0; j < i; j++)
                                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-                        kobject_put(per_cpu(cache_kobject, cpu));
+                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                         cpuid4_cache_sysfs_exit(cpu);
                         return retval;
                 }
@@ -953,7 +953,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         }
         cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
         return 0;
 }
 
@@ -962,7 +962,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
         unsigned int cpu = sys_dev->id;
         unsigned long i;
 
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return;
         if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                 return;
@@ -970,7 +970,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
         for (i = 0; i < num_cache_leaves; i++)
                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-        kobject_put(per_cpu(cache_kobject, cpu));
+        kobject_put(per_cpu(ici_cache_kobject, cpu));
         cpuid4_cache_sysfs_exit(cpu);
 }
 
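For readers tracing the accessor macros above: CPUID4_INFO_IDX and
INDEX_KOBJECT_PTR both follow the same per-cpu pointer-array pattern, where
each CPU owns a kzalloc'ed array with one element per cache leaf. A sketch of
what the first macro expands to, using a hypothetical helper name that is not
part of the patch:

/* Hypothetical helper equivalent to CPUID4_INFO_IDX(cpu, index). */
static struct _cpuid4_info *leaf_info(unsigned int cpu, unsigned int index)
{
        /* per_cpu(...) yields this CPU's array base pointer; indexing
         * then selects the record for cache leaf 'index'. */
        return &per_cpu(ici_cpuid4_info, cpu)[index];
}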