Diffstat (limited to 'arch/x86/kernel/cpu')

-rw-r--r--  arch/x86/kernel/cpu/common.c                |  8
-rw-r--r--  arch/x86/kernel/cpu/cpu_debug.c             | 30
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c  | 28
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       | 54
4 files changed, 60 insertions(+), 60 deletions(-)
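Every change in this patch is a rename: the static per-CPU variables in each file gain a file-specific prefix (cpud_ in cpu_debug.c, acfreq_ in acpi-cpufreq.c, ici_ in intel_cacheinfo.c), and one local variable in common.c is renamed so it no longer shares a spelling with the per-CPU symbol it points at. For readers unfamiliar with the macros involved, here is a minimal sketch (hypothetical demo_* names; DEFINE_PER_CPU() and per_cpu() are the real kernel macros this patch manipulates) of the two declaration forms renamed below:

#include <linux/percpu.h>

#define MAX_DEMO 8

/* A per-CPU scalar: each possible CPU gets its own instance. */
static DEFINE_PER_CPU(int, demo_count);

/* A per-CPU array: each CPU gets its own MAX_DEMO-element array. */
static DEFINE_PER_CPU(int [MAX_DEMO], demo_arr);

static void demo_touch(unsigned int cpu, unsigned int idx)
{
        per_cpu(demo_count, cpu)++;             /* address a given CPU's copy */
        per_cpu(demo_arr[idx], cpu) = 0;        /* array elements work the same way */
}

Because per_cpu() takes the symbol name itself, every access site has to change along with the definition, which is why the diff below touches many lines while changing no behavior.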
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c8..20399b7b0c3f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-        struct orig_ist *orig_ist;
+        struct orig_ist *oist;
         struct task_struct *me;
         struct tss_struct *t;
         unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
         cpu = stack_smp_processor_id();
         t = &per_cpu(init_tss, cpu);
-        orig_ist = &per_cpu(orig_ist, cpu);
+        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
         if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
         /*
          * set up and load the per-CPU TSS
          */
-        if (!orig_ist->ist[0]) {
+        if (!oist->ist[0]) {
                 char *estacks = per_cpu(exception_stacks, cpu);
 
                 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                         estacks += exception_stack_sizes[v];
-                        orig_ist->ist[v] = t->x86_tss.ist[v] =
+                        oist->ist[v] = t->x86_tss.ist[v] =
                                 (unsigned long)estacks;
                 }
         }
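Note that in common.c the per-CPU variable orig_ist keeps its name; only the local pointer becomes oist. Before this change the local variable and the per-CPU symbol were both spelled orig_ist, so the same identifier appeared on both sides of one assignment, roughly:

/* before: the local shadows the per-CPU symbol of the same name */
struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);

/* after: distinct names, an unambiguous lookup */
struct orig_ist *oist = &per_cpu(orig_ist, cpu);

(The struct tag orig_ist lives in a separate C namespace and is untouched.)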
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
         /* Already intialized */
         if (file == CPU_INDEX_BIT)
-                if (per_cpu(cpu_arr[type].init, cpu))
+                if (per_cpu(cpud_arr[type].init, cpu))
                         return 0;
 
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
         priv->reg = reg;
         priv->file = file;
         mutex_lock(&cpu_debug_lock);
-        per_cpu(priv_arr[type], cpu) = priv;
-        per_cpu(cpu_priv_count, cpu)++;
+        per_cpu(cpud_priv_arr[type], cpu) = priv;
+        per_cpu(cpud_priv_count, cpu)++;
         mutex_unlock(&cpu_debug_lock);
 
         if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                         dentry, (void *)priv, &cpu_fops);
         else {
                 debugfs_create_file(cpu_base[type].name, S_IRUGO,
-                                    per_cpu(cpu_arr[type].dentry, cpu),
+                                    per_cpu(cpud_arr[type].dentry, cpu),
                                     (void *)priv, &cpu_fops);
                 mutex_lock(&cpu_debug_lock);
-                per_cpu(cpu_arr[type].init, cpu) = 1;
+                per_cpu(cpud_arr[type].init, cpu) = 1;
                 mutex_unlock(&cpu_debug_lock);
         }
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
                 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                         continue;
                 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+                per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
                 if (type < CPU_TSS_BIT)
                         err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
                 err = cpu_init_allreg(cpu, cpu_dentry);
 
                 pr_info("cpu%d(%d) debug files %d\n",
-                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+                        cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+                if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
                         pr_err("Register files count %d exceeds limit %d\n",
-                               per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+                               per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+                        per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
                         err = -ENFILE;
                 }
                 if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
         debugfs_remove_recursive(cpu_debugfs_dir);
 
         for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
-                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-                        kfree(per_cpu(priv_arr[i], cpu));
+                for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+                        kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
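cpu_debug.c renames its three per-CPU bookkeeping objects as a set (cpud_arr, cpud_priv_arr, cpud_priv_count). The file's access pattern, visible in the hunks above, is to fill a slot in one CPU's array and bump that CPU's counter under a shared mutex; as a standalone sketch (hypothetical demo_* names, real mutex and percpu macros):

#include <linux/mutex.h>
#include <linux/percpu.h>

#define MAX_SLOTS 16

static DEFINE_PER_CPU(void * [MAX_SLOTS], demo_slot_arr);
static DEFINE_PER_CPU(int, demo_slot_count);
static DEFINE_MUTEX(demo_lock);

static void demo_record(unsigned int cpu, unsigned int type, void *priv)
{
        /* Writers may run on any CPU while targeting another CPU's
         * slots, so updates serialize on one mutex, as in
         * cpu_create_file() above. */
        mutex_lock(&demo_lock);
        per_cpu(demo_slot_arr[type], cpu) = priv;
        per_cpu(demo_slot_count, cpu)++;
        mutex_unlock(&demo_lock);
}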
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index d2e7c77c1ea4..f28decf8dde3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
         unsigned int cpu_feature;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
         if (unlikely(cpumask_empty(mask)))
                 return 0;
 
-        switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
                 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                 break;
         case SYSTEM_IO_CAPABLE:
                 cmd.type = SYSTEM_IO_CAPABLE;
-                perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
                 cmd.addr.io.port = perf->control_register.address;
                 cmd.addr.io.bit_width = perf->control_register.bit_width;
                 break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
         if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
                 return 0;
 
-        ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
-        per_cpu(old_perf, cpu) = perf;
+        ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+        per_cpu(acfreq_old_perf, cpu) = perf;
 
         retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
         unsigned int freq;
         unsigned int cached_freq;
 
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq, unsigned int relation)
 {
-        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
         struct acpi_processor_performance *perf;
         struct cpufreq_freqs freqs;
         struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
         dprintk("acpi_cpufreq_verify\n");
 
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 return -ENOMEM;
 
         data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-        per_cpu(drv_data, cpu) = data;
+        per_cpu(acfreq_data, cpu) = data;
 
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
         acpi_processor_unregister_performance(perf, cpu);
 err_free:
         kfree(data);
-        per_cpu(drv_data, cpu) = NULL;
+        per_cpu(acfreq_data, cpu) = NULL;
 
         return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
         dprintk("acpi_cpufreq_cpu_exit\n");
 
         if (data) {
                 cpufreq_frequency_table_put_attr(policy->cpu);
-                per_cpu(drv_data, policy->cpu) = NULL;
+                per_cpu(acfreq_data, policy->cpu) = NULL;
                 acpi_processor_unregister_performance(data->acpi_data,
                                                       policy->cpu);
                 kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
         dprintk("acpi_cpufreq_resume\n");
 
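The renamed acfreq_data in acpi-cpufreq.c is a per-CPU pointer whose lifetime the driver manages by hand: assigned in acpi_cpufreq_cpu_init(), and reset to NULL around kfree() on both the error path and in acpi_cpufreq_cpu_exit(). A sketch of that set/clear discipline (hypothetical demo_* names; kzalloc(), kfree(), and per_cpu() are the real APIs used above):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_data { unsigned int feature; };

static DEFINE_PER_CPU(struct demo_data *, demo_pcpu_data);

static int demo_cpu_init(unsigned int cpu)
{
        struct demo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
                return -ENOMEM;
        per_cpu(demo_pcpu_data, cpu) = data;
        return 0;
}

static void demo_cpu_exit(unsigned int cpu)
{
        struct demo_data *data = per_cpu(demo_pcpu_data, cpu);

        /* Clear the per-CPU slot before freeing, mirroring the driver,
         * so later lookups see NULL rather than a dangling pointer. */
        per_cpu(demo_pcpu_data, cpu) = NULL;
        kfree(data);
}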
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b5b340..0c06bca2a1dc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
         if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
                 struct cpuinfo_x86 *d;
                 for_each_online_cpu(i) {
-                        if (!per_cpu(cpuid4_info, i))
+                        if (!per_cpu(ici_cpuid4_info, i))
                                 continue;
                         d = &cpu_data(i);
                         this_leaf = CPUID4_INFO_IDX(i, index);
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                     c->apicid >> index_msb) {
                         cpumask_set_cpu(i,
                                 to_cpumask(this_leaf->shared_cpu_map));
-                        if (i != cpu && per_cpu(cpuid4_info, i)) {
+                        if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                 sibling_leaf =
                                         CPUID4_INFO_IDX(i, index);
                                 cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
         for (i = 0; i < num_cache_leaves; i++)
                 cache_remove_shared_cpu_map(cpu, i);
 
-        kfree(per_cpu(cpuid4_info, cpu));
-        per_cpu(cpuid4_info, cpu) = NULL;
+        kfree(per_cpu(ici_cpuid4_info, cpu));
+        per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         if (num_cache_leaves == 0)
                 return -ENOENT;
 
-        per_cpu(cpuid4_info, cpu) = kzalloc(
+        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
             sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return -ENOMEM;
 
         smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
         if (retval) {
-                kfree(per_cpu(cpuid4_info, cpu));
-                per_cpu(cpuid4_info, cpu) = NULL;
+                kfree(per_cpu(ici_cpuid4_info, cpu));
+                per_cpu(ici_cpuid4_info, cpu) = NULL;
         }
 
         return retval;
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
         struct kobject kobj;
@@ -643,8 +643,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name						\
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-        kfree(per_cpu(cache_kobject, cpu));
-        kfree(per_cpu(index_kobject, cpu));
-        per_cpu(cache_kobject, cpu) = NULL;
-        per_cpu(index_kobject, cpu) = NULL;
+        kfree(per_cpu(ici_cache_kobject, cpu));
+        kfree(per_cpu(ici_index_kobject, cpu));
+        per_cpu(ici_cache_kobject, cpu) = NULL;
+        per_cpu(ici_index_kobject, cpu) = NULL;
         free_cache_attributes(cpu);
 }
 
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                 return err;
 
         /* Allocate all required memory */
-        per_cpu(cache_kobject, cpu) =
+        per_cpu(ici_cache_kobject, cpu) =
                 kzalloc(sizeof(struct kobject), GFP_KERNEL);
-        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                 goto err_out;
 
-        per_cpu(index_kobject, cpu) = kzalloc(
+        per_cpu(ici_index_kobject, cpu) = kzalloc(
             sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                 goto err_out;
 
         return 0;
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         if (unlikely(retval < 0))
                 return retval;
 
-        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                       &ktype_percpu_entry,
                                       &sys_dev->kobj, "%s", "cache");
         if (retval < 0) {
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                 this_object->index = i;
                 retval = kobject_init_and_add(&(this_object->kobj),
                                               &ktype_cache,
-                                              per_cpu(cache_kobject, cpu),
+                                              per_cpu(ici_cache_kobject, cpu),
                                               "index%1lu", i);
                 if (unlikely(retval)) {
                         for (j = 0; j < i; j++)
                                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-                        kobject_put(per_cpu(cache_kobject, cpu));
+                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                         cpuid4_cache_sysfs_exit(cpu);
                         return retval;
                 }
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         }
         cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
         return 0;
 }
 
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
         unsigned int cpu = sys_dev->id;
         unsigned long i;
 
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return;
         if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                 return;
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
         for (i = 0; i < num_cache_leaves; i++)
                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-        kobject_put(per_cpu(cache_kobject, cpu));
+        kobject_put(per_cpu(ici_cache_kobject, cpu));
         cpuid4_cache_sysfs_exit(cpu);
 }
 
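One reason the intel_cacheinfo.c portion stays mechanical: most call sites reach the per-CPU data through the CPUID4_INFO_IDX() and INDEX_KOBJECT_PTR() wrapper macros, so the symbol name is spelled out in only a handful of places. The wrapper idiom, sketched with hypothetical names (the &((per_cpu(...))[y]) form is copied from the patch):

#include <linux/percpu.h>

struct demo_leaf { unsigned long size; };

/* Per-CPU pointer to a kzalloc'd array with one entry per cache leaf. */
static DEFINE_PER_CPU(struct demo_leaf *, demo_leaf_info);

/* Call sites index through the macro, so renaming the per-CPU symbol
 * only touches the definition and this one line. */
#define DEMO_LEAF_IDX(cpu, y)	(&((per_cpu(demo_leaf_info, cpu))[y]))

static unsigned long demo_leaf_size(unsigned int cpu, int index)
{
        return DEMO_LEAF_IDX(cpu, index)->size;
}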