path: root/arch/x86/kernel
Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/common.c          |  8 ++--
 arch/x86/kernel/cpu/cpu_debug.c       | 30 +++++++++---------------
 arch/x86/kernel/cpu/intel_cacheinfo.c | 54 +++++++++++++++---------------------
 arch/x86/kernel/ds.c                  |  4 +-
 4 files changed, 48 insertions(+), 48 deletions(-)
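
Every hunk below applies the same rename pattern: in common.c the local variable that shadowed the per-CPU symbol orig_ist becomes oist, while in the other files the per-CPU symbols themselves gain file-local prefixes (cpud_, ici_, cpu_ds_), presumably so that per-CPU symbol names no longer collide with ordinary identifiers. A minimal sketch of the pattern, assuming struct orig_ist and N_EXCEPTION_STACKS from <asm/processor.h>; the helper name is hypothetical, not part of this patch:

#include <linux/percpu.h>
#include <asm/processor.h>

/* The per-CPU symbol keeps its historical name ... */
static DEFINE_PER_CPU(struct orig_ist, orig_ist);

/* Hypothetical helper mirroring the cpu_init() hunk below. */
static void ist_setup_sketch(int cpu, unsigned long stack_top[])
{
	/* ... but the local pointer is now "oist": a local also called
	 * "orig_ist" would shadow or clash with the per-CPU symbol once
	 * per-CPU variables share the normal identifier namespace. */
	struct orig_ist *oist = &per_cpu(orig_ist, cpu);
	unsigned long v;

	for (v = 0; v < N_EXCEPTION_STACKS; v++)
		oist->ist[v] = stack_top[v];
}
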
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc25c2b4a567..3192f22f2fdd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-	struct orig_ist *orig_ist;
+	struct orig_ist *oist;
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
 	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
-	orig_ist = &per_cpu(orig_ist, cpu);
+	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!orig_ist->ist[0]) {
+	if (!oist->ist[0]) {
 		char *estacks = per_cpu(exception_stacks, cpu);
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
-			orig_ist->ist[v] = t->x86_tss.ist[v] =
+			oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
 	/* Already intialized */
 	if (file == CPU_INDEX_BIT)
-		if (per_cpu(cpu_arr[type].init, cpu))
+		if (per_cpu(cpud_arr[type].init, cpu))
 			return 0;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 	priv->reg = reg;
 	priv->file = file;
 	mutex_lock(&cpu_debug_lock);
-	per_cpu(priv_arr[type], cpu) = priv;
-	per_cpu(cpu_priv_count, cpu)++;
+	per_cpu(cpud_priv_arr[type], cpu) = priv;
+	per_cpu(cpud_priv_count, cpu)++;
 	mutex_unlock(&cpu_debug_lock);
 
 	if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 				    dentry, (void *)priv, &cpu_fops);
 	else {
 		debugfs_create_file(cpu_base[type].name, S_IRUGO,
-				    per_cpu(cpu_arr[type].dentry, cpu),
+				    per_cpu(cpud_arr[type].dentry, cpu),
 				    (void *)priv, &cpu_fops);
 		mutex_lock(&cpu_debug_lock);
-		per_cpu(cpu_arr[type].init, cpu) = 1;
+		per_cpu(cpud_arr[type].init, cpu) = 1;
 		mutex_unlock(&cpu_debug_lock);
 	}
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
 		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
 			continue;
 		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+		per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
 		if (type < CPU_TSS_BIT)
 			err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
 		err = cpu_init_allreg(cpu, cpu_dentry);
 
 		pr_info("cpu%d(%d) debug files %d\n",
-			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+			cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+		if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
 			pr_err("Register files count %d exceeds limit %d\n",
-				per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+				per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+			per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
 			err = -ENFILE;
 		}
 		if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
 	debugfs_remove_recursive(cpu_debugfs_dir);
 
 	for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
-		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-			kfree(per_cpu(priv_arr[i], cpu));
+		for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+			kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 804c40e2bc3e..f5ccb4fa5a5d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -512,8 +512,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -526,7 +526,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
 		struct cpuinfo_x86 *d;
 		for_each_online_cpu(i) {
-			if (!per_cpu(cpuid4_info, i))
+			if (!per_cpu(ici_cpuid4_info, i))
 				continue;
 			d = &cpu_data(i);
 			this_leaf = CPUID4_INFO_IDX(i, index);
@@ -548,7 +548,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 			    c->apicid >> index_msb) {
 				cpumask_set_cpu(i,
 					to_cpumask(this_leaf->shared_cpu_map));
-				if (i != cpu && per_cpu(cpuid4_info, i))  {
+				if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
 					sibling_leaf =
 						CPUID4_INFO_IDX(i, index);
 					cpumask_set_cpu(cpu, to_cpumask(
@@ -587,8 +587,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	for (i = 0; i < num_cache_leaves; i++)
 		cache_remove_shared_cpu_map(cpu, i);
 
-	kfree(per_cpu(cpuid4_info, cpu));
-	per_cpu(cpuid4_info, cpu) = NULL;
+	kfree(per_cpu(ici_cpuid4_info, cpu));
+	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -627,15 +627,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	per_cpu(cpuid4_info, cpu) = kzalloc(
+	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
+	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
 		return -ENOMEM;
 
 	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
-		kfree(per_cpu(cpuid4_info, cpu));
-		per_cpu(cpuid4_info, cpu) = NULL;
+		kfree(per_cpu(ici_cpuid4_info, cpu));
+		per_cpu(ici_cpuid4_info, cpu) = NULL;
 	}
 
 	return retval;
@@ -647,7 +647,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
 	struct kobject kobj;
@@ -656,8 +656,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)				\
 static ssize_t show_##file_name					\
@@ -876,10 +876,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-	kfree(per_cpu(cache_kobject, cpu));
-	kfree(per_cpu(index_kobject, cpu));
-	per_cpu(cache_kobject, cpu) = NULL;
-	per_cpu(index_kobject, cpu) = NULL;
+	kfree(per_cpu(ici_cache_kobject, cpu));
+	kfree(per_cpu(ici_index_kobject, cpu));
+	per_cpu(ici_cache_kobject, cpu) = NULL;
+	per_cpu(ici_index_kobject, cpu) = NULL;
 	free_cache_attributes(cpu);
 }
 
@@ -895,14 +895,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 		return err;
 
 	/* Allocate all required memory */
-	per_cpu(cache_kobject, cpu) =
+	per_cpu(ici_cache_kobject, cpu) =
 		kzalloc(sizeof(struct kobject), GFP_KERNEL);
-	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
 		goto err_out;
 
-	per_cpu(index_kobject, cpu) = kzalloc(
+	per_cpu(ici_index_kobject, cpu) = kzalloc(
 	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
 		goto err_out;
 
 	return 0;
@@ -926,7 +926,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	if (unlikely(retval < 0))
 		return retval;
 
-	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
 				      &ktype_percpu_entry,
 				      &sys_dev->kobj, "%s", "cache");
 	if (retval < 0) {
@@ -940,12 +940,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		this_object->index = i;
 		retval = kobject_init_and_add(&(this_object->kobj),
 					      &ktype_cache,
-					      per_cpu(cache_kobject, cpu),
+					      per_cpu(ici_cache_kobject, cpu),
 					      "index%1lu", i);
 		if (unlikely(retval)) {
 			for (j = 0; j < i; j++)
 				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-			kobject_put(per_cpu(cache_kobject, cpu));
+			kobject_put(per_cpu(ici_cache_kobject, cpu));
 			cpuid4_cache_sysfs_exit(cpu);
 			return retval;
 		}
@@ -953,7 +953,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	}
 	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
 	return 0;
 }
 
@@ -962,7 +962,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
-	if (per_cpu(cpuid4_info, cpu) == NULL)
+	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
 		return;
 	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
@@ -970,7 +970,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-	kobject_put(per_cpu(cache_kobject, cpu));
+	kobject_put(per_cpu(ici_cache_kobject, cpu));
 	cpuid4_cache_sysfs_exit(cpu);
 }
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
 	int cpu;
 };
 
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
 
 
 static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
 {
 	struct ds_context **p_context =
-		(task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+		(task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
 	struct ds_context *context = NULL;
 	struct ds_context *new_context = NULL;
 
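
All of the accesses in this patch use per_cpu(var, cpu) to reach a named CPU's copy; for the common local-CPU case the same API family offers get_cpu_var()/put_cpu_var(), which disable preemption around the access. A small sketch with a hypothetical counter, not taken from any file above:

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(int, demo_count);

static void demo_bump_local(void)
{
	/* get_cpu_var() pins the task to the current CPU (preemption off);
	 * put_cpu_var() releases it again. */
	get_cpu_var(demo_count)++;
	put_cpu_var(demo_count);
}

static int demo_read_remote(int cpu)
{
	/* Explicit-CPU access, as used throughout the hunks above; the
	 * caller provides whatever synchronization it needs. */
	return per_cpu(demo_count, cpu);
}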