 arch/x86/kernel/cpu/common.c          |  8
 arch/x86/kernel/cpu/cpu_debug.c       | 30
 arch/x86/kernel/cpu/intel_cacheinfo.c | 54
 arch/x86/kernel/ds.c                  |  4
 arch/x86/kvm/svm.c                    | 63
 5 files changed, 79 insertions(+), 80 deletions(-)
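
The renames in this diff follow one pattern: either a per-cpu variable gains a file-local prefix (cpu_arr -> cpud_arr, cpuid4_info -> ici_cpuid4_info, cpu_context -> cpu_ds_context) or the local pointer that used to share the per-cpu symbol's name is renamed instead (orig_ist -> oist in cpu_init(), svm_data -> sd in svm.c). This appears to be preparation for per-cpu symbols sharing the ordinary C namespace with locals and other globals, where such name reuse would be a collision. The userspace-only sketch below illustrates that assumption; it is not kernel code, and the *_SKETCH macros are stand-ins rather than the real DEFINE_PER_CPU()/per_cpu() implementation.

/* Hedged sketch: why a local and a per-cpu symbol should not share a name
 * once the per-cpu symbol is no longer prefix-mangled. */
#include <stdio.h>

/* Assumed scheme: the per-cpu symbol keeps its plain name, so it lives in
 * the ordinary C namespace alongside locals and other globals. */
#define DEFINE_PER_CPU_SKETCH(type, name)  type name
#define per_cpu_sketch(name, cpu)          (name)   /* (cpu) ignored here */

DEFINE_PER_CPU_SKETCH(unsigned long, orig_ist);     /* per-cpu variable */

int main(void)
{
        /* Before the patch, cpu_init() had a local also called "orig_ist";
         * with an unprefixed per-cpu symbol, per_cpu_sketch(orig_ist, 0)
         * would expand to that local and silently shadow the per-cpu
         * variable.  Renaming the local to "oist" keeps the reference
         * unambiguous, which mirrors what the common.c hunks below do. */
        unsigned long *oist = &per_cpu_sketch(orig_ist, 0);

        *oist = 0xdeadbeef;
        printf("per-cpu orig_ist = %#lx\n", per_cpu_sketch(orig_ist, 0));
        return 0;
}
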
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc25c2b4a567..3192f22f2fdd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-        struct orig_ist *orig_ist;
+        struct orig_ist *oist;
         struct task_struct *me;
         struct tss_struct *t;
         unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
         cpu = stack_smp_processor_id();
         t = &per_cpu(init_tss, cpu);
-        orig_ist = &per_cpu(orig_ist, cpu);
+        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
         if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
         /*
          * set up and load the per-CPU TSS
          */
-        if (!orig_ist->ist[0]) {
+        if (!oist->ist[0]) {
                 char *estacks = per_cpu(exception_stacks, cpu);
 
                 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                         estacks += exception_stack_sizes[v];
-                        orig_ist->ist[v] = t->x86_tss.ist[v] =
+                        oist->ist[v] = t->x86_tss.ist[v] =
                                 (unsigned long)estacks;
                 }
         }
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c03999..b368cd862997 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
         /* Already intialized */
         if (file == CPU_INDEX_BIT)
-                if (per_cpu(cpu_arr[type].init, cpu))
+                if (per_cpu(cpud_arr[type].init, cpu))
                         return 0;
 
         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
         priv->reg = reg;
         priv->file = file;
         mutex_lock(&cpu_debug_lock);
-        per_cpu(priv_arr[type], cpu) = priv;
-        per_cpu(cpu_priv_count, cpu)++;
+        per_cpu(cpud_priv_arr[type], cpu) = priv;
+        per_cpu(cpud_priv_count, cpu)++;
         mutex_unlock(&cpu_debug_lock);
 
         if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                                     dentry, (void *)priv, &cpu_fops);
         else {
                 debugfs_create_file(cpu_base[type].name, S_IRUGO,
-                                    per_cpu(cpu_arr[type].dentry, cpu),
+                                    per_cpu(cpud_arr[type].dentry, cpu),
                                     (void *)priv, &cpu_fops);
                 mutex_lock(&cpu_debug_lock);
-                per_cpu(cpu_arr[type].init, cpu) = 1;
+                per_cpu(cpud_arr[type].init, cpu) = 1;
                 mutex_unlock(&cpu_debug_lock);
         }
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
                 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                         continue;
                 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+                per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
                 if (type < CPU_TSS_BIT)
                         err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
                 err = cpu_init_allreg(cpu, cpu_dentry);
 
                 pr_info("cpu%d(%d) debug files %d\n",
-                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+                        cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+                if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
                         pr_err("Register files count %d exceeds limit %d\n",
-                                per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+                                per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+                        per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
                         err = -ENFILE;
                 }
                 if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
         debugfs_remove_recursive(cpu_debugfs_dir);
 
         for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-                        kfree(per_cpu(priv_arr[i], cpu));
+                for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+                        kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 804c40e2bc3e..f5ccb4fa5a5d 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -512,8 +512,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)    (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)    (&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -526,7 +526,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
         if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
                 struct cpuinfo_x86 *d;
                 for_each_online_cpu(i) {
-                        if (!per_cpu(cpuid4_info, i))
+                        if (!per_cpu(ici_cpuid4_info, i))
                                 continue;
                         d = &cpu_data(i);
                         this_leaf = CPUID4_INFO_IDX(i, index);
@@ -548,7 +548,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                     c->apicid >> index_msb) {
                         cpumask_set_cpu(i,
                                 to_cpumask(this_leaf->shared_cpu_map));
-                        if (i != cpu && per_cpu(cpuid4_info, i)) {
+                        if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                 sibling_leaf =
                                         CPUID4_INFO_IDX(i, index);
                                 cpumask_set_cpu(cpu, to_cpumask(
@@ -587,8 +587,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
         for (i = 0; i < num_cache_leaves; i++)
                 cache_remove_shared_cpu_map(cpu, i);
 
-        kfree(per_cpu(cpuid4_info, cpu));
-        per_cpu(cpuid4_info, cpu) = NULL;
+        kfree(per_cpu(ici_cpuid4_info, cpu));
+        per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -627,15 +627,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         if (num_cache_leaves == 0)
                 return -ENOENT;
 
-        per_cpu(cpuid4_info, cpu) = kzalloc(
+        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
             sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return -ENOMEM;
 
         smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
         if (retval) {
-                kfree(per_cpu(cpuid4_info, cpu));
-                per_cpu(cpuid4_info, cpu) = NULL;
+                kfree(per_cpu(ici_cpuid4_info, cpu));
+                per_cpu(ici_cpuid4_info, cpu) = NULL;
         }
 
         return retval;
@@ -647,7 +647,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
         struct kobject kobj;
@@ -656,8 +656,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)    (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)    (&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)                          \
 static ssize_t show_##file_name                                        \
@@ -876,10 +876,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-        kfree(per_cpu(cache_kobject, cpu));
-        kfree(per_cpu(index_kobject, cpu));
-        per_cpu(cache_kobject, cpu) = NULL;
-        per_cpu(index_kobject, cpu) = NULL;
+        kfree(per_cpu(ici_cache_kobject, cpu));
+        kfree(per_cpu(ici_index_kobject, cpu));
+        per_cpu(ici_cache_kobject, cpu) = NULL;
+        per_cpu(ici_index_kobject, cpu) = NULL;
         free_cache_attributes(cpu);
 }
 
@@ -895,14 +895,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                 return err;
 
         /* Allocate all required memory */
-        per_cpu(cache_kobject, cpu) =
+        per_cpu(ici_cache_kobject, cpu) =
                 kzalloc(sizeof(struct kobject), GFP_KERNEL);
-        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                 goto err_out;
 
-        per_cpu(index_kobject, cpu) = kzalloc(
+        per_cpu(ici_index_kobject, cpu) = kzalloc(
             sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                 goto err_out;
 
         return 0;
@@ -926,7 +926,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         if (unlikely(retval < 0))
                 return retval;
 
-        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                       &ktype_percpu_entry,
                                       &sys_dev->kobj, "%s", "cache");
         if (retval < 0) {
@@ -940,12 +940,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                 this_object->index = i;
                 retval = kobject_init_and_add(&(this_object->kobj),
                                               &ktype_cache,
-                                              per_cpu(cache_kobject, cpu),
+                                              per_cpu(ici_cache_kobject, cpu),
                                               "index%1lu", i);
                 if (unlikely(retval)) {
                         for (j = 0; j < i; j++)
                                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-                        kobject_put(per_cpu(cache_kobject, cpu));
+                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                         cpuid4_cache_sysfs_exit(cpu);
                         return retval;
                 }
@@ -953,7 +953,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
         }
         cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
         return 0;
 }
 
@@ -962,7 +962,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
         unsigned int cpu = sys_dev->id;
         unsigned long i;
 
-        if (per_cpu(cpuid4_info, cpu) == NULL)
+        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                 return;
         if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                 return;
@@ -970,7 +970,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
         for (i = 0; i < num_cache_leaves; i++)
                 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-        kobject_put(per_cpu(cache_kobject, cpu));
+        kobject_put(per_cpu(ici_cache_kobject, cpu));
         cpuid4_cache_sysfs_exit(cpu);
 }
 
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a6..1c47390dd0e5 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
         int                     cpu;
 };
 
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
 
 
 static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
 {
         struct ds_context **p_context =
-                (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+                (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
         struct ds_context *context = NULL;
         struct ds_context *new_context = NULL;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 944cc9c04b3c..6c79a14a3b6f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -319,7 +319,7 @@ static void svm_hardware_disable(void *garbage)
 static void svm_hardware_enable(void *garbage)
 {
 
-        struct svm_cpu_data *svm_data;
+        struct svm_cpu_data *sd;
         uint64_t efer;
         struct descriptor_table gdt_descr;
         struct desc_struct *gdt;
@@ -329,62 +329,61 @@ static void svm_hardware_enable(void *garbage)
                 printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
                 return;
         }
-        svm_data = per_cpu(svm_data, me);
+        sd = per_cpu(svm_data, me);
 
-        if (!svm_data) {
+        if (!sd) {
                 printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
                        me);
                 return;
         }
 
-        svm_data->asid_generation = 1;
-        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
-        svm_data->next_asid = svm_data->max_asid + 1;
+        sd->asid_generation = 1;
+        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+        sd->next_asid = sd->max_asid + 1;
 
         kvm_get_gdt(&gdt_descr);
         gdt = (struct desc_struct *)gdt_descr.base;
-        svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
         rdmsrl(MSR_EFER, efer);
         wrmsrl(MSR_EFER, efer | EFER_SVME);
 
         wrmsrl(MSR_VM_HSAVE_PA,
-               page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+               page_to_pfn(sd->save_area) << PAGE_SHIFT);
 }
 
 static void svm_cpu_uninit(int cpu)
 {
-        struct svm_cpu_data *svm_data
-                = per_cpu(svm_data, raw_smp_processor_id());
+        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
 
-        if (!svm_data)
+        if (!sd)
                 return;
 
         per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-        __free_page(svm_data->save_area);
-        kfree(svm_data);
+        __free_page(sd->save_area);
+        kfree(sd);
 }
 
 static int svm_cpu_init(int cpu)
 {
-        struct svm_cpu_data *svm_data;
+        struct svm_cpu_data *sd;
         int r;
 
-        svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
-        if (!svm_data)
+        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+        if (!sd)
                 return -ENOMEM;
-        svm_data->cpu = cpu;
-        svm_data->save_area = alloc_page(GFP_KERNEL);
+        sd->cpu = cpu;
+        sd->save_area = alloc_page(GFP_KERNEL);
         r = -ENOMEM;
-        if (!svm_data->save_area)
+        if (!sd->save_area)
                 goto err_1;
 
-        per_cpu(svm_data, cpu) = svm_data;
+        per_cpu(svm_data, cpu) = sd;
 
         return 0;
 
 err_1:
-        kfree(svm_data);
+        kfree(sd);
         return r;
 
 }
@@ -1094,16 +1093,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
 #endif
 }
 
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
-        if (svm_data->next_asid > svm_data->max_asid) {
-                ++svm_data->asid_generation;
-                svm_data->next_asid = 1;
+        if (sd->next_asid > sd->max_asid) {
+                ++sd->asid_generation;
+                sd->next_asid = 1;
                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
         }
 
-        svm->asid_generation = svm_data->asid_generation;
-        svm->vmcb->control.asid = svm_data->next_asid++;
+        svm->asid_generation = sd->asid_generation;
+        svm->vmcb->control.asid = sd->next_asid++;
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2377,8 +2376,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 {
         int cpu = raw_smp_processor_id();
 
-        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
-        svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+        sd->tss_desc->type = 9; /* available 32/64-bit TSS */
         load_TR_desc();
 }
 
@@ -2386,12 +2385,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
 {
         int cpu = raw_smp_processor_id();
 
-        struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
         /* FIXME: handle wraparound of asid_generation */
-        if (svm->asid_generation != svm_data->asid_generation)
-                new_asid(svm, svm_data);
+        if (svm->asid_generation != sd->asid_generation)
+                new_asid(svm, sd);
 }
 
 static void svm_inject_nmi(struct kvm_vcpu *vcpu)