author     Christoph Lameter <cl@linux.com>  2014-08-17 13:30:40 -0400
committer  Tejun Heo <tj@kernel.org>  2014-08-26 13:45:49 -0400
commit     89cbc76768c2fa4ed95545bf961f3a14ddfeed21 (patch)
tree       14a566d17dc886d3330d67404553530f8f979e2d /arch/x86/kvm
parent     532d0d0690d1532dcc5a190162ad820b636bcd4d (diff)
x86: Replace __get_cpu_var uses
__get_cpu_var() is used for multiple purposes in the kernel source. One of
them is address calculation via the form &__get_cpu_var(x). This calculates
the address for the instance of the percpu variable of the current processor
based on an offset.

Other use cases are for storing and retrieving data from the current
processor's percpu area. __get_cpu_var() can be used as an lvalue when
writing data or on the right side of an assignment.

__get_cpu_var() is defined as:

	#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))

__get_cpu_var() always only does an address determination. However, store
and retrieve operations could use a segment prefix (or global register on
other platforms) to avoid the address calculation.

this_cpu_write() and this_cpu_read() can directly take an offset into a
percpu area and use optimized assembly code to read and write per cpu
variables.

This patch converts __get_cpu_var into either an explicit address
calculation using this_cpu_ptr() or into a use of this_cpu operations that
use the offset. Thereby address calculations are avoided and fewer registers
are used when code is generated.

Transformations done to __get_cpu_var():

1. Determine the address of the percpu instance of the current processor.

	DEFINE_PER_CPU(int, y);
	int *x = &__get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(&y);

2. Same as #1 but this time an array structure is involved.

	DEFINE_PER_CPU(int, y[20]);
	int *x = __get_cpu_var(y);

   Converts to

	int *x = this_cpu_ptr(y);

3. Retrieve the content of the current processor's instance of a per cpu
   variable.

	DEFINE_PER_CPU(int, y);
	int x = __get_cpu_var(y);

   Converts to

	int x = __this_cpu_read(y);

4. Retrieve the content of a percpu struct.

	DEFINE_PER_CPU(struct mystruct, y);
	struct mystruct x = __get_cpu_var(y);

   Converts to

	memcpy(&x, this_cpu_ptr(&y), sizeof(x));

5. Assignment to a per cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y) = x;

   Converts to

	__this_cpu_write(y, x);

6. Increment/decrement etc. of a per cpu variable.

	DEFINE_PER_CPU(int, y);
	__get_cpu_var(y)++;

   Converts to

	__this_cpu_inc(y);

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
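As a reading aid for the hunks below, a minimal sketch of the pattern in kernel C. The percpu variable my_counter and both helper functions are hypothetical and only illustrate the transformation; the sketch assumes its callers already run with preemption disabled, which is what the relaxed __this_cpu_* forms require.

	#include <linux/percpu.h>

	/* Hypothetical percpu variable, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, my_counter);

	static void bump_counter(void)
	{
		/*
		 * Old form:  __get_cpu_var(my_counter)++;
		 * This expands to an address calculation followed by a
		 * read-modify-write through the resulting pointer.
		 *
		 * New form: a single this_cpu operation.  On x86 it becomes
		 * one %gs-prefixed instruction, so the explicit address
		 * calculation (and a register) is avoided.
		 */
		__this_cpu_inc(my_counter);
	}

	static unsigned long *counter_address(void)
	{
		/* When the address itself is needed, this_cpu_ptr()
		 * replaces the old &__get_cpu_var() form. */
		return this_cpu_ptr(&my_counter);
	}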
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	6
-rw-r--r--	arch/x86/kvm/vmx.c	10
-rw-r--r--	arch/x86/kvm/x86.c	2
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ddf742768ecf..1b0e90658d8d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void *garbage)
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
-		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
+		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
 	}
 
 
@@ -1312,8 +1312,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
-	    svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
-		__get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
+	    svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+		__this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
 		wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
 	}
 }
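Note on the svm.c hunks: both touch the same per-CPU cache of the TSC ratio MSR. A condensed sketch of the resulting svm_vcpu_load() logic follows; the DEFINE_PER_CPU declaration is an assumption taken from elsewhere in svm.c and is not part of these hunks.

	/* Assumed declaration from svm.c (not part of the hunks above): */
	static DEFINE_PER_CPU(u64, current_tsc_ratio);

	/* Only rewrite MSR_AMD64_TSC_RATIO when the per-CPU cached value
	 * differs from this vCPU's ratio.  vcpu_load runs with preemption
	 * disabled, so the relaxed __this_cpu_* accessors are safe here. */
	if (svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
		__this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
		wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
	}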
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe11cf124a1..36cf28a910b8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1601,7 +1601,7 @@ static void reload_tss(void)
 	/*
 	 * VT restores TR but not its size.  Useless.
 	 */
-	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 	struct desc_struct *descs;
 
 	descs = (void *)gdt->address;
@@ -1647,7 +1647,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 
 static unsigned long segment_base(u16 selector)
 {
-	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+	struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 	struct desc_struct *d;
 	unsigned long table_base;
 	unsigned long v;
@@ -1777,7 +1777,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	 */
 	if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
 		stts();
-	load_gdt(&__get_cpu_var(host_gdt));
+	load_gdt(this_cpu_ptr(&host_gdt));
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -1807,7 +1807,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (vmx->loaded_vmcs->cpu != cpu) {
-		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 		unsigned long sysenter_esp;
 
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2744,7 +2744,7 @@ static int hardware_enable(void *garbage)
 		ept_sync_global();
 	}
 
-	native_store_gdt(&__get_cpu_var(host_gdt));
+	native_store_gdt(this_cpu_ptr(&host_gdt));
 
 	return 0;
 }
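Note on the vmx.c hunks: all five touch the same per-CPU copy of the host GDT descriptor, and only the way its address is formed changes. A sketch, assuming host_gdt is declared as a per-CPU struct desc_ptr in vmx.c (the declaration and the helper name here are illustrative, not part of the hunks):

	/* Assumed declaration from vmx.c (not part of the hunks above): */
	static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

	/* Hypothetical helper showing the equivalent address forms. */
	static struct desc_ptr *current_host_gdt(void)
	{
		/*
		 * Before the patch: &__get_cpu_var(host_gdt)
		 * After the patch:  this_cpu_ptr(&host_gdt)
		 * Both yield the same pointer, which is then handed to
		 * load_gdt() or native_store_gdt(), or dereferenced to
		 * find the descriptor table base.
		 */
		return this_cpu_ptr(&host_gdt);
	}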
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f1e22d3b286..c84ee536f9a3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1556,7 +1556,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 	if (unlikely(this_tsc_khz == 0)) {
 		local_irq_restore(flags);
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
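Note on the x86.c hunk: the read sits inside a local_irq_save() section, so the task cannot be preempted or migrated and the per-CPU TSC frequency stays stable; that is what makes the relaxed __this_cpu_read() (no preemption checks) appropriate. The declaration below is an assumption based on x86.c and is not part of the hunk.

	/* Assumed declaration from x86.c (not part of the hunk above): */
	static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

	local_irq_save(flags);
	this_tsc_khz = __this_cpu_read(cpu_tsc_khz);	/* was __get_cpu_var() */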