author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 13:07:34 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-06 13:07:34 -0400
commit		d9a73c00161f3eaa4c8c035c62f45afd1549e38a (patch)
tree		c5cad9e1e286438b63e512c1912e8b7f39071886 /arch/x86/kvm
parent		b304441c6f3a5cb5ea80b9a719d2851544f348d6 (diff)
parent		bf676945cb5bfe455321f57968967c18976f4995 (diff)
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  um, x86: Cast to (u64 *) inside set_64bit()
  x86-32, asm: Directly access per-cpu GDT
  x86-64, asm: Directly access per-cpu IST
  x86, asm: Merge cmpxchg_486_u64() and cmpxchg8b_emu()
  x86, asm: Move cmpxchg emulation code to arch/x86/lib
  x86, asm: Clean up and simplify <asm/cmpxchg.h>
  x86, asm: Clean up and simplify set_64bit()
  x86: Add memory modify constraints to xchg() and cmpxchg()
  x86-64: Simplify loading initial_gs
  x86: Use symbolic MSR names
  x86: Remove redundant K6 MSRs
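The arch/x86/kvm fallout from this pull comes from the "x86: Use symbolic MSR names" / "x86: Remove redundant K6 MSRs" commits: KVM's references to the legacy MSR_K6_STAR define are switched to the generic MSR_STAR name for the same architectural register. As a point of reference, here is a minimal sketch of the SYSCALL-related MSR defines involved (numeric values as commonly found in <asm/msr-index.h>; shown for orientation only, not taken from this diff):

/* SYSCALL/SYSRET MSRs; the old MSR_K6_STAR named the same 0xc0000081 register. */
#define MSR_STAR		0xc0000081	/* CS/SS selectors + legacy-mode SYSCALL target */
#define MSR_LSTAR		0xc0000082	/* long-mode SYSCALL target RIP */
#define MSR_CSTAR		0xc0000083	/* compat-mode SYSCALL target RIP */
#define MSR_SYSCALL_MASK	0xc0000084	/* RFLAGS bits cleared on SYSCALL */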
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/svm.c	6
-rw-r--r--	arch/x86/kvm/vmx.c	8
-rw-r--r--	arch/x86/kvm/x86.c	2
3 files changed, 8 insertions, 8 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5c81daf3ef57..bc5b9b8d4a33 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -131,7 +131,7 @@ static struct svm_direct_access_msrs {
 	u32 index;   /* Index of the MSR */
 	bool always; /* True if intercept is always on */
 } direct_access_msrs[] = {
-	{ .index = MSR_K6_STAR,			.always = true },
+	{ .index = MSR_STAR,			.always = true },
 	{ .index = MSR_IA32_SYSENTER_CS,	.always = true },
 #ifdef CONFIG_X86_64
 	{ .index = MSR_GS_BASE,			.always = true },
@@ -2432,7 +2432,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 		*data = tsc_offset + native_read_tsc();
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		*data = svm->vmcb->save.star;
 		break;
 #ifdef CONFIG_X86_64
@@ -2556,7 +2556,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
 		break;
 	}
-	case MSR_K6_STAR:
+	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 27a0222c2946..49b25eee25ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -240,14 +240,14 @@ static u64 host_efer;
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
- * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
+ * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
  * away by decrementing the array size.
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
-	MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
+	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
@@ -1117,10 +1117,10 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		if (index >= 0 && vmx->rdtscp_enabled)
 			move_msr_up(vmx, index, save_nmsrs++);
 		/*
-		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * MSR_STAR is only needed on long mode guests, and only
 		 * if efer.sce is enabled.
 		 */
-		index = __find_msr_index(vmx, MSR_K6_STAR);
+		index = __find_msr_index(vmx, MSR_STAR);
 		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
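For context on the setup_msrs() hunk above: MSR_STAR only matters to a guest that has enabled SYSCALL/SYSRET through the SCE bit of EFER, which is why it is kept last in vmx_msr_index and simply left out of save_nmsrs when unused. A minimal illustrative sketch of that gate, assuming only that EFER.SCE is bit 0 of IA32_EFER (this is not the KVM source):

#include <stdbool.h>
#include <stdint.h>

#define EFER_SCE	(1ULL << 0)	/* SYSCALL enable bit in IA32_EFER */

/* True if a guest with this EFER value actually uses MSR_STAR. */
static bool guest_uses_star(uint64_t guest_efer)
{
	return (guest_efer & EFER_SCE) != 0;
}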
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 97aab036dabf..25f19078b321 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -733,7 +733,7 @@ static u32 msrs_to_save[] = {
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_APIC_ASSIST_PAGE,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-	MSR_K6_STAR,
+	MSR_STAR,
 #ifdef CONFIG_X86_64
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif