Diffstat (limited to 'drivers/kvm')
 drivers/kvm/kvm.h         |  8 ++++----
 drivers/kvm/kvm_main.c    | 16 ++++++++--------
 drivers/kvm/kvm_svm.h     |  2 +-
 drivers/kvm/kvm_vmx.h     |  2 +-
 drivers/kvm/svm.c         | 22 +++++++++++-----------
 drivers/kvm/vmx.c         | 40 ++++++++++++++++++++--------------------
 drivers/kvm/x86_emulate.c |  8 ++++----
 drivers/kvm/x86_emulate.h |  2 +-
 8 files changed, 50 insertions(+), 50 deletions(-)
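Every hunk below makes the same one-line substitution: the compiler predefine __x86_64__ is replaced by the kernel's Kconfig symbol CONFIG_X86_64. For code compiled as part of the kernel the two guards select the same thing, but CONFIG_* symbols are the kernel convention and are also visible to the build system. A minimal sketch of the before/after pattern:

    /* Before: keyed to the compiler's target predefine. */
    #ifdef __x86_64__
    /* 64-bit-only code */
    #endif

    /* After: keyed to the kernel configuration symbol, defined
     * whenever the kernel is built with CONFIG_X86_64=y. */
    #ifdef CONFIG_X86_64
    /* 64-bit-only code */
    #endif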
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5785d0870ab6..930e04ce1af6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -140,7 +140,7 @@ enum {
 	VCPU_REGS_RBP = 5,
 	VCPU_REGS_RSI = 6,
 	VCPU_REGS_RDI = 7,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	VCPU_REGS_R8 = 8,
 	VCPU_REGS_R9 = 9,
 	VCPU_REGS_R10 = 10,
@@ -375,7 +375,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
 void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 void set_efer(struct kvm_vcpu *vcpu, u64 efer);
 #endif
 
@@ -485,7 +485,7 @@ static inline unsigned long read_tr_base(void)
 	return segment_base(tr);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 static inline unsigned long read_msr(unsigned long msr)
 {
 	u64 value;
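The hunk cuts read_msr() off after its local; the body presumably continues by reading the MSR into that local and returning it. A sketch, assuming the kernel's rdmsrl() helper:

    static inline unsigned long read_msr(unsigned long msr)
    {
            u64 value;

            rdmsrl(msr, value);     /* read the 64-bit MSR into value */
            return value;
    }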
@@ -533,7 +533,7 @@ static inline u32 get_rdx_init_val(void)
 #define TSS_REDIRECTION_SIZE (256 / 8)
 #define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 /*
  * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b6b8a41b5ec8..f8f11c75ecbd 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -83,7 +83,7 @@ struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
 }
 EXPORT_SYMBOL_GPL(find_msr_entry);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 // LDT or TSS descriptor in the GDT. 16 bytes.
 struct segment_descriptor_64 {
 	struct segment_descriptor s;
@@ -115,7 +115,7 @@ unsigned long segment_base(u16 selector)
 	}
 	d = (struct segment_descriptor *)(table_base + (selector & ~7));
 	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (d->system == 0
 	    && (d->type == 2 || d->type == 9 || d->type == 11))
 		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
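The type codes tested above (2, 9, 11 with the system bit clear) are exactly the LDT, available-TSS, and busy-TSS descriptors, which grow from 8 to 16 bytes in long mode, with base bits 63:32 in the second quadword. A sketch of the 16-byte layout the cast relies on, following the struct opened earlier in this file:

    struct segment_descriptor_64 {
            struct segment_descriptor s;    /* legacy 8 bytes: base 31:0, limit, type */
            u32 base_higher;                /* base address bits 63:32 */
            u32 pad_zero;                   /* reserved, must be zero */
    };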
@@ -351,7 +351,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	}
 
 	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		if ((vcpu->shadow_efer & EFER_LME)) {
 			int cs_db, cs_l;
 
@@ -1120,7 +1120,7 @@ static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -1243,7 +1243,7 @@ static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
 	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
 	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
 	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	regs->r8 = vcpu->regs[VCPU_REGS_R8];
 	regs->r9 = vcpu->regs[VCPU_REGS_R9];
 	regs->r10 = vcpu->regs[VCPU_REGS_R10];
@@ -1287,7 +1287,7 @@ static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
 	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
 	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
 	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vcpu->regs[VCPU_REGS_R8] = regs->r8;
 	vcpu->regs[VCPU_REGS_R9] = regs->r9;
 	vcpu->regs[VCPU_REGS_R10] = regs->r10;
@@ -1401,7 +1401,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 	vcpu->cr8 = sregs->cr8;
 
 	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	kvm_arch_ops->set_efer(vcpu, sregs->efer);
 #endif
 	vcpu->apic_base = sregs->apic_base;
@@ -1434,7 +1434,7 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 static u32 msrs_to_save[] = {
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_K6_STAR,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
 	MSR_IA32_TIME_STAMP_COUNTER,
diff --git a/drivers/kvm/kvm_svm.h b/drivers/kvm/kvm_svm.h
index 7d7f2aa10960..74cc862f4935 100644
--- a/drivers/kvm/kvm_svm.h
+++ b/drivers/kvm/kvm_svm.h
@@ -9,7 +9,7 @@
 #include "kvm.h"
 
 static const u32 host_save_msrs[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
 	MSR_FS_BASE, MSR_GS_BASE,
 #endif
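host_save_msrs lists the MSRs whose host values must survive a guest run; on 64-bit hosts that includes the syscall and segment-base MSRs the guest may rewrite. A minimal sketch, not the driver's actual sequence, of saving and restoring them with rdmsrl/wrmsrl:

    u64 host_msrs[ARRAY_SIZE(host_save_msrs)];
    int i;

    for (i = 0; i < ARRAY_SIZE(host_save_msrs); i++)
            rdmsrl(host_save_msrs[i], host_msrs[i]);
    /* ... enter the guest ... */
    for (i = 0; i < ARRAY_SIZE(host_save_msrs); i++)
            wrmsrl(host_save_msrs[i], host_msrs[i]);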
diff --git a/drivers/kvm/kvm_vmx.h b/drivers/kvm/kvm_vmx.h
index 87e12d2bfa16..d139f73fb6e1 100644
--- a/drivers/kvm/kvm_vmx.h
+++ b/drivers/kvm/kvm_vmx.h
@@ -1,7 +1,7 @@
 #ifndef __KVM_VMX_H
 #define __KVM_VMX_H
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 /*
  * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
  * mechanism (cpu bug AA24)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index d6042eed7a78..73a022c1f7b6 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -287,7 +287,7 @@ static void svm_hardware_enable(void *garbage)
 
 	struct svm_cpu_data *svm_data;
 	uint64_t efer;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	struct desc_ptr gdt_descr;
 #else
 	struct Xgt_desc_struct gdt_descr;
@@ -397,7 +397,7 @@ static __init int svm_hardware_setup(void)
 	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
 	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
 	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
 	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
@@ -704,7 +704,7 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & KVM_EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
 			vcpu->shadow_efer |= KVM_EFER_LMA;
@@ -1097,7 +1097,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_APICBASE:
 		*data = vcpu->apic_base;
 		break;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_STAR:
 		*data = vcpu->svm->vmcb->save.star;
 		break;
@@ -1149,7 +1149,7 @@ static int rdmsr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 {
 	switch (ecx) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_EFER:
 		set_efer(vcpu, data);
 		break;
@@ -1172,7 +1172,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_IA32_APICBASE:
 		vcpu->apic_base = data;
 		break;
-#ifdef __x86_64___
+#ifdef CONFIG_X86_64
 	case MSR_STAR:
 		vcpu->svm->vmcb->save.star = data;
 		break;
@@ -1387,7 +1387,7 @@ again:
 		load_db_regs(vcpu->svm->db_regs);
 	}
 	asm volatile (
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rbx; push %%rcx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8; push %%r9; push %%r10; push %%r11;"
@@ -1397,7 +1397,7 @@ again:
1397 "push %%esi; push %%edi; push %%ebp;" 1397 "push %%esi; push %%edi; push %%ebp;"
1398#endif 1398#endif
1399 1399
1400#ifdef __x86_64__ 1400#ifdef CONFIG_X86_64
1401 "mov %c[rbx](%[vcpu]), %%rbx \n\t" 1401 "mov %c[rbx](%[vcpu]), %%rbx \n\t"
1402 "mov %c[rcx](%[vcpu]), %%rcx \n\t" 1402 "mov %c[rcx](%[vcpu]), %%rcx \n\t"
1403 "mov %c[rdx](%[vcpu]), %%rdx \n\t" 1403 "mov %c[rdx](%[vcpu]), %%rdx \n\t"
@@ -1421,7 +1421,7 @@ again:
1421 "mov %c[rbp](%[vcpu]), %%ebp \n\t" 1421 "mov %c[rbp](%[vcpu]), %%ebp \n\t"
1422#endif 1422#endif
1423 1423
1424#ifdef __x86_64__ 1424#ifdef CONFIG_X86_64
1425 /* Enter guest mode */ 1425 /* Enter guest mode */
1426 "push %%rax \n\t" 1426 "push %%rax \n\t"
1427 "mov %c[svm](%[vcpu]), %%rax \n\t" 1427 "mov %c[svm](%[vcpu]), %%rax \n\t"
@@ -1442,7 +1442,7 @@ again:
 #endif
 
 		/* Save guest registers, load host registers */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %%rbx, %c[rbx](%[vcpu]) \n\t"
 		"mov %%rcx, %c[rcx](%[vcpu]) \n\t"
 		"mov %%rdx, %c[rdx](%[vcpu]) \n\t"
@@ -1483,7 +1483,7 @@ again:
 		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
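These constraints hand the asm block each register's offset inside struct kvm_vcpu as an immediate; %c[name] then prints the bare constant, so "%c[rbx](%[vcpu])" assembles to a plain base-plus-displacement memory operand. A self-contained sketch of the pattern, using a hypothetical struct rather than the driver's:

    #include <stddef.h>     /* offsetof */

    struct demo { unsigned long a, b; };

    static unsigned long load_b(struct demo *d)
    {
            unsigned long v;

            /* %c[off_b] emits the constant 8, giving "mov 8(%reg), %reg". */
            asm ("mov %c[off_b](%[d]), %[v]"
                 : [v]"=r"(v)
                 : [d]"r"(d), [off_b]"i"(offsetof(struct demo, b)));
            return v;
    }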
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index fa8f7290dd4d..ad97014aa6eb 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -34,7 +34,7 @@ MODULE_LICENSE("GPL");
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
 #define HOST_IS_64 0
@@ -71,7 +71,7 @@ static struct kvm_vmx_segment_field {
 };
 
 static const u32 vmx_msr_index[] = {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
 	MSR_EFER, MSR_K6_STAR,
@@ -138,7 +138,7 @@ static u32 vmcs_read32(unsigned long field)
 
 static u64 vmcs_read64(unsigned long field)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	return vmcs_readl(field);
 #else
 	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
@@ -168,7 +168,7 @@ static void vmcs_write32(unsigned long field, u32 value)
 
 static void vmcs_write64(unsigned long field, u64 value)
 {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(field, value);
 #else
 	vmcs_writel(field, value);
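The #else branch, truncated by this hunk, handles 32-bit hosts, where vmcs_writel() stores only 32 bits at a time; a 64-bit field therefore takes two writes, encoding `field` for the low half and `field+1` for the high half, mirroring vmcs_read64() above. A sketch of the whole function under that assumption:

    static void vmcs_write64(unsigned long field, u64 value)
    {
    #ifdef CONFIG_X86_64
            vmcs_writel(field, value);
    #else
            vmcs_writel(field, (u32)value);                 /* low 32 bits */
            vmcs_writel(field + 1, (u32)(value >> 32));     /* high 32 bits */
    #endif
    }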
@@ -297,7 +297,7 @@ static void guest_write_tsc(u64 guest_tsc)
 
 static void reload_tss(void)
 {
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 
 	/*
 	 * VT restores TR but not its size. Useless.
@@ -328,7 +328,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	}
 
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		data = vmcs_readl(GUEST_FS_BASE);
 		break;
@@ -391,7 +391,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vmx_msr_entry *msr;
 	switch (msr_index) {
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		vmcs_writel(GUEST_FS_BASE, data);
 		break;
@@ -726,7 +726,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
 {
@@ -768,7 +768,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (!vcpu->rmode.active && !(cr0 & CR0_PE_MASK))
 		enter_rmode(vcpu);
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	if (vcpu->shadow_efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK))
 			enter_lmode(vcpu);
@@ -809,7 +809,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->cr4 = cr4;
 }
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
@@ -1096,7 +1096,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
 	rdmsrl(MSR_GS_BASE, a);
@@ -1174,7 +1174,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->cr0 = 0x60000010;
 	vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
 	vmx_set_cr4(vcpu, 0);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmx_set_efer(vcpu, 0);
 #endif
 
@@ -1690,7 +1690,7 @@ again:
 		vmcs_write16(HOST_GS_SELECTOR, 0);
 	}
 
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
 	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1714,7 +1714,7 @@ again:
 	asm (
 		/* Store host registers */
 		"pushf \n\t"
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"push %%rax; push %%rbx; push %%rdx;"
 		"push %%rsi; push %%rdi; push %%rbp;"
 		"push %%r8; push %%r9; push %%r10; push %%r11;"
@@ -1728,7 +1728,7 @@ again:
 		/* Check if vmlaunch of vmresume is needed */
 		"cmp $0, %1 \n\t"
 		/* Load guest registers. Don't clobber flags. */
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		"mov %c[cr2](%3), %%rax \n\t"
 		"mov %%rax, %%cr2 \n\t"
 		"mov %c[rax](%3), %%rax \n\t"
@@ -1765,7 +1765,7 @@ again:
1765 ".globl kvm_vmx_return \n\t" 1765 ".globl kvm_vmx_return \n\t"
1766 "kvm_vmx_return: " 1766 "kvm_vmx_return: "
1767 /* Save guest registers, load host registers, keep flags */ 1767 /* Save guest registers, load host registers, keep flags */
1768#ifdef __x86_64__ 1768#ifdef CONFIG_X86_64
1769 "xchg %3, 0(%%rsp) \n\t" 1769 "xchg %3, 0(%%rsp) \n\t"
1770 "mov %%rax, %c[rax](%3) \n\t" 1770 "mov %%rax, %c[rax](%3) \n\t"
1771 "mov %%rbx, %c[rbx](%3) \n\t" 1771 "mov %%rbx, %c[rbx](%3) \n\t"
@@ -1817,7 +1817,7 @@ again:
 		[rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
 		[rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
 		[rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 		[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
 		[r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
 		[r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
@@ -1838,7 +1838,7 @@ again:
 	fx_save(vcpu->guest_fx_image);
 	fx_restore(vcpu->host_fx_image);
 
-#ifndef __x86_64__
+#ifndef CONFIG_X86_64
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 #endif
 
@@ -1856,7 +1856,7 @@ again:
 	 */
 	local_irq_disable();
 	load_gs(gs_sel);
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
 #endif
 	local_irq_enable();
@@ -1966,7 +1966,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 	.set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
 #endif
 	.get_idt = vmx_get_idt,
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7e838bf0592d..1bff3e925fda 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -238,7 +238,7 @@ struct operand {
  * any modified flags.
  */
 
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
 #define _LO32 "k"	/* force 32-bit operand */
 #define _STK  "%%rsp"	/* stack pointer */
 #elif defined(__i386__)
@@ -385,7 +385,7 @@ struct operand {
 	} while (0)
 
 /* Emulate an instruction with quadword operands (x86/64 only). */
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
 #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \
 	do { \
 		__asm__ __volatile__ ( \
@@ -495,7 +495,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	case X86EMUL_MODE_PROT32:
 		op_bytes = ad_bytes = 4;
 		break;
-#ifdef __x86_64__
+#ifdef CONFIG_X86_64
 	case X86EMUL_MODE_PROT64:
 		op_bytes = 4;
 		ad_bytes = 8;
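Note the asymmetry this case sets up: in 64-bit mode the default operand size stays 4 bytes while addresses are 8 bytes wide; operands widen to 8 only when the instruction carries a REX.W prefix, which the emulator's prefix decoder handles separately. A simplified sketch of that decode step, not this hunk's code:

    /* b holds the current opcode byte; 0x40-0x4f are REX prefixes. */
    if (mode == X86EMUL_MODE_PROT64 && (b & 0xf0) == 0x40) {
            rex_prefix = b;
            if (b & 8)              /* REX.W: promote operand size */
                    op_bytes = 8;
    }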
@@ -1341,7 +1341,7 @@ twobyte_special_insn:
 		}
 		break;
 	}
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
 	{
 		unsigned long old, new;
 		if ((rc = ops->read_emulated(cr2, &old, 8, ctxt)) != 0)
diff --git a/drivers/kvm/x86_emulate.h b/drivers/kvm/x86_emulate.h
index 658b58de30fc..5d41bd55125e 100644
--- a/drivers/kvm/x86_emulate.h
+++ b/drivers/kvm/x86_emulate.h
@@ -162,7 +162,7 @@ struct x86_emulate_ctxt {
 /* Host execution mode. */
 #if defined(__i386__)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
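The conversion leaves an asymmetry here: the 32-bit arm still tests the compiler's __i386__ while the 64-bit arm now tests Kconfig's CONFIG_X86_64. X86EMUL_MODE_HOST lets a caller request emulation at the host's own bitness; a hypothetical use (the ctxt mode field is from this header, the caller is illustrative):

    struct x86_emulate_ctxt ctxt;

    ctxt.mode = X86EMUL_MODE_HOST;  /* decode as the host would */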