author     Mike Day <ncmike@ncultra.org>     2007-10-08 09:02:08 -0400
committer  Avi Kivity <avi@qumranet.com>     2008-01-30 10:52:50 -0500
commit     d77c26fce93d07802db97498959587eb9347b31d (patch)
tree       ed49397152d9a8c2ce3dda751a235283f07ef220 /drivers/kvm/vmx.c
parent     7e620d16b8838bc0ad5b27d2dd55796270cd588c (diff)
KVM: CodingStyle cleanup
Signed-off-by: Mike D. Day <ncmike@ncultra.org>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/vmx.c')
-rw-r--r--  drivers/kvm/vmx.c | 60
1 file changed, 32 insertions, 28 deletions
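The hunks below are pure Documentation/CodingStyle fixes with no functional change: C99 "//" comments become "/* ... */" block comments, the pointer star binds to the variable name (u32 *result, struct kvm *kvm), bare printk() calls gain an explicit log level, stray spaces just inside parentheses are dropped, braces around single-statement bodies go away, and over-long lines are wrapped. As a reference, a minimal before/after sketch of those rules follows; example_fn() is a hypothetical illustration, not code taken from vmx.c or this patch:

/*
 * Hypothetical illustration only -- example_fn() is not part of this commit;
 * it just condenses the style rules applied throughout the diff below.
 */

/* Before (style violations): */
static int example_fn(struct kvm* kvm, u32* out)
{
        if ( kvm->memslots[0].npages > 0 ) {
                printk("slot 0 is populated\n");    // no log level, C99 comment
        }
        *out = kvm->memslots[0].npages ;
        return 0;
}

/* After (Documentation/CodingStyle): */
static int example_fn(struct kvm *kvm, u32 *out)
{
        /*
         * The '*' binds to the name, no spaces just inside parentheses,
         * no braces around a single statement, and printk() states a level.
         */
        if (kvm->memslots[0].npages > 0)
                printk(KERN_DEBUG "slot 0 is populated\n");
        *out = kvm->memslots[0].npages;
        return 0;
}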
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 7b742901e783..6955580bb69e 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -62,7 +62,7 @@ struct vcpu_vmx {
                 int gs_ldt_reload_needed;
                 int fs_reload_needed;
                 int guest_efer_loaded;
-        }host_state;
+        } host_state;
 
 };
 
@@ -271,7 +271,7 @@ static void vmcs_writel(unsigned long field, unsigned long value)
         u8 error;
 
         asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
-                      : "=q"(error) : "a"(value), "d"(field) : "cc" );
+                      : "=q"(error) : "a"(value), "d"(field) : "cc");
         if (unlikely(error))
                 vmwrite_error(field, value);
 }
@@ -415,10 +415,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-        if (is_long_mode(&vmx->vcpu)) {
+        if (is_long_mode(&vmx->vcpu))
                 save_msrs(vmx->host_msrs +
                           vmx->msr_offset_kernel_gs_base, 1);
-        }
+
 #endif
         load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
         load_transition_efer(vmx);
@@ -845,7 +845,7 @@ static int vmx_get_irq(struct kvm_vcpu *vcpu)
                 if (is_external_interrupt(idtv_info_field))
                         return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
                 else
-                        printk("pending exception: not handled yet\n");
+                        printk(KERN_DEBUG "pending exception: not handled yet\n");
         }
         return -1;
 }
@@ -893,7 +893,7 @@ static void hardware_disable(void *garbage)
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
-                                      u32 msr, u32* result)
+                                      u32 msr, u32 *result)
 {
         u32 vmx_msr_low, vmx_msr_high;
         u32 ctl = ctl_min | ctl_opt;
@@ -1102,7 +1102,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
 }
 
-static gva_t rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm *kvm)
 {
         gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
         return base_gfn << PAGE_SHIFT;
@@ -1385,7 +1385,7 @@ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
         vmcs_writel(GUEST_GDTR_BASE, dt->base);
 }
 
-static int init_rmode_tss(struct kvm* kvm)
+static int init_rmode_tss(struct kvm *kvm)
 {
         gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
         u16 data = 0;
@@ -1494,7 +1494,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
         vmcs_writel(GUEST_RIP, 0);
         vmcs_writel(GUEST_RSP, 0);
 
-        //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
+        /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
         vmcs_writel(GUEST_DR7, 0x400);
 
         vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -1561,7 +1561,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
         get_idt(&dt);
         vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
 
-        asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
@@ -1613,7 +1613,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
         vmx->vcpu.cr0 = 0x60000010;
-        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); /* enter rmode */
         vmx_set_cr4(&vmx->vcpu, 0);
 #ifdef CONFIG_X86_64
         vmx_set_efer(&vmx->vcpu, 0);
@@ -1644,7 +1644,7 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
         u16 sp = vmcs_readl(GUEST_RSP);
         u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);
 
-        if (sp > ss_limit || sp < 6 ) {
+        if (sp > ss_limit || sp < 6) {
                 vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
                             __FUNCTION__,
                             vmcs_readl(GUEST_RSP),
@@ -1664,15 +1664,18 @@ static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
         ip = vmcs_readl(GUEST_RIP);
 
 
-        if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
-            emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
-            emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
+        if (emulator_write_emulated(
+                    ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
+            emulator_write_emulated(
+                    ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
+            emulator_write_emulated(
+                    ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
                 vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
                 return;
         }
 
         vmcs_writel(GUEST_RFLAGS, flags &
-                    ~( X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
+                    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
         vmcs_write16(GUEST_CS_SELECTOR, ent[1]) ;
         vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
         vmcs_writel(GUEST_RIP, ent[0]);
@@ -1777,10 +1780,9 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-            !is_page_fault(intr_info)) {
+            !is_page_fault(intr_info))
                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
                        "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
-        }
 
         if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
                 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1831,7 +1833,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         case EMULATE_DO_MMIO:
                 ++vcpu->stat.mmio_exits;
                 return 0;
-         case EMULATE_FAIL:
+        case EMULATE_FAIL:
                 kvm_report_emulation_failure(vcpu, "pagetable");
                 break;
         default:
@@ -1849,7 +1851,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 return 1;
         }
 
-        if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
+        if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
+            (INTR_TYPE_EXCEPTION | 1)) {
                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
                 return 0;
         }
@@ -2138,8 +2141,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                 return 0;
         }
 
-        if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
-                                exit_reason != EXIT_REASON_EXCEPTION_NMI )
+        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                                exit_reason != EXIT_REASON_EXCEPTION_NMI)
                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
         if (exit_reason < kvm_vmx_max_exit_handlers
@@ -2238,7 +2241,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
          */
         vmcs_writel(HOST_CR0, read_cr0());
 
-        asm (
+        asm(
                 /* Store host registers */
 #ifdef CONFIG_X86_64
                 "push %%rax; push %%rbx; push %%rdx;"
@@ -2342,8 +2345,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
               [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
               [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
 #ifdef CONFIG_X86_64
-              [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
-              [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
+              [r8]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8])),
+              [r9]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9])),
               [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
               [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
               [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
@@ -2352,11 +2355,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
               [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
 #endif
               [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
-            : "cc", "memory" );
+            : "cc", "memory");
 
-        vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+        vcpu->interrupt_window_open =
+                (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
 
-        asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+        asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
         vmx->launched = 1;
 
         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);