-rw-r--r--  drivers/kvm/svm.c         | 17
-rw-r--r--  drivers/kvm/vmx.c         | 18
-rw-r--r--  drivers/kvm/x86.c         | 43
-rw-r--r--  drivers/kvm/x86.h         |  7
-rw-r--r--  drivers/kvm/x86_emulate.c |  4
5 files changed, 30 insertions(+), 59 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index aa8e90b404a0..f9769338c621 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -207,17 +207,6 @@ static bool svm_exception_injected(struct kvm_vcpu *vcpu)
 	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
 }
 
-static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
-				       SVM_EVTINJ_VALID_ERR |
-				       SVM_EVTINJ_TYPE_EXEPT |
-				       GP_VECTOR;
-	svm->vmcb->control.event_inj_err = error_code;
-}
-
 static void inject_ud(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
@@ -1115,7 +1104,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (svm_get_msr(&svm->vcpu, ecx, &data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else {
 		svm->vmcb->save.rax = data & 0xffffffff;
 		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
@@ -1176,7 +1165,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
 	svm->next_rip = svm->vmcb->save.rip + 2;
 	if (svm_set_msr(&svm->vcpu, ecx, data))
-		svm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(&svm->vcpu, 0);
 	else
 		skip_emulated_instruction(&svm->vcpu);
 	return 1;
@@ -1688,8 +1677,6 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.tlb_flush = svm_flush_tlb,
 
-	.inject_gp = svm_inject_gp,
-
 	.run = svm_vcpu_run,
 	.handle_exit = handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index be0b12e709e5..3b3c5f7d2e7c 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -613,18 +613,6 @@ static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
 	return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
 }
 
-static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
-{
-	printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
-	       vmcs_readl(GUEST_RIP));
-	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
-	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
-		     GP_VECTOR |
-		     INTR_TYPE_EXCEPTION |
-		     INTR_INFO_DELIEVER_CODE_MASK |
-		     INTR_INFO_VALID_MASK);
-}
-
 static void vmx_inject_ud(struct kvm_vcpu *vcpu)
 {
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
@@ -2083,7 +2071,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	u64 data;
 
 	if (vmx_get_msr(vcpu, ecx, &data)) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
@@ -2101,7 +2089,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		| ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
 
 	if (vmx_set_msr(vcpu, ecx, data) != 0) {
-		vmx_inject_gp(vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 
@@ -2619,8 +2607,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.tlb_flush = vmx_flush_tlb,
 
-	.inject_gp = vmx_inject_gp,
-
 	.run = vmx_vcpu_run,
 	.handle_exit = kvm_handle_exit,
 	.skip_emulated_instruction = skip_emulated_instruction,
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index dc007a32a883..6deb052b5f93 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -128,11 +128,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
-static void inject_gp(struct kvm_vcpu *vcpu)
-{
-	kvm_x86_ops->inject_gp(vcpu, 0);
-}
-
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
 	WARN_ON(vcpu->exception.pending);
@@ -232,20 +227,20 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (cr0 & CR0_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
 		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
 		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
 		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
 		       "and a clear PE flag\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -257,14 +252,14 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (!is_pae(vcpu)) {
 			printk(KERN_DEBUG "set_cr0: #GP, start paging "
 			       "in long mode while PAE is disabled\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 		if (cs_l) {
 			printk(KERN_DEBUG "set_cr0: #GP, start paging "
 			       "in long mode while CS.L == 1\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 
 		}
@@ -273,7 +268,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
 			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 			       "reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 
@@ -299,7 +294,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & CR4_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -307,19 +302,19 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!(cr4 & X86_CR4_PAE)) {
 			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
 			       "in long mode\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
 		   && !load_pdptrs(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	kvm_x86_ops->set_cr4(vcpu, cr4);
@@ -340,7 +335,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	if (is_long_mode(vcpu)) {
 		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
 			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	} else {
@@ -348,13 +343,13 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		if (cr3 & CR3_PAE_RESERVED_BITS) {
 			printk(KERN_DEBUG
 			       "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 		if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
 			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 			       "reserved bits\n");
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			return;
 		}
 	}
@@ -375,7 +370,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 * to debug) behavior on the guest side.
 	 */
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 	else {
 		vcpu->cr3 = cr3;
 		vcpu->mmu.new_cr3(vcpu);
@@ -388,7 +383,7 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 	if (irqchip_in_kernel(vcpu->kvm))
@@ -436,14 +431,14 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (efer & EFER_RESERVED_BITS) {
 		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
 		       efer);
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
 	if (is_paging(vcpu)
 	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
 		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return;
 	}
 
@@ -2047,7 +2042,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
 		 */
 		pr_unimpl(vcpu, "guest string pio down\n");
-		inject_gp(vcpu);
+		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
 	vcpu->run->io.count = now;
@@ -2062,7 +2057,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		vcpu->pio.guest_pages[i] = page;
 		mutex_unlock(&vcpu->kvm->lock);
 		if (!page) {
-			inject_gp(vcpu);
+			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
 			return 1;
 		}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index d3ac4e2b3a41..78396d627be0 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -220,8 +220,6 @@ struct kvm_x86_ops {
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);
 
-	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
-
 	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
 	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
@@ -467,6 +465,11 @@ static inline u32 get_rdx_init_val(void)
 	return 0x600; /* P6 family */
 }
 
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
 #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
 #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
 #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
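The kvm_inject_gp() helper added above routes #GP through the generalized exception queue instead of the per-backend inject_gp hook. kvm_queue_exception_e() itself is not part of this diff; the sketch below is an inference from the kvm_queue_exception() context visible in the x86.c hunk (which opens with WARN_ON(vcpu->exception.pending)), so treat the field names as assumptions rather than the kernel's exact code:

/*
 * Sketch only -- not shown by this patch.  Records a pending exception
 * with an error code on the vcpu; the backend turns it into a real
 * hardware injection at the next guest entry.
 */
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->exception.pending);
	vcpu->exception.pending = true;
	vcpu->exception.has_error_code = true;
	vcpu->exception.nr = nr;
	vcpu->exception.error_code = error_code;
}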
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 3e3eba70d5ac..2e259a847697 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1779,7 +1779,7 @@ twobyte_insn:
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -1789,7 +1789,7 @@ twobyte_insn:
 		/* rdmsr */
 		rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
 		if (rc) {
-			kvm_x86_ops->inject_gp(ctxt->vcpu, 0);
+			kvm_inject_gp(ctxt->vcpu, 0);
 			c->eip = ctxt->vcpu->rip;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
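Taken together, the patch deletes three parallel ways of raising #GP (svm_inject_gp(), vmx_inject_gp(), and x86.c's inject_gp() wrapper over the kvm_x86_ops->inject_gp hook) and funnels every call site through the single kvm_inject_gp() helper. Below is a minimal, self-contained model of the resulting queue-then-deliver flow; all names in it (struct vcpu, queue_exception_e, vcpu_entry) are invented stand-ins for the kernel's, not its actual API:

#include <stdbool.h>
#include <stdio.h>

#define GP_VECTOR 13

/* Toy vcpu: just the pending-exception slot the real struct carries. */
struct vcpu {
	struct {
		bool pending;
		bool has_error_code;
		unsigned nr;
		unsigned error_code;
	} exception;
};

/* Generic side: record the fault; no vendor-specific state touched. */
static void queue_exception_e(struct vcpu *v, unsigned nr, unsigned ec)
{
	v->exception.pending = true;
	v->exception.has_error_code = true;
	v->exception.nr = nr;
	v->exception.error_code = ec;
}

static void inject_gp(struct vcpu *v, unsigned ec)
{
	queue_exception_e(v, GP_VECTOR, ec);
}

/* Vendor side (cf. svm_vcpu_run/vmx_vcpu_run): the one place that
 * turns a queued exception into an actual injection. */
static void vcpu_entry(struct vcpu *v)
{
	if (v->exception.pending) {
		printf("inject vector %u, error code %u\n",
		       v->exception.nr, v->exception.error_code);
		v->exception.pending = false;
	}
}

int main(void)
{
	struct vcpu v = {{ false, false, 0, 0 }};

	inject_gp(&v, 0);	/* e.g. WRMSR to an invalid MSR */
	vcpu_entry(&v);		/* delivered at the next guest entry */
	return 0;
}

The payoff of the pattern is visible in the x86_emulate.c hunks: common code like the emulator can raise a fault without reaching through the ops table into vendor-specific VMCS/VMCB state.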