 drivers/kvm/kvm.h      |   9
 drivers/kvm/kvm_main.c | 124
 drivers/kvm/svm.c      | 142
 drivers/kvm/vmx.c      | 129
 4 files changed, 187 insertions(+), 217 deletions(-)
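In outline, the patch hoists the vcpu run loop, previously duplicated in svm.c and vmx.c, into a single arch-neutral __vcpu_run() in kvm_main.c, and grows the kvm_x86_ops vtable with the hooks the generic loop needs (vcpu_reset, prepare_guest_switch, guest_debug_pre, handle_exit, inject_pending_irq, inject_pending_vectors). The compilable sketch below shows the shape of the resulting dispatch; the stand-in types and vcpu_run_sketch() are hypothetical, only the callback names come from the patch.

    /* Stand-in types; the real ones live in drivers/kvm/kvm.h. */
    struct kvm_vcpu { int pending_irq; };
    struct kvm_run  { int exit_reason; };

    struct kvm_x86_ops {
            void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
            void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
            int  (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
    };

    /* One generic loop instead of one per vendor: ->run enters the guest
     * once, ->handle_exit returns >0 to re-enter or <=0 to go back to
     * userspace, mirroring the new __vcpu_run() in kvm_main.c. */
    static int vcpu_run_sketch(struct kvm_x86_ops *ops,
                               struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
            int r;

            do {
                    ops->inject_pending_irq(vcpu);
                    ops->run(vcpu, run);
                    r = ops->handle_exit(run, vcpu);
            } while (r > 0);
            return r;
    }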
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 42bb225ad6c1..d93ab48424c6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -453,13 +453,16 @@ struct kvm_x86_ops {
         /* Create, but do not attach this VCPU */
         struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
         void (*vcpu_free)(struct kvm_vcpu *vcpu);
+        void (*vcpu_reset)(struct kvm_vcpu *vcpu);

+        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
         void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
         void (*vcpu_put)(struct kvm_vcpu *vcpu);
         void (*vcpu_decache)(struct kvm_vcpu *vcpu);

         int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                                struct kvm_debug_guest *dbg);
+        void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
         int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
         int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
         u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
@@ -491,12 +494,16 @@ struct kvm_x86_ops {

         void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

-        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
+        int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
         void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
         void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                 unsigned char *hypercall_addr);
         int (*get_irq)(struct kvm_vcpu *vcpu);
         void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
+        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
+                                       struct kvm_run *run);
 };

 extern struct kvm_x86_ops *kvm_x86_ops;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 9bfa1bcd26e9..e17b433152cb 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -38,6 +38,7 @@
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 #include <linux/anon_inodes.h>
+#include <linux/profile.h>

 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -1970,6 +1971,127 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                        struct kvm_run *kvm_run)
+{
+        return (!vcpu->irq_summary &&
+                kvm_run->request_interrupt_window &&
+                vcpu->interrupt_window_open &&
+                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                              struct kvm_run *kvm_run)
+{
+        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+        kvm_run->cr8 = get_cr8(vcpu);
+        kvm_run->apic_base = kvm_get_apic_base(vcpu);
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_run->ready_for_interrupt_injection = 1;
+        else
+                kvm_run->ready_for_interrupt_injection =
+                                        (vcpu->interrupt_window_open &&
+                                         vcpu->irq_summary == 0);
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        int r;
+
+        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+                printk("vcpu %d received sipi with vector # %x\n",
+                       vcpu->vcpu_id, vcpu->sipi_vector);
+                kvm_lapic_reset(vcpu);
+                kvm_x86_ops->vcpu_reset(vcpu);
+                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+        }
+
+preempted:
+        if (vcpu->guest_debug.enabled)
+                kvm_x86_ops->guest_debug_pre(vcpu);
+
+again:
+        r = kvm_mmu_reload(vcpu);
+        if (unlikely(r))
+                goto out;
+
+        preempt_disable();
+
+        kvm_x86_ops->prepare_guest_switch(vcpu);
+        kvm_load_guest_fpu(vcpu);
+
+        local_irq_disable();
+
+        if (signal_pending(current)) {
+                local_irq_enable();
+                preempt_enable();
+                r = -EINTR;
+                kvm_run->exit_reason = KVM_EXIT_INTR;
+                ++vcpu->stat.signal_exits;
+                goto out;
+        }
+
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_x86_ops->inject_pending_irq(vcpu);
+        else if (!vcpu->mmio_read_completed)
+                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+
+        vcpu->guest_mode = 1;
+
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+                        kvm_x86_ops->tlb_flush(vcpu);
+
+        kvm_x86_ops->run(vcpu, kvm_run);
+
+        vcpu->guest_mode = 0;
+        local_irq_enable();
+
+        ++vcpu->stat.exits;
+
+        preempt_enable();
+
+        /*
+         * Profile KVM exit RIPs:
+         */
+        if (unlikely(prof_on == KVM_PROFILING)) {
+                kvm_x86_ops->cache_regs(vcpu);
+                profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+        }
+
+        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+
+        if (r > 0) {
+                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                        r = -EINTR;
+                        kvm_run->exit_reason = KVM_EXIT_INTR;
+                        ++vcpu->stat.request_irq_exits;
+                        goto out;
+                }
+                if (!need_resched()) {
+                        ++vcpu->stat.light_exits;
+                        goto again;
+                }
+        }
+
+out:
+        if (r > 0) {
+                kvm_resched(vcpu);
+                goto preempted;
+        }
+
+        post_kvm_run_save(vcpu, kvm_run);
+
+        return r;
+}
+
+
 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         int r;
@@ -2017,7 +2139,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 kvm_x86_ops->decache_regs(vcpu);
         }

-        r = kvm_x86_ops->run(vcpu, kvm_run);
+        r = __vcpu_run(vcpu, kvm_run);

 out:
         if (vcpu->sigset_active)
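The dm_request_for_irq_injection() predicate added above is what lets userspace, when it rather than the kernel owns the interrupt-controller model, regain control at the first moment the guest can accept an interrupt. Below is a minimal standalone rendering of the same four-way test; struct state and want_userspace_exit() are stand-ins for the real vcpu/kvm_run fields, not patch code.

    #include <assert.h>

    struct state {
            int irq_summary;              /* an interrupt is already queued */
            int request_interrupt_window; /* userspace asked to be notified */
            int interrupt_window_open;    /* guest can take an interrupt now */
            int rflags_if;                /* guest RFLAGS.IF */
    };

    static int want_userspace_exit(const struct state *s)
    {
            return !s->irq_summary &&
                   s->request_interrupt_window &&
                   s->interrupt_window_open &&
                   s->rflags_if;
    }

    int main(void)
    {
            struct state s = { 0, 1, 1, 1 };
            assert(want_userspace_exit(&s));  /* window open, nothing queued */
            s.irq_summary = 1;                /* already queued: keep running */
            assert(!want_userspace_exit(&s));
            return 0;
    }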
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 7b22d396c149..95681ea16382 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -22,7 +22,6 @@
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
-#include <linux/profile.h>
 #include <linux/sched.h>

 #include <asm/desc.h>
@@ -50,6 +49,8 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)

+static void kvm_reput_irq(struct vcpu_svm *svm);
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
         return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -555,6 +556,13 @@ static void init_vmcb(struct vmcb *vmcb)
         /* rdx = ?? */
 }

+static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        init_vmcb(svm->vmcb);
+}
+
 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
         struct vcpu_svm *svm;
@@ -1252,10 +1260,20 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 };


-static int handle_exit(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
+        struct vcpu_svm *svm = to_svm(vcpu);
         u32 exit_code = svm->vmcb->control.exit_code;

+        kvm_reput_irq(svm);
+
+        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                kvm_run->fail_entry.hardware_entry_failure_reason
+                        = svm->vmcb->control.exit_code;
+                return 0;
+        }
+
         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
                 printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
@@ -1313,11 +1331,11 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
         svm_inject_irq(svm, irq);
 }

-static void svm_intr_assist(struct vcpu_svm *svm)
+static void svm_intr_assist(struct kvm_vcpu *vcpu)
 {
+        struct vcpu_svm *svm = to_svm(vcpu);
         struct vmcb *vmcb = svm->vmcb;
         int intr_vector = -1;
-        struct kvm_vcpu *vcpu = &svm->vcpu;

         kvm_inject_pending_timer_irqs(vcpu);
         if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
@@ -1376,9 +1394,10 @@ static void svm_do_inject_vector(struct vcpu_svm *svm)
         svm_inject_irq(svm, irq);
 }

-static void do_interrupt_requests(struct vcpu_svm *svm,
+static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run)
 {
+        struct vcpu_svm *svm = to_svm(vcpu);
         struct vmcb_control_area *control = &svm->vmcb->control;

         svm->vcpu.interrupt_window_open =
@@ -1401,35 +1420,6 @@ static void do_interrupt_requests(struct vcpu_svm *svm,
         control->intercept &= ~(1ULL << INTERCEPT_VINTR);
 }

-static void post_kvm_run_save(struct vcpu_svm *svm,
-                              struct kvm_run *kvm_run)
-{
-        if (irqchip_in_kernel(svm->vcpu.kvm))
-                kvm_run->ready_for_interrupt_injection = 1;
-        else
-                kvm_run->ready_for_interrupt_injection =
-                        (svm->vcpu.interrupt_window_open &&
-                         svm->vcpu.irq_summary == 0);
-        kvm_run->if_flag = (svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
-        kvm_run->cr8 = get_cr8(&svm->vcpu);
-        kvm_run->apic_base = kvm_get_apic_base(&svm->vcpu);
-}
-
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
-static int dm_request_for_irq_injection(struct vcpu_svm *svm,
-                                        struct kvm_run *kvm_run)
-{
-        return (!svm->vcpu.irq_summary &&
-                kvm_run->request_interrupt_window &&
-                svm->vcpu.interrupt_window_open &&
-                (svm->vmcb->save.rflags & X86_EFLAGS_IF));
-}
-
 static void save_db_regs(unsigned long *db_regs)
 {
         asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
@@ -1451,38 +1441,16 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
         force_new_asid(vcpu);
 }

-static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+}
+
+static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
         u16 fs_selector;
         u16 gs_selector;
         u16 ldt_selector;
-        int r;
-
-again:
-        r = kvm_mmu_reload(vcpu);
-        if (unlikely(r))
-                return r;
-
-        clgi();
-
-        if (signal_pending(current)) {
-                stgi();
-                ++vcpu->stat.signal_exits;
-                post_kvm_run_save(svm, kvm_run);
-                kvm_run->exit_reason = KVM_EXIT_INTR;
-                return -EINTR;
-        }
-
-        if (irqchip_in_kernel(vcpu->kvm))
-                svm_intr_assist(svm);
-        else if (!vcpu->mmio_read_completed)
-                do_interrupt_requests(svm, kvm_run);
-
-        vcpu->guest_mode = 1;
-        if (vcpu->requests)
-                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
-                        svm_flush_tlb(vcpu);

         pre_svm_run(svm);

@@ -1501,10 +1469,9 @@ again:
                 load_db_regs(svm->db_regs);
         }

-        if (vcpu->fpu_active) {
-                fx_save(&vcpu->host_fx_image);
-                fx_restore(&vcpu->guest_fx_image);
-        }
+        clgi();
+
+        local_irq_enable();

         asm volatile (
 #ifdef CONFIG_X86_64
@@ -1612,12 +1579,9 @@ again:
 #endif
             : "cc", "memory" );

-        vcpu->guest_mode = 0;
+        local_irq_disable();

-        if (vcpu->fpu_active) {
-                fx_save(&vcpu->guest_fx_image);
-                fx_restore(&vcpu->host_fx_image);
-        }
+        stgi();

         if ((svm->vmcb->save.dr7 & 0xff))
                 load_db_regs(svm->host_db_regs);
@@ -1635,40 +1599,7 @@ again:

         reload_tss(vcpu);

-        /*
-         * Profile KVM exit RIPs:
-         */
-        if (unlikely(prof_on == KVM_PROFILING))
-                profile_hit(KVM_PROFILING,
-                            (void *)(unsigned long)svm->vmcb->save.rip);
-
-        stgi();
-
-        kvm_reput_irq(svm);
-
         svm->next_rip = 0;
-
-        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
-                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-                kvm_run->fail_entry.hardware_entry_failure_reason
-                        = svm->vmcb->control.exit_code;
-                post_kvm_run_save(svm, kvm_run);
-                return 0;
-        }
-
-        r = handle_exit(svm, kvm_run);
-        if (r > 0) {
-                if (dm_request_for_irq_injection(svm, kvm_run)) {
-                        ++vcpu->stat.request_irq_exits;
-                        post_kvm_run_save(svm, kvm_run);
-                        kvm_run->exit_reason = KVM_EXIT_INTR;
-                        return -EINTR;
-                }
-                kvm_resched(vcpu);
-                goto again;
-        }
-        post_kvm_run_save(svm, kvm_run);
-        return r;
 }

 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -1752,7 +1683,9 @@ static struct kvm_x86_ops svm_x86_ops = {

         .vcpu_create = svm_create_vcpu,
         .vcpu_free = svm_free_vcpu,
+        .vcpu_reset = svm_vcpu_reset,

+        .prepare_guest_switch = svm_prepare_guest_switch,
         .vcpu_load = svm_vcpu_load,
         .vcpu_put = svm_vcpu_put,
         .vcpu_decache = svm_vcpu_decache,
@@ -1786,10 +1719,13 @@ static struct kvm_x86_ops svm_x86_ops = {
         .inject_gp = svm_inject_gp,

         .run = svm_vcpu_run,
+        .handle_exit = handle_exit,
         .skip_emulated_instruction = skip_emulated_instruction,
         .patch_hypercall = svm_patch_hypercall,
         .get_irq = svm_get_irq,
         .set_irq = svm_set_irq,
+        .inject_pending_irq = svm_intr_assist,
+        .inject_pending_vectors = do_interrupt_requests,
 };

 static int __init svm_init(void)
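With the loop gone, svm_vcpu_run() above keeps only the world switch, bracketed by clgi()/stgi(). The compilable ordering sketch below uses empty stubs for the real primitives (CLGI/STGI instructions and the kernel's IRQ-flag helpers); the intended ordering, as I read the patch, is that clearing GIF first makes it safe to set RFLAGS.IF before VMRUN, since GIF gates interrupt delivery until stgi(), at which point any host interrupt that arrived while the guest ran is taken.

    static void clgi(void) { /* GIF := 0 */ }
    static void stgi(void) { /* GIF := 1 */ }
    static void local_irq_enable(void)  { /* RFLAGS.IF := 1 */ }
    static void local_irq_disable(void) { /* RFLAGS.IF := 0 */ }
    static void vmrun(void) { /* VMLOAD/VMRUN/VMSAVE inline asm in the patch */ }

    static void svm_world_switch_sketch(void)
    {
            clgi();             /* no host interrupts from here on */
            local_irq_enable(); /* IF=1, but delivery still masked by GIF=0 */
            vmrun();
            local_irq_disable();
            stgi();             /* pending host interrupts fire now */
    }

    int main(void)
    {
            svm_world_switch_sketch();
            return 0;
    }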
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 713f78a89595..fa4277d520ca 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
-#include <linux/profile.h>
 #include <linux/sched.h>

 #include <asm/io.h>
@@ -355,8 +354,10 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
         vmx->vcpu.stat.efer_reload++;
 }

-static void vmx_save_host_state(struct vcpu_vmx *vmx)
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
+        struct vcpu_vmx *vmx = to_vmx(vcpu);
+
         if (vmx->host_state.loaded)
                 return;

@@ -1598,6 +1599,13 @@ out:
         return ret;
 }

+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+        struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+        vmx_vcpu_setup(vmx);
+}
+
 static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
 {
         u16 ent[2];
@@ -2019,20 +2027,6 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
         return 1;
 }

-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
-                              struct kvm_run *kvm_run)
-{
-        kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
-        kvm_run->cr8 = get_cr8(vcpu);
-        kvm_run->apic_base = kvm_get_apic_base(vcpu);
-        if (irqchip_in_kernel(vcpu->kvm))
-                kvm_run->ready_for_interrupt_injection = 1;
-        else
-                kvm_run->ready_for_interrupt_injection =
-                        (vcpu->interrupt_window_open &&
-                         vcpu->irq_summary == 0);
-}
-
 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
                                    struct kvm_run *kvm_run)
 {
@@ -2123,21 +2117,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         return 0;
 }

-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
-                                        struct kvm_run *kvm_run)
-{
-        return (!vcpu->irq_summary &&
-                kvm_run->request_interrupt_window &&
-                vcpu->interrupt_window_open &&
-                (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
-}
-
 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 {
 }
@@ -2214,59 +2193,15 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
         enable_irq_window(vcpu);
 }

-static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
-        int r;
-
-        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
-                printk("vcpu %d received sipi with vector # %x\n",
-                       vcpu->vcpu_id, vcpu->sipi_vector);
-                kvm_lapic_reset(vcpu);
-                vmx_vcpu_setup(vmx);
-                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
-        }
-
-preempted:
-        if (vcpu->guest_debug.enabled)
-                kvm_guest_debug_pre(vcpu);
-
-again:
-        r = kvm_mmu_reload(vcpu);
-        if (unlikely(r))
-                goto out;
-
-        preempt_disable();
-
-        vmx_save_host_state(vmx);
-        kvm_load_guest_fpu(vcpu);

         /*
          * Loading guest fpu may have cleared host cr0.ts
          */
         vmcs_writel(HOST_CR0, read_cr0());

-        local_irq_disable();
-
-        if (signal_pending(current)) {
-                local_irq_enable();
-                preempt_enable();
-                r = -EINTR;
-                kvm_run->exit_reason = KVM_EXIT_INTR;
-                ++vcpu->stat.signal_exits;
-                goto out;
-        }
-
-        if (irqchip_in_kernel(vcpu->kvm))
-                vmx_intr_assist(vcpu);
-        else if (!vcpu->mmio_read_completed)
-                do_interrupt_requests(vcpu, kvm_run);
-
-        vcpu->guest_mode = 1;
-        if (vcpu->requests)
-                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
-                        vmx_flush_tlb(vcpu);
-
         asm (
                 /* Store host registers */
 #ifdef CONFIG_X86_64
@@ -2383,46 +2318,10 @@ again:
               [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
             : "cc", "memory" );

-        vcpu->guest_mode = 0;
-        local_irq_enable();
-
-        ++vcpu->stat.exits;
-
         vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;

         asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
         vmx->launched = 1;
-
-        preempt_enable();
-
-        /*
-         * Profile KVM exit RIPs:
-         */
-        if (unlikely(prof_on == KVM_PROFILING))
-                profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-        r = kvm_handle_exit(kvm_run, vcpu);
-        if (r > 0) {
-                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                        r = -EINTR;
-                        kvm_run->exit_reason = KVM_EXIT_INTR;
-                        ++vcpu->stat.request_irq_exits;
-                        goto out;
-                }
-                if (!need_resched()) {
-                        ++vcpu->stat.light_exits;
-                        goto again;
-                }
-        }
-
-out:
-        if (r > 0) {
-                kvm_resched(vcpu);
-                goto preempted;
-        }
-
-        post_kvm_run_save(vcpu, kvm_run);
-        return r;
 }

 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
@@ -2560,12 +2459,15 @@ static struct kvm_x86_ops vmx_x86_ops = {

         .vcpu_create = vmx_create_vcpu,
         .vcpu_free = vmx_free_vcpu,
+        .vcpu_reset = vmx_vcpu_reset,

+        .prepare_guest_switch = vmx_save_host_state,
         .vcpu_load = vmx_vcpu_load,
         .vcpu_put = vmx_vcpu_put,
         .vcpu_decache = vmx_vcpu_decache,

         .set_guest_debug = set_guest_debug,
+        .guest_debug_pre = kvm_guest_debug_pre,
         .get_msr = vmx_get_msr,
         .set_msr = vmx_set_msr,
         .get_segment_base = vmx_get_segment_base,
@@ -2594,10 +2496,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
         .inject_gp = vmx_inject_gp,

         .run = vmx_vcpu_run,
+        .handle_exit = kvm_handle_exit,
         .skip_emulated_instruction = skip_emulated_instruction,
         .patch_hypercall = vmx_patch_hypercall,
         .get_irq = vmx_get_irq,
         .set_irq = vmx_inject_irq,
+        .inject_pending_irq = vmx_intr_assist,
+        .inject_pending_vectors = do_interrupt_requests,
 };

 static int __init vmx_init(void)
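Taken together, the two ops tables now differ only in which vendor function fills each slot; note the asymmetry that .prepare_guest_switch is real work on VMX (vmx_save_host_state) and an empty stub on SVM. Below is a small, self-contained illustration of that dispatch pattern, with hypothetical printouts standing in for the real VMCB/VMCS work; nothing here is patch code.

    #include <stdio.h>

    struct kvm_vcpu { const char *name; };

    struct kvm_x86_ops {
            void (*vcpu_reset)(struct kvm_vcpu *vcpu);
            void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
    };

    static void svm_reset(struct kvm_vcpu *v)   { printf("%s: init_vmcb()\n", v->name); }
    static void svm_prepare(struct kvm_vcpu *v) { (void)v; /* nothing to do on SVM */ }
    static void vmx_reset(struct kvm_vcpu *v)   { printf("%s: vmx_vcpu_setup()\n", v->name); }
    static void vmx_prepare(struct kvm_vcpu *v) { printf("%s: vmx_save_host_state()\n", v->name); }

    static struct kvm_x86_ops svm_ops = { svm_reset, svm_prepare };
    static struct kvm_x86_ops vmx_ops = { vmx_reset, vmx_prepare };

    int main(void)
    {
            struct kvm_vcpu vcpu = { "vcpu0" };
            struct kvm_x86_ops *ops = &vmx_ops;   /* picked once at module init */

            ops->vcpu_reset(&vcpu);               /* generic SIPI path in __vcpu_run() */
            ops->prepare_guest_switch(&vcpu);     /* before every guest entry */
            (void)svm_ops;
            return 0;
    }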