author      Avi Kivity <avi@qumranet.com>    2007-09-10 11:10:54 -0400
committer   Avi Kivity <avi@qumranet.com>    2007-10-13 04:18:28 -0400
commit      04d2cc7780d48a212843e38d46402d97fa1f4774 (patch)
tree        a209131bad59abcf574abbaae23145db3c4005e0 /drivers/kvm/kvm_main.c
parent      29bd8a78082f2d7e2165a735f50b5c716ef3213b (diff)
KVM: Move main vcpu loop into subarch independent code
This simplifies adding new code and reduces overall code size.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--   drivers/kvm/kvm_main.c   124
1 file changed, 123 insertions(+), 1 deletion(-)
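The loop being made generic talks to the architecture backend only through the kvm_x86_ops callback table. As a rough orientation before reading the diff, the sketch below lists the callbacks that the new __vcpu_run() and its helpers invoke. It is reconstructed from the kvm_x86_ops-> calls visible in this patch; the struct name and the return types are illustrative assumptions, not the kernel's actual declaration.

/*
 * Illustrative sketch only -- not the kernel's declaration.
 * Members are taken from the kvm_x86_ops-> calls visible in the diff
 * below; the struct name and the return types are assumptions.
 */
struct kvm_x86_ops_sketch {
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu);
        void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
};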
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 9bfa1bcd26e9..e17b433152cb 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -38,6 +38,7 @@
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 #include <linux/anon_inodes.h>
+#include <linux/profile.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -1970,6 +1971,127 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
 
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                        struct kvm_run *kvm_run)
+{
+        return (!vcpu->irq_summary &&
+                kvm_run->request_interrupt_window &&
+                vcpu->interrupt_window_open &&
+                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                              struct kvm_run *kvm_run)
+{
+        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+        kvm_run->cr8 = get_cr8(vcpu);
+        kvm_run->apic_base = kvm_get_apic_base(vcpu);
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_run->ready_for_interrupt_injection = 1;
+        else
+                kvm_run->ready_for_interrupt_injection =
+                                        (vcpu->interrupt_window_open &&
+                                         vcpu->irq_summary == 0);
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        int r;
+
+        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+                printk("vcpu %d received sipi with vector # %x\n",
+                       vcpu->vcpu_id, vcpu->sipi_vector);
+                kvm_lapic_reset(vcpu);
+                kvm_x86_ops->vcpu_reset(vcpu);
+                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+        }
+
+preempted:
+        if (vcpu->guest_debug.enabled)
+                kvm_x86_ops->guest_debug_pre(vcpu);
+
+again:
+        r = kvm_mmu_reload(vcpu);
+        if (unlikely(r))
+                goto out;
+
+        preempt_disable();
+
+        kvm_x86_ops->prepare_guest_switch(vcpu);
+        kvm_load_guest_fpu(vcpu);
+
+        local_irq_disable();
+
+        if (signal_pending(current)) {
+                local_irq_enable();
+                preempt_enable();
+                r = -EINTR;
+                kvm_run->exit_reason = KVM_EXIT_INTR;
+                ++vcpu->stat.signal_exits;
+                goto out;
+        }
+
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_x86_ops->inject_pending_irq(vcpu);
+        else if (!vcpu->mmio_read_completed)
+                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+
+        vcpu->guest_mode = 1;
+
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+                        kvm_x86_ops->tlb_flush(vcpu);
+
+        kvm_x86_ops->run(vcpu, kvm_run);
+
+        vcpu->guest_mode = 0;
+        local_irq_enable();
+
+        ++vcpu->stat.exits;
+
+        preempt_enable();
+
+        /*
+         * Profile KVM exit RIPs:
+         */
+        if (unlikely(prof_on == KVM_PROFILING)) {
+                kvm_x86_ops->cache_regs(vcpu);
+                profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+        }
+
+        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+
+        if (r > 0) {
+                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                        r = -EINTR;
+                        kvm_run->exit_reason = KVM_EXIT_INTR;
+                        ++vcpu->stat.request_irq_exits;
+                        goto out;
+                }
+                if (!need_resched()) {
+                        ++vcpu->stat.light_exits;
+                        goto again;
+                }
+        }
+
+out:
+        if (r > 0) {
+                kvm_resched(vcpu);
+                goto preempted;
+        }
+
+        post_kvm_run_save(vcpu, kvm_run);
+
+        return r;
+}
+
+
 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
         int r;
@@ -2017,7 +2139,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 kvm_x86_ops->decache_regs(vcpu);
         }
 
-        r = kvm_x86_ops->run(vcpu, kvm_run);
+        r = __vcpu_run(vcpu, kvm_run);
 
 out:
         if (vcpu->sigset_active)
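After this change the per-architecture ->run() callback only enters and leaves the guest; looping, signal handling, rescheduling, and the userspace interrupt-window exits live in the generic __vcpu_run() above, structured around its preempted/again/out labels. The stand-alone userspace sketch below models only that label-and-goto flow: the stub predicates (handle_exit_stub, irq_window_requested_stub, need_resched_stub) and the exit schedule they encode are invented for the example and are not KVM code.

/* Simplified model of the __vcpu_run() control flow; stubs replace KVM. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static int iteration;

static int handle_exit_stub(void)           /* models ->run() + ->handle_exit() */
{
        ++iteration;
        return 1;                            /* >0: exit handled in kernel */
}

static bool irq_window_requested_stub(void) /* models dm_request_for_irq_injection() */
{
        return iteration >= 6;
}

static bool need_resched_stub(void)         /* models need_resched() */
{
        return iteration == 3;
}

static int vcpu_run_model(void)
{
        int r;

preempted:
        printf("preempted: re-arm per-iteration state\n");

again:
        printf("again:     enter guest (iteration %d)\n", iteration + 1);
        r = handle_exit_stub();

        if (r > 0) {
                if (irq_window_requested_stub()) {
                        r = -EINTR;          /* KVM_EXIT_INTR back to userspace */
                        goto out;
                }
                if (!need_resched_stub())
                        goto again;          /* "light exit": stay in the loop */
        }

out:
        if (r > 0) {
                printf("out:       reschedule, then start over\n");
                goto preempted;              /* kvm_resched() in the real code */
        }
        printf("out:       save run state, return %d to userspace\n", r);
        return r;
}

int main(void)
{
        vcpu_run_model();
        return 0;
}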