author     Hollis Blanchard <hollisb@us.ibm.com>   2007-11-01 15:16:10 -0400
committer  Avi Kivity <avi@qumranet.com>           2008-01-30 10:53:00 -0500
commit     b6c7a5dccf9471f4891df722dbd0700ce56eb2e2
tree       cf7c15cb74b5949940f69df9fe9bc50161d880f5 /drivers/kvm/x86.c
parent     d075206073286dca84768137af0a0bf3d11f0663

KVM: Portability: Move x86 vcpu ioctl handlers to x86.c

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/x86.c')
-rw-r--r--   drivers/kvm/x86.c   427
1 file changed, 427 insertions, 0 deletions
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index ef1661f10b48..394da6605364 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1787,6 +1787,433 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
 /*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                        struct kvm_run *kvm_run)
+{
+        return (!vcpu->irq_summary &&
+                kvm_run->request_interrupt_window &&
+                vcpu->interrupt_window_open &&
+                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                              struct kvm_run *kvm_run)
+{
+        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+        kvm_run->cr8 = get_cr8(vcpu);
+        kvm_run->apic_base = kvm_get_apic_base(vcpu);
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_run->ready_for_interrupt_injection = 1;
+        else
+                kvm_run->ready_for_interrupt_injection =
+                                (vcpu->interrupt_window_open &&
+                                 vcpu->irq_summary == 0);
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        int r;
+
+        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+                pr_debug("vcpu %d received sipi with vector # %x\n",
+                         vcpu->vcpu_id, vcpu->sipi_vector);
+                kvm_lapic_reset(vcpu);
+                r = kvm_x86_ops->vcpu_reset(vcpu);
+                if (r)
+                        return r;
+                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+        }
+
+preempted:
+        if (vcpu->guest_debug.enabled)
+                kvm_x86_ops->guest_debug_pre(vcpu);
+
+again:
+        r = kvm_mmu_reload(vcpu);
+        if (unlikely(r))
+                goto out;
+
+        kvm_inject_pending_timer_irqs(vcpu);
+
+        preempt_disable();
+
+        kvm_x86_ops->prepare_guest_switch(vcpu);
+        kvm_load_guest_fpu(vcpu);
+
+        local_irq_disable();
+
+        if (signal_pending(current)) {
+                local_irq_enable();
+                preempt_enable();
+                r = -EINTR;
+                kvm_run->exit_reason = KVM_EXIT_INTR;
+                ++vcpu->stat.signal_exits;
+                goto out;
+        }
+
+        if (irqchip_in_kernel(vcpu->kvm))
+                kvm_x86_ops->inject_pending_irq(vcpu);
+        else if (!vcpu->mmio_read_completed)
+                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+
+        vcpu->guest_mode = 1;
+        kvm_guest_enter();
+
+        if (vcpu->requests)
+                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                        kvm_x86_ops->tlb_flush(vcpu);
+
+        kvm_x86_ops->run(vcpu, kvm_run);
+
+        vcpu->guest_mode = 0;
+        local_irq_enable();
+
+        ++vcpu->stat.exits;
+
+        /*
+         * We must have an instruction between local_irq_enable() and
+         * kvm_guest_exit(), so the timer interrupt isn't delayed by
+         * the interrupt shadow. The stat.exits increment will do nicely.
+         * But we need to prevent reordering, hence this barrier():
+         */
+        barrier();
+
+        kvm_guest_exit();
+
+        preempt_enable();
+
+        /*
+         * Profile KVM exit RIPs:
+         */
+        if (unlikely(prof_on == KVM_PROFILING)) {
+                kvm_x86_ops->cache_regs(vcpu);
+                profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+        }
+
+        r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+
+        if (r > 0) {
+                if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                        r = -EINTR;
+                        kvm_run->exit_reason = KVM_EXIT_INTR;
+                        ++vcpu->stat.request_irq_exits;
+                        goto out;
+                }
+                if (!need_resched()) {
+                        ++vcpu->stat.light_exits;
+                        goto again;
+                }
+        }
+
+out:
+        if (r > 0) {
+                kvm_resched(vcpu);
+                goto preempted;
+        }
+
+        post_kvm_run_save(vcpu, kvm_run);
+
+        return r;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+        int r;
+        sigset_t sigsaved;
+
+        vcpu_load(vcpu);
+
+        if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+                kvm_vcpu_block(vcpu);
+                vcpu_put(vcpu);
+                return -EAGAIN;
+        }
+
+        if (vcpu->sigset_active)
+                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+        /* re-sync apic's tpr */
+        if (!irqchip_in_kernel(vcpu->kvm))
+                set_cr8(vcpu, kvm_run->cr8);
+
+        if (vcpu->pio.cur_count) {
+                r = complete_pio(vcpu);
+                if (r)
+                        goto out;
+        }
+#ifdef CONFIG_HAS_IOMEM
+        if (vcpu->mmio_needed) {
+                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+                vcpu->mmio_read_completed = 1;
+                vcpu->mmio_needed = 0;
+                r = emulate_instruction(vcpu, kvm_run,
+                                        vcpu->mmio_fault_cr2, 0, 1);
+                if (r == EMULATE_DO_MMIO) {
+                        /*
+                         * Read-modify-write. Back to userspace.
+                         */
+                        r = 0;
+                        goto out;
+                }
+        }
+#endif
+        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
+                kvm_x86_ops->cache_regs(vcpu);
+                vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
+                kvm_x86_ops->decache_regs(vcpu);
+        }
+
+        r = __vcpu_run(vcpu, kvm_run);
+
+out:
+        if (vcpu->sigset_active)
+                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+        vcpu_put(vcpu);
+        return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+        vcpu_load(vcpu);
+
+        kvm_x86_ops->cache_regs(vcpu);
+
+        regs->rax = vcpu->regs[VCPU_REGS_RAX];
+        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
+        regs->rcx = vcpu->regs[VCPU_REGS_RCX];
+        regs->rdx = vcpu->regs[VCPU_REGS_RDX];
+        regs->rsi = vcpu->regs[VCPU_REGS_RSI];
+        regs->rdi = vcpu->regs[VCPU_REGS_RDI];
+        regs->rsp = vcpu->regs[VCPU_REGS_RSP];
+        regs->rbp = vcpu->regs[VCPU_REGS_RBP];
+#ifdef CONFIG_X86_64
+        regs->r8 = vcpu->regs[VCPU_REGS_R8];
+        regs->r9 = vcpu->regs[VCPU_REGS_R9];
+        regs->r10 = vcpu->regs[VCPU_REGS_R10];
+        regs->r11 = vcpu->regs[VCPU_REGS_R11];
+        regs->r12 = vcpu->regs[VCPU_REGS_R12];
+        regs->r13 = vcpu->regs[VCPU_REGS_R13];
+        regs->r14 = vcpu->regs[VCPU_REGS_R14];
+        regs->r15 = vcpu->regs[VCPU_REGS_R15];
+#endif
+
+        regs->rip = vcpu->rip;
+        regs->rflags = kvm_x86_ops->get_rflags(vcpu);
+
+        /*
+         * Don't leak debug flags in case they were set for guest debugging
+         */
+        if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
+                regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+
+        vcpu_put(vcpu);
+
+        return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+        vcpu_load(vcpu);
+
+        vcpu->regs[VCPU_REGS_RAX] = regs->rax;
+        vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
+        vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
+        vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
+        vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
+        vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
+        vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
+        vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
+#ifdef CONFIG_X86_64
+        vcpu->regs[VCPU_REGS_R8] = regs->r8;
+        vcpu->regs[VCPU_REGS_R9] = regs->r9;
+        vcpu->regs[VCPU_REGS_R10] = regs->r10;
+        vcpu->regs[VCPU_REGS_R11] = regs->r11;
+        vcpu->regs[VCPU_REGS_R12] = regs->r12;
+        vcpu->regs[VCPU_REGS_R13] = regs->r13;
+        vcpu->regs[VCPU_REGS_R14] = regs->r14;
+        vcpu->regs[VCPU_REGS_R15] = regs->r15;
+#endif
+
+        vcpu->rip = regs->rip;
+        kvm_x86_ops->set_rflags(vcpu, regs->rflags);
+
+        kvm_x86_ops->decache_regs(vcpu);
+
+        vcpu_put(vcpu);
+
+        return 0;
+}
+
+static void get_segment(struct kvm_vcpu *vcpu,
+                        struct kvm_segment *var, int seg)
+{
+        return kvm_x86_ops->get_segment(vcpu, var, seg);
+}
+
+void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+        struct kvm_segment cs;
+
+        get_segment(vcpu, &cs, VCPU_SREG_CS);
+        *db = cs.db;
+        *l = cs.l;
+}
+EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                  struct kvm_sregs *sregs)
+{
+        struct descriptor_table dt;
+        int pending_vec;
+
+        vcpu_load(vcpu);
+
+        get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+        get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+        get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+        get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+        get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+        get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+
+        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+
+        kvm_x86_ops->get_idt(vcpu, &dt);
+        sregs->idt.limit = dt.limit;
+        sregs->idt.base = dt.base;
+        kvm_x86_ops->get_gdt(vcpu, &dt);
+        sregs->gdt.limit = dt.limit;
+        sregs->gdt.base = dt.base;
+
+        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+        sregs->cr0 = vcpu->cr0;
+        sregs->cr2 = vcpu->cr2;
+        sregs->cr3 = vcpu->cr3;
+        sregs->cr4 = vcpu->cr4;
+        sregs->cr8 = get_cr8(vcpu);
+        sregs->efer = vcpu->shadow_efer;
+        sregs->apic_base = kvm_get_apic_base(vcpu);
+
+        if (irqchip_in_kernel(vcpu->kvm)) {
+                memset(sregs->interrupt_bitmap, 0,
+                       sizeof sregs->interrupt_bitmap);
+                pending_vec = kvm_x86_ops->get_irq(vcpu);
+                if (pending_vec >= 0)
+                        set_bit(pending_vec,
+                                (unsigned long *)sregs->interrupt_bitmap);
+        } else
+                memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
+                       sizeof sregs->interrupt_bitmap);
+
+        vcpu_put(vcpu);
+
+        return 0;
+}
+
+static void set_segment(struct kvm_vcpu *vcpu,
+                        struct kvm_segment *var, int seg)
+{
+        return kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                  struct kvm_sregs *sregs)
+{
+        int mmu_reset_needed = 0;
+        int i, pending_vec, max_bits;
+        struct descriptor_table dt;
+
+        vcpu_load(vcpu);
+
+        dt.limit = sregs->idt.limit;
+        dt.base = sregs->idt.base;
+        kvm_x86_ops->set_idt(vcpu, &dt);
+        dt.limit = sregs->gdt.limit;
+        dt.base = sregs->gdt.base;
+        kvm_x86_ops->set_gdt(vcpu, &dt);
+
+        vcpu->cr2 = sregs->cr2;
+        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
+        vcpu->cr3 = sregs->cr3;
+
+        set_cr8(vcpu, sregs->cr8);
+
+        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
+#ifdef CONFIG_X86_64
+        kvm_x86_ops->set_efer(vcpu, sregs->efer);
+#endif
+        kvm_set_apic_base(vcpu, sregs->apic_base);
+
+        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+
+        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
+        vcpu->cr0 = sregs->cr0;
+        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+
+        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
+        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+        if (!is_long_mode(vcpu) && is_pae(vcpu))
+                load_pdptrs(vcpu, vcpu->cr3);
+
+        if (mmu_reset_needed)
+                kvm_mmu_reset_context(vcpu);
+
+        if (!irqchip_in_kernel(vcpu->kvm)) {
+                memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
+                       sizeof vcpu->irq_pending);
+                vcpu->irq_summary = 0;
+                for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
+                        if (vcpu->irq_pending[i])
+                                __set_bit(i, &vcpu->irq_summary);
+        } else {
+                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
+                pending_vec = find_first_bit(
+                        (const unsigned long *)sregs->interrupt_bitmap,
+                        max_bits);
+                /* Only pending external irq is handled here */
+                if (pending_vec < max_bits) {
+                        kvm_x86_ops->set_irq(vcpu, pending_vec);
+                        pr_debug("Set back pending irq %d\n",
+                                 pending_vec);
+                }
+        }
+
+        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+
+        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+
+        vcpu_put(vcpu);
+
+        return 0;
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+                                    struct kvm_debug_guest *dbg)
+{
+        int r;
+
+        vcpu_load(vcpu);
+
+        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
+
+        vcpu_put(vcpu);
+
+        return r;
+}
+
+/*
  * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
  * we have asm/x86/processor.h
  */
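
For context, here is the other side of the ioctl boundary that the handlers above service. This is a minimal illustrative userspace sketch against the public KVM API of this era, not code from the patch: KVM_RUN lands in kvm_arch_vcpu_ioctl_run(), post_kvm_run_save() fills the mmap'ed struct kvm_run before each return to userspace, and KVM_GET_REGS is served by kvm_arch_vcpu_ioctl_get_regs(). Guest memory and register setup are elided, so it will not boot a real guest as-is.

/* Illustrative userspace sketch only -- not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        int size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);

        /* Shared with the kernel; post_kvm_run_save() fills if_flag, cr8,
         * apic_base and ready_for_interrupt_injection before returning. */
        struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);

        for (;;) {
                if (ioctl(vcpu, KVM_RUN, 0) < 0)
                        break;  /* e.g. -EINTR via the signal_exits path */

                if (run->exit_reason == KVM_EXIT_MMIO) {
                        /* Emulate the access here; on re-entry the kernel
                         * completes it through the mmio_needed path. */
                        continue;
                }
                break;  /* any other exit reason: stop for this sketch */
        }

        /* KVM_GET_REGS ends up in kvm_arch_vcpu_ioctl_get_regs(). */
        struct kvm_regs regs;
        if (ioctl(vcpu, KVM_GET_REGS, &regs) == 0)
                printf("guest rip = 0x%llx\n", (unsigned long long)regs.rip);

        return 0;
}

After this series the generic vcpu ioctl dispatcher in kvm_main.c keeps only the file-descriptor plumbing, and each architecture supplies the kvm_arch_vcpu_ioctl_*() bodies above; that split is the portability point of the commit.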