47 files changed, 1790 insertions, 1444 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 0fe36497642c..68cda1fc3d52 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -297,6 +297,15 @@ struct kvm_regs { | |||
297 | __u64 rip, rflags; | 297 | __u64 rip, rflags; |
298 | }; | 298 | }; |
299 | 299 | ||
300 | /* mips */ | ||
301 | struct kvm_regs { | ||
302 | /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ | ||
303 | __u64 gpr[32]; | ||
304 | __u64 hi; | ||
305 | __u64 lo; | ||
306 | __u64 pc; | ||
307 | }; | ||
308 | |||
300 | 309 | ||
301 | 4.12 KVM_SET_REGS | 310 | 4.12 KVM_SET_REGS |
302 | 311 | ||
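To make the struct added above concrete, here is a minimal userspace sketch (not part of the patch) reading the MIPS guest pc and hi/lo via KVM_GET_REGS; the vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU on a MIPS host, and error handling is trimmed:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Dump pc/hi/lo of a MIPS vcpu. */
	static int dump_mips_regs(int vcpu_fd)
	{
		struct kvm_regs regs;

		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
			return -1;
		printf("pc=%#llx hi=%#llx lo=%#llx\n",
		       (unsigned long long)regs.pc,
		       (unsigned long long)regs.hi,
		       (unsigned long long)regs.lo);
		return 0;
	}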
@@ -378,7 +387,7 @@ struct kvm_translation { | |||
378 | 4.16 KVM_INTERRUPT | 387 | 4.16 KVM_INTERRUPT |
379 | 388 | ||
380 | Capability: basic | 389 | Capability: basic |
381 | Architectures: x86, ppc | 390 | Architectures: x86, ppc, mips |
382 | Type: vcpu ioctl | 391 | Type: vcpu ioctl |
383 | Parameters: struct kvm_interrupt (in) | 392 | Parameters: struct kvm_interrupt (in) |
384 | Returns: 0 on success, -1 on error | 393 | Returns: 0 on success, -1 on error |
@@ -423,6 +432,11 @@ c) KVM_INTERRUPT_SET_LEVEL | |||
423 | Note that any value for 'irq' other than the ones stated above is invalid | 432 | Note that any value for 'irq' other than the ones stated above is invalid |
424 | and incurs unexpected behavior. | 433 | and incurs unexpected behavior. |
425 | 434 | ||
435 | MIPS: | ||
436 | |||
437 | Queues an external interrupt to be injected into the virtual CPU. A negative | ||
438 | interrupt number dequeues the interrupt. | ||
439 | |||
426 | 440 | ||
427 | 4.17 KVM_DEBUG_GUEST | 441 | 4.17 KVM_DEBUG_GUEST |
428 | 442 | ||
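A hedged sketch of the MIPS semantics just described — queue an external interrupt, then dequeue it with the negated number. Interrupt number 2 and the vcpu_fd variable are purely illustrative assumptions:

	/* Queue MIPS external interrupt 2, then take it back. */
	struct kvm_interrupt irq = { .irq = 2 };

	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
	irq.irq = (__u32)-2;		/* negative value dequeues it */
	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);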
@@ -512,7 +526,7 @@ struct kvm_cpuid { | |||
512 | 4.21 KVM_SET_SIGNAL_MASK | 526 | 4.21 KVM_SET_SIGNAL_MASK |
513 | 527 | ||
514 | Capability: basic | 528 | Capability: basic |
515 | Architectures: x86 | 529 | Architectures: all |
516 | Type: vcpu ioctl | 530 | Type: vcpu ioctl |
517 | Parameters: struct kvm_signal_mask (in) | 531 | Parameters: struct kvm_signal_mask (in) |
518 | Returns: 0 on success, -1 on error | 532 | Returns: 0 on success, -1 on error |
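Since the mask now applies on all architectures, a sketch of the usual pattern may help: block everything in the vcpu thread, then let only SIGUSR1 through while KVM_RUN executes. The len value of 8 assumes the kernel-side sigset size on a 64-bit Linux host:

	#include <signal.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_run_sigmask(int vcpu_fd)
	{
		struct kvm_signal_mask *kmask;
		sigset_t set;
		int ret;

		sigfillset(&set);
		sigdelset(&set, SIGUSR1);	/* only SIGUSR1 may interrupt KVM_RUN */

		kmask = malloc(sizeof(*kmask) + sizeof(set));
		if (!kmask)
			return -1;
		kmask->len = 8;			/* kernel sigset size in bytes (assumed) */
		memcpy(kmask->sigset, &set, sizeof(set));
		ret = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, kmask);
		free(kmask);
		return ret;
	}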
@@ -974,7 +988,7 @@ for vm-wide capabilities. | |||
974 | 4.38 KVM_GET_MP_STATE | 988 | 4.38 KVM_GET_MP_STATE |
975 | 989 | ||
976 | Capability: KVM_CAP_MP_STATE | 990 | Capability: KVM_CAP_MP_STATE |
977 | Architectures: x86, ia64 | 991 | Architectures: x86, ia64, s390 |
978 | Type: vcpu ioctl | 992 | Type: vcpu ioctl |
979 | Parameters: struct kvm_mp_state (out) | 993 | Parameters: struct kvm_mp_state (out) |
980 | Returns: 0 on success; -1 on error | 994 | Returns: 0 on success; -1 on error |
@@ -988,24 +1002,32 @@ uniprocessor guests). | |||
988 | 1002 | ||
989 | Possible values are: | 1003 | Possible values are: |
990 | 1004 | ||
991 | - KVM_MP_STATE_RUNNABLE: the vcpu is currently running | 1005 | - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86, ia64] |
992 | - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) | 1006 | - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) |
993 | which has not yet received an INIT signal | 1007 | which has not yet received an INIT signal [x86, |
1008 | ia64] | ||
994 | - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is | 1009 | - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is |
995 | now ready for a SIPI | 1010 | now ready for a SIPI [x86, ia64] |
996 | - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and | 1011 | - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and |
997 | is waiting for an interrupt | 1012 | is waiting for an interrupt [x86, ia64] |
998 | - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector | 1013 | - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector |
999 | accessible via KVM_GET_VCPU_EVENTS) | 1014 | accessible via KVM_GET_VCPU_EVENTS) [x86, ia64] |
1015 | - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390] | ||
1016 | - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390] | ||
1017 | - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted) | ||
1018 | [s390] | ||
1019 | - KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state | ||
1020 | [s390] | ||
1000 | 1021 | ||
1001 | This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel | 1022 | On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an |
1002 | irqchip, the multiprocessing state must be maintained by userspace. | 1023 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on |
1024 | these architectures. | ||
1003 | 1025 | ||
1004 | 1026 | ||
1005 | 4.39 KVM_SET_MP_STATE | 1027 | 4.39 KVM_SET_MP_STATE |
1006 | 1028 | ||
1007 | Capability: KVM_CAP_MP_STATE | 1029 | Capability: KVM_CAP_MP_STATE |
1008 | Architectures: x86, ia64 | 1030 | Architectures: x86, ia64, s390 |
1009 | Type: vcpu ioctl | 1031 | Type: vcpu ioctl |
1010 | Parameters: struct kvm_mp_state (in) | 1032 | Parameters: struct kvm_mp_state (in) |
1011 | Returns: 0 on success; -1 on error | 1033 | Returns: 0 on success; -1 on error |
@@ -1013,8 +1035,9 @@ Returns: 0 on success; -1 on error | |||
1013 | Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for | 1035 | Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for |
1014 | arguments. | 1036 | arguments. |
1015 | 1037 | ||
1016 | This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel | 1038 | On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an |
1017 | irqchip, the multiprocessing state must be maintained by userspace. | 1039 | in-kernel irqchip, the multiprocessing state must be maintained by userspace on |
1040 | these architectures. | ||
1018 | 1041 | ||
1019 | 1042 | ||
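To make the new s390 states concrete, a small sketch that restarts a stopped s390 vcpu, assuming the constants above are in the installed headers and vcpu_fd is a vcpu file descriptor:

	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) == 0 &&
	    mp.mp_state == KVM_MP_STATE_STOPPED) {
		mp.mp_state = KVM_MP_STATE_OPERATING;
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}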
1020 | 4.40 KVM_SET_IDENTITY_MAP_ADDR | 1043 | 4.40 KVM_SET_IDENTITY_MAP_ADDR |
@@ -1774,122 +1797,151 @@ and architecture specific registers. Each have their own range of operation | |||
1774 | and their own constants and width. To keep track of the implemented | 1797 | and their own constants and width. To keep track of the implemented |
1775 | registers, find a list below: | 1798 | registers, find a list below: |
1776 | 1799 | ||
1777 | Arch | Register | Width (bits) | 1800 | Arch | Register | Width (bits) |
1778 | | | | 1801 | | | |
1779 | PPC | KVM_REG_PPC_HIOR | 64 | 1802 | PPC | KVM_REG_PPC_HIOR | 64 |
1780 | PPC | KVM_REG_PPC_IAC1 | 64 | 1803 | PPC | KVM_REG_PPC_IAC1 | 64 |
1781 | PPC | KVM_REG_PPC_IAC2 | 64 | 1804 | PPC | KVM_REG_PPC_IAC2 | 64 |
1782 | PPC | KVM_REG_PPC_IAC3 | 64 | 1805 | PPC | KVM_REG_PPC_IAC3 | 64 |
1783 | PPC | KVM_REG_PPC_IAC4 | 64 | 1806 | PPC | KVM_REG_PPC_IAC4 | 64 |
1784 | PPC | KVM_REG_PPC_DAC1 | 64 | 1807 | PPC | KVM_REG_PPC_DAC1 | 64 |
1785 | PPC | KVM_REG_PPC_DAC2 | 64 | 1808 | PPC | KVM_REG_PPC_DAC2 | 64 |
1786 | PPC | KVM_REG_PPC_DABR | 64 | 1809 | PPC | KVM_REG_PPC_DABR | 64 |
1787 | PPC | KVM_REG_PPC_DSCR | 64 | 1810 | PPC | KVM_REG_PPC_DSCR | 64 |
1788 | PPC | KVM_REG_PPC_PURR | 64 | 1811 | PPC | KVM_REG_PPC_PURR | 64 |
1789 | PPC | KVM_REG_PPC_SPURR | 64 | 1812 | PPC | KVM_REG_PPC_SPURR | 64 |
1790 | PPC | KVM_REG_PPC_DAR | 64 | 1813 | PPC | KVM_REG_PPC_DAR | 64 |
1791 | PPC | KVM_REG_PPC_DSISR | 32 | 1814 | PPC | KVM_REG_PPC_DSISR | 32 |
1792 | PPC | KVM_REG_PPC_AMR | 64 | 1815 | PPC | KVM_REG_PPC_AMR | 64 |
1793 | PPC | KVM_REG_PPC_UAMOR | 64 | 1816 | PPC | KVM_REG_PPC_UAMOR | 64 |
1794 | PPC | KVM_REG_PPC_MMCR0 | 64 | 1817 | PPC | KVM_REG_PPC_MMCR0 | 64 |
1795 | PPC | KVM_REG_PPC_MMCR1 | 64 | 1818 | PPC | KVM_REG_PPC_MMCR1 | 64 |
1796 | PPC | KVM_REG_PPC_MMCRA | 64 | 1819 | PPC | KVM_REG_PPC_MMCRA | 64 |
1797 | PPC | KVM_REG_PPC_MMCR2 | 64 | 1820 | PPC | KVM_REG_PPC_MMCR2 | 64 |
1798 | PPC | KVM_REG_PPC_MMCRS | 64 | 1821 | PPC | KVM_REG_PPC_MMCRS | 64 |
1799 | PPC | KVM_REG_PPC_SIAR | 64 | 1822 | PPC | KVM_REG_PPC_SIAR | 64 |
1800 | PPC | KVM_REG_PPC_SDAR | 64 | 1823 | PPC | KVM_REG_PPC_SDAR | 64 |
1801 | PPC | KVM_REG_PPC_SIER | 64 | 1824 | PPC | KVM_REG_PPC_SIER | 64 |
1802 | PPC | KVM_REG_PPC_PMC1 | 32 | 1825 | PPC | KVM_REG_PPC_PMC1 | 32 |
1803 | PPC | KVM_REG_PPC_PMC2 | 32 | 1826 | PPC | KVM_REG_PPC_PMC2 | 32 |
1804 | PPC | KVM_REG_PPC_PMC3 | 32 | 1827 | PPC | KVM_REG_PPC_PMC3 | 32 |
1805 | PPC | KVM_REG_PPC_PMC4 | 32 | 1828 | PPC | KVM_REG_PPC_PMC4 | 32 |
1806 | PPC | KVM_REG_PPC_PMC5 | 32 | 1829 | PPC | KVM_REG_PPC_PMC5 | 32 |
1807 | PPC | KVM_REG_PPC_PMC6 | 32 | 1830 | PPC | KVM_REG_PPC_PMC6 | 32 |
1808 | PPC | KVM_REG_PPC_PMC7 | 32 | 1831 | PPC | KVM_REG_PPC_PMC7 | 32 |
1809 | PPC | KVM_REG_PPC_PMC8 | 32 | 1832 | PPC | KVM_REG_PPC_PMC8 | 32 |
1810 | PPC | KVM_REG_PPC_FPR0 | 64 | 1833 | PPC | KVM_REG_PPC_FPR0 | 64 |
1834 | ... | ||
1835 | PPC | KVM_REG_PPC_FPR31 | 64 | ||
1836 | PPC | KVM_REG_PPC_VR0 | 128 | ||
1811 | ... | 1837 | ... |
1812 | PPC | KVM_REG_PPC_FPR31 | 64 | 1838 | PPC | KVM_REG_PPC_VR31 | 128 |
1813 | PPC | KVM_REG_PPC_VR0 | 128 | 1839 | PPC | KVM_REG_PPC_VSR0 | 128 |
1814 | ... | 1840 | ... |
1815 | PPC | KVM_REG_PPC_VR31 | 128 | 1841 | PPC | KVM_REG_PPC_VSR31 | 128 |
1816 | PPC | KVM_REG_PPC_VSR0 | 128 | 1842 | PPC | KVM_REG_PPC_FPSCR | 64 |
1843 | PPC | KVM_REG_PPC_VSCR | 32 | ||
1844 | PPC | KVM_REG_PPC_VPA_ADDR | 64 | ||
1845 | PPC | KVM_REG_PPC_VPA_SLB | 128 | ||
1846 | PPC | KVM_REG_PPC_VPA_DTL | 128 | ||
1847 | PPC | KVM_REG_PPC_EPCR | 32 | ||
1848 | PPC | KVM_REG_PPC_EPR | 32 | ||
1849 | PPC | KVM_REG_PPC_TCR | 32 | ||
1850 | PPC | KVM_REG_PPC_TSR | 32 | ||
1851 | PPC | KVM_REG_PPC_OR_TSR | 32 | ||
1852 | PPC | KVM_REG_PPC_CLEAR_TSR | 32 | ||
1853 | PPC | KVM_REG_PPC_MAS0 | 32 | ||
1854 | PPC | KVM_REG_PPC_MAS1 | 32 | ||
1855 | PPC | KVM_REG_PPC_MAS2 | 64 | ||
1856 | PPC | KVM_REG_PPC_MAS7_3 | 64 | ||
1857 | PPC | KVM_REG_PPC_MAS4 | 32 | ||
1858 | PPC | KVM_REG_PPC_MAS6 | 32 | ||
1859 | PPC | KVM_REG_PPC_MMUCFG | 32 | ||
1860 | PPC | KVM_REG_PPC_TLB0CFG | 32 | ||
1861 | PPC | KVM_REG_PPC_TLB1CFG | 32 | ||
1862 | PPC | KVM_REG_PPC_TLB2CFG | 32 | ||
1863 | PPC | KVM_REG_PPC_TLB3CFG | 32 | ||
1864 | PPC | KVM_REG_PPC_TLB0PS | 32 | ||
1865 | PPC | KVM_REG_PPC_TLB1PS | 32 | ||
1866 | PPC | KVM_REG_PPC_TLB2PS | 32 | ||
1867 | PPC | KVM_REG_PPC_TLB3PS | 32 | ||
1868 | PPC | KVM_REG_PPC_EPTCFG | 32 | ||
1869 | PPC | KVM_REG_PPC_ICP_STATE | 64 | ||
1870 | PPC | KVM_REG_PPC_TB_OFFSET | 64 | ||
1871 | PPC | KVM_REG_PPC_SPMC1 | 32 | ||
1872 | PPC | KVM_REG_PPC_SPMC2 | 32 | ||
1873 | PPC | KVM_REG_PPC_IAMR | 64 | ||
1874 | PPC | KVM_REG_PPC_TFHAR | 64 | ||
1875 | PPC | KVM_REG_PPC_TFIAR | 64 | ||
1876 | PPC | KVM_REG_PPC_TEXASR | 64 | ||
1877 | PPC | KVM_REG_PPC_FSCR | 64 | ||
1878 | PPC | KVM_REG_PPC_PSPB | 32 | ||
1879 | PPC | KVM_REG_PPC_EBBHR | 64 | ||
1880 | PPC | KVM_REG_PPC_EBBRR | 64 | ||
1881 | PPC | KVM_REG_PPC_BESCR | 64 | ||
1882 | PPC | KVM_REG_PPC_TAR | 64 | ||
1883 | PPC | KVM_REG_PPC_DPDES | 64 | ||
1884 | PPC | KVM_REG_PPC_DAWR | 64 | ||
1885 | PPC | KVM_REG_PPC_DAWRX | 64 | ||
1886 | PPC | KVM_REG_PPC_CIABR | 64 | ||
1887 | PPC | KVM_REG_PPC_IC | 64 | ||
1888 | PPC | KVM_REG_PPC_VTB | 64 | ||
1889 | PPC | KVM_REG_PPC_CSIGR | 64 | ||
1890 | PPC | KVM_REG_PPC_TACR | 64 | ||
1891 | PPC | KVM_REG_PPC_TCSCR | 64 | ||
1892 | PPC | KVM_REG_PPC_PID | 64 | ||
1893 | PPC | KVM_REG_PPC_ACOP | 64 | ||
1894 | PPC | KVM_REG_PPC_VRSAVE | 32 | ||
1895 | PPC | KVM_REG_PPC_LPCR | 64 | ||
1896 | PPC | KVM_REG_PPC_PPR | 64 | ||
1897 | PPC | KVM_REG_PPC_ARCH_COMPAT | 32 | ||
1898 | PPC | KVM_REG_PPC_DABRX | 32 | ||
1899 | PPC | KVM_REG_PPC_WORT | 64 | ||
1900 | PPC | KVM_REG_PPC_TM_GPR0 | 64 | ||
1817 | ... | 1901 | ... |
1818 | PPC | KVM_REG_PPC_VSR31 | 128 | 1902 | PPC | KVM_REG_PPC_TM_GPR31 | 64 |
1819 | PPC | KVM_REG_PPC_FPSCR | 64 | 1903 | PPC | KVM_REG_PPC_TM_VSR0 | 128 |
1820 | PPC | KVM_REG_PPC_VSCR | 32 | ||
1821 | PPC | KVM_REG_PPC_VPA_ADDR | 64 | ||
1822 | PPC | KVM_REG_PPC_VPA_SLB | 128 | ||
1823 | PPC | KVM_REG_PPC_VPA_DTL | 128 | ||
1824 | PPC | KVM_REG_PPC_EPCR | 32 | ||
1825 | PPC | KVM_REG_PPC_EPR | 32 | ||
1826 | PPC | KVM_REG_PPC_TCR | 32 | ||
1827 | PPC | KVM_REG_PPC_TSR | 32 | ||
1828 | PPC | KVM_REG_PPC_OR_TSR | 32 | ||
1829 | PPC | KVM_REG_PPC_CLEAR_TSR | 32 | ||
1830 | PPC | KVM_REG_PPC_MAS0 | 32 | ||
1831 | PPC | KVM_REG_PPC_MAS1 | 32 | ||
1832 | PPC | KVM_REG_PPC_MAS2 | 64 | ||
1833 | PPC | KVM_REG_PPC_MAS7_3 | 64 | ||
1834 | PPC | KVM_REG_PPC_MAS4 | 32 | ||
1835 | PPC | KVM_REG_PPC_MAS6 | 32 | ||
1836 | PPC | KVM_REG_PPC_MMUCFG | 32 | ||
1837 | PPC | KVM_REG_PPC_TLB0CFG | 32 | ||
1838 | PPC | KVM_REG_PPC_TLB1CFG | 32 | ||
1839 | PPC | KVM_REG_PPC_TLB2CFG | 32 | ||
1840 | PPC | KVM_REG_PPC_TLB3CFG | 32 | ||
1841 | PPC | KVM_REG_PPC_TLB0PS | 32 | ||
1842 | PPC | KVM_REG_PPC_TLB1PS | 32 | ||
1843 | PPC | KVM_REG_PPC_TLB2PS | 32 | ||
1844 | PPC | KVM_REG_PPC_TLB3PS | 32 | ||
1845 | PPC | KVM_REG_PPC_EPTCFG | 32 | ||
1846 | PPC | KVM_REG_PPC_ICP_STATE | 64 | ||
1847 | PPC | KVM_REG_PPC_TB_OFFSET | 64 | ||
1848 | PPC | KVM_REG_PPC_SPMC1 | 32 | ||
1849 | PPC | KVM_REG_PPC_SPMC2 | 32 | ||
1850 | PPC | KVM_REG_PPC_IAMR | 64 | ||
1851 | PPC | KVM_REG_PPC_TFHAR | 64 | ||
1852 | PPC | KVM_REG_PPC_TFIAR | 64 | ||
1853 | PPC | KVM_REG_PPC_TEXASR | 64 | ||
1854 | PPC | KVM_REG_PPC_FSCR | 64 | ||
1855 | PPC | KVM_REG_PPC_PSPB | 32 | ||
1856 | PPC | KVM_REG_PPC_EBBHR | 64 | ||
1857 | PPC | KVM_REG_PPC_EBBRR | 64 | ||
1858 | PPC | KVM_REG_PPC_BESCR | 64 | ||
1859 | PPC | KVM_REG_PPC_TAR | 64 | ||
1860 | PPC | KVM_REG_PPC_DPDES | 64 | ||
1861 | PPC | KVM_REG_PPC_DAWR | 64 | ||
1862 | PPC | KVM_REG_PPC_DAWRX | 64 | ||
1863 | PPC | KVM_REG_PPC_CIABR | 64 | ||
1864 | PPC | KVM_REG_PPC_IC | 64 | ||
1865 | PPC | KVM_REG_PPC_VTB | 64 | ||
1866 | PPC | KVM_REG_PPC_CSIGR | 64 | ||
1867 | PPC | KVM_REG_PPC_TACR | 64 | ||
1868 | PPC | KVM_REG_PPC_TCSCR | 64 | ||
1869 | PPC | KVM_REG_PPC_PID | 64 | ||
1870 | PPC | KVM_REG_PPC_ACOP | 64 | ||
1871 | PPC | KVM_REG_PPC_VRSAVE | 32 | ||
1872 | PPC | KVM_REG_PPC_LPCR | 64 | ||
1873 | PPC | KVM_REG_PPC_PPR | 64 | ||
1874 | PPC | KVM_REG_PPC_ARCH_COMPAT 32 | ||
1875 | PPC | KVM_REG_PPC_DABRX | 32 | ||
1876 | PPC | KVM_REG_PPC_WORT | 64 | ||
1877 | PPC | KVM_REG_PPC_TM_GPR0 | 64 | ||
1878 | ... | 1904 | ... |
1879 | PPC | KVM_REG_PPC_TM_GPR31 | 64 | 1905 | PPC | KVM_REG_PPC_TM_VSR63 | 128 |
1880 | PPC | KVM_REG_PPC_TM_VSR0 | 128 | 1906 | PPC | KVM_REG_PPC_TM_CR | 64 |
1907 | PPC | KVM_REG_PPC_TM_LR | 64 | ||
1908 | PPC | KVM_REG_PPC_TM_CTR | 64 | ||
1909 | PPC | KVM_REG_PPC_TM_FPSCR | 64 | ||
1910 | PPC | KVM_REG_PPC_TM_AMR | 64 | ||
1911 | PPC | KVM_REG_PPC_TM_PPR | 64 | ||
1912 | PPC | KVM_REG_PPC_TM_VRSAVE | 64 | ||
1913 | PPC | KVM_REG_PPC_TM_VSCR | 32 | ||
1914 | PPC | KVM_REG_PPC_TM_DSCR | 64 | ||
1915 | PPC | KVM_REG_PPC_TM_TAR | 64 | ||
1916 | | | | ||
1917 | MIPS | KVM_REG_MIPS_R0 | 64 | ||
1881 | ... | 1918 | ... |
1882 | PPC | KVM_REG_PPC_TM_VSR63 | 128 | 1919 | MIPS | KVM_REG_MIPS_R31 | 64 |
1883 | PPC | KVM_REG_PPC_TM_CR | 64 | 1920 | MIPS | KVM_REG_MIPS_HI | 64 |
1884 | PPC | KVM_REG_PPC_TM_LR | 64 | 1921 | MIPS | KVM_REG_MIPS_LO | 64 |
1885 | PPC | KVM_REG_PPC_TM_CTR | 64 | 1922 | MIPS | KVM_REG_MIPS_PC | 64 |
1886 | PPC | KVM_REG_PPC_TM_FPSCR | 64 | 1923 | MIPS | KVM_REG_MIPS_CP0_INDEX | 32 |
1887 | PPC | KVM_REG_PPC_TM_AMR | 64 | 1924 | MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64 |
1888 | PPC | KVM_REG_PPC_TM_PPR | 64 | 1925 | MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64 |
1889 | PPC | KVM_REG_PPC_TM_VRSAVE | 64 | 1926 | MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32 |
1890 | PPC | KVM_REG_PPC_TM_VSCR | 32 | 1927 | MIPS | KVM_REG_MIPS_CP0_WIRED | 32 |
1891 | PPC | KVM_REG_PPC_TM_DSCR | 64 | 1928 | MIPS | KVM_REG_MIPS_CP0_HWRENA | 32 |
1892 | PPC | KVM_REG_PPC_TM_TAR | 64 | 1929 | MIPS | KVM_REG_MIPS_CP0_BADVADDR | 64 |
1930 | MIPS | KVM_REG_MIPS_CP0_COUNT | 32 | ||
1931 | MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64 | ||
1932 | MIPS | KVM_REG_MIPS_CP0_COMPARE | 32 | ||
1933 | MIPS | KVM_REG_MIPS_CP0_STATUS | 32 | ||
1934 | MIPS | KVM_REG_MIPS_CP0_CAUSE | 32 | ||
1935 | MIPS | KVM_REG_MIPS_CP0_EPC | 64 | ||
1936 | MIPS | KVM_REG_MIPS_CP0_CONFIG | 32 | ||
1937 | MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32 | ||
1938 | MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32 | ||
1939 | MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32 | ||
1940 | MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32 | ||
1941 | MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64 | ||
1942 | MIPS | KVM_REG_MIPS_COUNT_CTL | 64 | ||
1943 | MIPS | KVM_REG_MIPS_COUNT_RESUME | 64 | ||
1944 | MIPS | KVM_REG_MIPS_COUNT_HZ | 64 | ||
1893 | 1945 | ||
1894 | ARM registers are mapped using the lower 32 bits. The upper 16 of that | 1946 | ARM registers are mapped using the lower 32 bits. The upper 16 of that |
1895 | is the register group type, or coprocessor number: | 1947 | is the register group type, or coprocessor number: |
@@ -1928,6 +1980,22 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value: | |||
1928 | arm64 system registers have the following id bit patterns: | 1980 | arm64 system registers have the following id bit patterns: |
1929 | 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> | 1981 | 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> |
1930 | 1982 | ||
1983 | |||
1984 | MIPS registers are mapped using the lower 32 bits. The upper 16 of that is | ||
1985 | the register group type: | ||
1986 | |||
1987 | MIPS core registers (see above) have the following id bit patterns: | ||
1988 | 0x7030 0000 0000 <reg:16> | ||
1989 | |||
1990 | MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit | ||
1991 | patterns depending on whether they're 32-bit or 64-bit registers: | ||
1992 | 0x7020 0000 0001 00 <reg:5> <sel:3> (32-bit) | ||
1993 | 0x7030 0000 0001 00 <reg:5> <sel:3> (64-bit) | ||
1994 | |||
1995 | MIPS KVM control registers (see above) have the following id bit patterns: | ||
1996 | 0x7030 0000 0002 <reg:16> | ||
1997 | |||
1998 | |||
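The bit patterns above translate mechanically into register ids for the KVM_GET/SET_ONE_REG ioctls documented just below. A sketch with the constants spelled out rather than taken from a header; CP0_Status (register 12, select 0, 32-bit) is used as the example, and vcpu_fd is assumed:

	#define MIPS_CORE_REG(n)	(0x7030000000000000ULL | (n))
	#define MIPS_CP0_32(reg, sel)	(0x7020000000010000ULL | ((reg) << 3) | (sel))
	#define MIPS_CP0_64(reg, sel)	(0x7030000000010000ULL | ((reg) << 3) | (sel))

	__u32 status_val;
	struct kvm_one_reg reg = {
		.id   = MIPS_CP0_32(12, 0),	/* CP0_Status, 32-bit */
		.addr = (unsigned long)&status_val,
	};
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);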
1931 | 4.69 KVM_GET_ONE_REG | 1999 | 4.69 KVM_GET_ONE_REG |
1932 | 2000 | ||
1933 | Capability: KVM_CAP_ONE_REG | 2001 | Capability: KVM_CAP_ONE_REG |
@@ -2415,7 +2483,7 @@ in VCPU matching underlying host. | |||
2415 | 4.84 KVM_GET_REG_LIST | 2483 | 4.84 KVM_GET_REG_LIST |
2416 | 2484 | ||
2417 | Capability: basic | 2485 | Capability: basic |
2418 | Architectures: arm, arm64 | 2486 | Architectures: arm, arm64, mips |
2419 | Type: vcpu ioctl | 2487 | Type: vcpu ioctl |
2420 | Parameters: struct kvm_reg_list (in/out) | 2488 | Parameters: struct kvm_reg_list (in/out) |
2421 | Returns: 0 on success; -1 on error | 2489 | Returns: 0 on success; -1 on error |
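The usual calling convention here is a two-step probe, sketched below under the assumption that the ioctl fails with E2BIG and fills in n when the supplied list is too small:

	struct kvm_reg_list probe = { .n = 0 }, *list;

	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	/* E2BIG, sets probe.n */
	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
		printf("%llu registers supported\n",
		       (unsigned long long)list->n);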
@@ -2866,15 +2934,18 @@ The fields in each entry are defined as follows: | |||
2866 | 6. Capabilities that can be enabled | 2934 | 6. Capabilities that can be enabled |
2867 | ----------------------------------- | 2935 | ----------------------------------- |
2868 | 2936 | ||
2869 | There are certain capabilities that change the behavior of the virtual CPU when | 2937 | There are certain capabilities that change the behavior of the virtual CPU or |
2870 | enabled. To enable them, please see section 4.37. Below you can find a list of | 2938 | the virtual machine when enabled. To enable them, please see section 4.37. |
2871 | capabilities and what their effect on the vCPU is when enabling them. | 2939 | Below you can find a list of capabilities and what their effect on the vCPU or |
2940 | the virtual machine is when enabling them. | ||
2872 | 2941 | ||
2873 | The following information is provided along with the description: | 2942 | The following information is provided along with the description: |
2874 | 2943 | ||
2875 | Architectures: which instruction set architectures provide this ioctl. | 2944 | Architectures: which instruction set architectures provide this ioctl. |
2876 | x86 includes both i386 and x86_64. | 2945 | x86 includes both i386 and x86_64. |
2877 | 2946 | ||
2947 | Target: whether this is a per-vcpu or per-vm capability. | ||
2948 | |||
2878 | Parameters: what parameters are accepted by the capability. | 2949 | Parameters: what parameters are accepted by the capability. |
2879 | 2950 | ||
2880 | Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) | 2951 | Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) |
@@ -2884,6 +2955,7 @@ The following information is provided along with the description: | |||
2884 | 6.1 KVM_CAP_PPC_OSI | 2955 | 6.1 KVM_CAP_PPC_OSI |
2885 | 2956 | ||
2886 | Architectures: ppc | 2957 | Architectures: ppc |
2958 | Target: vcpu | ||
2887 | Parameters: none | 2959 | Parameters: none |
2888 | Returns: 0 on success; -1 on error | 2960 | Returns: 0 on success; -1 on error |
2889 | 2961 | ||
@@ -2898,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur. | |||
2898 | 6.2 KVM_CAP_PPC_PAPR | 2970 | 6.2 KVM_CAP_PPC_PAPR |
2899 | 2971 | ||
2900 | Architectures: ppc | 2972 | Architectures: ppc |
2973 | Target: vcpu | ||
2901 | Parameters: none | 2974 | Parameters: none |
2902 | Returns: 0 on success; -1 on error | 2975 | Returns: 0 on success; -1 on error |
2903 | 2976 | ||
@@ -2917,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur. | |||
2917 | 6.3 KVM_CAP_SW_TLB | 2990 | 6.3 KVM_CAP_SW_TLB |
2918 | 2991 | ||
2919 | Architectures: ppc | 2992 | Architectures: ppc |
2993 | Target: vcpu | ||
2920 | Parameters: args[0] is the address of a struct kvm_config_tlb | 2994 | Parameters: args[0] is the address of a struct kvm_config_tlb |
2921 | Returns: 0 on success; -1 on error | 2995 | Returns: 0 on success; -1 on error |
2922 | 2996 | ||
@@ -2959,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV: | |||
2959 | 6.4 KVM_CAP_S390_CSS_SUPPORT | 3033 | 6.4 KVM_CAP_S390_CSS_SUPPORT |
2960 | 3034 | ||
2961 | Architectures: s390 | 3035 | Architectures: s390 |
3036 | Target: vcpu | ||
2962 | Parameters: none | 3037 | Parameters: none |
2963 | Returns: 0 on success; -1 on error | 3038 | Returns: 0 on success; -1 on error |
2964 | 3039 | ||
@@ -2970,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace. | |||
2970 | When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST | 3045 | When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST |
2971 | SUBCHANNEL intercepts. | 3046 | SUBCHANNEL intercepts. |
2972 | 3047 | ||
3048 | Note that even though this capability is enabled per-vcpu, the complete | ||
3049 | virtual machine is affected. | ||
3050 | |||
2973 | 6.5 KVM_CAP_PPC_EPR | 3051 | 6.5 KVM_CAP_PPC_EPR |
2974 | 3052 | ||
2975 | Architectures: ppc | 3053 | Architectures: ppc |
3054 | Target: vcpu | ||
2976 | Parameters: args[0] defines whether the proxy facility is active | 3055 | Parameters: args[0] defines whether the proxy facility is active |
2977 | Returns: 0 on success; -1 on error | 3056 | Returns: 0 on success; -1 on error |
2978 | 3057 | ||
@@ -2998,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device. | |||
2998 | 6.7 KVM_CAP_IRQ_XICS | 3077 | 6.7 KVM_CAP_IRQ_XICS |
2999 | 3078 | ||
3000 | Architectures: ppc | 3079 | Architectures: ppc |
3080 | Target: vcpu | ||
3001 | Parameters: args[0] is the XICS device fd | 3081 | Parameters: args[0] is the XICS device fd |
3002 | args[1] is the XICS CPU number (server ID) for this vcpu | 3082 | args[1] is the XICS CPU number (server ID) for this vcpu |
3003 | 3083 | ||
3004 | This capability connects the vcpu to an in-kernel XICS device. | 3084 | This capability connects the vcpu to an in-kernel XICS device. |
3085 | |||
3086 | 6.8 KVM_CAP_S390_IRQCHIP | ||
3087 | |||
3088 | Architectures: s390 | ||
3089 | Target: vm | ||
3090 | Parameters: none | ||
3091 | |||
3092 | This capability enables the in-kernel irqchip for s390. Please refer to | ||
3093 | "4.24 KVM_CREATE_IRQCHIP" for details. | ||
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index b0aa95565752..7a3fc67bd7f9 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -359,13 +359,17 @@ enum emulation_result { | |||
359 | #define MIPS3_PG_FRAME 0x3fffffc0 | 359 | #define MIPS3_PG_FRAME 0x3fffffc0 |
360 | 360 | ||
361 | #define VPN2_MASK 0xffffe000 | 361 | #define VPN2_MASK 0xffffe000 |
362 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ | 362 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ |
363 | ((x).tlb_lo1 & MIPS3_PG_G)) | 363 | ((x).tlb_lo1 & MIPS3_PG_G)) |
364 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) | 364 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) |
365 | #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) | 365 | #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) |
366 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ | 366 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ |
367 | ? ((x).tlb_lo1 & MIPS3_PG_V) \ | 367 | ? ((x).tlb_lo1 & MIPS3_PG_V) \ |
368 | : ((x).tlb_lo0 & MIPS3_PG_V)) | 368 | : ((x).tlb_lo0 & MIPS3_PG_V)) |
369 | #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \ | ||
370 | ((y) & VPN2_MASK & ~(x).tlb_mask)) | ||
371 | #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \ | ||
372 | TLB_ASID(x) == ((y) & ASID_MASK)) | ||
369 | 373 | ||
370 | struct kvm_mips_tlb { | 374 | struct kvm_mips_tlb { |
371 | long tlb_mask; | 375 | long tlb_mask; |
@@ -760,7 +764,7 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, | |||
760 | struct kvm_vcpu *vcpu); | 764 | struct kvm_vcpu *vcpu); |
761 | 765 | ||
762 | /* Misc */ | 766 | /* Misc */ |
763 | extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu); | 767 | extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); |
764 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); | 768 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); |
765 | 769 | ||
766 | 770 | ||
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 0b8bd28a0df1..4520adc8699b 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -19,6 +19,9 @@ | |||
19 | #include <asm/mipsmtregs.h> | 19 | #include <asm/mipsmtregs.h> |
20 | #include <asm/uaccess.h> /* for segment_eq() */ | 20 | #include <asm/uaccess.h> /* for segment_eq() */ |
21 | 21 | ||
22 | extern void (*r4k_blast_dcache)(void); | ||
23 | extern void (*r4k_blast_icache)(void); | ||
24 | |||
22 | /* | 25 | /* |
23 | * This macro returns a properly sign-extended address suitable as base address | 26 |
24 | * for indexed cache operations. Two issues here: | 27 | * for indexed cache operations. Two issues here: |
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 78d87bbc99db..401fe027c261 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile | |||
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | |||
5 | 5 | ||
6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm | 6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm |
7 | 7 | ||
8 | kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \ | 8 | kvm-objs := $(common-objs) mips.o emulate.o locore.o \ |
9 | kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \ | 9 | interrupt.o stats.o commpage.o \ |
10 | kvm_mips_dyntrans.o kvm_trap_emul.o | 10 | dyntrans.o trap_emul.o |
11 | 11 | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 12 | obj-$(CONFIG_KVM) += kvm.o |
13 | obj-y += kvm_cb.o kvm_tlb.o | 13 | obj-y += callback.o tlb.o |
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/callback.c index 313c2e37b978..313c2e37b978 100644 --- a/arch/mips/kvm/kvm_cb.c +++ b/arch/mips/kvm/callback.c | |||
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c new file mode 100644 index 000000000000..2d6e976d1add --- /dev/null +++ b/arch/mips/kvm/commpage.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * commpage, currently used for Virtual COP0 registers. | ||
7 | * Mapped into the guest kernel @ 0x0. | ||
8 | * | ||
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #include "commpage.h" | ||
26 | |||
27 | void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) | ||
28 | { | ||
29 | struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; | ||
30 | |||
31 | /* Specific init values for fields */ | ||
32 | vcpu->arch.cop0 = &page->cop0; | ||
33 | } | ||
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h new file mode 100644 index 000000000000..08c5fa2bbc0f --- /dev/null +++ b/arch/mips/kvm/commpage.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: commpage: mapped into guest kernel space |||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_COMMPAGE_H__ | ||
13 | #define __KVM_MIPS_COMMPAGE_H__ | ||
14 | |||
15 | struct kvm_mips_commpage { | ||
16 | /* COP0 state is mapped into Guest kernel via commpage */ | ||
17 | struct mips_coproc cop0; | ||
18 | }; | ||
19 | |||
20 | #define KVM_MIPS_COMM_EIDI_OFFSET 0x0 | ||
21 | |||
22 | extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); | ||
23 | |||
24 | #endif /* __KVM_MIPS_COMMPAGE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/dyntrans.c index b80e41d858fd..521121bdebff 100644 --- a/arch/mips/kvm/kvm_mips_dyntrans.c +++ b/arch/mips/kvm/dyntrans.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. | 6 | * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
19 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
20 | 20 | ||
21 | #include "kvm_mips_comm.h" | 21 | #include "commpage.h" |
22 | 22 | ||
23 | #define SYNCI_TEMPLATE 0x041f0000 | 23 | #define SYNCI_TEMPLATE 0x041f0000 |
24 | #define SYNCI_BASE(x) (((x) >> 21) & 0x1f) | 24 | #define SYNCI_BASE(x) (((x) >> 21) & 0x1f) |
@@ -28,9 +28,8 @@ | |||
28 | #define CLEAR_TEMPLATE 0x00000020 | 28 | #define CLEAR_TEMPLATE 0x00000020 |
29 | #define SW_TEMPLATE 0xac000000 | 29 | #define SW_TEMPLATE 0xac000000 |
30 | 30 | ||
31 | int | 31 | int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, |
32 | kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | 32 | struct kvm_vcpu *vcpu) |
33 | struct kvm_vcpu *vcpu) | ||
34 | { | 33 | { |
35 | int result = 0; | 34 | int result = 0; |
36 | unsigned long kseg0_opc; | 35 | unsigned long kseg0_opc; |
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | |||
47 | } | 46 | } |
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Address based CACHE instructions are transformed into synci(s). A little heavy | 49 | * Address based CACHE instructions are transformed into synci(s). A little |
51 | * for just D-cache invalidates, but avoids an expensive trap | 50 | * heavy for just D-cache invalidates, but avoids an expensive trap |
52 | */ | 51 | */ |
53 | int | 52 | int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, |
54 | kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, | 53 | struct kvm_vcpu *vcpu) |
55 | struct kvm_vcpu *vcpu) | ||
56 | { | 54 | { |
57 | int result = 0; | 55 | int result = 0; |
58 | unsigned long kseg0_opc; | 56 | unsigned long kseg0_opc; |
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, | |||
72 | return result; | 70 | return result; |
73 | } | 71 | } |
74 | 72 | ||
75 | int | 73 | int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) |
76 | kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | ||
77 | { | 74 | { |
78 | int32_t rt, rd, sel; | 75 | int32_t rt, rd, sel; |
79 | uint32_t mfc0_inst; | 76 | uint32_t mfc0_inst; |
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | |||
115 | return 0; | 112 | return 0; |
116 | } | 113 | } |
117 | 114 | ||
118 | int | 115 | int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) |
119 | kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | ||
120 | { | 116 | { |
121 | int32_t rt, rd, sel; | 117 | int32_t rt, rd, sel; |
122 | uint32_t mtc0_inst = SW_TEMPLATE; | 118 | uint32_t mtc0_inst = SW_TEMPLATE; |
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/emulate.c index 8d4840090082..fb3e8dfd1ff6 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/emulate.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Instruction/Exception emulation | 6 | * KVM/MIPS: Instruction/Exception emulation |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -29,9 +29,9 @@ | |||
29 | #include <asm/r4kcache.h> | 29 | #include <asm/r4kcache.h> |
30 | #define CONFIG_MIPS_MT | 30 | #define CONFIG_MIPS_MT |
31 | 31 | ||
32 | #include "kvm_mips_opcode.h" | 32 | #include "opcode.h" |
33 | #include "kvm_mips_int.h" | 33 | #include "interrupt.h" |
34 | #include "kvm_mips_comm.h" | 34 | #include "commpage.h" |
35 | 35 | ||
36 | #include "trace.h" | 36 | #include "trace.h" |
37 | 37 | ||
@@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
51 | if (epc & 3) | 51 | if (epc & 3) |
52 | goto unaligned; | 52 | goto unaligned; |
53 | 53 | ||
54 | /* | 54 | /* Read the instruction */ |
55 | * Read the instruction | ||
56 | */ | ||
57 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); | 55 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); |
58 | 56 | ||
59 | if (insn.word == KVM_INVALID_INST) | 57 | if (insn.word == KVM_INVALID_INST) |
60 | return KVM_INVALID_INST; | 58 | return KVM_INVALID_INST; |
61 | 59 | ||
62 | switch (insn.i_format.opcode) { | 60 | switch (insn.i_format.opcode) { |
63 | /* | 61 | /* jr and jalr are in r_format format. */ |
64 | * jr and jalr are in r_format format. | ||
65 | */ | ||
66 | case spec_op: | 62 | case spec_op: |
67 | switch (insn.r_format.func) { | 63 | switch (insn.r_format.func) { |
68 | case jalr_op: | 64 | case jalr_op: |
@@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
124 | 120 | ||
125 | dspcontrol = rddsp(0x01); | 121 | dspcontrol = rddsp(0x01); |
126 | 122 | ||
127 | if (dspcontrol >= 32) { | 123 | if (dspcontrol >= 32) |
128 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 124 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
129 | } else | 125 | else |
130 | epc += 8; | 126 | epc += 8; |
131 | nextpc = epc; | 127 | nextpc = epc; |
132 | break; | 128 | break; |
133 | } | 129 | } |
134 | break; | 130 | break; |
135 | 131 | ||
136 | /* | 132 | /* These are unconditional and in j_format. */ |
137 | * These are unconditional and in j_format. | ||
138 | */ | ||
139 | case jal_op: | 133 | case jal_op: |
140 | arch->gprs[31] = instpc + 8; | 134 | arch->gprs[31] = instpc + 8; |
141 | case j_op: | 135 | case j_op: |
@@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
146 | nextpc = epc; | 140 | nextpc = epc; |
147 | break; | 141 | break; |
148 | 142 | ||
149 | /* | 143 | /* These are conditional and in i_format. */ |
150 | * These are conditional and in i_format. | ||
151 | */ | ||
152 | case beq_op: | 144 | case beq_op: |
153 | case beql_op: | 145 | case beql_op: |
154 | if (arch->gprs[insn.i_format.rs] == | 146 | if (arch->gprs[insn.i_format.rs] == |
@@ -189,22 +181,20 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
189 | nextpc = epc; | 181 | nextpc = epc; |
190 | break; | 182 | break; |
191 | 183 | ||
192 | /* | 184 | /* And now the FPA/cp1 branch instructions. */ |
193 | * And now the FPA/cp1 branch instructions. | ||
194 | */ | ||
195 | case cop1_op: | 185 | case cop1_op: |
196 | printk("%s: unsupported cop1_op\n", __func__); | 186 | kvm_err("%s: unsupported cop1_op\n", __func__); |
197 | break; | 187 | break; |
198 | } | 188 | } |
199 | 189 | ||
200 | return nextpc; | 190 | return nextpc; |
201 | 191 | ||
202 | unaligned: | 192 | unaligned: |
203 | printk("%s: unaligned epc\n", __func__); | 193 | kvm_err("%s: unaligned epc\n", __func__); |
204 | return nextpc; | 194 | return nextpc; |
205 | 195 | ||
206 | sigill: | 196 | sigill: |
207 | printk("%s: DSP branch but not DSP ASE\n", __func__); | 197 | kvm_err("%s: DSP branch but not DSP ASE\n", __func__); |
208 | return nextpc; | 198 | return nextpc; |
209 | } | 199 | } |
210 | 200 | ||
@@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | |||
219 | er = EMULATE_FAIL; | 209 | er = EMULATE_FAIL; |
220 | } else { | 210 | } else { |
221 | vcpu->arch.pc = branch_pc; | 211 | vcpu->arch.pc = branch_pc; |
222 | kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); | 212 | kvm_debug("BD update_pc(): New PC: %#lx\n", |
213 | vcpu->arch.pc); | ||
223 | } | 214 | } |
224 | } else | 215 | } else |
225 | vcpu->arch.pc += 4; | 216 | vcpu->arch.pc += 4; |
@@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | |||
240 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) | 231 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) |
241 | { | 232 | { |
242 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 233 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
234 | |||
243 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || | 235 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || |
244 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); | 236 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); |
245 | } | 237 | } |
@@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, | |||
392 | return now; | 384 | return now; |
393 | } | 385 | } |
394 | 386 | ||
395 | |||
396 | /** | 387 | /** |
397 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. | 388 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. |
398 | * @vcpu: Virtual CPU. | 389 | * @vcpu: Virtual CPU. |
@@ -760,8 +751,8 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |||
760 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | 751 | kvm_clear_c0_guest_status(cop0, ST0_ERL); |
761 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | 752 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); |
762 | } else { | 753 | } else { |
763 | printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | 754 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", |
764 | vcpu->arch.pc); | 755 | vcpu->arch.pc); |
765 | er = EMULATE_FAIL; | 756 | er = EMULATE_FAIL; |
766 | } | 757 | } |
767 | 758 | ||
@@ -770,8 +761,6 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |||
770 | 761 | ||
771 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | 762 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) |
772 | { | 763 | { |
773 | enum emulation_result er = EMULATE_DONE; | ||
774 | |||
775 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, | 764 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, |
776 | vcpu->arch.pending_exceptions); | 765 | vcpu->arch.pending_exceptions); |
777 | 766 | ||
@@ -781,8 +770,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | |||
781 | vcpu->arch.wait = 1; | 770 | vcpu->arch.wait = 1; |
782 | kvm_vcpu_block(vcpu); | 771 | kvm_vcpu_block(vcpu); |
783 | 772 | ||
784 | /* We we are runnable, then definitely go off to user space to check if any | 773 | /* |
785 | * I/O interrupts are pending. | 774 | * If we are runnable, then definitely go off to user space to
775 | * check if any I/O interrupts are pending. | ||
786 | */ | 776 | */ |
787 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | 777 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { |
788 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 778 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
@@ -790,20 +780,20 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | |||
790 | } | 780 | } |
791 | } | 781 | } |
792 | 782 | ||
793 | return er; | 783 | return EMULATE_DONE; |
794 | } | 784 | } |
795 | 785 | ||
796 | /* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch | 786 | /* |
797 | * this, if things ever change | 787 | * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that |
788 | * we can catch this, if things ever change | ||
798 | */ | 789 | */ |
799 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | 790 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) |
800 | { | 791 | { |
801 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 792 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
802 | enum emulation_result er = EMULATE_FAIL; | ||
803 | uint32_t pc = vcpu->arch.pc; | 793 | uint32_t pc = vcpu->arch.pc; |
804 | 794 | ||
805 | printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); | 795 | kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); |
806 | return er; | 796 | return EMULATE_FAIL; |
807 | } | 797 | } |
808 | 798 | ||
809 | /* Write Guest TLB Entry @ Index */ | 799 | /* Write Guest TLB Entry @ Index */ |
@@ -811,88 +801,76 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) | |||
811 | { | 801 | { |
812 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 802 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
813 | int index = kvm_read_c0_guest_index(cop0); | 803 | int index = kvm_read_c0_guest_index(cop0); |
814 | enum emulation_result er = EMULATE_DONE; | ||
815 | struct kvm_mips_tlb *tlb = NULL; | 804 | struct kvm_mips_tlb *tlb = NULL; |
816 | uint32_t pc = vcpu->arch.pc; | 805 | uint32_t pc = vcpu->arch.pc; |
817 | 806 | ||
818 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | 807 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { |
819 | printk("%s: illegal index: %d\n", __func__, index); | 808 | kvm_debug("%s: illegal index: %d\n", __func__, index); |
820 | printk | 809 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
821 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 810 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
822 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 811 | kvm_read_c0_guest_entrylo0(cop0), |
823 | kvm_read_c0_guest_entrylo0(cop0), | 812 | kvm_read_c0_guest_entrylo1(cop0), |
824 | kvm_read_c0_guest_entrylo1(cop0), | 813 | kvm_read_c0_guest_pagemask(cop0)); |
825 | kvm_read_c0_guest_pagemask(cop0)); | ||
826 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; | 814 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; |
827 | } | 815 | } |
828 | 816 | ||
829 | tlb = &vcpu->arch.guest_tlb[index]; | 817 | tlb = &vcpu->arch.guest_tlb[index]; |
830 | #if 1 | 818 | /* |
831 | /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ | 819 | * Probe the shadow host TLB for the entry being overwritten, if one |
820 | * matches, invalidate it | ||
821 | */ | ||
832 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 822 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
833 | #endif | ||
834 | 823 | ||
835 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 824 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
836 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 825 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
837 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 826 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
838 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 827 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
839 | 828 | ||
840 | kvm_debug | 829 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
841 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 830 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
842 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 831 | kvm_read_c0_guest_entrylo0(cop0), |
843 | kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), | 832 | kvm_read_c0_guest_entrylo1(cop0), |
844 | kvm_read_c0_guest_pagemask(cop0)); | 833 | kvm_read_c0_guest_pagemask(cop0)); |
845 | 834 | ||
846 | return er; | 835 | return EMULATE_DONE; |
847 | } | 836 | } |
848 | 837 | ||
849 | /* Write Guest TLB Entry @ Random Index */ | 838 | /* Write Guest TLB Entry @ Random Index */ |
850 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) | 839 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) |
851 | { | 840 | { |
852 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 841 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
853 | enum emulation_result er = EMULATE_DONE; | ||
854 | struct kvm_mips_tlb *tlb = NULL; | 842 | struct kvm_mips_tlb *tlb = NULL; |
855 | uint32_t pc = vcpu->arch.pc; | 843 | uint32_t pc = vcpu->arch.pc; |
856 | int index; | 844 | int index; |
857 | 845 | ||
858 | #if 1 | ||
859 | get_random_bytes(&index, sizeof(index)); | 846 | get_random_bytes(&index, sizeof(index)); |
860 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); | 847 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); |
861 | #else | ||
862 | index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; | ||
863 | #endif | ||
864 | |||
865 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | ||
866 | printk("%s: illegal index: %d\n", __func__, index); | ||
867 | return EMULATE_FAIL; | ||
868 | } | ||
869 | 848 | ||
870 | tlb = &vcpu->arch.guest_tlb[index]; | 849 | tlb = &vcpu->arch.guest_tlb[index]; |
871 | 850 | ||
872 | #if 1 | 851 | /* |
873 | /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ | 852 | * Probe the shadow host TLB for the entry being overwritten, if one |
853 | * matches, invalidate it | ||
854 | */ | ||
874 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 855 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
875 | #endif | ||
876 | 856 | ||
877 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 857 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
878 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 858 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
879 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 859 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
880 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 860 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
881 | 861 | ||
882 | kvm_debug | 862 | kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", |
883 | ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", | 863 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
884 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 864 | kvm_read_c0_guest_entrylo0(cop0), |
885 | kvm_read_c0_guest_entrylo0(cop0), | 865 | kvm_read_c0_guest_entrylo1(cop0)); |
886 | kvm_read_c0_guest_entrylo1(cop0)); | ||
887 | 866 | ||
888 | return er; | 867 | return EMULATE_DONE; |
889 | } | 868 | } |
890 | 869 | ||
891 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | 870 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) |
892 | { | 871 | { |
893 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 872 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
894 | long entryhi = kvm_read_c0_guest_entryhi(cop0); | 873 | long entryhi = kvm_read_c0_guest_entryhi(cop0); |
895 | enum emulation_result er = EMULATE_DONE; | ||
896 | uint32_t pc = vcpu->arch.pc; | 874 | uint32_t pc = vcpu->arch.pc; |
897 | int index = -1; | 875 | int index = -1; |
898 | 876 | ||
@@ -903,12 +881,12 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | |||
903 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, | 881 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, |
904 | index); | 882 | index); |
905 | 883 | ||
906 | return er; | 884 | return EMULATE_DONE; |
907 | } | 885 | } |
908 | 886 | ||
909 | enum emulation_result | 887 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, |
910 | kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | 888 | uint32_t cause, struct kvm_run *run, |
911 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 889 | struct kvm_vcpu *vcpu) |
912 | { | 890 | { |
913 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 891 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
914 | enum emulation_result er = EMULATE_DONE; | 892 | enum emulation_result er = EMULATE_DONE; |
@@ -922,9 +900,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
922 | */ | 900 | */ |
923 | curr_pc = vcpu->arch.pc; | 901 | curr_pc = vcpu->arch.pc; |
924 | er = update_pc(vcpu, cause); | 902 | er = update_pc(vcpu, cause); |
925 | if (er == EMULATE_FAIL) { | 903 | if (er == EMULATE_FAIL) |
926 | return er; | 904 | return er; |
927 | } | ||
928 | 905 | ||
929 | copz = (inst >> 21) & 0x1f; | 906 | copz = (inst >> 21) & 0x1f; |
930 | rt = (inst >> 16) & 0x1f; | 907 | rt = (inst >> 16) & 0x1f; |
@@ -949,7 +926,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
949 | er = kvm_mips_emul_tlbp(vcpu); | 926 | er = kvm_mips_emul_tlbp(vcpu); |
950 | break; | 927 | break; |
951 | case rfe_op: | 928 | case rfe_op: |
952 | printk("!!!COP0_RFE!!!\n"); | 929 | kvm_err("!!!COP0_RFE!!!\n"); |
953 | break; | 930 | break; |
954 | case eret_op: | 931 | case eret_op: |
955 | er = kvm_mips_emul_eret(vcpu); | 932 | er = kvm_mips_emul_eret(vcpu); |
@@ -973,8 +950,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
973 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 950 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
974 | kvm_mips_trans_mfc0(inst, opc, vcpu); | 951 | kvm_mips_trans_mfc0(inst, opc, vcpu); |
975 | #endif | 952 | #endif |
976 | } | 953 | } else { |
977 | else { | ||
978 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | 954 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; |
979 | 955 | ||
980 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 956 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
@@ -999,8 +975,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
999 | if ((rd == MIPS_CP0_TLB_INDEX) | 975 | if ((rd == MIPS_CP0_TLB_INDEX) |
1000 | && (vcpu->arch.gprs[rt] >= | 976 | && (vcpu->arch.gprs[rt] >= |
1001 | KVM_MIPS_GUEST_TLB_SIZE)) { | 977 | KVM_MIPS_GUEST_TLB_SIZE)) { |
1002 | printk("Invalid TLB Index: %ld", | 978 | kvm_err("Invalid TLB Index: %ld", |
1003 | vcpu->arch.gprs[rt]); | 979 | vcpu->arch.gprs[rt]); |
1004 | er = EMULATE_FAIL; | 980 | er = EMULATE_FAIL; |
1005 | break; | 981 | break; |
1006 | } | 982 | } |
@@ -1010,21 +986,19 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1010 | kvm_change_c0_guest_ebase(cop0, | 986 | kvm_change_c0_guest_ebase(cop0, |
1011 | ~(C0_EBASE_CORE_MASK), | 987 | ~(C0_EBASE_CORE_MASK), |
1012 | vcpu->arch.gprs[rt]); | 988 | vcpu->arch.gprs[rt]); |
1013 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", | 989 | kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", |
1014 | kvm_read_c0_guest_ebase(cop0)); | 990 | kvm_read_c0_guest_ebase(cop0)); |
1015 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 991 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
1016 | uint32_t nasid = | 992 | uint32_t nasid = |
1017 | vcpu->arch.gprs[rt] & ASID_MASK; | 993 | vcpu->arch.gprs[rt] & ASID_MASK; |
1018 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) | 994 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && |
1019 | && | ||
1020 | ((kvm_read_c0_guest_entryhi(cop0) & | 995 | ((kvm_read_c0_guest_entryhi(cop0) & |
1021 | ASID_MASK) != nasid)) { | 996 | ASID_MASK) != nasid)) { |
1022 | 997 | kvm_debug("MTCz, change ASID from %#lx to %#lx\n", | |
1023 | kvm_debug | 998 | kvm_read_c0_guest_entryhi(cop0) |
1024 | ("MTCz, change ASID from %#lx to %#lx\n", | 999 | & ASID_MASK, |
1025 | kvm_read_c0_guest_entryhi(cop0) & | 1000 | vcpu->arch.gprs[rt] |
1026 | ASID_MASK, | 1001 | & ASID_MASK); |
1027 | vcpu->arch.gprs[rt] & ASID_MASK); | ||
1028 | 1002 | ||
1029 | /* Blow away the shadow host TLBs */ | 1003 | /* Blow away the shadow host TLBs */ |
1030 | kvm_mips_flush_host_tlb(1); | 1004 | kvm_mips_flush_host_tlb(1); |
@@ -1049,7 +1023,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1049 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | 1023 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { |
1050 | kvm_write_c0_guest_status(cop0, | 1024 | kvm_write_c0_guest_status(cop0, |
1051 | vcpu->arch.gprs[rt]); | 1025 | vcpu->arch.gprs[rt]); |
1052 | /* Make sure that CU1 and NMI bits are never set */ | 1026 | /* |
1027 | * Make sure that CU1 and NMI bits are | ||
1028 | * never set | ||
1029 | */ | ||
1053 | kvm_clear_c0_guest_status(cop0, | 1030 | kvm_clear_c0_guest_status(cop0, |
1054 | (ST0_CU1 | ST0_NMI)); | 1031 | (ST0_CU1 | ST0_NMI)); |
1055 | 1032 | ||
@@ -1058,6 +1035,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1058 | #endif | 1035 | #endif |
1059 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | 1036 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
1060 | uint32_t old_cause, new_cause; | 1037 | uint32_t old_cause, new_cause; |
1038 | |||
1061 | old_cause = kvm_read_c0_guest_cause(cop0); | 1039 | old_cause = kvm_read_c0_guest_cause(cop0); |
1062 | new_cause = vcpu->arch.gprs[rt]; | 1040 | new_cause = vcpu->arch.gprs[rt]; |
1063 | /* Update R/W bits */ | 1041 | /* Update R/W bits */ |
@@ -1082,9 +1060,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1082 | break; | 1060 | break; |
1083 | 1061 | ||
1084 | case dmtc_op: | 1062 | case dmtc_op: |
1085 | printk | 1063 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", |
1086 | ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", | 1064 | vcpu->arch.pc, rt, rd, sel); |
1087 | vcpu->arch.pc, rt, rd, sel); | ||
1088 | er = EMULATE_FAIL; | 1065 | er = EMULATE_FAIL; |
1089 | break; | 1066 | break; |
1090 | 1067 | ||
@@ -1115,7 +1092,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1115 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; | 1092 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; |
1116 | uint32_t pss = | 1093 | uint32_t pss = |
1117 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; | 1094 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; |
1118 | /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ | 1095 | /* |
1096 | * We don't support any shadow register sets, so | ||
1097 | * SRSCtl[PSS] == SRSCtl[CSS] = 0 | ||
1098 | */ | ||
1119 | if (css || pss) { | 1099 | if (css || pss) { |
1120 | er = EMULATE_FAIL; | 1100 | er = EMULATE_FAIL; |
1121 | break; | 1101 | break; |
@@ -1126,21 +1106,17 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1126 | } | 1106 | } |
1127 | break; | 1107 | break; |
1128 | default: | 1108 | default: |
1129 | printk | 1109 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", |
1130 | ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", | 1110 | vcpu->arch.pc, copz); |
1131 | vcpu->arch.pc, copz); | ||
1132 | er = EMULATE_FAIL; | 1111 | er = EMULATE_FAIL; |
1133 | break; | 1112 | break; |
1134 | } | 1113 | } |
1135 | } | 1114 | } |
1136 | 1115 | ||
1137 | done: | 1116 | done: |
1138 | /* | 1117 | /* Rollback PC only if emulation was unsuccessful */ |
1139 | * Rollback PC only if emulation was unsuccessful | 1118 | if (er == EMULATE_FAIL) |
1140 | */ | ||
1141 | if (er == EMULATE_FAIL) { | ||
1142 | vcpu->arch.pc = curr_pc; | 1119 | vcpu->arch.pc = curr_pc; |
1143 | } | ||
1144 | 1120 | ||
1145 | dont_update_pc: | 1121 | dont_update_pc: |
1146 | /* | 1122 | /* |
@@ -1152,9 +1128,9 @@ dont_update_pc: | |||
1152 | return er; | 1128 | return er; |
1153 | } | 1129 | } |
1154 | 1130 | ||
1155 | enum emulation_result | 1131 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, |
1156 | kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | 1132 | struct kvm_run *run, |
1157 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1133 | struct kvm_vcpu *vcpu) |
1158 | { | 1134 | { |
1159 | enum emulation_result er = EMULATE_DO_MMIO; | 1135 | enum emulation_result er = EMULATE_DO_MMIO; |
1160 | int32_t op, base, rt, offset; | 1136 | int32_t op, base, rt, offset; |
@@ -1252,24 +1228,21 @@ kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | |||
1252 | break; | 1228 | break; |
1253 | 1229 | ||
1254 | default: | 1230 | default: |
1255 | printk("Store not yet supported"); | 1231 | kvm_err("Store not yet supported"); |
1256 | er = EMULATE_FAIL; | 1232 | er = EMULATE_FAIL; |
1257 | break; | 1233 | break; |
1258 | } | 1234 | } |
1259 | 1235 | ||
1260 | /* | 1236 | /* Rollback PC if emulation was unsuccessful */ |
1261 | * Rollback PC if emulation was unsuccessful | 1237 | if (er == EMULATE_FAIL) |
1262 | */ | ||
1263 | if (er == EMULATE_FAIL) { | ||
1264 | vcpu->arch.pc = curr_pc; | 1238 | vcpu->arch.pc = curr_pc; |
1265 | } | ||
1266 | 1239 | ||
1267 | return er; | 1240 | return er; |
1268 | } | 1241 | } |
1269 | 1242 | ||
1270 | enum emulation_result | 1243 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, |
1271 | kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | 1244 | struct kvm_run *run, |
1272 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1245 | struct kvm_vcpu *vcpu) |
1273 | { | 1246 | { |
1274 | enum emulation_result er = EMULATE_DO_MMIO; | 1247 | enum emulation_result er = EMULATE_DO_MMIO; |
1275 | int32_t op, base, rt, offset; | 1248 | int32_t op, base, rt, offset; |
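Both the store and load emulators here follow the snapshot/rollback discipline made explicit in the cleaned-up comments above: capture the PC before emulation can advance it, and restore it on EMULATE_FAIL so the caller still sees the faulting instruction. The shape of the pattern as a self-contained sketch, with illustrative names:

    enum emulation_result { EMULATE_DONE, EMULATE_FAIL, EMULATE_DO_MMIO };

    struct vcpu { unsigned long pc; };

    static enum emulation_result
    emulate_with_rollback(struct vcpu *vcpu,
                          enum emulation_result (*step)(struct vcpu *))
    {
            unsigned long curr_pc = vcpu->pc;  /* snapshot before side effects */
            enum emulation_result er = step(vcpu);

            if (er == EMULATE_FAIL)
                    vcpu->pc = curr_pc;        /* rollback only on failure */
            return er;
    }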
@@ -1364,7 +1337,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | |||
1364 | break; | 1337 | break; |
1365 | 1338 | ||
1366 | default: | 1339 | default: |
1367 | printk("Load not yet supported"); | 1340 | kvm_err("Load not yet supported"); |
1368 | er = EMULATE_FAIL; | 1341 | er = EMULATE_FAIL; |
1369 | break; | 1342 | break; |
1370 | } | 1343 | } |
@@ -1383,7 +1356,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1383 | gfn = va >> PAGE_SHIFT; | 1356 | gfn = va >> PAGE_SHIFT; |
1384 | 1357 | ||
1385 | if (gfn >= kvm->arch.guest_pmap_npages) { | 1358 | if (gfn >= kvm->arch.guest_pmap_npages) { |
1386 | printk("%s: Invalid gfn: %#llx\n", __func__, gfn); | 1359 | kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); |
1387 | kvm_mips_dump_host_tlbs(); | 1360 | kvm_mips_dump_host_tlbs(); |
1388 | kvm_arch_vcpu_dump_regs(vcpu); | 1361 | kvm_arch_vcpu_dump_regs(vcpu); |
1389 | return -1; | 1362 | return -1; |
@@ -1391,7 +1364,8 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1391 | pfn = kvm->arch.guest_pmap[gfn]; | 1364 | pfn = kvm->arch.guest_pmap[gfn]; |
1392 | pa = (pfn << PAGE_SHIFT) | offset; | 1365 | pa = (pfn << PAGE_SHIFT) | offset; |
1393 | 1366 | ||
1394 | printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); | 1367 | kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, |
1368 | CKSEG0ADDR(pa)); | ||
1395 | 1369 | ||
1396 | local_flush_icache_range(CKSEG0ADDR(pa), 32); | 1370 | local_flush_icache_range(CKSEG0ADDR(pa), 32); |
1397 | return 0; | 1371 | return 0; |
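kvm_mips_sync_icache's address translation above is a two-step affair: the gfn indexes a flat gfn-to-pfn table and the in-page offset is re-attached. The arithmetic, with a hypothetical table standing in for kvm->arch.guest_pmap:

    #define PAGE_SHIFT        12
    #define GUEST_PMAP_NPAGES 256

    /* Hypothetical flat gfn -> pfn table, like kvm->arch.guest_pmap */
    static unsigned long guest_pmap[GUEST_PMAP_NPAGES];

    static int gva_to_hpa(unsigned long va, unsigned long *pa)
    {
            unsigned long gfn = va >> PAGE_SHIFT;
            unsigned long offset = va & ((1ul << PAGE_SHIFT) - 1);

            if (gfn >= GUEST_PMAP_NPAGES)
                    return -1;      /* invalid gfn */
            *pa = (guest_pmap[gfn] << PAGE_SHIFT) | offset;
            return 0;
    }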
@@ -1410,13 +1384,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1410 | #define MIPS_CACHE_DCACHE 0x1 | 1384 | #define MIPS_CACHE_DCACHE 0x1 |
1411 | #define MIPS_CACHE_SEC 0x3 | 1385 | #define MIPS_CACHE_SEC 0x3 |
1412 | 1386 | ||
1413 | enum emulation_result | 1387 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
1414 | kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | 1388 | uint32_t cause, |
1415 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1389 | struct kvm_run *run, |
1390 | struct kvm_vcpu *vcpu) | ||
1416 | { | 1391 | { |
1417 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1392 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1418 | extern void (*r4k_blast_dcache) (void); | ||
1419 | extern void (*r4k_blast_icache) (void); | ||
1420 | enum emulation_result er = EMULATE_DONE; | 1393 | enum emulation_result er = EMULATE_DONE; |
1421 | int32_t offset, cache, op_inst, op, base; | 1394 | int32_t offset, cache, op_inst, op, base; |
1422 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1395 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1443,22 +1416,23 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1443 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1416 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1444 | cache, op, base, arch->gprs[base], offset); | 1417 | cache, op, base, arch->gprs[base], offset); |
1445 | 1418 | ||
1446 | /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate | 1419 | /* |
1447 | * the caches entirely by stepping through all the ways/indexes | 1420 | * Treat INDEX_INV as a nop, basically issued by Linux on startup to |
1421 | * invalidate the caches entirely by stepping through all the | ||
1422 | * ways/indexes | ||
1448 | */ | 1423 | */ |
1449 | if (op == MIPS_CACHE_OP_INDEX_INV) { | 1424 | if (op == MIPS_CACHE_OP_INDEX_INV) { |
1450 | kvm_debug | 1425 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1451 | ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1426 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, |
1452 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | 1427 | arch->gprs[base], offset); |
1453 | arch->gprs[base], offset); | ||
1454 | 1428 | ||
1455 | if (cache == MIPS_CACHE_DCACHE) | 1429 | if (cache == MIPS_CACHE_DCACHE) |
1456 | r4k_blast_dcache(); | 1430 | r4k_blast_dcache(); |
1457 | else if (cache == MIPS_CACHE_ICACHE) | 1431 | else if (cache == MIPS_CACHE_ICACHE) |
1458 | r4k_blast_icache(); | 1432 | r4k_blast_icache(); |
1459 | else { | 1433 | else { |
1460 | printk("%s: unsupported CACHE INDEX operation\n", | 1434 | kvm_err("%s: unsupported CACHE INDEX operation\n", |
1461 | __func__); | 1435 | __func__); |
1462 | return EMULATE_FAIL; | 1436 | return EMULATE_FAIL; |
1463 | } | 1437 | } |
1464 | 1438 | ||
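Answering an index-invalidate with a whole-cache blast is safe because, as the reflowed comment notes, Linux issues these in a startup loop over every way and index anyway; one full flush makes the remaining iterations harmless repeats. The dispatch reduces to something like this (the ICACHE code 0x0 is an assumption; only DCACHE 0x1 and SEC 0x3 appear in the defines above):

    enum { MIPS_CACHE_ICACHE = 0x0, MIPS_CACHE_DCACHE = 0x1 };

    static void blast_dcache(void) { /* writeback+invalidate all of D$ */ }
    static void blast_icache(void) { /* invalidate all of I$ */ }

    static int handle_index_inv(int cache)
    {
            switch (cache) {
            case MIPS_CACHE_DCACHE: blast_dcache(); return 0;
            case MIPS_CACHE_ICACHE: blast_icache(); return 0;
            default:                return -1;  /* unsupported CACHE INDEX op */
            }
    }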
@@ -1470,21 +1444,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1470 | 1444 | ||
1471 | preempt_disable(); | 1445 | preempt_disable(); |
1472 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | 1446 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { |
1473 | 1447 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) | |
1474 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { | ||
1475 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); | 1448 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); |
1476 | } | ||
1477 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || | 1449 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || |
1478 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | 1450 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { |
1479 | int index; | 1451 | int index; |
1480 | 1452 | ||
1481 | /* If an entry already exists then skip */ | 1453 | /* If an entry already exists then skip */ |
1482 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { | 1454 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) |
1483 | goto skip_fault; | 1455 | goto skip_fault; |
1484 | } | ||
1485 | 1456 | ||
1486 | /* If address not in the guest TLB, then give the guest a fault, the | 1457 | /* |
1487 | * resulting handler will do the right thing | 1458 | * If address not in the guest TLB, then give the guest a fault, |
1459 | * the resulting handler will do the right thing | ||
1488 | */ | 1460 | */ |
1489 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | 1461 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | |
1490 | (kvm_read_c0_guest_entryhi | 1462 | (kvm_read_c0_guest_entryhi |
@@ -1499,23 +1471,28 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1499 | goto dont_update_pc; | 1471 | goto dont_update_pc; |
1500 | } else { | 1472 | } else { |
1501 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 1473 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
1502 | /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ | 1474 | /* |
1475 | * Check if the entry is valid, if not then setup a TLB | ||
1476 | * invalid exception to the guest | ||
1477 | */ | ||
1503 | if (!TLB_IS_VALID(*tlb, va)) { | 1478 | if (!TLB_IS_VALID(*tlb, va)) { |
1504 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | 1479 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, |
1505 | run, vcpu); | 1480 | run, vcpu); |
1506 | preempt_enable(); | 1481 | preempt_enable(); |
1507 | goto dont_update_pc; | 1482 | goto dont_update_pc; |
1508 | } else { | 1483 | } else { |
1509 | /* We fault an entry from the guest tlb to the shadow host TLB */ | 1484 | /* |
1485 | * We fault an entry from the guest tlb to the | ||
1486 | * shadow host TLB | ||
1487 | */ | ||
1510 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, | 1488 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
1511 | NULL, | 1489 | NULL, |
1512 | NULL); | 1490 | NULL); |
1513 | } | 1491 | } |
1514 | } | 1492 | } |
1515 | } else { | 1493 | } else { |
1516 | printk | 1494 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1517 | ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1495 | cache, op, base, arch->gprs[base], offset); |
1518 | cache, op, base, arch->gprs[base], offset); | ||
1519 | er = EMULATE_FAIL; | 1496 | er = EMULATE_FAIL; |
1520 | preempt_enable(); | 1497 | preempt_enable(); |
1521 | goto dont_update_pc; | 1498 | goto dont_update_pc; |
@@ -1530,7 +1507,10 @@ skip_fault: | |||
1530 | flush_dcache_line(va); | 1507 | flush_dcache_line(va); |
1531 | 1508 | ||
1532 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1509 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1533 | /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ | 1510 | /* |
1511 | * Replace the CACHE instruction, with a SYNCI, not the same, | ||
1512 | * but avoids a trap | ||
1513 | */ | ||
1534 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1514 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1535 | #endif | 1515 | #endif |
1536 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { | 1516 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { |
@@ -1542,9 +1522,8 @@ skip_fault: | |||
1542 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1522 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1543 | #endif | 1523 | #endif |
1544 | } else { | 1524 | } else { |
1545 | printk | 1525 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1546 | ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1526 | cache, op, base, arch->gprs[base], offset); |
1547 | cache, op, base, arch->gprs[base], offset); | ||
1548 | er = EMULATE_FAIL; | 1527 | er = EMULATE_FAIL; |
1549 | preempt_enable(); | 1528 | preempt_enable(); |
1550 | goto dont_update_pc; | 1529 | goto dont_update_pc; |
@@ -1552,28 +1531,23 @@ skip_fault: | |||
1552 | 1531 | ||
1553 | preempt_enable(); | 1532 | preempt_enable(); |
1554 | 1533 | ||
1555 | dont_update_pc: | 1534 | dont_update_pc: |
1556 | /* | 1535 | /* Rollback PC */ |
1557 | * Rollback PC | ||
1558 | */ | ||
1559 | vcpu->arch.pc = curr_pc; | 1536 | vcpu->arch.pc = curr_pc; |
1560 | done: | 1537 | done: |
1561 | return er; | 1538 | return er; |
1562 | } | 1539 | } |
1563 | 1540 | ||
1564 | enum emulation_result | 1541 | enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, |
1565 | kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | 1542 | struct kvm_run *run, |
1566 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1543 | struct kvm_vcpu *vcpu) |
1567 | { | 1544 | { |
1568 | enum emulation_result er = EMULATE_DONE; | 1545 | enum emulation_result er = EMULATE_DONE; |
1569 | uint32_t inst; | 1546 | uint32_t inst; |
1570 | 1547 | ||
1571 | /* | 1548 | /* Fetch the instruction. */ |
1572 | * Fetch the instruction. | 1549 | if (cause & CAUSEF_BD) |
1573 | */ | ||
1574 | if (cause & CAUSEF_BD) { | ||
1575 | opc += 1; | 1550 | opc += 1; |
1576 | } | ||
1577 | 1551 | ||
1578 | inst = kvm_get_inst(opc, vcpu); | 1552 | inst = kvm_get_inst(opc, vcpu); |
1579 | 1553 | ||
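The opc += 1 above implements the branch-delay-slot rule: when Cause.BD is set, EPC names the branch, and the instruction that actually trapped sits one word later. As a standalone helper:

    #include <stdint.h>

    #define CAUSEF_BD 0x80000000u  /* Cause[31]: fault was in a delay slot */

    static const uint32_t *faulting_insn(const uint32_t *epc, uint32_t cause)
    {
            /* EPC names the branch when BD is set; the victim is at EPC + 4 */
            return (cause & CAUSEF_BD) ? epc + 1 : epc;
    }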
@@ -1601,8 +1575,8 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | |||
1601 | break; | 1575 | break; |
1602 | 1576 | ||
1603 | default: | 1577 | default: |
1604 | printk("Instruction emulation not supported (%p/%#x)\n", opc, | 1578 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, |
1605 | inst); | 1579 | inst); |
1606 | kvm_arch_vcpu_dump_regs(vcpu); | 1580 | kvm_arch_vcpu_dump_regs(vcpu); |
1607 | er = EMULATE_FAIL; | 1581 | er = EMULATE_FAIL; |
1608 | break; | 1582 | break; |
@@ -1611,9 +1585,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | |||
1611 | return er; | 1585 | return er; |
1612 | } | 1586 | } |
1613 | 1587 | ||
1614 | enum emulation_result | 1588 | enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, |
1615 | kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, | 1589 | uint32_t *opc, |
1616 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1590 | struct kvm_run *run, |
1591 | struct kvm_vcpu *vcpu) | ||
1617 | { | 1592 | { |
1618 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1593 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1619 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1594 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1638,20 +1613,20 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, | |||
1638 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1613 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1639 | 1614 | ||
1640 | } else { | 1615 | } else { |
1641 | printk("Trying to deliver SYSCALL when EXL is already set\n"); | 1616 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
1642 | er = EMULATE_FAIL; | 1617 | er = EMULATE_FAIL; |
1643 | } | 1618 | } |
1644 | 1619 | ||
1645 | return er; | 1620 | return er; |
1646 | } | 1621 | } |
1647 | 1622 | ||
1648 | enum emulation_result | 1623 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, |
1649 | kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | 1624 | uint32_t *opc, |
1650 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1625 | struct kvm_run *run, |
1626 | struct kvm_vcpu *vcpu) | ||
1651 | { | 1627 | { |
1652 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1628 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1653 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1629 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1654 | enum emulation_result er = EMULATE_DONE; | ||
1655 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1630 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1656 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1631 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1657 | 1632 | ||
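Each of the TLB exception emulators in this stretch builds the same probe key: the VPN2 of the faulting address combined with the guest's live ASID. Pulled out as a helper; the VPN2_MASK value is an assumption for 4 KiB pages, while ASID_MASK matches the code above:

    #define VPN2_MASK 0xffffe000ul  /* EntryHi[31:13]: virtual page pair */
    #define ASID_MASK 0xfful

    static unsigned long tlb_probe_key(unsigned long badvaddr,
                                       unsigned long guest_entryhi)
    {
            return (badvaddr & VPN2_MASK) | (guest_entryhi & ASID_MASK);
    }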
@@ -1688,16 +1663,16 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | |||
1688 | /* Blow away the shadow host TLBs */ | 1663 | /* Blow away the shadow host TLBs */ |
1689 | kvm_mips_flush_host_tlb(1); | 1664 | kvm_mips_flush_host_tlb(1); |
1690 | 1665 | ||
1691 | return er; | 1666 | return EMULATE_DONE; |
1692 | } | 1667 | } |
1693 | 1668 | ||
1694 | enum emulation_result | 1669 | enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, |
1695 | kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | 1670 | uint32_t *opc, |
1696 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1671 | struct kvm_run *run, |
1672 | struct kvm_vcpu *vcpu) | ||
1697 | { | 1673 | { |
1698 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1674 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1699 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1675 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1700 | enum emulation_result er = EMULATE_DONE; | ||
1701 | unsigned long entryhi = | 1676 | unsigned long entryhi = |
1702 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1677 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1703 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1678 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
@@ -1734,16 +1709,16 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | |||
1734 | /* Blow away the shadow host TLBs */ | 1709 | /* Blow away the shadow host TLBs */ |
1735 | kvm_mips_flush_host_tlb(1); | 1710 | kvm_mips_flush_host_tlb(1); |
1736 | 1711 | ||
1737 | return er; | 1712 | return EMULATE_DONE; |
1738 | } | 1713 | } |
1739 | 1714 | ||
1740 | enum emulation_result | 1715 | enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, |
1741 | kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | 1716 | uint32_t *opc, |
1742 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1717 | struct kvm_run *run, |
1718 | struct kvm_vcpu *vcpu) | ||
1743 | { | 1719 | { |
1744 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1720 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1745 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1721 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1746 | enum emulation_result er = EMULATE_DONE; | ||
1747 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1722 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1748 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1723 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1749 | 1724 | ||
@@ -1778,16 +1753,16 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | |||
1778 | /* Blow away the shadow host TLBs */ | 1753 | /* Blow away the shadow host TLBs */ |
1779 | kvm_mips_flush_host_tlb(1); | 1754 | kvm_mips_flush_host_tlb(1); |
1780 | 1755 | ||
1781 | return er; | 1756 | return EMULATE_DONE; |
1782 | } | 1757 | } |
1783 | 1758 | ||
1784 | enum emulation_result | 1759 | enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, |
1785 | kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | 1760 | uint32_t *opc, |
1786 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1761 | struct kvm_run *run, |
1762 | struct kvm_vcpu *vcpu) | ||
1787 | { | 1763 | { |
1788 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1764 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1789 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1765 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1790 | enum emulation_result er = EMULATE_DONE; | ||
1791 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1766 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1792 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1767 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1793 | 1768 | ||
@@ -1822,13 +1797,13 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | |||
1822 | /* Blow away the shadow host TLBs */ | 1797 | /* Blow away the shadow host TLBs */ |
1823 | kvm_mips_flush_host_tlb(1); | 1798 | kvm_mips_flush_host_tlb(1); |
1824 | 1799 | ||
1825 | return er; | 1800 | return EMULATE_DONE; |
1826 | } | 1801 | } |
1827 | 1802 | ||
1828 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | 1803 | /* TLBMOD: store into address matching TLB with Dirty bit off */ |
1829 | enum emulation_result | 1804 | enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, |
1830 | kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | 1805 | struct kvm_run *run, |
1831 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1806 | struct kvm_vcpu *vcpu) |
1832 | { | 1807 | { |
1833 | enum emulation_result er = EMULATE_DONE; | 1808 | enum emulation_result er = EMULATE_DONE; |
1834 | #ifdef DEBUG | 1809 | #ifdef DEBUG |
@@ -1837,9 +1812,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | |||
1837 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1812 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1838 | int index; | 1813 | int index; |
1839 | 1814 | ||
1840 | /* | 1815 | /* If address not in the guest TLB, then we are in trouble */ |
1841 | * If address not in the guest TLB, then we are in trouble | ||
1842 | */ | ||
1843 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | 1816 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); |
1844 | if (index < 0) { | 1817 | if (index < 0) { |
1845 | /* XXXKYMA Invalidate and retry */ | 1818 | /* XXXKYMA Invalidate and retry */ |
@@ -1856,15 +1829,15 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | |||
1856 | return er; | 1829 | return er; |
1857 | } | 1830 | } |
1858 | 1831 | ||
1859 | enum emulation_result | 1832 | enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, |
1860 | kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | 1833 | uint32_t *opc, |
1861 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1834 | struct kvm_run *run, |
1835 | struct kvm_vcpu *vcpu) | ||
1862 | { | 1836 | { |
1863 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1837 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1864 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1838 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1865 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1839 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1866 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1840 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1867 | enum emulation_result er = EMULATE_DONE; | ||
1868 | 1841 | ||
1869 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1842 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1870 | /* save old pc */ | 1843 | /* save old pc */ |
@@ -1895,16 +1868,16 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | |||
1895 | /* Blow away the shadow host TLBs */ | 1868 | /* Blow away the shadow host TLBs */ |
1896 | kvm_mips_flush_host_tlb(1); | 1869 | kvm_mips_flush_host_tlb(1); |
1897 | 1870 | ||
1898 | return er; | 1871 | return EMULATE_DONE; |
1899 | } | 1872 | } |
1900 | 1873 | ||
1901 | enum emulation_result | 1874 | enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, |
1902 | kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, | 1875 | uint32_t *opc, |
1903 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1876 | struct kvm_run *run, |
1877 | struct kvm_vcpu *vcpu) | ||
1904 | { | 1878 | { |
1905 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1879 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1906 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1880 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1907 | enum emulation_result er = EMULATE_DONE; | ||
1908 | 1881 | ||
1909 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1882 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1910 | /* save old pc */ | 1883 | /* save old pc */ |
@@ -1924,12 +1897,13 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, | |||
1924 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); | 1897 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); |
1925 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | 1898 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); |
1926 | 1899 | ||
1927 | return er; | 1900 | return EMULATE_DONE; |
1928 | } | 1901 | } |
1929 | 1902 | ||
1930 | enum emulation_result | 1903 | enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, |
1931 | kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, | 1904 | uint32_t *opc, |
1932 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1905 | struct kvm_run *run, |
1906 | struct kvm_vcpu *vcpu) | ||
1933 | { | 1907 | { |
1934 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1908 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1935 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1909 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1961,9 +1935,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, | |||
1961 | return er; | 1935 | return er; |
1962 | } | 1936 | } |
1963 | 1937 | ||
1964 | enum emulation_result | 1938 | enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, |
1965 | kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | 1939 | uint32_t *opc, |
1966 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1940 | struct kvm_run *run, |
1941 | struct kvm_vcpu *vcpu) | ||
1967 | { | 1942 | { |
1968 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1943 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1969 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1944 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1988,16 +1963,14 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | |||
1988 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1963 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1989 | 1964 | ||
1990 | } else { | 1965 | } else { |
1991 | printk("Trying to deliver BP when EXL is already set\n"); | 1966 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
1992 | er = EMULATE_FAIL; | 1967 | er = EMULATE_FAIL; |
1993 | } | 1968 | } |
1994 | 1969 | ||
1995 | return er; | 1970 | return er; |
1996 | } | 1971 | } |
1997 | 1972 | ||
1998 | /* | 1973 | /* ll/sc, rdhwr, sync emulation */ |
1999 | * ll/sc, rdhwr, sync emulation | ||
2000 | */ | ||
2001 | 1974 | ||
2002 | #define OPCODE 0xfc000000 | 1975 | #define OPCODE 0xfc000000 |
2003 | #define BASE 0x03e00000 | 1976 | #define BASE 0x03e00000 |
@@ -2012,9 +1985,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | |||
2012 | #define SYNC 0x0000000f | 1985 | #define SYNC 0x0000000f |
2013 | #define RDHWR 0x0000003b | 1986 | #define RDHWR 0x0000003b |
2014 | 1987 | ||
2015 | enum emulation_result | 1988 | enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
2016 | kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | 1989 | struct kvm_run *run, |
2017 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1990 | struct kvm_vcpu *vcpu) |
2018 | { | 1991 | { |
2019 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1992 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2020 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1993 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
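The OPCODE/RDHWR masks defined just above carve fixed fields out of a MIPS instruction word. A sketch of recognizing rdhwr rt, rd with them; the SPECIAL3 major opcode value and the FUNC mask name are filled in from the architecture, not from this file:

    #include <stdint.h>

    #define OPCODE 0xfc000000       /* inst[31:26] */
    #define FUNC   0x0000003f       /* inst[5:0] (assumed name) */
    #define SPEC3  (0x1fu << 26)    /* SPECIAL3 major opcode */
    #define RDHWR  0x0000003b

    static int decode_rdhwr(uint32_t inst, unsigned *rt, unsigned *rd)
    {
            if ((inst & OPCODE) != SPEC3 || (inst & FUNC) != RDHWR)
                    return -1;      /* not rdhwr */
            *rt = (inst >> 16) & 0x1f;
            *rd = (inst >> 11) & 0x1f;
            return 0;
    }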
@@ -2031,16 +2004,14 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | |||
2031 | if (er == EMULATE_FAIL) | 2004 | if (er == EMULATE_FAIL) |
2032 | return er; | 2005 | return er; |
2033 | 2006 | ||
2034 | /* | 2007 | /* Fetch the instruction. */ |
2035 | * Fetch the instruction. | ||
2036 | */ | ||
2037 | if (cause & CAUSEF_BD) | 2008 | if (cause & CAUSEF_BD) |
2038 | opc += 1; | 2009 | opc += 1; |
2039 | 2010 | ||
2040 | inst = kvm_get_inst(opc, vcpu); | 2011 | inst = kvm_get_inst(opc, vcpu); |
2041 | 2012 | ||
2042 | if (inst == KVM_INVALID_INST) { | 2013 | if (inst == KVM_INVALID_INST) { |
2043 | printk("%s: Cannot get inst @ %p\n", __func__, opc); | 2014 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); |
2044 | return EMULATE_FAIL; | 2015 | return EMULATE_FAIL; |
2045 | } | 2016 | } |
2046 | 2017 | ||
@@ -2099,15 +2070,15 @@ emulate_ri: | |||
2099 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | 2070 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
2100 | } | 2071 | } |
2101 | 2072 | ||
2102 | enum emulation_result | 2073 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
2103 | kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) | 2074 | struct kvm_run *run) |
2104 | { | 2075 | { |
2105 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | 2076 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; |
2106 | enum emulation_result er = EMULATE_DONE; | 2077 | enum emulation_result er = EMULATE_DONE; |
2107 | unsigned long curr_pc; | 2078 | unsigned long curr_pc; |
2108 | 2079 | ||
2109 | if (run->mmio.len > sizeof(*gpr)) { | 2080 | if (run->mmio.len > sizeof(*gpr)) { |
2110 | printk("Bad MMIO length: %d", run->mmio.len); | 2081 | kvm_err("Bad MMIO length: %d", run->mmio.len); |
2111 | er = EMULATE_FAIL; | 2082 | er = EMULATE_FAIL; |
2112 | goto done; | 2083 | goto done; |
2113 | } | 2084 | } |
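The bad-length check above guards the register copy that follows: an MMIO read result must fit in a single GPR. A simplified model (the real handler also switches on the access size to sign-extend, which is omitted here):

    #include <string.h>

    static int complete_mmio_load(unsigned long *gpr, const void *data,
                                  size_t len)
    {
            if (len > sizeof(*gpr))
                    return -1;      /* bad MMIO length */
            *gpr = 0;
            memcpy(gpr, data, len); /* placement simplified vs. the real code */
            return 0;
    }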
@@ -2142,18 +2113,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
2142 | } | 2113 | } |
2143 | 2114 | ||
2144 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | 2115 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) |
2145 | kvm_debug | 2116 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", |
2146 | ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", | 2117 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, |
2147 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | 2118 | vcpu->mmio_needed); |
2148 | vcpu->mmio_needed); | ||
2149 | 2119 | ||
2150 | done: | 2120 | done: |
2151 | return er; | 2121 | return er; |
2152 | } | 2122 | } |
2153 | 2123 | ||
2154 | static enum emulation_result | 2124 | static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, |
2155 | kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, | 2125 | uint32_t *opc, |
2156 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2126 | struct kvm_run *run, |
2127 | struct kvm_vcpu *vcpu) | ||
2157 | { | 2128 | { |
2158 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2129 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
2159 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2130 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
@@ -2181,16 +2152,17 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, | |||
2181 | exccode, kvm_read_c0_guest_epc(cop0), | 2152 | exccode, kvm_read_c0_guest_epc(cop0), |
2182 | kvm_read_c0_guest_badvaddr(cop0)); | 2153 | kvm_read_c0_guest_badvaddr(cop0)); |
2183 | } else { | 2154 | } else { |
2184 | printk("Trying to deliver EXC when EXL is already set\n"); | 2155 | kvm_err("Trying to deliver EXC when EXL is already set\n"); |
2185 | er = EMULATE_FAIL; | 2156 | er = EMULATE_FAIL; |
2186 | } | 2157 | } |
2187 | 2158 | ||
2188 | return er; | 2159 | return er; |
2189 | } | 2160 | } |
2190 | 2161 | ||
2191 | enum emulation_result | 2162 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
2192 | kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | 2163 | uint32_t *opc, |
2193 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2164 | struct kvm_run *run, |
2165 | struct kvm_vcpu *vcpu) | ||
2194 | { | 2166 | { |
2195 | enum emulation_result er = EMULATE_DONE; | 2167 | enum emulation_result er = EMULATE_DONE; |
2196 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2168 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
@@ -2215,10 +2187,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2215 | break; | 2187 | break; |
2216 | 2188 | ||
2217 | case T_TLB_LD_MISS: | 2189 | case T_TLB_LD_MISS: |
2218 | /* We we are accessing Guest kernel space, then send an address error exception to the guest */ | 2190 | /* |
2191 | * If we are accessing Guest kernel space, then send an | ||

2192 | * address error exception to the guest | ||
2193 | */ | ||
2219 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2194 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2220 | printk("%s: LD MISS @ %#lx\n", __func__, | 2195 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, |
2221 | badvaddr); | 2196 | badvaddr); |
2222 | cause &= ~0xff; | 2197 | cause &= ~0xff; |
2223 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); | 2198 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); |
2224 | er = EMULATE_PRIV_FAIL; | 2199 | er = EMULATE_PRIV_FAIL; |
@@ -2226,10 +2201,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2226 | break; | 2201 | break; |
2227 | 2202 | ||
2228 | case T_TLB_ST_MISS: | 2203 | case T_TLB_ST_MISS: |
2229 | /* We we are accessing Guest kernel space, then send an address error exception to the guest */ | 2204 | /* |
2205 | * If we are accessing Guest kernel space, then send an | ||
2206 | * address error exception to the guest | ||
2207 | */ | ||
2230 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2208 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2231 | printk("%s: ST MISS @ %#lx\n", __func__, | 2209 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, |
2232 | badvaddr); | 2210 | badvaddr); |
2233 | cause &= ~0xff; | 2211 | cause &= ~0xff; |
2234 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); | 2212 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); |
2235 | er = EMULATE_PRIV_FAIL; | 2213 | er = EMULATE_PRIV_FAIL; |
@@ -2237,8 +2215,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2237 | break; | 2215 | break; |
2238 | 2216 | ||
2239 | case T_ADDR_ERR_ST: | 2217 | case T_ADDR_ERR_ST: |
2240 | printk("%s: address error ST @ %#lx\n", __func__, | 2218 | kvm_debug("%s: address error ST @ %#lx\n", __func__, |
2241 | badvaddr); | 2219 | badvaddr); |
2242 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2220 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2243 | cause &= ~0xff; | 2221 | cause &= ~0xff; |
2244 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); | 2222 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); |
@@ -2246,8 +2224,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2246 | er = EMULATE_PRIV_FAIL; | 2224 | er = EMULATE_PRIV_FAIL; |
2247 | break; | 2225 | break; |
2248 | case T_ADDR_ERR_LD: | 2226 | case T_ADDR_ERR_LD: |
2249 | printk("%s: address error LD @ %#lx\n", __func__, | 2227 | kvm_debug("%s: address error LD @ %#lx\n", __func__, |
2250 | badvaddr); | 2228 | badvaddr); |
2251 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2229 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2252 | cause &= ~0xff; | 2230 | cause &= ~0xff; |
2253 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); | 2231 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); |
@@ -2260,21 +2238,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2260 | } | 2238 | } |
2261 | } | 2239 | } |
2262 | 2240 | ||
2263 | if (er == EMULATE_PRIV_FAIL) { | 2241 | if (er == EMULATE_PRIV_FAIL) |
2264 | kvm_mips_emulate_exc(cause, opc, run, vcpu); | 2242 | kvm_mips_emulate_exc(cause, opc, run, vcpu); |
2265 | } | 2243 | |
2266 | return er; | 2244 | return er; |
2267 | } | 2245 | } |
2268 | 2246 | ||
2269 | /* User Address (UA) fault, this could happen if | 2247 | /* |
2248 | * User Address (UA) fault, this could happen if | ||
2270 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this | 2249 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this |
2271 | * case we pass on the fault to the guest kernel and let it handle it. | 2250 | * case we pass on the fault to the guest kernel and let it handle it. |
2272 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | 2251 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this |
2273 | * case we inject the TLB from the Guest TLB into the shadow host TLB | 2252 | * case we inject the TLB from the Guest TLB into the shadow host TLB |
2274 | */ | 2253 | */ |
2275 | enum emulation_result | 2254 | enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, |
2276 | kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | 2255 | uint32_t *opc, |
2277 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2256 | struct kvm_run *run, |
2257 | struct kvm_vcpu *vcpu) | ||
2278 | { | 2258 | { |
2279 | enum emulation_result er = EMULATE_DONE; | 2259 | enum emulation_result er = EMULATE_DONE; |
2280 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2260 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
@@ -2284,10 +2264,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2284 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", | 2264 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", |
2285 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); | 2265 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); |
2286 | 2266 | ||
2287 | /* KVM would not have got the exception if this entry was valid in the shadow host TLB | 2267 | /* |
2288 | * Check the Guest TLB, if the entry is not there then send the guest an | 2268 | * KVM would not have got the exception if this entry was valid in the |
2289 | * exception. The guest exc handler should then inject an entry into the | 2269 | * shadow host TLB. Check the Guest TLB, if the entry is not there then |
2290 | * guest TLB | 2270 | * send the guest an exception. The guest exc handler should then inject |
2271 | * an entry into the guest TLB. | ||
2291 | */ | 2272 | */ |
2292 | index = kvm_mips_guest_tlb_lookup(vcpu, | 2273 | index = kvm_mips_guest_tlb_lookup(vcpu, |
2293 | (va & VPN2_MASK) | | 2274 | (va & VPN2_MASK) | |
@@ -2299,13 +2280,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2299 | } else if (exccode == T_TLB_ST_MISS) { | 2280 | } else if (exccode == T_TLB_ST_MISS) { |
2300 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); | 2281 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); |
2301 | } else { | 2282 | } else { |
2302 | printk("%s: invalid exc code: %d\n", __func__, exccode); | 2283 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2284 | exccode); | ||
2303 | er = EMULATE_FAIL; | 2285 | er = EMULATE_FAIL; |
2304 | } | 2286 | } |
2305 | } else { | 2287 | } else { |
2306 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 2288 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
2307 | 2289 | ||
2308 | /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ | 2290 | /* |
2291 | * Check if the entry is valid, if not then setup a TLB invalid | ||
2292 | * exception to the guest | ||
2293 | */ | ||
2309 | if (!TLB_IS_VALID(*tlb, va)) { | 2294 | if (!TLB_IS_VALID(*tlb, va)) { |
2310 | if (exccode == T_TLB_LD_MISS) { | 2295 | if (exccode == T_TLB_LD_MISS) { |
2311 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, | 2296 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, |
@@ -2314,15 +2299,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2314 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, | 2299 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, |
2315 | vcpu); | 2300 | vcpu); |
2316 | } else { | 2301 | } else { |
2317 | printk("%s: invalid exc code: %d\n", __func__, | 2302 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2318 | exccode); | 2303 | exccode); |
2319 | er = EMULATE_FAIL; | 2304 | er = EMULATE_FAIL; |
2320 | } | 2305 | } |
2321 | } else { | 2306 | } else { |
2322 | kvm_debug | 2307 | kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", |
2323 | ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", | 2308 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); |
2324 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); | 2309 | /* |
2325 | /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ | 2310 | * OK we have a Guest TLB entry, now inject it into the |
2311 | * shadow host TLB | ||
2312 | */ | ||
2326 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, | 2313 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, |
2327 | NULL); | 2314 | NULL); |
2328 | } | 2315 | } |
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/interrupt.c index 1e5de16afe29..9b4445940c2b 100644 --- a/arch/mips/kvm/kvm_mips_int.c +++ b/arch/mips/kvm/interrupt.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Interrupt delivery | 6 | * KVM/MIPS: Interrupt delivery |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | 22 | ||
23 | #include "kvm_mips_int.h" | 23 | #include "interrupt.h" |
24 | 24 | ||
25 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) | 25 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) |
26 | { | 26 | { |
@@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) | |||
34 | 34 | ||
35 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) | 35 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) |
36 | { | 36 | { |
37 | /* Cause bits to reflect the pending timer interrupt, | 37 | /* |
38 | * Cause bits to reflect the pending timer interrupt, | ||
38 | * the EXC code will be set when we are actually | 39 | * the EXC code will be set when we are actually |
39 | * delivering the interrupt: | 40 | * delivering the interrupt: |
40 | */ | 41 | */ |
@@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) | |||
51 | kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); | 52 | kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); |
52 | } | 53 | } |
53 | 54 | ||
54 | void | 55 | void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, |
55 | kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | 56 | struct kvm_mips_interrupt *irq) |
56 | { | 57 | { |
57 | int intr = (int)irq->irq; | 58 | int intr = (int)irq->irq; |
58 | 59 | ||
59 | /* Cause bits to reflect the pending IO interrupt, | 60 | /* |
61 | * Cause bits to reflect the pending IO interrupt, | ||
60 | * the EXC code will be set when we are actually | 62 | * the EXC code will be set when we are actually |
61 | * delivering the interrupt: | 63 | * delivering the interrupt: |
62 | */ | 64 | */ |
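Queueing an IO interrupt boils down to raising the matching IP bit in the guest's Cause register; the dequeue cases below clear the same bits for the negative irq numbers. Modeled here for irq 2 / C_IRQ0 only, with the Cause bit position assumed from MIPS32:

    #include <stdint.h>

    #define C_IRQ0 (1u << 10)   /* Cause.IP2: first external interrupt */

    static void update_guest_cause(uint32_t *cause, int irq)
    {
            if (irq == 2)
                    *cause |= C_IRQ0;   /* queue: mark interrupt pending */
            else if (irq == -2)
                    *cause &= ~C_IRQ0;  /* dequeue: clear pending bit */
    }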
@@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | |||
83 | 85 | ||
84 | } | 86 | } |
85 | 87 | ||
86 | void | 88 | void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, |
87 | kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | 89 | struct kvm_mips_interrupt *irq) |
88 | struct kvm_mips_interrupt *irq) | ||
89 | { | 90 | { |
90 | int intr = (int)irq->irq; | 91 | int intr = (int)irq->irq; |
92 | |||
91 | switch (intr) { | 93 | switch (intr) { |
92 | case -2: | 94 | case -2: |
93 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); | 95 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); |
@@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | |||
111 | } | 113 | } |
112 | 114 | ||
113 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 115 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
114 | int | 116 | int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, |
115 | kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | 117 | uint32_t cause) |
116 | uint32_t cause) | ||
117 | { | 118 | { |
118 | int allowed = 0; | 119 | int allowed = 0; |
119 | uint32_t exccode; | 120 | uint32_t exccode; |
@@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | |||
164 | 165 | ||
165 | /* Are we allowed to deliver the interrupt ??? */ | 166 | /* Are we allowed to deliver the interrupt ??? */ |
166 | if (allowed) { | 167 | if (allowed) { |
167 | |||
168 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 168 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
169 | /* save old pc */ | 169 | /* save old pc */ |
170 | kvm_write_c0_guest_epc(cop0, arch->pc); | 170 | kvm_write_c0_guest_epc(cop0, arch->pc); |
@@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | |||
195 | return allowed; | 195 | return allowed; |
196 | } | 196 | } |
197 | 197 | ||
198 | int | 198 | int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, |
199 | kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, | 199 | uint32_t cause) |
200 | uint32_t cause) | ||
201 | { | 200 | { |
202 | return 1; | 201 | return 1; |
203 | } | 202 | } |
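kvm_mips_irq_deliver_cb delivers only when the guest is not already at exception level, saving the old PC into EPC and setting EXL before redirecting to the vector. A deliberately simplified gate, assuming delivery is simply deferred while EXL is set:

    #include <stdint.h>

    #define ST0_EXL 0x00000002  /* Status[1]: exception level */

    struct guest_cp0 { uint32_t status; unsigned long epc; };

    static int deliver_irq(struct guest_cp0 *cp0, unsigned long *pc,
                           unsigned long vector)
    {
            if (cp0->status & ST0_EXL)
                    return 0;           /* already in an exception: defer */
            cp0->epc = *pc;             /* save old pc */
            cp0->status |= ST0_EXL;
            *pc = vector;
            return 1;
    }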
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/interrupt.h index 20da7d29eede..4ab4bdfad703 100644 --- a/arch/mips/kvm/kvm_mips_int.h +++ b/arch/mips/kvm/interrupt.h | |||
@@ -1,14 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Interrupts | 6 | * KVM/MIPS: Interrupts |
7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* MIPS Exception Priorities, exceptions (including interrupts) are queued up | 11 | /* |
12 | * MIPS Exception Priorities, exceptions (including interrupts) are queued up | ||
12 | * for the guest in the order specified by their priorities | 13 | * for the guest in the order specified by their priorities |
13 | */ | 14 | */ |
14 | 15 | ||
@@ -27,6 +28,9 @@ | |||
27 | #define MIPS_EXC_MAX 12 | 28 | #define MIPS_EXC_MAX 12 |
28 | /* XXXSL More to follow */ | 29 | /* XXXSL More to follow */ |
29 | 30 | ||
31 | extern char mips32_exception[], mips32_exceptionEnd[]; | ||
32 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; | ||
33 | |||
30 | #define C_TI (_ULCAST_(1) << 30) | 34 | #define C_TI (_ULCAST_(1) << 30) |
31 | 35 | ||
32 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) | 36 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) |
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h deleted file mode 100644 index a4a8c85cc8f7..000000000000 --- a/arch/mips/kvm/kvm_mips_comm.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: commpage: mapped into get kernel space | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_COMMPAGE_H__ | ||
13 | #define __KVM_MIPS_COMMPAGE_H__ | ||
14 | |||
15 | struct kvm_mips_commpage { | ||
16 | struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */ | ||
17 | }; | ||
18 | |||
19 | #define KVM_MIPS_COMM_EIDI_OFFSET 0x0 | ||
20 | |||
21 | extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); | ||
22 | |||
23 | #endif /* __KVM_MIPS_COMMPAGE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c deleted file mode 100644 index 3873b1ecc40f..000000000000 --- a/arch/mips/kvm/kvm_mips_commpage.c +++ /dev/null | |||
@@ -1,37 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * commpage, currently used for Virtual COP0 registers. | ||
7 | * Mapped into the guest kernel @ 0x0. | ||
8 | * | ||
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #include "kvm_mips_comm.h" | ||
26 | |||
27 | void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) | ||
28 | { | ||
29 | struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; | ||
30 | memset(page, 0, sizeof(struct kvm_mips_commpage)); | ||
31 | |||
32 | /* Specific init values for fields */ | ||
33 | vcpu->arch.cop0 = &page->cop0; | ||
34 | memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc)); | ||
35 | |||
36 | return; | ||
37 | } | ||
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h deleted file mode 100644 index 86d3b4cc348b..000000000000 --- a/arch/mips/kvm/kvm_mips_opcode.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Define opcode values not defined in <asm/inst.h> | ||
12 | */ | ||
13 | |||
14 | #ifndef __KVM_MIPS_OPCODE_H__ | ||
15 | #define __KVM_MIPS_OPCODE_H__ | ||
16 | |||
17 | /* COP0 Ops */ | ||
18 | #define mfmcz_op 0x0b /* 01011 */ | ||
19 | #define wrpgpr_op 0x0e /* 01110 */ | ||
20 | |||
21 | /* COP0 opcodes (only if COP0 and CO=1): */ | ||
22 | #define wait_op 0x20 /* 100000 */ | ||
23 | |||
24 | #endif /* __KVM_MIPS_OPCODE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/locore.S index 033ac343e72c..d7279c03c517 100644 --- a/arch/mips/kvm/kvm_locore.S +++ b/arch/mips/kvm/locore.S | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <asm/stackframe.h> | 16 | #include <asm/stackframe.h> |
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | 18 | ||
19 | |||
20 | #define _C_LABEL(x) x | 19 | #define _C_LABEL(x) x |
21 | #define MIPSX(name) mips32_ ## name | 20 | #define MIPSX(name) mips32_ ## name |
22 | #define CALLFRAME_SIZ 32 | 21 | #define CALLFRAME_SIZ 32 |
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
91 | LONG_S $24, PT_R24(k1) | 90 | LONG_S $24, PT_R24(k1) |
92 | LONG_S $25, PT_R25(k1) | 91 | LONG_S $25, PT_R25(k1) |
93 | 92 | ||
94 | /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ | 93 | /* |
94 | * XXXKYMA k0/k1 not saved, not being used if we got here through | ||
95 | * an ioctl() | ||
96 | */ | ||
95 | 97 | ||
96 | LONG_S $28, PT_R28(k1) | 98 | LONG_S $28, PT_R28(k1) |
97 | LONG_S $29, PT_R29(k1) | 99 | LONG_S $29, PT_R29(k1) |
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
132 | /* Save the kernel gp as well */ | 134 | /* Save the kernel gp as well */ |
133 | LONG_S gp, VCPU_HOST_GP(k1) | 135 | LONG_S gp, VCPU_HOST_GP(k1) |
134 | 136 | ||
135 | /* Setup status register for running the guest in UM, interrupts are disabled */ | 137 | /* |
138 | * Setup status register for running the guest in UM, interrupts | ||
139 | * are disabled | ||
140 | */ | ||
136 | li k0, (ST0_EXL | KSU_USER | ST0_BEV) | 141 | li k0, (ST0_EXL | KSU_USER | ST0_BEV) |
137 | mtc0 k0, CP0_STATUS | 142 | mtc0 k0, CP0_STATUS |
138 | ehb | 143 | ehb |
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
152 | mtc0 k0, CP0_STATUS | 157 | mtc0 k0, CP0_STATUS |
153 | ehb | 158 | ehb |
154 | 159 | ||
155 | |||
156 | /* Set Guest EPC */ | 160 | /* Set Guest EPC */ |
157 | LONG_L t0, VCPU_PC(k1) | 161 | LONG_L t0, VCPU_PC(k1) |
158 | mtc0 t0, CP0_EPC | 162 | mtc0 t0, CP0_EPC |
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid) | |||
165 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 169 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
166 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 170 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
167 | 1: | 171 | 1: |
168 | /* t1: contains the base of the ASID array, need to get the cpu id */ | 172 | /* t1: contains the base of the ASID array, need to get the cpu id */ |
169 | LONG_L t2, TI_CPU($28) /* smp_processor_id */ | 173 | LONG_L t2, TI_CPU($28) /* smp_processor_id */ |
170 | INT_SLL t2, t2, 2 /* x4 */ | 174 | INT_SLL t2, t2, 2 /* x4 */ |
171 | REG_ADDU t3, t1, t2 | 175 | REG_ADDU t3, t1, t2 |
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1) | |||
229 | eret | 233 | eret |
230 | 234 | ||
231 | VECTOR(MIPSX(exception), unknown) | 235 | VECTOR(MIPSX(exception), unknown) |
232 | /* | 236 | /* Find out what mode we came from and jump to the proper handler. */ |
233 | * Find out what mode we came from and jump to the proper handler. | ||
234 | */ | ||
235 | mtc0 k0, CP0_ERROREPC #01: Save guest k0 | 237 | mtc0 k0, CP0_ERROREPC #01: Save guest k0 |
236 | ehb #02: | 238 | ehb #02: |
237 | 239 | ||
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown) | |||
239 | INT_SRL k0, k0, 10 #03: Get rid of CPUNum | 241 | INT_SRL k0, k0, 10 #03: Get rid of CPUNum |
240 | INT_SLL k0, k0, 10 #04 | 242 | INT_SLL k0, k0, 10 #04 |
241 | LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 | 243 | LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 |
242 | INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 | 244 | INT_ADDIU k0, k0, 0x2000 #06: Exception handler is |
245 | # installed @ offset 0x2000 | ||
243 | j k0 #07: jump to the function | 246 | j k0 #07: jump to the function |
244 | nop #08: branch delay slot | 247 | nop #08: branch delay slot |
245 | VECTOR_END(MIPSX(exceptionEnd)) | 248 | VECTOR_END(MIPSX(exceptionEnd)) |
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd)) | |||
248 | /* | 251 | /* |
249 | * Generic Guest exception handler. We end up here when the guest | 252 | * Generic Guest exception handler. We end up here when the guest |
250 | * does something that causes a trap to kernel mode. | 253 | * does something that causes a trap to kernel mode. |
251 | * | ||
252 | */ | 254 | */ |
253 | NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | 255 | NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) |
254 | /* Get the VCPU pointer from DDTATA_LO */ | 256 | /* Get the VCPU pointer from DDTATA_LO */ |
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
290 | LONG_S $30, VCPU_R30(k1) | 292 | LONG_S $30, VCPU_R30(k1) |
291 | LONG_S $31, VCPU_R31(k1) | 293 | LONG_S $31, VCPU_R31(k1) |
292 | 294 | ||
293 | /* We need to save hi/lo and restore them on | 295 | /* We need to save hi/lo and restore them on the way out */ |
294 | * the way out | ||
295 | */ | ||
296 | mfhi t0 | 296 | mfhi t0 |
297 | LONG_S t0, VCPU_HI(k1) | 297 | LONG_S t0, VCPU_HI(k1) |
298 | 298 | ||
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
321 | /* Save pointer to run in s0, will be saved by the compiler */ | 321 | /* Save pointer to run in s0, will be saved by the compiler */ |
322 | move s0, a0 | 322 | move s0, a0 |
323 | 323 | ||
324 | /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to | 324 | /* |
325 | * process the exception */ | 325 | * Save Host level EPC, BadVaddr and Cause to VCPU, useful to |
326 | * process the exception | ||
327 | */ | ||
326 | mfc0 k0,CP0_EPC | 328 | mfc0 k0,CP0_EPC |
327 | LONG_S k0, VCPU_PC(k1) | 329 | LONG_S k0, VCPU_PC(k1) |
328 | 330 | ||
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
351 | LONG_L k0, VCPU_HOST_EBASE(k1) | 353 | LONG_L k0, VCPU_HOST_EBASE(k1) |
352 | mtc0 k0,CP0_EBASE | 354 | mtc0 k0,CP0_EBASE |
353 | 355 | ||
354 | |||
355 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ | 356 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ |
356 | .set at | 357 | .set at |
357 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) | 358 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) |
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
369 | /* Saved host state */ | 370 | /* Saved host state */ |
370 | INT_ADDIU sp, sp, -PT_SIZE | 371 | INT_ADDIU sp, sp, -PT_SIZE |
371 | 372 | ||
372 | /* XXXKYMA do we need to load the host ASID, maybe not because the | 373 | /* |
374 | * XXXKYMA do we need to load the host ASID, maybe not because the | ||
373 | * kernel entries are marked GLOBAL, need to verify | 375 | * kernel entries are marked GLOBAL, need to verify |
374 | */ | 376 | */ |
375 | 377 | ||
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
383 | 385 | ||
384 | /* Jump to handler */ | 386 | /* Jump to handler */ |
385 | FEXPORT(__kvm_mips_jump_to_handler) | 387 | FEXPORT(__kvm_mips_jump_to_handler) |
386 | /* XXXKYMA: not sure if this is safe, how large is the stack?? | 388 | /* |
389 | * XXXKYMA: not sure if this is safe, how large is the stack?? | ||
387 | * Now jump to the kvm_mips_handle_exit() to see if we can deal | 390 | * Now jump to the kvm_mips_handle_exit() to see if we can deal |
388 | * with this in the kernel */ | 391 | * with this in the kernel |
392 | */ | ||
389 | PTR_LA t9, kvm_mips_handle_exit | 393 | PTR_LA t9, kvm_mips_handle_exit |
390 | jalr.hb t9 | 394 | jalr.hb t9 |
391 | INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ | 395 | INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ |
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler) | |||
394 | di | 398 | di |
395 | ehb | 399 | ehb |
396 | 400 | ||
397 | /* XXXKYMA: k0/k1 could have been blown away if we processed | 401 | /* |
402 | * XXXKYMA: k0/k1 could have been blown away if we processed | ||
398 | * an exception while we were handling the exception from the | 403 | * an exception while we were handling the exception from the |
399 | * guest, reload k1 | 404 | * guest, reload k1 |
400 | */ | 405 | */ |
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler) | |||
402 | move k1, s1 | 407 | move k1, s1 |
403 | INT_ADDIU k1, k1, VCPU_HOST_ARCH | 408 | INT_ADDIU k1, k1, VCPU_HOST_ARCH |
404 | 409 | ||
405 | /* Check return value, should tell us if we are returning to the | 410 | /* |
411 | * Check return value, should tell us if we are returning to the | ||
406 | * host (handle I/O etc) or resuming the guest | 412 | * host (handle I/O etc) or resuming the guest |
407 | */ | 413 | */ |
408 | andi t0, v0, RESUME_HOST | 414 | andi t0, v0, RESUME_HOST |
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host: | |||
521 | LONG_L $0, PT_R0(k1) | 527 | LONG_L $0, PT_R0(k1) |
522 | LONG_L $1, PT_R1(k1) | 528 | LONG_L $1, PT_R1(k1) |
523 | 529 | ||
524 | /* r2/v0 is the return code, shift it down by 2 (arithmetic) | 530 | /* |
525 | * to recover the err code */ | 531 | * r2/v0 is the return code, shift it down by 2 (arithmetic) |
532 | * to recover the err code | ||
533 | */ | ||
526 | INT_SRA k0, v0, 2 | 534 | INT_SRA k0, v0, 2 |
527 | move $2, k0 | 535 | move $2, k0 |
528 | 536 | ||
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host: | |||
566 | PTR_LI k0, 0x2000000F | 574 | PTR_LI k0, 0x2000000F |
567 | mtc0 k0, CP0_HWRENA | 575 | mtc0 k0, CP0_HWRENA |
568 | 576 | ||
569 | |||
570 | /* Restore RA, which is the address we will return to */ | 577 | /* Restore RA, which is the address we will return to */ |
571 | LONG_L ra, PT_R31(k1) | 578 | LONG_L ra, PT_R31(k1) |
572 | j ra | 579 | j ra |
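A note on the INT_SRA by 2 in __kvm_mips_return_to_host above: it undoes the packing applied by the C exit handler, which returns an error code in the upper bits and resume flags in the low two (see the `(-EINTR << 2) | RESUME_HOST` expression later in this patch). A hedged sketch of that encoding; the flag values here are assumed for illustration and the kernel's kvm_host.h is authoritative:

    #include <errno.h>
    #include <stdio.h>

    #define RESUME_GUEST 0        /* assumed values, illustration only */
    #define RESUME_HOST  (1 << 1)

    int main(void)
    {
            int packed = (-EINTR << 2) | RESUME_HOST; /* as the exit handler packs it */
            int err    = packed >> 2;                 /* the INT_SRA above, in C */

            printf("err=%d resume_host=%d\n", err, !!(packed & RESUME_HOST));
            return 0;
    }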
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/mips.c index f3c56a182fd8..4fda672cb58e 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/mips.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -21,8 +21,8 @@ | |||
21 | 21 | ||
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | 23 | ||
24 | #include "kvm_mips_int.h" | 24 | #include "interrupt.h" |
25 | #include "kvm_mips_comm.h" | 25 | #include "commpage.h" |
26 | 26 | ||
27 | #define CREATE_TRACE_POINTS | 27 | #define CREATE_TRACE_POINTS |
28 | #include "trace.h" | 28 | #include "trace.h" |
@@ -31,38 +31,41 @@ | |||
31 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | 31 | #define VECTORSPACING 0x100 /* for EI/VI mode */ |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) |
35 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 35 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
36 | { "wait", VCPU_STAT(wait_exits) }, | 36 | { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, |
37 | { "cache", VCPU_STAT(cache_exits) }, | 37 | { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, |
38 | { "signal", VCPU_STAT(signal_exits) }, | 38 | { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, |
39 | { "interrupt", VCPU_STAT(int_exits) }, | 39 | { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, |
40 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, | 40 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, |
41 | { "tlbmod", VCPU_STAT(tlbmod_exits) }, | 41 | { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, |
42 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, | 42 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, |
43 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, | 43 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, |
44 | { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, | 44 | { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, |
45 | { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, | 45 | { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, |
46 | { "syscall", VCPU_STAT(syscall_exits) }, | 46 | { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, |
47 | { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, | 47 | { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, |
48 | { "break_inst", VCPU_STAT(break_inst_exits) }, | 48 | { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, |
49 | { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, | 49 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, |
50 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 50 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, |
51 | {NULL} | 51 | {NULL} |
52 | }; | 52 | }; |
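The VCPU_STAT change above is mechanical but worth spelling out: the old macro expanded to two struct initializers at once, hiding the KVM_STAT_VCPU field inside the macro, while the new form expands to a single value so every field is visible at the use site. A compilable sketch of the two expansions, with a simplified struct layout assumed for illustration:

    #include <stddef.h>

    struct vcpu_stat { unsigned long wait_exits; };
    struct kvm_vcpu_sk { struct vcpu_stat stat; };
    struct dbgfs_item { const char *name; size_t off; int kind; };

    #define KVM_STAT_VCPU 1

    /* old style: the macro silently fills two fields */
    #define VCPU_STAT_OLD(x) offsetof(struct kvm_vcpu_sk, stat.x), KVM_STAT_VCPU
    /* new style: one value per macro, the kind is explicit per entry */
    #define VCPU_STAT_NEW(x) offsetof(struct kvm_vcpu_sk, stat.x)

    struct dbgfs_item entries[] = {
            { "wait", VCPU_STAT_OLD(wait_exits) },
            { "wait", VCPU_STAT_NEW(wait_exits), KVM_STAT_VCPU },
    };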
53 | 53 | ||
54 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) | 54 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) |
55 | { | 55 | { |
56 | int i; | 56 | int i; |
57 | |||
57 | for_each_possible_cpu(i) { | 58 | for_each_possible_cpu(i) { |
58 | vcpu->arch.guest_kernel_asid[i] = 0; | 59 | vcpu->arch.guest_kernel_asid[i] = 0; |
59 | vcpu->arch.guest_user_asid[i] = 0; | 60 | vcpu->arch.guest_user_asid[i] = 0; |
60 | } | 61 | } |
62 | |||
61 | return 0; | 63 | return 0; |
62 | } | 64 | } |
63 | 65 | ||
64 | /* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we | 66 | /* |
65 | * are "runnable" if interrupts are pending | 67 | * XXXKYMA: We are simulating a processor that has the WII bit set in
68 | * Config7, so we are "runnable" if interrupts are pending | ||
66 | */ | 69 | */ |
67 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 70 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
68 | { | 71 | { |
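The body of kvm_arch_vcpu_runnable() falls outside this hunk, but the comment above pins down its logic: a CPU with the Config7 WII bit leaves wait state whenever an interrupt is pending, so runnability reduces to a check of the pending-exception word. A hedged sketch of such a check (the field name mirrors the pending_exceptions word dumped later in this patch; not a drop-in kernel body):

    struct vcpu_arch_sk { unsigned long pending_exceptions; };

    static int vcpu_runnable_sk(const struct vcpu_arch_sk *arch)
    {
            return !!arch->pending_exceptions; /* runnable iff something is queued */
    }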
@@ -94,16 +97,17 @@ void kvm_arch_hardware_unsetup(void) | |||
94 | 97 | ||
95 | void kvm_arch_check_processor_compat(void *rtn) | 98 | void kvm_arch_check_processor_compat(void *rtn) |
96 | { | 99 | { |
97 | int *r = (int *)rtn; | 100 | *(int *)rtn = 0; |
98 | *r = 0; | ||
99 | return; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | static void kvm_mips_init_tlbs(struct kvm *kvm) | 103 | static void kvm_mips_init_tlbs(struct kvm *kvm) |
103 | { | 104 | { |
104 | unsigned long wired; | 105 | unsigned long wired; |
105 | 106 | ||
106 | /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ | 107 | /* |
108 | * Add a wired entry to the TLB, it is used to map the commpage to | ||
109 | * the Guest kernel | ||
110 | */ | ||
107 | wired = read_c0_wired(); | 111 | wired = read_c0_wired(); |
108 | write_c0_wired(wired + 1); | 112 | write_c0_wired(wired + 1); |
109 | mtc0_tlbw_hazard(); | 113 | mtc0_tlbw_hazard(); |
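Bumping c0_wired, as kvm_mips_init_tlbs() does above, reserves one more low TLB index from the hardware's random replacement; the commpage mapping then lives in that pinned slot. A minimal model of the bookkeeping (names are illustrative):

    /* Entries [0, wired) are exempt from random replacement. */
    struct tlb_model { unsigned int wired, size; };

    static int reserve_wired_entry(struct tlb_model *t)
    {
            if (t->wired >= t->size)
                    return -1;      /* nothing left to pin */
            return t->wired++;      /* index of the newly wired slot */
    }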
@@ -130,7 +134,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
130 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); | 134 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); |
131 | } | 135 | } |
132 | 136 | ||
133 | |||
134 | return 0; | 137 | return 0; |
135 | } | 138 | } |
136 | 139 | ||
@@ -185,8 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
185 | } | 188 | } |
186 | } | 189 | } |
187 | 190 | ||
188 | long | 191 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, |
189 | kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 192 | unsigned long arg) |
190 | { | 193 | { |
191 | return -ENOIOCTLCMD; | 194 | return -ENOIOCTLCMD; |
192 | } | 195 | } |
@@ -207,20 +210,20 @@ void kvm_arch_memslots_updated(struct kvm *kvm) | |||
207 | } | 210 | } |
208 | 211 | ||
209 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 212 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
210 | struct kvm_memory_slot *memslot, | 213 | struct kvm_memory_slot *memslot, |
211 | struct kvm_userspace_memory_region *mem, | 214 | struct kvm_userspace_memory_region *mem, |
212 | enum kvm_mr_change change) | 215 | enum kvm_mr_change change) |
213 | { | 216 | { |
214 | return 0; | 217 | return 0; |
215 | } | 218 | } |
216 | 219 | ||
217 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 220 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
218 | struct kvm_userspace_memory_region *mem, | 221 | struct kvm_userspace_memory_region *mem, |
219 | const struct kvm_memory_slot *old, | 222 | const struct kvm_memory_slot *old, |
220 | enum kvm_mr_change change) | 223 | enum kvm_mr_change change) |
221 | { | 224 | { |
222 | unsigned long npages = 0; | 225 | unsigned long npages = 0; |
223 | int i, err = 0; | 226 | int i; |
224 | 227 | ||
225 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", | 228 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", |
226 | __func__, kvm, mem->slot, mem->guest_phys_addr, | 229 | __func__, kvm, mem->slot, mem->guest_phys_addr, |
@@ -238,21 +241,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
238 | 241 | ||
239 | if (!kvm->arch.guest_pmap) { | 242 | if (!kvm->arch.guest_pmap) { |
240 | kvm_err("Failed to allocate guest PMAP"); | 243 | kvm_err("Failed to allocate guest PMAP"); |
241 | err = -ENOMEM; | 244 | return; |
242 | goto out; | ||
243 | } | 245 | } |
244 | 246 | ||
245 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", | 247 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", |
246 | npages, kvm->arch.guest_pmap); | 248 | npages, kvm->arch.guest_pmap); |
247 | 249 | ||
248 | /* Now setup the page table */ | 250 | /* Now setup the page table */ |
249 | for (i = 0; i < npages; i++) { | 251 | for (i = 0; i < npages; i++) |
250 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; | 252 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; |
251 | } | ||
252 | } | 253 | } |
253 | } | 254 | } |
254 | out: | ||
255 | return; | ||
256 | } | 255 | } |
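The guest_pmap allocated above is a flat gfn-to-pfn table, pre-filled with KVM_INVALID_PAGE and populated lazily on first fault; kvm_mips_map_page() later in this patch follows exactly this check-then-fill shape. A self-contained sketch of the pattern (the identity resolver stands in for the real gfn_to_pfn hook):

    #include <stdio.h>

    #define INVALID_PAGE (~0UL)
    #define NPAGES 8

    static unsigned long pmap[NPAGES]; /* gfn -> pfn table */

    /* stand-in for the real resolver, illustration only */
    static unsigned long lookup_pfn(unsigned long gfn) { return 0x1000 + gfn; }

    static int map_page_lazy(unsigned long gfn)
    {
            if (pmap[gfn] != INVALID_PAGE)
                    return 0;                /* already mapped */
            pmap[gfn] = lookup_pfn(gfn);     /* fill on first fault */
            return 0;
    }

    int main(void)
    {
            for (int i = 0; i < NPAGES; i++)
                    pmap[i] = INVALID_PAGE;  /* as the hunk above does */
            map_page_lazy(3);
            printf("pfn(3) = %#lx\n", pmap[3]);
            return 0;
    }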
257 | 256 | ||
258 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | 257 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
@@ -270,8 +269,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
270 | 269 | ||
271 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 270 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
272 | { | 271 | { |
273 | extern char mips32_exception[], mips32_exceptionEnd[]; | ||
274 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; | ||
275 | int err, size, offset; | 272 | int err, size, offset; |
276 | void *gebase; | 273 | void *gebase; |
277 | int i; | 274 | int i; |
@@ -290,14 +287,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
290 | 287 | ||
291 | kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); | 288 | kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); |
292 | 289 | ||
293 | /* Allocate space for host mode exception handlers that handle | 290 | /* |
291 | * Allocate space for host mode exception handlers that handle | ||
294 | * guest mode exits | 292 | * guest mode exits |
295 | */ | 293 | */ |
296 | if (cpu_has_veic || cpu_has_vint) { | 294 | if (cpu_has_veic || cpu_has_vint) |
297 | size = 0x200 + VECTORSPACING * 64; | 295 | size = 0x200 + VECTORSPACING * 64; |
298 | } else { | 296 | else |
299 | size = 0x4000; | 297 | size = 0x4000; |
300 | } | ||
301 | 298 | ||
302 | /* Save Linux EBASE */ | 299 | /* Save Linux EBASE */ |
303 | vcpu->arch.host_ebase = (void *)read_c0_ebase(); | 300 | vcpu->arch.host_ebase = (void *)read_c0_ebase(); |
@@ -345,7 +342,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
345 | local_flush_icache_range((unsigned long)gebase, | 342 | local_flush_icache_range((unsigned long)gebase, |
346 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | 343 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); |
347 | 344 | ||
348 | /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ | 345 | /* |
346 | * Allocate comm page for guest kernel, a TLB will be reserved for | ||
347 | * mapping GVA @ 0xFFFF8000 to this page | ||
348 | */ | ||
349 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); | 349 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); |
350 | 350 | ||
351 | if (!vcpu->arch.kseg0_commpage) { | 351 | if (!vcpu->arch.kseg0_commpage) { |
@@ -392,9 +392,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
392 | kvm_arch_vcpu_free(vcpu); | 392 | kvm_arch_vcpu_free(vcpu); |
393 | } | 393 | } |
394 | 394 | ||
395 | int | 395 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
396 | kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 396 | struct kvm_guest_debug *dbg) |
397 | struct kvm_guest_debug *dbg) | ||
398 | { | 397 | { |
399 | return -ENOIOCTLCMD; | 398 | return -ENOIOCTLCMD; |
400 | } | 399 | } |
@@ -431,8 +430,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
431 | return r; | 430 | return r; |
432 | } | 431 | } |
433 | 432 | ||
434 | int | 433 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
435 | kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | 434 | struct kvm_mips_interrupt *irq) |
436 | { | 435 | { |
437 | int intr = (int)irq->irq; | 436 | int intr = (int)irq->irq; |
438 | struct kvm_vcpu *dvcpu = NULL; | 437 | struct kvm_vcpu *dvcpu = NULL; |
@@ -459,23 +458,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | |||
459 | 458 | ||
460 | dvcpu->arch.wait = 0; | 459 | dvcpu->arch.wait = 0; |
461 | 460 | ||
462 | if (waitqueue_active(&dvcpu->wq)) { | 461 | if (waitqueue_active(&dvcpu->wq)) |
463 | wake_up_interruptible(&dvcpu->wq); | 462 | wake_up_interruptible(&dvcpu->wq); |
464 | } | ||
465 | 463 | ||
466 | return 0; | 464 | return 0; |
467 | } | 465 | } |
468 | 466 | ||
469 | int | 467 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
470 | kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 468 | struct kvm_mp_state *mp_state) |
471 | struct kvm_mp_state *mp_state) | ||
472 | { | 469 | { |
473 | return -ENOIOCTLCMD; | 470 | return -ENOIOCTLCMD; |
474 | } | 471 | } |
475 | 472 | ||
476 | int | 473 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
477 | kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 474 | struct kvm_mp_state *mp_state) |
478 | struct kvm_mp_state *mp_state) | ||
479 | { | 475 | { |
480 | return -ENOIOCTLCMD; | 476 | return -ENOIOCTLCMD; |
481 | } | 477 | } |
@@ -632,10 +628,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
632 | } | 628 | } |
633 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | 629 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
634 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | 630 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; |
631 | |||
635 | return put_user(v, uaddr64); | 632 | return put_user(v, uaddr64); |
636 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | 633 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { |
637 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | 634 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; |
638 | u32 v32 = (u32)v; | 635 | u32 v32 = (u32)v; |
636 | |||
639 | return put_user(v32, uaddr32); | 637 | return put_user(v32, uaddr32); |
640 | } else { | 638 | } else { |
641 | return -EINVAL; | 639 | return -EINVAL; |
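kvm_mips_get_reg() above is the kernel half of the KVM_GET_ONE_REG ioctl: userspace supplies a register id plus a destination address, and the size bits of the id select the put_user() width. A hedged userspace sketch; the id here is a placeholder built from the generic prefixes, not a real MIPS register id:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Placeholder; real callers pass a KVM_REG_MIPS_* id. */
    static const uint64_t SOME_REG_ID = KVM_REG_MIPS | KVM_REG_SIZE_U64;

    int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *out)
    {
            struct kvm_one_reg reg = {
                    .id   = id,
                    .addr = (uintptr_t)out, /* kernel put_user()s the value here */
            };
            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }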
@@ -728,8 +726,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
728 | return 0; | 726 | return 0; |
729 | } | 727 | } |
730 | 728 | ||
731 | long | 729 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, |
732 | kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 730 | unsigned long arg) |
733 | { | 731 | { |
734 | struct kvm_vcpu *vcpu = filp->private_data; | 732 | struct kvm_vcpu *vcpu = filp->private_data; |
735 | void __user *argp = (void __user *)arg; | 733 | void __user *argp = (void __user *)arg; |
@@ -739,6 +737,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
739 | case KVM_SET_ONE_REG: | 737 | case KVM_SET_ONE_REG: |
740 | case KVM_GET_ONE_REG: { | 738 | case KVM_GET_ONE_REG: { |
741 | struct kvm_one_reg reg; | 739 | struct kvm_one_reg reg; |
740 | |||
742 | if (copy_from_user(®, argp, sizeof(reg))) | 741 | if (copy_from_user(®, argp, sizeof(reg))) |
743 | return -EFAULT; | 742 | return -EFAULT; |
744 | if (ioctl == KVM_SET_ONE_REG) | 743 | if (ioctl == KVM_SET_ONE_REG) |
@@ -773,6 +772,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
773 | case KVM_INTERRUPT: | 772 | case KVM_INTERRUPT: |
774 | { | 773 | { |
775 | struct kvm_mips_interrupt irq; | 774 | struct kvm_mips_interrupt irq; |
775 | |||
776 | r = -EFAULT; | 776 | r = -EFAULT; |
777 | if (copy_from_user(&irq, argp, sizeof(irq))) | 777 | if (copy_from_user(&irq, argp, sizeof(irq))) |
778 | goto out; | 778 | goto out; |
@@ -791,9 +791,7 @@ out: | |||
791 | return r; | 791 | return r; |
792 | } | 792 | } |
793 | 793 | ||
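To pair with kvm_vcpu_ioctl_interrupt() above, here is a hedged userspace sketch of queueing an interrupt through KVM_INTERRUPT on a vcpu fd; on a MIPS build, struct kvm_mips_interrupt (fields: cpu, irq) arrives via <linux/kvm.h>, and a negative irq dequeues, as the handler's (int) cast suggests:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int mips_kick_irq(int vcpu_fd, int cpu, int irq)
    {
            struct kvm_mips_interrupt intr = { .cpu = cpu, .irq = irq };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &intr); /* queue (or dequeue) */
    }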
794 | /* | 794 | /* Get (and clear) the dirty memory log for a memory slot. */ |
795 | * Get (and clear) the dirty memory log for a memory slot. | ||
796 | */ | ||
797 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 795 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
798 | { | 796 | { |
799 | struct kvm_memory_slot *memslot; | 797 | struct kvm_memory_slot *memslot; |
@@ -815,8 +813,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | |||
815 | ga = memslot->base_gfn << PAGE_SHIFT; | 813 | ga = memslot->base_gfn << PAGE_SHIFT; |
816 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | 814 | ga_end = ga + (memslot->npages << PAGE_SHIFT); |
817 | 815 | ||
818 | printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, | 816 | kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, |
819 | ga_end); | 817 | ga_end); |
820 | 818 | ||
821 | n = kvm_dirty_bitmap_bytes(memslot); | 819 | n = kvm_dirty_bitmap_bytes(memslot); |
822 | memset(memslot->dirty_bitmap, 0, n); | 820 | memset(memslot->dirty_bitmap, 0, n); |
@@ -843,16 +841,12 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
843 | 841 | ||
844 | int kvm_arch_init(void *opaque) | 842 | int kvm_arch_init(void *opaque) |
845 | { | 843 | { |
846 | int ret; | ||
847 | |||
848 | if (kvm_mips_callbacks) { | 844 | if (kvm_mips_callbacks) { |
849 | kvm_err("kvm: module already exists\n"); | 845 | kvm_err("kvm: module already exists\n"); |
850 | return -EEXIST; | 846 | return -EEXIST; |
851 | } | 847 | } |
852 | 848 | ||
853 | ret = kvm_mips_emulation_init(&kvm_mips_callbacks); | 849 | return kvm_mips_emulation_init(&kvm_mips_callbacks); |
854 | |||
855 | return ret; | ||
856 | } | 850 | } |
857 | 851 | ||
858 | void kvm_arch_exit(void) | 852 | void kvm_arch_exit(void) |
@@ -860,14 +854,14 @@ void kvm_arch_exit(void) | |||
860 | kvm_mips_callbacks = NULL; | 854 | kvm_mips_callbacks = NULL; |
861 | } | 855 | } |
862 | 856 | ||
863 | int | 857 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
864 | kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 858 | struct kvm_sregs *sregs) |
865 | { | 859 | { |
866 | return -ENOIOCTLCMD; | 860 | return -ENOIOCTLCMD; |
867 | } | 861 | } |
868 | 862 | ||
869 | int | 863 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
870 | kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 864 | struct kvm_sregs *sregs) |
871 | { | 865 | { |
872 | return -ENOIOCTLCMD; | 866 | return -ENOIOCTLCMD; |
873 | } | 867 | } |
@@ -923,24 +917,25 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) | |||
923 | if (!vcpu) | 917 | if (!vcpu) |
924 | return -1; | 918 | return -1; |
925 | 919 | ||
926 | printk("VCPU Register Dump:\n"); | 920 | kvm_debug("VCPU Register Dump:\n"); |
927 | printk("\tpc = 0x%08lx\n", vcpu->arch.pc); | 921 | kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); |
928 | printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); | 922 | kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); |
929 | 923 | ||
930 | for (i = 0; i < 32; i += 4) { | 924 | for (i = 0; i < 32; i += 4) { |
931 | printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, | 925 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, |
932 | vcpu->arch.gprs[i], | 926 | vcpu->arch.gprs[i], |
933 | vcpu->arch.gprs[i + 1], | 927 | vcpu->arch.gprs[i + 1], |
934 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); | 928 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); |
935 | } | 929 | } |
936 | printk("\thi: 0x%08lx\n", vcpu->arch.hi); | 930 | kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); |
937 | printk("\tlo: 0x%08lx\n", vcpu->arch.lo); | 931 | kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); |
938 | 932 | ||
939 | cop0 = vcpu->arch.cop0; | 933 | cop0 = vcpu->arch.cop0; |
940 | printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", | 934 | kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", |
941 | kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); | 935 | kvm_read_c0_guest_status(cop0), |
936 | kvm_read_c0_guest_cause(cop0)); | ||
942 | 937 | ||
943 | printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); | 938 | kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); |
944 | 939 | ||
945 | return 0; | 940 | return 0; |
946 | } | 941 | } |
@@ -980,14 +975,11 @@ static void kvm_mips_comparecount_func(unsigned long data) | |||
980 | kvm_mips_callbacks->queue_timer_int(vcpu); | 975 | kvm_mips_callbacks->queue_timer_int(vcpu); |
981 | 976 | ||
982 | vcpu->arch.wait = 0; | 977 | vcpu->arch.wait = 0; |
983 | if (waitqueue_active(&vcpu->wq)) { | 978 | if (waitqueue_active(&vcpu->wq)) |
984 | wake_up_interruptible(&vcpu->wq); | 979 | wake_up_interruptible(&vcpu->wq); |
985 | } | ||
986 | } | 980 | } |
987 | 981 | ||
988 | /* | 982 | /* low level hrtimer wake routine */ |
989 | * low level hrtimer wake routine. | ||
990 | */ | ||
991 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) | 983 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) |
992 | { | 984 | { |
993 | struct kvm_vcpu *vcpu; | 985 | struct kvm_vcpu *vcpu; |
@@ -1008,11 +1000,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1008 | 1000 | ||
1009 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1001 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
1010 | { | 1002 | { |
1011 | return; | ||
1012 | } | 1003 | } |
1013 | 1004 | ||
1014 | int | 1005 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1015 | kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) | 1006 | struct kvm_translation *tr) |
1016 | { | 1007 | { |
1017 | return 0; | 1008 | return 0; |
1018 | } | 1009 | } |
@@ -1023,8 +1014,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1023 | return kvm_mips_callbacks->vcpu_setup(vcpu); | 1014 | return kvm_mips_callbacks->vcpu_setup(vcpu); |
1024 | } | 1015 | } |
1025 | 1016 | ||
1026 | static | 1017 | static void kvm_mips_set_c0_status(void) |
1027 | void kvm_mips_set_c0_status(void) | ||
1028 | { | 1018 | { |
1029 | uint32_t status = read_c0_status(); | 1019 | uint32_t status = read_c0_status(); |
1030 | 1020 | ||
@@ -1054,7 +1044,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1054 | run->exit_reason = KVM_EXIT_UNKNOWN; | 1044 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1055 | run->ready_for_interrupt_injection = 1; | 1045 | run->ready_for_interrupt_injection = 1; |
1056 | 1046 | ||
1057 | /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ | 1047 | /* |
1048 | * Set the appropriate status bits based on host CPU features, | ||
1049 | * before we hit the scheduler | ||
1050 | */ | ||
1058 | kvm_mips_set_c0_status(); | 1051 | kvm_mips_set_c0_status(); |
1059 | 1052 | ||
1060 | local_irq_enable(); | 1053 | local_irq_enable(); |
@@ -1062,7 +1055,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1062 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", | 1055 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", |
1063 | cause, opc, run, vcpu); | 1056 | cause, opc, run, vcpu); |
1064 | 1057 | ||
1065 | /* Do a privilege check, if in UM most of these exit conditions end up | 1058 | /* |
1059 | * Do a privilege check, if in UM most of these exit conditions end up | ||
1066 | * causing an exception to be delivered to the Guest Kernel | 1060 | * causing an exception to be delivered to the Guest Kernel |
1067 | */ | 1061 | */ |
1068 | er = kvm_mips_check_privilege(cause, opc, run, vcpu); | 1062 | er = kvm_mips_check_privilege(cause, opc, run, vcpu); |
@@ -1081,9 +1075,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1081 | ++vcpu->stat.int_exits; | 1075 | ++vcpu->stat.int_exits; |
1082 | trace_kvm_exit(vcpu, INT_EXITS); | 1076 | trace_kvm_exit(vcpu, INT_EXITS); |
1083 | 1077 | ||
1084 | if (need_resched()) { | 1078 | if (need_resched()) |
1085 | cond_resched(); | 1079 | cond_resched(); |
1086 | } | ||
1087 | 1080 | ||
1088 | ret = RESUME_GUEST; | 1081 | ret = RESUME_GUEST; |
1089 | break; | 1082 | break; |
@@ -1095,9 +1088,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1095 | trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); | 1088 | trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); |
1096 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); | 1089 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); |
1097 | /* XXXKYMA: Might need to return to user space */ | 1090 | /* XXXKYMA: Might need to return to user space */ |
1098 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { | 1091 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) |
1099 | ret = RESUME_HOST; | 1092 | ret = RESUME_HOST; |
1100 | } | ||
1101 | break; | 1093 | break; |
1102 | 1094 | ||
1103 | case T_TLB_MOD: | 1095 | case T_TLB_MOD: |
@@ -1107,10 +1099,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1107 | break; | 1099 | break; |
1108 | 1100 | ||
1109 | case T_TLB_ST_MISS: | 1101 | case T_TLB_ST_MISS: |
1110 | kvm_debug | 1102 | kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", |
1111 | ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", | 1103 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, |
1112 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, | 1104 | badvaddr); |
1113 | badvaddr); | ||
1114 | 1105 | ||
1115 | ++vcpu->stat.tlbmiss_st_exits; | 1106 | ++vcpu->stat.tlbmiss_st_exits; |
1116 | trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); | 1107 | trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); |
@@ -1157,10 +1148,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1157 | break; | 1148 | break; |
1158 | 1149 | ||
1159 | default: | 1150 | default: |
1160 | kvm_err | 1151 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1161 | ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", | 1152 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, |
1162 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, | 1153 | kvm_read_c0_guest_status(vcpu->arch.cop0)); |
1163 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | ||
1164 | kvm_arch_vcpu_dump_regs(vcpu); | 1154 | kvm_arch_vcpu_dump_regs(vcpu); |
1165 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 1155 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1166 | ret = RESUME_HOST; | 1156 | ret = RESUME_HOST; |
@@ -1175,7 +1165,7 @@ skip_emul: | |||
1175 | kvm_mips_deliver_interrupts(vcpu, cause); | 1165 | kvm_mips_deliver_interrupts(vcpu, cause); |
1176 | 1166 | ||
1177 | if (!(ret & RESUME_HOST)) { | 1167 | if (!(ret & RESUME_HOST)) { |
1178 | /* Only check for signals if not already exiting to userspace */ | 1168 | /* Only check for signals if not already exiting to userspace */ |
1179 | if (signal_pending(current)) { | 1169 | if (signal_pending(current)) { |
1180 | run->exit_reason = KVM_EXIT_INTR; | 1170 | run->exit_reason = KVM_EXIT_INTR; |
1181 | ret = (-EINTR << 2) | RESUME_HOST; | 1171 | ret = (-EINTR << 2) | RESUME_HOST; |
@@ -1196,11 +1186,13 @@ int __init kvm_mips_init(void) | |||
1196 | if (ret) | 1186 | if (ret) |
1197 | return ret; | 1187 | return ret; |
1198 | 1188 | ||
1199 | /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. | 1189 | /* |
1200 | * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) | 1190 | * On MIPS, kernel modules are executed from "mapped space", which |
1201 | * to avoid the possibility of double faulting. The issue is that the TLB code | 1191 | * requires TLBs. The TLB handling code is statically linked with |
1202 | * references routines that are part of the KVM module, | 1192 | * double faulting. The issue is that the TLB code references |
1203 | * which are only available once the module is loaded. | 1193 | * routines that are part of the KVM module, which are only |
1194 | * routines that are part of the the KVM module, which are only | ||
1195 | * available once the module is loaded. | ||
1204 | */ | 1196 | */ |
1205 | kvm_mips_gfn_to_pfn = gfn_to_pfn; | 1197 | kvm_mips_gfn_to_pfn = gfn_to_pfn; |
1206 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; | 1198 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; |
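The two assignments above are one half of a function-pointer indirection: the TLB code is built into the kernel (see tlb.c below) and can only reach module-provided helpers through pointers filled in at module init. A small sketch of the pattern, with illustrative names:

    /* built-in side: NULL until the module loads */
    typedef unsigned long pfn_sk_t;
    pfn_sk_t (*gfn_to_pfn_hook)(unsigned long gfn);

    /* module side: the real implementation, registered at init */
    static pfn_sk_t real_gfn_to_pfn(unsigned long gfn) { return gfn + 0x100; }

    static int module_init_sk(void)
    {
            gfn_to_pfn_hook = real_gfn_to_pfn; /* as kvm_mips_init() does above */
            return 0;
    }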
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h new file mode 100644 index 000000000000..03a6ae84c7df --- /dev/null +++ b/arch/mips/kvm/opcode.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
8 | */ | ||
9 | |||
10 | /* Define opcode values not defined in <asm/inst.h> */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_OPCODE_H__ | ||
13 | #define __KVM_MIPS_OPCODE_H__ | ||
14 | |||
15 | /* COP0 Ops */ | ||
16 | #define mfmcz_op 0x0b /* 01011 */ | ||
17 | #define wrpgpr_op 0x0e /* 01110 */ | ||
18 | |||
19 | /* COP0 opcodes (only if COP0 and CO=1): */ | ||
20 | #define wait_op 0x20 /* 100000 */ | ||
21 | |||
22 | #endif /* __KVM_MIPS_OPCODE_H__ */ | ||
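The values in opcode.h are field encodings rather than whole instructions: mfmcz_op and wrpgpr_op live in the rs field (bits 25:21) of a COP0 instruction, while wait_op sits in the function field (bits 5:0) and is only meaningful when the CO bit (25) is set. A sketch of testing a word for WAIT under those assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define OP_COP0   0x10        /* bits 31:26 */
    #define COP0_CO   (1u << 25)
    #define WAIT_FUNC 0x20        /* bits 5:0, valid only with CO set */

    static int insn_is_wait(uint32_t insn)
    {
            return (insn >> 26) == OP_COP0 &&
                   (insn & COP0_CO) &&
                   (insn & 0x3f) == WAIT_FUNC;
    }

    int main(void)
    {
            printf("%d\n", insn_is_wait(0x42000020)); /* canonical WAIT -> 1 */
            return 0;
    }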
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/stats.c index 075904bcac1b..a74d6024c5ad 100644 --- a/arch/mips/kvm/kvm_mips_stats.c +++ b/arch/mips/kvm/stats.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: COP0 access histogram | 6 | * KVM/MIPS: COP0 access histogram |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kvm_host.h> | 12 | #include <linux/kvm_host.h> |
13 | 13 | ||
@@ -63,20 +63,18 @@ char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { | |||
63 | "DESAVE" | 63 | "DESAVE" |
64 | }; | 64 | }; |
65 | 65 | ||
66 | int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) | 66 | void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) |
67 | { | 67 | { |
68 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | 68 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS |
69 | int i, j; | 69 | int i, j; |
70 | 70 | ||
71 | printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); | 71 | kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); |
72 | for (i = 0; i < N_MIPS_COPROC_REGS; i++) { | 72 | for (i = 0; i < N_MIPS_COPROC_REGS; i++) { |
73 | for (j = 0; j < N_MIPS_COPROC_SEL; j++) { | 73 | for (j = 0; j < N_MIPS_COPROC_SEL; j++) { |
74 | if (vcpu->arch.cop0->stat[i][j]) | 74 | if (vcpu->arch.cop0->stat[i][j]) |
75 | printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, | 75 | kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, |
76 | vcpu->arch.cop0->stat[i][j]); | 76 | vcpu->arch.cop0->stat[i][j]); |
77 | } | 77 | } |
78 | } | 78 | } |
79 | #endif | 79 | #endif |
80 | |||
81 | return 0; | ||
82 | } | 80 | } |
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/tlb.c index 8a5a700ad8de..bbcd82242059 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -1,14 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that | 6 | * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that |
7 | * TLB handlers run from KSEG0 | 7 | * TLB handlers run from KSEG0 |
8 | * | 8 | * |
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <linux/srcu.h> | 19 | #include <linux/srcu.h> |
20 | 20 | ||
21 | |||
22 | #include <asm/cpu.h> | 21 | #include <asm/cpu.h> |
23 | #include <asm/bootinfo.h> | 22 | #include <asm/bootinfo.h> |
24 | #include <asm/mmu_context.h> | 23 | #include <asm/mmu_context.h> |
@@ -39,13 +38,13 @@ atomic_t kvm_mips_instance; | |||
39 | EXPORT_SYMBOL(kvm_mips_instance); | 38 | EXPORT_SYMBOL(kvm_mips_instance); |
40 | 39 | ||
41 | /* These function pointers are initialized once the KVM module is loaded */ | 40 | /* These function pointers are initialized once the KVM module is loaded */ |
42 | pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); | 41 | pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); |
43 | EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); | 42 | EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); |
44 | 43 | ||
45 | void (*kvm_mips_release_pfn_clean) (pfn_t pfn); | 44 | void (*kvm_mips_release_pfn_clean)(pfn_t pfn); |
46 | EXPORT_SYMBOL(kvm_mips_release_pfn_clean); | 45 | EXPORT_SYMBOL(kvm_mips_release_pfn_clean); |
47 | 46 | ||
48 | bool(*kvm_mips_is_error_pfn) (pfn_t pfn); | 47 | bool (*kvm_mips_is_error_pfn)(pfn_t pfn); |
49 | EXPORT_SYMBOL(kvm_mips_is_error_pfn); | 48 | EXPORT_SYMBOL(kvm_mips_is_error_pfn); |
50 | 49 | ||
51 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 50 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
@@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | |||
53 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; | 52 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; |
54 | } | 53 | } |
55 | 54 | ||
56 | |||
57 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 55 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
58 | { | 56 | { |
59 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; | 57 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; |
60 | } | 58 | } |
61 | 59 | ||
62 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) | 60 | inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) |
63 | { | 61 | { |
64 | return vcpu->kvm->arch.commpage_tlb; | 62 | return vcpu->kvm->arch.commpage_tlb; |
65 | } | 63 | } |
66 | 64 | ||
67 | 65 | /* Structure defining a TLB entry data set. */ |
68 | /* | ||
69 | * Structure defining a TLB entry data set. | ||
70 | */ | ||
71 | 66 | ||
72 | void kvm_mips_dump_host_tlbs(void) | 67 | void kvm_mips_dump_host_tlbs(void) |
73 | { | 68 | { |
@@ -82,8 +77,8 @@ void kvm_mips_dump_host_tlbs(void) | |||
82 | old_entryhi = read_c0_entryhi(); | 77 | old_entryhi = read_c0_entryhi(); |
83 | old_pagemask = read_c0_pagemask(); | 78 | old_pagemask = read_c0_pagemask(); |
84 | 79 | ||
85 | printk("HOST TLBs:\n"); | 80 | kvm_info("HOST TLBs:\n"); |
86 | printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); | 81 | kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); |
87 | 82 | ||
88 | for (i = 0; i < current_cpu_data.tlbsize; i++) { | 83 | for (i = 0; i < current_cpu_data.tlbsize; i++) { |
89 | write_c0_index(i); | 84 | write_c0_index(i); |
@@ -97,25 +92,26 @@ void kvm_mips_dump_host_tlbs(void) | |||
97 | tlb.tlb_lo1 = read_c0_entrylo1(); | 92 | tlb.tlb_lo1 = read_c0_entrylo1(); |
98 | tlb.tlb_mask = read_c0_pagemask(); | 93 | tlb.tlb_mask = read_c0_pagemask(); |
99 | 94 | ||
100 | printk("TLB%c%3d Hi 0x%08lx ", | 95 | kvm_info("TLB%c%3d Hi 0x%08lx ", |
101 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', | 96 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', |
102 | i, tlb.tlb_hi); | 97 | i, tlb.tlb_hi); |
103 | printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", | 98 | kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", |
104 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), | 99 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), |
105 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', | 100 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', |
106 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', | 101 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', |
107 | (tlb.tlb_lo0 >> 3) & 7); | 102 | (tlb.tlb_lo0 >> 3) & 7); |
108 | printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", | 103 | kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", |
109 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), | 104 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), |
110 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', | 105 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', |
111 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', | 106 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', |
112 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); | 107 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); |
113 | } | 108 | } |
114 | write_c0_entryhi(old_entryhi); | 109 | write_c0_entryhi(old_entryhi); |
115 | write_c0_pagemask(old_pagemask); | 110 | write_c0_pagemask(old_pagemask); |
116 | mtc0_tlbw_hazard(); | 111 | mtc0_tlbw_hazard(); |
117 | local_irq_restore(flags); | 112 | local_irq_restore(flags); |
118 | } | 113 | } |
114 | EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); | ||
119 | 115 | ||
120 | void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | 116 | void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) |
121 | { | 117 | { |
@@ -123,26 +119,27 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | |||
123 | struct kvm_mips_tlb tlb; | 119 | struct kvm_mips_tlb tlb; |
124 | int i; | 120 | int i; |
125 | 121 | ||
126 | printk("Guest TLBs:\n"); | 122 | kvm_info("Guest TLBs:\n"); |
127 | printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); | 123 | kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); |
128 | 124 | ||
129 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 125 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
130 | tlb = vcpu->arch.guest_tlb[i]; | 126 | tlb = vcpu->arch.guest_tlb[i]; |
131 | printk("TLB%c%3d Hi 0x%08lx ", | 127 | kvm_info("TLB%c%3d Hi 0x%08lx ", |
132 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', | 128 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', |
133 | i, tlb.tlb_hi); | 129 | i, tlb.tlb_hi); |
134 | printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", | 130 | kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", |
135 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), | 131 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), |
136 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', | 132 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', |
137 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', | 133 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', |
138 | (tlb.tlb_lo0 >> 3) & 7); | 134 | (tlb.tlb_lo0 >> 3) & 7); |
139 | printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", | 135 | kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", |
140 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), | 136 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), |
141 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', | 137 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', |
142 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', | 138 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', |
143 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); | 139 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); |
144 | } | 140 | } |
145 | } | 141 | } |
142 | EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); | ||
146 | 143 | ||
147 | static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) | 144 | static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) |
148 | { | 145 | { |
@@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) | |||
152 | if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) | 149 | if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) |
153 | return 0; | 150 | return 0; |
154 | 151 | ||
155 | srcu_idx = srcu_read_lock(&kvm->srcu); | 152 | srcu_idx = srcu_read_lock(&kvm->srcu); |
156 | pfn = kvm_mips_gfn_to_pfn(kvm, gfn); | 153 | pfn = kvm_mips_gfn_to_pfn(kvm, gfn); |
157 | 154 | ||
158 | if (kvm_mips_is_error_pfn(pfn)) { | 155 | if (kvm_mips_is_error_pfn(pfn)) { |
@@ -169,7 +166,7 @@ out: | |||
169 | 166 | ||
170 | /* Translate guest KSEG0 addresses to Host PA */ | 167 | /* Translate guest KSEG0 addresses to Host PA */ |
171 | unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | 168 | unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, |
172 | unsigned long gva) | 169 | unsigned long gva) |
173 | { | 170 | { |
174 | gfn_t gfn; | 171 | gfn_t gfn; |
175 | uint32_t offset = gva & ~PAGE_MASK; | 172 | uint32_t offset = gva & ~PAGE_MASK; |
@@ -194,20 +191,20 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | |||
194 | 191 | ||
195 | return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; | 192 | return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; |
196 | } | 193 | } |
194 | EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); | ||
197 | 195 | ||
198 | /* XXXKYMA: Must be called with interrupts disabled */ | 196 | /* XXXKYMA: Must be called with interrupts disabled */ |
199 | /* set flush_dcache_mask == 0 if no dcache flush required */ | 197 | /* set flush_dcache_mask == 0 if no dcache flush required */ |
200 | int | 198 | int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, |
201 | kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | 199 | unsigned long entrylo0, unsigned long entrylo1, |
202 | unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) | 200 | int flush_dcache_mask) |
203 | { | 201 | { |
204 | unsigned long flags; | 202 | unsigned long flags; |
205 | unsigned long old_entryhi; | 203 | unsigned long old_entryhi; |
206 | volatile int idx; | 204 | int idx; |
207 | 205 | ||
208 | local_irq_save(flags); | 206 | local_irq_save(flags); |
209 | 207 | ||
210 | |||
211 | old_entryhi = read_c0_entryhi(); | 208 | old_entryhi = read_c0_entryhi(); |
212 | write_c0_entryhi(entryhi); | 209 | write_c0_entryhi(entryhi); |
213 | mtc0_tlbw_hazard(); | 210 | mtc0_tlbw_hazard(); |
@@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
240 | if (flush_dcache_mask) { | 237 | if (flush_dcache_mask) { |
241 | if (entrylo0 & MIPS3_PG_V) { | 238 | if (entrylo0 & MIPS3_PG_V) { |
242 | ++vcpu->stat.flush_dcache_exits; | 239 | ++vcpu->stat.flush_dcache_exits; |
243 | flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); | 240 | flush_data_cache_page((entryhi & VPN2_MASK) & |
241 | ~flush_dcache_mask); | ||
244 | } | 242 | } |
245 | if (entrylo1 & MIPS3_PG_V) { | 243 | if (entrylo1 & MIPS3_PG_V) { |
246 | ++vcpu->stat.flush_dcache_exits; | 244 | ++vcpu->stat.flush_dcache_exits; |
247 | flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | | 245 | flush_data_cache_page(((entryhi & VPN2_MASK) & |
248 | (0x1 << PAGE_SHIFT)); | 246 | ~flush_dcache_mask) | |
247 | (0x1 << PAGE_SHIFT)); | ||
249 | } | 248 | } |
250 | } | 249 | } |
251 | 250 | ||
@@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
257 | return 0; | 256 | return 0; |
258 | } | 257 | } |
259 | 258 | ||
260 | |||
261 | /* XXXKYMA: Must be called with interrupts disabled */ | 259 | /* XXXKYMA: Must be called with interrupts disabled */ |
262 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | 260 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, |
263 | struct kvm_vcpu *vcpu) | 261 | struct kvm_vcpu *vcpu) |
264 | { | 262 | { |
265 | gfn_t gfn; | 263 | gfn_t gfn; |
266 | pfn_t pfn0, pfn1; | 264 | pfn_t pfn0, pfn1; |
@@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
270 | struct kvm *kvm = vcpu->kvm; | 268 | struct kvm *kvm = vcpu->kvm; |
271 | const int flush_dcache_mask = 0; | 269 | const int flush_dcache_mask = 0; |
272 | 270 | ||
273 | |||
274 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { | 271 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { |
275 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); | 272 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); |
276 | kvm_mips_dump_host_tlbs(); | 273 | kvm_mips_dump_host_tlbs(); |
@@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
302 | } | 299 | } |
303 | 300 | ||
304 | entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); | 301 | entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); |
305 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 302 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
306 | (0x1 << 1); | 303 | (1 << 2) | (0x1 << 1); |
307 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 304 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | |
308 | (0x1 << 1); | 305 | (1 << 2) | (0x1 << 1); |
309 | 306 | ||
310 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | 307 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, |
311 | flush_dcache_mask); | 308 | flush_dcache_mask); |
312 | } | 309 | } |
310 | EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); | ||
313 | 311 | ||
314 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | 312 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, |
315 | struct kvm_vcpu *vcpu) | 313 | struct kvm_vcpu *vcpu) |
@@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
318 | unsigned long flags, old_entryhi = 0, vaddr = 0; | 316 | unsigned long flags, old_entryhi = 0, vaddr = 0; |
319 | unsigned long entrylo0 = 0, entrylo1 = 0; | 317 | unsigned long entrylo0 = 0, entrylo1 = 0; |
320 | 318 | ||
321 | |||
322 | pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; | 319 | pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; |
323 | pfn1 = 0; | 320 | pfn1 = 0; |
324 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 321 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
325 | (0x1 << 1); | 322 | (1 << 2) | (0x1 << 1); |
326 | entrylo1 = 0; | 323 | entrylo1 = 0; |
327 | 324 | ||
328 | local_irq_save(flags); | 325 | local_irq_save(flags); |
@@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
341 | mtc0_tlbw_hazard(); | 338 | mtc0_tlbw_hazard(); |
342 | tlbw_use_hazard(); | 339 | tlbw_use_hazard(); |
343 | 340 | ||
344 | kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", | 341 | kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", |
345 | vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), | 342 | vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), |
346 | read_c0_entrylo0(), read_c0_entrylo1()); | 343 | read_c0_entrylo0(), read_c0_entrylo1()); |
347 | 344 | ||
348 | /* Restore old ASID */ | 345 | /* Restore old ASID */ |
349 | write_c0_entryhi(old_entryhi); | 346 | write_c0_entryhi(old_entryhi); |
@@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
353 | 350 | ||
354 | return 0; | 351 | return 0; |
355 | } | 352 | } |
353 | EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); | ||
356 | 354 | ||
357 | int | 355 | int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
358 | kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | 356 | struct kvm_mips_tlb *tlb, |
359 | struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) | 357 | unsigned long *hpa0, |
358 | unsigned long *hpa1) | ||
360 | { | 359 | { |
361 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; | 360 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; |
362 | struct kvm *kvm = vcpu->kvm; | 361 | struct kvm *kvm = vcpu->kvm; |
363 | pfn_t pfn0, pfn1; | 362 | pfn_t pfn0, pfn1; |
364 | 363 | ||
365 | |||
366 | if ((tlb->tlb_hi & VPN2_MASK) == 0) { | 364 | if ((tlb->tlb_hi & VPN2_MASK) == 0) { |
367 | pfn0 = 0; | 365 | pfn0 = 0; |
368 | pfn1 = 0; | 366 | pfn1 = 0; |
369 | } else { | 367 | } else { |
370 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) | 368 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
369 | >> PAGE_SHIFT) < 0) | ||
371 | return -1; | 370 | return -1; |
372 | 371 | ||
373 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) | 372 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) |
373 | >> PAGE_SHIFT) < 0) | ||
374 | return -1; | 374 | return -1; |
375 | 375 | ||
376 | pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; | 376 | pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
377 | pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; | 377 | >> PAGE_SHIFT]; |
378 | pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) | ||
379 | >> PAGE_SHIFT]; | ||
378 | } | 380 | } |
379 | 381 | ||
380 | if (hpa0) | 382 | if (hpa0) |
@@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
385 | 387 | ||
386 | /* Get attributes from the Guest TLB */ | 388 | /* Get attributes from the Guest TLB */ |
387 | entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? | 389 | entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? |
388 | kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); | 390 | kvm_mips_get_kernel_asid(vcpu) : |
391 | kvm_mips_get_user_asid(vcpu)); | ||
389 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | | 392 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
390 | (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); | 393 | (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); |
391 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | | 394 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | |
392 | (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); | 395 | (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); |
393 | 396 | ||
394 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, | 397 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, |
395 | tlb->tlb_lo0, tlb->tlb_lo1); | 398 | tlb->tlb_lo0, tlb->tlb_lo1); |
@@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
397 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | 400 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, |
398 | tlb->tlb_mask); | 401 | tlb->tlb_mask); |
399 | } | 402 | } |
403 | EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); | ||
400 | 404 | ||
401 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | 405 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) |
402 | { | 406 | { |
@@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
404 | int index = -1; | 408 | int index = -1; |
405 | struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; | 409 | struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; |
406 | 410 | ||
407 | |||
408 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 411 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
409 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && | 412 | if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && |
410 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { | 413 | TLB_HI_ASID_HIT(tlb[i], entryhi)) { |
411 | index = i; | 414 | index = i; |
412 | break; | 415 | break; |
413 | } | 416 | } |
@@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
418 | 421 | ||
419 | return index; | 422 | return index; |
420 | } | 423 | } |
424 | EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); | ||
421 | 425 | ||
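The new TLB_HI_VPN2_HIT()/TLB_HI_ASID_HIT() helpers fold up exactly the open-coded test on the removed lines: a VPN2 comparison under the entry's page mask, then a global-or-ASID match. A sketch of equivalent helpers, with simplified types and mask values assumed for illustration:

    struct tlb_ent_sk { unsigned long hi, mask; int global; };

    #define VPN2_MASK_SK 0xffffe000ul /* illustrative */
    #define ASID_MASK_SK 0x000000fful /* illustrative */

    static int vpn2_hit(const struct tlb_ent_sk *t, unsigned long entryhi)
    {
            return ((t->hi & VPN2_MASK_SK) & ~t->mask) ==
                   ((entryhi & VPN2_MASK_SK) & ~t->mask);
    }

    static int asid_hit(const struct tlb_ent_sk *t, unsigned long entryhi)
    {
            return t->global ||
                   (t->hi & ASID_MASK_SK) == (entryhi & ASID_MASK_SK);
    }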
422 | int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) | 426 | int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) |
423 | { | 427 | { |
424 | unsigned long old_entryhi, flags; | 428 | unsigned long old_entryhi, flags; |
425 | volatile int idx; | 429 | int idx; |
426 | |||
427 | 430 | ||
428 | local_irq_save(flags); | 431 | local_irq_save(flags); |
429 | 432 | ||
430 | old_entryhi = read_c0_entryhi(); | 433 | old_entryhi = read_c0_entryhi(); |
431 | 434 | ||
432 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 435 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
433 | write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); | 436 | write_c0_entryhi((vaddr & VPN2_MASK) | |
437 | kvm_mips_get_kernel_asid(vcpu)); | ||
434 | else { | 438 | else { |
435 | write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); | 439 | write_c0_entryhi((vaddr & VPN2_MASK) | |
440 | kvm_mips_get_user_asid(vcpu)); | ||
436 | } | 441 | } |
437 | 442 | ||
438 | mtc0_tlbw_hazard(); | 443 | mtc0_tlbw_hazard(); |
@@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) | |||
452 | 457 | ||
453 | return idx; | 458 | return idx; |
454 | } | 459 | } |
460 | EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); | ||
455 | 461 | ||
456 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | 462 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) |
457 | { | 463 | { |
@@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
460 | 466 | ||
461 | local_irq_save(flags); | 467 | local_irq_save(flags); |
462 | 468 | ||
463 | |||
464 | old_entryhi = read_c0_entryhi(); | 469 | old_entryhi = read_c0_entryhi(); |
465 | 470 | ||
466 | write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); | 471 | write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); |
@@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
499 | 504 | ||
500 | return 0; | 505 | return 0; |
501 | } | 506 | } |
507 | EXPORT_SYMBOL(kvm_mips_host_tlb_inv); | ||
502 | 508 | ||
503 | /* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ | 509 | /* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */ |
504 | int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) | 510 | int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) |
505 | { | 511 | { |
506 | unsigned long flags, old_entryhi; | 512 | unsigned long flags, old_entryhi; |
@@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) | |||
510 | 516 | ||
511 | local_irq_save(flags); | 517 | local_irq_save(flags); |
512 | 518 | ||
513 | |||
514 | old_entryhi = read_c0_entryhi(); | 519 | old_entryhi = read_c0_entryhi(); |
515 | 520 | ||
516 | write_c0_entryhi(UNIQUE_ENTRYHI(index)); | 521 | write_c0_entryhi(UNIQUE_ENTRYHI(index)); |
@@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
546 | int entry = 0; | 551 | int entry = 0; |
547 | int maxentry = current_cpu_data.tlbsize; | 552 | int maxentry = current_cpu_data.tlbsize; |
548 | 553 | ||
549 | |||
550 | local_irq_save(flags); | 554 | local_irq_save(flags); |
551 | 555 | ||
552 | old_entryhi = read_c0_entryhi(); | 556 | old_entryhi = read_c0_entryhi(); |
@@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
554 | 558 | ||
555 | /* Blast 'em all away. */ | 559 | /* Blast 'em all away. */ |
556 | for (entry = 0; entry < maxentry; entry++) { | 560 | for (entry = 0; entry < maxentry; entry++) { |
557 | |||
558 | write_c0_index(entry); | 561 | write_c0_index(entry); |
559 | mtc0_tlbw_hazard(); | 562 | mtc0_tlbw_hazard(); |
560 | 563 | ||
@@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
565 | entryhi = read_c0_entryhi(); | 568 | entryhi = read_c0_entryhi(); |
566 | 569 | ||
567 | /* Don't blow away guest kernel entries */ | 570 | /* Don't blow away guest kernel entries */ |
568 | if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { | 571 | if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) |
569 | continue; | 572 | continue; |
570 | } | ||
571 | } | 573 | } |
572 | 574 | ||
573 | /* Make sure all entries differ. */ | 575 | /* Make sure all entries differ. */ |
@@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
591 | 593 | ||
592 | local_irq_restore(flags); | 594 | local_irq_restore(flags); |
593 | } | 595 | } |
596 | EXPORT_SYMBOL(kvm_mips_flush_host_tlb); | ||
594 | 597 | ||
595 | void | 598 | void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, |
596 | kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | 599 | struct kvm_vcpu *vcpu) |
597 | struct kvm_vcpu *vcpu) | ||
598 | { | 600 | { |
599 | unsigned long asid = asid_cache(cpu); | 601 | unsigned long asid = asid_cache(cpu); |
600 | 602 | ||
601 | if (!((asid += ASID_INC) & ASID_MASK)) { | 603 | asid += ASID_INC; |
602 | if (cpu_has_vtag_icache) { | 604 | if (!(asid & ASID_MASK)) { |
605 | if (cpu_has_vtag_icache) | ||
603 | flush_icache_all(); | 606 | flush_icache_all(); |
604 | } | ||
605 | 607 | ||
606 | kvm_local_flush_tlb_all(); /* start new asid cycle */ | 608 | kvm_local_flush_tlb_all(); /* start new asid cycle */ |
607 | 609 | ||
@@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void) | |||
639 | 641 | ||
640 | local_irq_restore(flags); | 642 | local_irq_restore(flags); |
641 | } | 643 | } |
644 | EXPORT_SYMBOL(kvm_local_flush_tlb_all); | ||
642 | 645 | ||
643 | /** | 646 | /** |
644 | * kvm_mips_migrate_count() - Migrate timer. | 647 | * kvm_mips_migrate_count() - Migrate timer. |
@@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
699 | } | 702 | } |
700 | 703 | ||
701 | if (!newasid) { | 704 | if (!newasid) { |
702 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ | 705 | /* |
706 | * If we preempted while the guest was executing, then reload | ||
707 | * the pre-empted ASID | ||
708 | */ | ||
703 | if (current->flags & PF_VCPU) { | 709 | if (current->flags & PF_VCPU) { |
704 | write_c0_entryhi(vcpu->arch. | 710 | write_c0_entryhi(vcpu->arch. |
705 | preempt_entryhi & ASID_MASK); | 711 | preempt_entryhi & ASID_MASK); |
@@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
708 | } else { | 714 | } else { |
709 | /* New ASIDs were allocated for the VM */ | 715 | /* New ASIDs were allocated for the VM */ |
710 | 716 | ||
711 | /* Were we in guest context? If so then the pre-empted ASID is no longer | 717 | /* |
712 | * valid, we need to set it to what it should be based on the mode of | 718 | * Were we in guest context? If so then the pre-empted ASID is |
713 | * the Guest (Kernel/User) | 719 | * no longer valid, we need to set it to what it should be based |
720 | * on the mode of the Guest (Kernel/User) | ||
714 | */ | 721 | */ |
715 | if (current->flags & PF_VCPU) { | 722 | if (current->flags & PF_VCPU) { |
716 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 723 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
@@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
728 | local_irq_restore(flags); | 735 | local_irq_restore(flags); |
729 | 736 | ||
730 | } | 737 | } |
738 | EXPORT_SYMBOL(kvm_arch_vcpu_load); | ||
731 | 739 | ||
732 | /* ASID can change if another task is scheduled during preemption */ | 740 | /* ASID can change if another task is scheduled during preemption */ |
733 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 741 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
@@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
739 | 747 | ||
740 | cpu = smp_processor_id(); | 748 | cpu = smp_processor_id(); |
741 | 749 | ||
742 | |||
743 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); | 750 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); |
744 | vcpu->arch.last_sched_cpu = cpu; | 751 | vcpu->arch.last_sched_cpu = cpu; |
745 | 752 | ||
@@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
754 | 761 | ||
755 | local_irq_restore(flags); | 762 | local_irq_restore(flags); |
756 | } | 763 | } |
764 | EXPORT_SYMBOL(kvm_arch_vcpu_put); | ||
757 | 765 | ||
758 | uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | 766 | uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) |
759 | { | 767 | { |
760 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 768 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
761 | unsigned long paddr, flags; | 769 | unsigned long paddr, flags, vpn2, asid; |
762 | uint32_t inst; | 770 | uint32_t inst; |
763 | int index; | 771 | int index; |
764 | 772 | ||
@@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
769 | if (index >= 0) { | 777 | if (index >= 0) { |
770 | inst = *(opc); | 778 | inst = *(opc); |
771 | } else { | 779 | } else { |
772 | index = | 780 | vpn2 = (unsigned long) opc & VPN2_MASK; |
773 | kvm_mips_guest_tlb_lookup(vcpu, | 781 | asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK; |
774 | ((unsigned long) opc & VPN2_MASK) | 782 | index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid); |
775 | | | ||
776 | (kvm_read_c0_guest_entryhi | ||
777 | (cop0) & ASID_MASK)); | ||
778 | if (index < 0) { | 783 | if (index < 0) { |
779 | kvm_err | 784 | kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", |
780 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", | 785 | __func__, opc, vcpu, read_c0_entryhi()); |
781 | __func__, opc, vcpu, read_c0_entryhi()); | ||
782 | kvm_mips_dump_host_tlbs(); | 786 | kvm_mips_dump_host_tlbs(); |
783 | local_irq_restore(flags); | 787 | local_irq_restore(flags); |
784 | return KVM_INVALID_INST; | 788 | return KVM_INVALID_INST; |
@@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
793 | } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { | 797 | } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { |
794 | paddr = | 798 | paddr = |
795 | kvm_mips_translate_guest_kseg0_to_hpa(vcpu, | 799 | kvm_mips_translate_guest_kseg0_to_hpa(vcpu, |
796 | (unsigned long) opc); | 800 | (unsigned long) opc); |
797 | inst = *(uint32_t *) CKSEG0ADDR(paddr); | 801 | inst = *(uint32_t *) CKSEG0ADDR(paddr); |
798 | } else { | 802 | } else { |
799 | kvm_err("%s: illegal address: %p\n", __func__, opc); | 803 | kvm_err("%s: illegal address: %p\n", __func__, opc); |
@@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
802 | 806 | ||
803 | return inst; | 807 | return inst; |
804 | } | 808 | } |
805 | |||
806 | EXPORT_SYMBOL(kvm_local_flush_tlb_all); | ||
807 | EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); | ||
808 | EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); | ||
809 | EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); | ||
810 | EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); | ||
811 | EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); | ||
812 | EXPORT_SYMBOL(kvm_mips_flush_host_tlb); | ||
813 | EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); | ||
814 | EXPORT_SYMBOL(kvm_mips_host_tlb_inv); | ||
815 | EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); | ||
816 | EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); | ||
817 | EXPORT_SYMBOL(kvm_get_inst); | 809 | EXPORT_SYMBOL(kvm_get_inst); |
818 | EXPORT_SYMBOL(kvm_arch_vcpu_load); | ||
819 | EXPORT_SYMBOL(kvm_arch_vcpu_put); | ||
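The tlb.c changes above also move each EXPORT_SYMBOL() next to the function it exports, and the guest TLB lookup now uses TLB_HI_VPN2_HIT()/TLB_HI_ASID_HIT() in place of the open-coded VPN2 and ASID comparisons. A minimal sketch of what such helpers expand to, assuming the same TLB_VPN2/TLB_ASID/TLB_IS_GLOBAL accessors the old expression used (the in-tree definitions may differ in detail):

	/* Hypothetical expansions, for illustration only. */
	#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) == \
					 (((y) & VPN2_MASK) & ~(x).tlb_mask))
	#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) || \
					 (TLB_ASID(x) == ((y) & ASID_MASK)))

Either a global entry or a matching ASID counts as a hit, which is exactly what the removed expression spelled out inline.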
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index bc9e0f406c08..c1388d40663b 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h | |||
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | 10 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) |
11 | #define _TRACE_KVM_H | 11 | #define _TRACE_KVM_H |
@@ -17,9 +17,7 @@ | |||
17 | #define TRACE_INCLUDE_PATH . | 17 | #define TRACE_INCLUDE_PATH . |
18 | #define TRACE_INCLUDE_FILE trace | 18 | #define TRACE_INCLUDE_FILE trace |
19 | 19 | ||
20 | /* | 20 | /* Tracepoints for VM exits */
21 | * Tracepoints for VM exits | ||
22 | */ | ||
23 | extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; | 21 | extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; |
24 | 22 | ||
25 | TRACE_EVENT(kvm_exit, | 23 | TRACE_EVENT(kvm_exit, |
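For reference, the declarations in this header follow the usual TRACE_EVENT() tracepoint pattern. A skeletal, hypothetical example of that shape (not the file's exact kvm_exit definition):

	TRACE_EVENT(kvm_exit_example,		/* hypothetical name */
		TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
		TP_ARGS(vcpu, reason),
		TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(unsigned int, reason)
		),
		TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->reason = reason;
		),
		TP_printk("[%s] PC: 0x%08lx",
			  kvm_mips_exit_types_str[__entry->reason],
			  __entry->pc)
	);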
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/trap_emul.c index 693f952b2fbb..fd7257b70e65 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/trap_emul.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | 6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -16,8 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | 18 | ||
19 | #include "kvm_mips_opcode.h" | 19 | #include "opcode.h" |
20 | #include "kvm_mips_int.h" | 20 | #include "interrupt.h" |
21 | 21 | ||
22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | 22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) |
23 | { | 23 | { |
@@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | 27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) |
28 | gpa = CPHYSADDR(gva); | 28 | gpa = CPHYSADDR(gva); |
29 | else { | 29 | else { |
30 | printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); | 30 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
31 | kvm_mips_dump_host_tlbs(); | 31 | kvm_mips_dump_host_tlbs(); |
32 | gpa = KVM_INVALID_ADDR; | 32 | gpa = KVM_INVALID_ADDR; |
33 | } | 33 | } |
@@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
37 | return gpa; | 37 | return gpa; |
38 | } | 38 | } |
39 | 39 | ||
40 | |||
41 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | 40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
42 | { | 41 | { |
43 | struct kvm_run *run = vcpu->run; | 42 | struct kvm_run *run = vcpu->run; |
@@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | |||
46 | enum emulation_result er = EMULATE_DONE; | 45 | enum emulation_result er = EMULATE_DONE; |
47 | int ret = RESUME_GUEST; | 46 | int ret = RESUME_GUEST; |
48 | 47 | ||
49 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { | 48 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) |
50 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | 49 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); |
51 | } else | 50 | else |
52 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 51 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
53 | 52 | ||
54 | switch (er) { | 53 | switch (er) { |
@@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |||
83 | 82 | ||
84 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 83 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
85 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 84 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
86 | kvm_debug | 85 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
87 | ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 86 | cause, opc, badvaddr); |
88 | cause, opc, badvaddr); | ||
89 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); | 87 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); |
90 | 88 | ||
91 | if (er == EMULATE_DONE) | 89 | if (er == EMULATE_DONE) |
@@ -95,20 +93,20 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |||
95 | ret = RESUME_HOST; | 93 | ret = RESUME_HOST; |
96 | } | 94 | } |
97 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 95 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
98 | /* XXXKYMA: The guest kernel does not expect to get this fault when we are not | 96 | /* |
99 | * using HIGHMEM. Need to address this in a HIGHMEM kernel | 97 | * XXXKYMA: The guest kernel does not expect to get this fault |
98 | * when we are not using HIGHMEM. Need to address this in a | ||
99 | * HIGHMEM kernel | ||
100 | */ | 100 | */ |
101 | printk | 101 | kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
102 | ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 102 | cause, opc, badvaddr); |
103 | cause, opc, badvaddr); | ||
104 | kvm_mips_dump_host_tlbs(); | 103 | kvm_mips_dump_host_tlbs(); |
105 | kvm_arch_vcpu_dump_regs(vcpu); | 104 | kvm_arch_vcpu_dump_regs(vcpu); |
106 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 105 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
107 | ret = RESUME_HOST; | 106 | ret = RESUME_HOST; |
108 | } else { | 107 | } else { |
109 | printk | 108 | kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
110 | ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", | 109 | cause, opc, badvaddr); |
111 | cause, opc, badvaddr); | ||
112 | kvm_mips_dump_host_tlbs(); | 110 | kvm_mips_dump_host_tlbs(); |
113 | kvm_arch_vcpu_dump_regs(vcpu); | 111 | kvm_arch_vcpu_dump_regs(vcpu); |
114 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 112 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -134,9 +132,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
134 | } | 132 | } |
135 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 133 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
136 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 134 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
137 | kvm_debug | 135 | kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
138 | ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 136 | cause, opc, badvaddr); |
139 | cause, opc, badvaddr); | ||
140 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 137 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
141 | if (er == EMULATE_DONE) | 138 | if (er == EMULATE_DONE) |
142 | ret = RESUME_GUEST; | 139 | ret = RESUME_GUEST; |
@@ -145,8 +142,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
145 | ret = RESUME_HOST; | 142 | ret = RESUME_HOST; |
146 | } | 143 | } |
147 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 144 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
148 | /* All KSEG0 faults are handled by KVM, as the guest kernel does not | 145 | /* |
149 | * expect to ever get them | 146 | * All KSEG0 faults are handled by KVM, as the guest kernel does |
147 | * not expect to ever get them | ||
150 | */ | 148 | */ |
151 | if (kvm_mips_handle_kseg0_tlb_fault | 149 | if (kvm_mips_handle_kseg0_tlb_fault |
152 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | 150 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { |
@@ -154,9 +152,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
154 | ret = RESUME_HOST; | 152 | ret = RESUME_HOST; |
155 | } | 153 | } |
156 | } else { | 154 | } else { |
157 | kvm_err | 155 | kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
158 | ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", | 156 | cause, opc, badvaddr); |
159 | cause, opc, badvaddr); | ||
160 | kvm_mips_dump_host_tlbs(); | 157 | kvm_mips_dump_host_tlbs(); |
161 | kvm_arch_vcpu_dump_regs(vcpu); | 158 | kvm_arch_vcpu_dump_regs(vcpu); |
162 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 159 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -185,11 +182,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |||
185 | kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", | 182 | kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", |
186 | vcpu->arch.pc, badvaddr); | 183 | vcpu->arch.pc, badvaddr); |
187 | 184 | ||
188 | /* User Address (UA) fault, this could happen if | 185 | /* |
189 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this | 186 | * User Address (UA) fault, this could happen if |
190 | * case we pass on the fault to the guest kernel and let it handle it. | 187 | * (1) TLB entry not present/valid in both Guest and shadow host |
191 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | 188 | * TLBs, in this case we pass on the fault to the guest |
192 | * case we inject the TLB from the Guest TLB into the shadow host TLB | 189 | * kernel and let it handle it. |
190 | * (2) TLB entry is present in the Guest TLB but not in the | ||
191 | * shadow, in this case we inject the TLB from the Guest TLB | ||
192 | * into the shadow host TLB | ||
193 | */ | 193 | */ |
194 | 194 | ||
195 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 195 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
@@ -206,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |||
206 | ret = RESUME_HOST; | 206 | ret = RESUME_HOST; |
207 | } | 207 | } |
208 | } else { | 208 | } else { |
209 | printk | 209 | kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", |
210 | ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", | 210 | cause, opc, badvaddr); |
211 | cause, opc, badvaddr); | ||
212 | kvm_mips_dump_host_tlbs(); | 211 | kvm_mips_dump_host_tlbs(); |
213 | kvm_arch_vcpu_dump_regs(vcpu); | 212 | kvm_arch_vcpu_dump_regs(vcpu); |
214 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 213 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -231,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |||
231 | kvm_debug("Emulate Store to MMIO space\n"); | 230 | kvm_debug("Emulate Store to MMIO space\n"); |
232 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 231 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
233 | if (er == EMULATE_FAIL) { | 232 | if (er == EMULATE_FAIL) { |
234 | printk("Emulate Store to MMIO space failed\n"); | 233 | kvm_err("Emulate Store to MMIO space failed\n"); |
235 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 234 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
236 | ret = RESUME_HOST; | 235 | ret = RESUME_HOST; |
237 | } else { | 236 | } else { |
@@ -239,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |||
239 | ret = RESUME_HOST; | 238 | ret = RESUME_HOST; |
240 | } | 239 | } |
241 | } else { | 240 | } else { |
242 | printk | 241 | kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
243 | ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 242 | cause, opc, badvaddr); |
244 | cause, opc, badvaddr); | ||
245 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 243 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
246 | ret = RESUME_HOST; | 244 | ret = RESUME_HOST; |
247 | } | 245 | } |
@@ -261,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |||
261 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); | 259 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); |
262 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 260 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
263 | if (er == EMULATE_FAIL) { | 261 | if (er == EMULATE_FAIL) { |
264 | printk("Emulate Load from MMIO space failed\n"); | 262 | kvm_err("Emulate Load from MMIO space failed\n"); |
265 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 263 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
266 | ret = RESUME_HOST; | 264 | ret = RESUME_HOST; |
267 | } else { | 265 | } else { |
@@ -269,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |||
269 | ret = RESUME_HOST; | 267 | ret = RESUME_HOST; |
270 | } | 268 | } |
271 | } else { | 269 | } else { |
272 | printk | 270 | kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
273 | ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 271 | cause, opc, badvaddr); |
274 | cause, opc, badvaddr); | ||
275 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 272 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
276 | ret = RESUME_HOST; | 273 | ret = RESUME_HOST; |
277 | er = EMULATE_FAIL; | 274 | er = EMULATE_FAIL; |
@@ -349,9 +346,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
349 | uint32_t config1; | 346 | uint32_t config1; |
350 | int vcpu_id = vcpu->vcpu_id; | 347 | int vcpu_id = vcpu->vcpu_id; |
351 | 348 | ||
352 | /* Arch specific stuff, set up config registers properly so that the | 349 | /* |
353 | * guest will come up as expected, for now we simulate a | 350 | * Arch specific stuff, set up config registers properly so that the |
354 | * MIPS 24kc | 351 | * guest will come up as expected, for now we simulate a MIPS 24kc |
355 | */ | 352 | */ |
356 | kvm_write_c0_guest_prid(cop0, 0x00019300); | 353 | kvm_write_c0_guest_prid(cop0, 0x00019300); |
357 | kvm_write_c0_guest_config(cop0, | 354 | kvm_write_c0_guest_config(cop0, |
@@ -373,14 +370,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
373 | 370 | ||
374 | kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); | 371 | kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); |
375 | /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ | 372 | /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ |
376 | kvm_write_c0_guest_config3(cop0, | 373 | kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | |
377 | MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << | 374 | (1 << CP0C3_ULRI)); |
378 | CP0C3_ULRI)); | ||
379 | 375 | ||
380 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | 376 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ |
381 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | 377 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); |
382 | 378 | ||
383 | /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ | 379 | /*
380 | * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
381 | */
384 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); | 382 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); |
385 | 383 | ||
386 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | 384 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ |
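Earlier in this file, kvm_trap_emul_gva_to_gpa_cb() translates CKSEG0/CKSEG1 guest virtual addresses by masking, since both segments are fixed unmapped windows onto low physical memory. A sketch of the arithmetic, assuming the standard 32-bit MIPS segment layout (CKSEG0 = 0x80000000, CKSEG1 = 0xa0000000):

	/* Illustration of what CPHYSADDR() evaluates to on 32-bit MIPS. */
	static unsigned long example_gva_to_gpa(unsigned long gva)
	{
		return gva & 0x1fffffffUL;	/* strip the segment bits */
	}

Addresses outside those two segments have no fixed mapping, which is why the callback logs the failure and returns KVM_INVALID_ADDR.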
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 4181d7baabba..773bef7614d8 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt { | |||
305 | struct list_head list; | 305 | struct list_head list; |
306 | atomic_t active; | 306 | atomic_t active; |
307 | struct kvm_s390_float_interrupt *float_int; | 307 | struct kvm_s390_float_interrupt *float_int; |
308 | int timer_due; /* event indicator for waitqueue below */ | ||
309 | wait_queue_head_t *wq; | 308 | wait_queue_head_t *wq; |
310 | atomic_t *cpuflags; | 309 | atomic_t *cpuflags; |
311 | unsigned int action_bits; | 310 | unsigned int action_bits; |
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch { | |||
367 | s390_fp_regs guest_fpregs; | 366 | s390_fp_regs guest_fpregs; |
368 | struct kvm_s390_local_interrupt local_int; | 367 | struct kvm_s390_local_interrupt local_int; |
369 | struct hrtimer ckc_timer; | 368 | struct hrtimer ckc_timer; |
370 | struct tasklet_struct tasklet; | ||
371 | struct kvm_s390_pgm_info pgm; | 369 | struct kvm_s390_pgm_info pgm; |
372 | union { | 370 | union { |
373 | struct cpuid cpu_id; | 371 | struct cpuid cpu_id; |
@@ -418,6 +416,7 @@ struct kvm_arch{ | |||
418 | int css_support; | 416 | int css_support; |
419 | int use_irqchip; | 417 | int use_irqchip; |
420 | int use_cmma; | 418 | int use_cmma; |
419 | int user_cpu_state_ctrl; | ||
421 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; | 420 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; |
422 | wait_queue_head_t ipte_wq; | 421 | wait_queue_head_t ipte_wq; |
423 | spinlock_t start_stop_lock; | 422 | spinlock_t start_stop_lock; |
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index 5d9cc19462c4..d4096fdfc6ab 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h | |||
@@ -108,6 +108,7 @@ | |||
108 | exit_code_ipa0(0xB2, 0x17, "STETR"), \ | 108 | exit_code_ipa0(0xB2, 0x17, "STETR"), \ |
109 | exit_code_ipa0(0xB2, 0x18, "PC"), \ | 109 | exit_code_ipa0(0xB2, 0x18, "PC"), \ |
110 | exit_code_ipa0(0xB2, 0x20, "SERVC"), \ | 110 | exit_code_ipa0(0xB2, 0x20, "SERVC"), \ |
111 | exit_code_ipa0(0xB2, 0x21, "IPTE"), \ | ||
111 | exit_code_ipa0(0xB2, 0x28, "PT"), \ | 112 | exit_code_ipa0(0xB2, 0x28, "PT"), \ |
112 | exit_code_ipa0(0xB2, 0x29, "ISKE"), \ | 113 | exit_code_ipa0(0xB2, 0x29, "ISKE"), \ |
113 | exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ | 114 | exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 0161675878a2..59bd8f991b98 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -176,7 +176,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
176 | return -EOPNOTSUPP; | 176 | return -EOPNOTSUPP; |
177 | } | 177 | } |
178 | 178 | ||
179 | kvm_s390_vcpu_stop(vcpu); | 179 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
180 | kvm_s390_vcpu_stop(vcpu); | ||
180 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; | 181 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; |
181 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; | 182 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; |
182 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; | 183 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index a0b586c1913c..eaf46291d361 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -56,32 +56,26 @@ static int handle_noop(struct kvm_vcpu *vcpu) | |||
56 | static int handle_stop(struct kvm_vcpu *vcpu) | 56 | static int handle_stop(struct kvm_vcpu *vcpu) |
57 | { | 57 | { |
58 | int rc = 0; | 58 | int rc = 0; |
59 | unsigned int action_bits; | ||
59 | 60 | ||
60 | vcpu->stat.exit_stop_request++; | 61 | vcpu->stat.exit_stop_request++; |
61 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
62 | |||
63 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); | 62 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); |
64 | 63 | ||
65 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { | 64 | action_bits = vcpu->arch.local_int.action_bits; |
66 | kvm_s390_vcpu_stop(vcpu); | ||
67 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; | ||
68 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); | ||
69 | rc = -EOPNOTSUPP; | ||
70 | } | ||
71 | 65 | ||
72 | if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { | 66 | if (!(action_bits & ACTION_STOP_ON_STOP)) |
73 | vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; | 67 | return 0; |
74 | /* store status must be called unlocked. Since local_int.lock | 68 | |
75 | * only protects local_int.* and not guest memory we can give | 69 | if (action_bits & ACTION_STORE_ON_STOP) { |
76 | * up the lock here */ | ||
77 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
78 | rc = kvm_s390_vcpu_store_status(vcpu, | 70 | rc = kvm_s390_vcpu_store_status(vcpu, |
79 | KVM_S390_STORE_STATUS_NOADDR); | 71 | KVM_S390_STORE_STATUS_NOADDR); |
80 | if (rc >= 0) | 72 | if (rc) |
81 | rc = -EOPNOTSUPP; | 73 | return rc; |
82 | } else | 74 | } |
83 | spin_unlock_bh(&vcpu->arch.local_int.lock); | 75 | |
84 | return rc; | 76 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
77 | kvm_s390_vcpu_stop(vcpu); | ||
78 | return -EOPNOTSUPP; | ||
85 | } | 79 | } |
86 | 80 | ||
87 | static int handle_validity(struct kvm_vcpu *vcpu) | 81 | static int handle_validity(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 90c8de22a2a0..92528a0bdda6 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -158,6 +158,9 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
158 | LCTL_CR10 | LCTL_CR11); | 158 | LCTL_CR10 | LCTL_CR11); |
159 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); | 159 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); |
160 | } | 160 | } |
161 | |||
162 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) | ||
163 | atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); | ||
161 | } | 164 | } |
162 | 165 | ||
163 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | 166 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) |
@@ -544,13 +547,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | |||
544 | int rc = 0; | 547 | int rc = 0; |
545 | 548 | ||
546 | if (atomic_read(&li->active)) { | 549 | if (atomic_read(&li->active)) { |
547 | spin_lock_bh(&li->lock); | 550 | spin_lock(&li->lock); |
548 | list_for_each_entry(inti, &li->list, list) | 551 | list_for_each_entry(inti, &li->list, list) |
549 | if (__interrupt_is_deliverable(vcpu, inti)) { | 552 | if (__interrupt_is_deliverable(vcpu, inti)) { |
550 | rc = 1; | 553 | rc = 1; |
551 | break; | 554 | break; |
552 | } | 555 | } |
553 | spin_unlock_bh(&li->lock); | 556 | spin_unlock(&li->lock); |
554 | } | 557 | } |
555 | 558 | ||
556 | if ((!rc) && atomic_read(&fi->active)) { | 559 | if ((!rc) && atomic_read(&fi->active)) { |
@@ -585,88 +588,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
585 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 588 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
586 | { | 589 | { |
587 | u64 now, sltime; | 590 | u64 now, sltime; |
588 | DECLARE_WAITQUEUE(wait, current); | ||
589 | 591 | ||
590 | vcpu->stat.exit_wait_state++; | 592 | vcpu->stat.exit_wait_state++; |
591 | if (kvm_cpu_has_interrupt(vcpu)) | ||
592 | return 0; | ||
593 | 593 | ||
594 | __set_cpu_idle(vcpu); | 594 | /* fast path */ |
595 | spin_lock_bh(&vcpu->arch.local_int.lock); | 595 | if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) |
596 | vcpu->arch.local_int.timer_due = 0; | 596 | return 0; |
597 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
598 | 597 | ||
599 | if (psw_interrupts_disabled(vcpu)) { | 598 | if (psw_interrupts_disabled(vcpu)) { |
600 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); | 599 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
601 | __unset_cpu_idle(vcpu); | ||
602 | return -EOPNOTSUPP; /* disabled wait */ | 600 | return -EOPNOTSUPP; /* disabled wait */ |
603 | } | 601 | } |
604 | 602 | ||
603 | __set_cpu_idle(vcpu); | ||
605 | if (!ckc_interrupts_enabled(vcpu)) { | 604 | if (!ckc_interrupts_enabled(vcpu)) { |
606 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); | 605 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); |
607 | goto no_timer; | 606 | goto no_timer; |
608 | } | 607 | } |
609 | 608 | ||
610 | now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; | 609 | now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; |
611 | if (vcpu->arch.sie_block->ckc < now) { | ||
612 | __unset_cpu_idle(vcpu); | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); | 610 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); |
617 | |||
618 | hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); | 611 | hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); |
619 | VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); | 612 | VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); |
620 | no_timer: | 613 | no_timer: |
621 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 614 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
622 | spin_lock(&vcpu->arch.local_int.float_int->lock); | 615 | kvm_vcpu_block(vcpu); |
623 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
624 | add_wait_queue(&vcpu->wq, &wait); | ||
625 | while (list_empty(&vcpu->arch.local_int.list) && | ||
626 | list_empty(&vcpu->arch.local_int.float_int->list) && | ||
627 | (!vcpu->arch.local_int.timer_due) && | ||
628 | !signal_pending(current) && | ||
629 | !kvm_s390_si_ext_call_pending(vcpu)) { | ||
630 | set_current_state(TASK_INTERRUPTIBLE); | ||
631 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
632 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | ||
633 | schedule(); | ||
634 | spin_lock(&vcpu->arch.local_int.float_int->lock); | ||
635 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
636 | } | ||
637 | __unset_cpu_idle(vcpu); | 616 | __unset_cpu_idle(vcpu); |
638 | __set_current_state(TASK_RUNNING); | ||
639 | remove_wait_queue(&vcpu->wq, &wait); | ||
640 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
641 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | ||
642 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 617 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
643 | 618 | ||
644 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); | 619 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); |
645 | return 0; | 620 | return 0; |
646 | } | 621 | } |
647 | 622 | ||
648 | void kvm_s390_tasklet(unsigned long parm) | 623 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) |
649 | { | 624 | { |
650 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; | 625 | if (waitqueue_active(&vcpu->wq)) { |
651 | 626 | /* | |
652 | spin_lock(&vcpu->arch.local_int.lock); | 627 | * The vcpu gave up the cpu voluntarily, mark it as a good |
653 | vcpu->arch.local_int.timer_due = 1; | 628 | * yield-candidate. |
654 | if (waitqueue_active(&vcpu->wq)) | 629 | */ |
630 | vcpu->preempted = true; | ||
655 | wake_up_interruptible(&vcpu->wq); | 631 | wake_up_interruptible(&vcpu->wq); |
656 | spin_unlock(&vcpu->arch.local_int.lock); | 632 | } |
657 | } | 633 | } |
658 | 634 | ||
659 | /* | ||
660 | * low level hrtimer wake routine. Because this runs in hardirq context | ||
661 | * we schedule a tasklet to do the real work. | ||
662 | */ | ||
663 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) | 635 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) |
664 | { | 636 | { |
665 | struct kvm_vcpu *vcpu; | 637 | struct kvm_vcpu *vcpu; |
666 | 638 | ||
667 | vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); | 639 | vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); |
668 | vcpu->preempted = true; | 640 | kvm_s390_vcpu_wakeup(vcpu); |
669 | tasklet_schedule(&vcpu->arch.tasklet); | ||
670 | 641 | ||
671 | return HRTIMER_NORESTART; | 642 | return HRTIMER_NORESTART; |
672 | } | 643 | } |
@@ -676,13 +647,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
676 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 647 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
677 | struct kvm_s390_interrupt_info *n, *inti = NULL; | 648 | struct kvm_s390_interrupt_info *n, *inti = NULL; |
678 | 649 | ||
679 | spin_lock_bh(&li->lock); | 650 | spin_lock(&li->lock); |
680 | list_for_each_entry_safe(inti, n, &li->list, list) { | 651 | list_for_each_entry_safe(inti, n, &li->list, list) { |
681 | list_del(&inti->list); | 652 | list_del(&inti->list); |
682 | kfree(inti); | 653 | kfree(inti); |
683 | } | 654 | } |
684 | atomic_set(&li->active, 0); | 655 | atomic_set(&li->active, 0); |
685 | spin_unlock_bh(&li->lock); | 656 | spin_unlock(&li->lock); |
686 | 657 | ||
687 | /* clear pending external calls set by sigp interpretation facility */ | 658 | /* clear pending external calls set by sigp interpretation facility */ |
688 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); | 659 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); |
@@ -701,7 +672,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
701 | if (atomic_read(&li->active)) { | 672 | if (atomic_read(&li->active)) { |
702 | do { | 673 | do { |
703 | deliver = 0; | 674 | deliver = 0; |
704 | spin_lock_bh(&li->lock); | 675 | spin_lock(&li->lock); |
705 | list_for_each_entry_safe(inti, n, &li->list, list) { | 676 | list_for_each_entry_safe(inti, n, &li->list, list) { |
706 | if (__interrupt_is_deliverable(vcpu, inti)) { | 677 | if (__interrupt_is_deliverable(vcpu, inti)) { |
707 | list_del(&inti->list); | 678 | list_del(&inti->list); |
@@ -712,7 +683,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
712 | } | 683 | } |
713 | if (list_empty(&li->list)) | 684 | if (list_empty(&li->list)) |
714 | atomic_set(&li->active, 0); | 685 | atomic_set(&li->active, 0); |
715 | spin_unlock_bh(&li->lock); | 686 | spin_unlock(&li->lock); |
716 | if (deliver) { | 687 | if (deliver) { |
717 | __do_deliver_interrupt(vcpu, inti); | 688 | __do_deliver_interrupt(vcpu, inti); |
718 | kfree(inti); | 689 | kfree(inti); |
@@ -758,7 +729,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) | |||
758 | if (atomic_read(&li->active)) { | 729 | if (atomic_read(&li->active)) { |
759 | do { | 730 | do { |
760 | deliver = 0; | 731 | deliver = 0; |
761 | spin_lock_bh(&li->lock); | 732 | spin_lock(&li->lock); |
762 | list_for_each_entry_safe(inti, n, &li->list, list) { | 733 | list_for_each_entry_safe(inti, n, &li->list, list) { |
763 | if ((inti->type == KVM_S390_MCHK) && | 734 | if ((inti->type == KVM_S390_MCHK) && |
764 | __interrupt_is_deliverable(vcpu, inti)) { | 735 | __interrupt_is_deliverable(vcpu, inti)) { |
@@ -770,7 +741,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) | |||
770 | } | 741 | } |
771 | if (list_empty(&li->list)) | 742 | if (list_empty(&li->list)) |
772 | atomic_set(&li->active, 0); | 743 | atomic_set(&li->active, 0); |
773 | spin_unlock_bh(&li->lock); | 744 | spin_unlock(&li->lock); |
774 | if (deliver) { | 745 | if (deliver) { |
775 | __do_deliver_interrupt(vcpu, inti); | 746 | __do_deliver_interrupt(vcpu, inti); |
776 | kfree(inti); | 747 | kfree(inti); |
@@ -817,11 +788,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | |||
817 | 788 | ||
818 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); | 789 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); |
819 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); | 790 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); |
820 | spin_lock_bh(&li->lock); | 791 | spin_lock(&li->lock); |
821 | list_add(&inti->list, &li->list); | 792 | list_add(&inti->list, &li->list); |
822 | atomic_set(&li->active, 1); | 793 | atomic_set(&li->active, 1); |
823 | BUG_ON(waitqueue_active(li->wq)); | 794 | BUG_ON(waitqueue_active(li->wq)); |
824 | spin_unlock_bh(&li->lock); | 795 | spin_unlock(&li->lock); |
825 | return 0; | 796 | return 0; |
826 | } | 797 | } |
827 | 798 | ||
@@ -842,11 +813,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | |||
842 | 813 | ||
843 | inti->type = KVM_S390_PROGRAM_INT; | 814 | inti->type = KVM_S390_PROGRAM_INT; |
844 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); | 815 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); |
845 | spin_lock_bh(&li->lock); | 816 | spin_lock(&li->lock); |
846 | list_add(&inti->list, &li->list); | 817 | list_add(&inti->list, &li->list); |
847 | atomic_set(&li->active, 1); | 818 | atomic_set(&li->active, 1); |
848 | BUG_ON(waitqueue_active(li->wq)); | 819 | BUG_ON(waitqueue_active(li->wq)); |
849 | spin_unlock_bh(&li->lock); | 820 | spin_unlock(&li->lock); |
850 | return 0; | 821 | return 0; |
851 | } | 822 | } |
852 | 823 | ||
@@ -934,12 +905,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
934 | } | 905 | } |
935 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); | 906 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); |
936 | li = &dst_vcpu->arch.local_int; | 907 | li = &dst_vcpu->arch.local_int; |
937 | spin_lock_bh(&li->lock); | 908 | spin_lock(&li->lock); |
938 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 909 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
939 | if (waitqueue_active(li->wq)) | 910 | spin_unlock(&li->lock); |
940 | wake_up_interruptible(li->wq); | 911 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); |
941 | kvm_get_vcpu(kvm, sigcpu)->preempted = true; | ||
942 | spin_unlock_bh(&li->lock); | ||
943 | unlock_fi: | 912 | unlock_fi: |
944 | spin_unlock(&fi->lock); | 913 | spin_unlock(&fi->lock); |
945 | mutex_unlock(&kvm->lock); | 914 | mutex_unlock(&kvm->lock); |
@@ -1081,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
1081 | 1050 | ||
1082 | mutex_lock(&vcpu->kvm->lock); | 1051 | mutex_lock(&vcpu->kvm->lock); |
1083 | li = &vcpu->arch.local_int; | 1052 | li = &vcpu->arch.local_int; |
1084 | spin_lock_bh(&li->lock); | 1053 | spin_lock(&li->lock); |
1085 | if (inti->type == KVM_S390_PROGRAM_INT) | 1054 | if (inti->type == KVM_S390_PROGRAM_INT) |
1086 | list_add(&inti->list, &li->list); | 1055 | list_add(&inti->list, &li->list); |
1087 | else | 1056 | else |
@@ -1090,11 +1059,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
1090 | if (inti->type == KVM_S390_SIGP_STOP) | 1059 | if (inti->type == KVM_S390_SIGP_STOP) |
1091 | li->action_bits |= ACTION_STOP_ON_STOP; | 1060 | li->action_bits |= ACTION_STOP_ON_STOP; |
1092 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1061 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
1093 | if (waitqueue_active(&vcpu->wq)) | 1062 | spin_unlock(&li->lock); |
1094 | wake_up_interruptible(&vcpu->wq); | ||
1095 | vcpu->preempted = true; | ||
1096 | spin_unlock_bh(&li->lock); | ||
1097 | mutex_unlock(&vcpu->kvm->lock); | 1063 | mutex_unlock(&vcpu->kvm->lock); |
1064 | kvm_s390_vcpu_wakeup(vcpu); | ||
1098 | return 0; | 1065 | return 0; |
1099 | } | 1066 | } |
1100 | 1067 | ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2f3e14fe91a4..339b34a02fb8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -166,7 +166,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
166 | case KVM_CAP_IOEVENTFD: | 166 | case KVM_CAP_IOEVENTFD: |
167 | case KVM_CAP_DEVICE_CTRL: | 167 | case KVM_CAP_DEVICE_CTRL: |
168 | case KVM_CAP_ENABLE_CAP_VM: | 168 | case KVM_CAP_ENABLE_CAP_VM: |
169 | case KVM_CAP_S390_IRQCHIP: | ||
169 | case KVM_CAP_VM_ATTRIBUTES: | 170 | case KVM_CAP_VM_ATTRIBUTES: |
171 | case KVM_CAP_MP_STATE: | ||
170 | r = 1; | 172 | r = 1; |
171 | break; | 173 | break; |
172 | case KVM_CAP_NR_VCPUS: | 174 | case KVM_CAP_NR_VCPUS: |
@@ -595,7 +597,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
595 | vcpu->arch.sie_block->pp = 0; | 597 | vcpu->arch.sie_block->pp = 0; |
596 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | 598 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
597 | kvm_clear_async_pf_completion_queue(vcpu); | 599 | kvm_clear_async_pf_completion_queue(vcpu); |
598 | kvm_s390_vcpu_stop(vcpu); | 600 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
601 | kvm_s390_vcpu_stop(vcpu); | ||
599 | kvm_s390_clear_local_irqs(vcpu); | 602 | kvm_s390_clear_local_irqs(vcpu); |
600 | } | 603 | } |
601 | 604 | ||
@@ -647,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
647 | return rc; | 650 | return rc; |
648 | } | 651 | } |
649 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 652 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
650 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, | ||
651 | (unsigned long) vcpu); | ||
652 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 653 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
653 | get_cpu_id(&vcpu->arch.cpu_id); | 654 | get_cpu_id(&vcpu->arch.cpu_id); |
654 | vcpu->arch.cpu_id.version = 0xff; | 655 | vcpu->arch.cpu_id.version = 0xff; |
@@ -926,7 +927,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) | |||
926 | { | 927 | { |
927 | int rc = 0; | 928 | int rc = 0; |
928 | 929 | ||
929 | if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED)) | 930 | if (!is_vcpu_stopped(vcpu)) |
930 | rc = -EBUSY; | 931 | rc = -EBUSY; |
931 | else { | 932 | else { |
932 | vcpu->run->psw_mask = psw.mask; | 933 | vcpu->run->psw_mask = psw.mask; |
@@ -980,13 +981,34 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
980 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 981 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
981 | struct kvm_mp_state *mp_state) | 982 | struct kvm_mp_state *mp_state) |
982 | { | 983 | { |
983 | return -EINVAL; /* not implemented yet */ | 984 | /* CHECK_STOP and LOAD are not supported yet */ |
985 | return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : | ||
986 | KVM_MP_STATE_OPERATING; | ||
984 | } | 987 | } |
985 | 988 | ||
986 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 989 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
987 | struct kvm_mp_state *mp_state) | 990 | struct kvm_mp_state *mp_state) |
988 | { | 991 | { |
989 | return -EINVAL; /* not implemented yet */ | 992 | int rc = 0; |
993 | |||
994 | /* user space knows about this interface - let it control the state */ | ||
995 | vcpu->kvm->arch.user_cpu_state_ctrl = 1; | ||
996 | |||
997 | switch (mp_state->mp_state) { | ||
998 | case KVM_MP_STATE_STOPPED: | ||
999 | kvm_s390_vcpu_stop(vcpu); | ||
1000 | break; | ||
1001 | case KVM_MP_STATE_OPERATING: | ||
1002 | kvm_s390_vcpu_start(vcpu); | ||
1003 | break; | ||
1004 | case KVM_MP_STATE_LOAD: | ||
1005 | case KVM_MP_STATE_CHECK_STOP: | ||
1006 | /* fall through - CHECK_STOP and LOAD are not supported yet */ | ||
1007 | default: | ||
1008 | rc = -ENXIO; | ||
1009 | } | ||
1010 | |||
1011 | return rc; | ||
990 | } | 1012 | } |
991 | 1013 | ||
992 | bool kvm_s390_cmma_enabled(struct kvm *kvm) | 1014 | bool kvm_s390_cmma_enabled(struct kvm *kvm) |
@@ -1045,6 +1067,9 @@ retry: | |||
1045 | goto retry; | 1067 | goto retry; |
1046 | } | 1068 | } |
1047 | 1069 | ||
1070 | /* nothing to do, just clear the request */ | ||
1071 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
1072 | |||
1048 | return 0; | 1073 | return 0; |
1049 | } | 1074 | } |
1050 | 1075 | ||
@@ -1284,7 +1309,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1284 | if (vcpu->sigset_active) | 1309 | if (vcpu->sigset_active) |
1285 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 1310 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
1286 | 1311 | ||
1287 | kvm_s390_vcpu_start(vcpu); | 1312 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { |
1313 | kvm_s390_vcpu_start(vcpu); | ||
1314 | } else if (is_vcpu_stopped(vcpu)) { | ||
1315 | pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", | ||
1316 | vcpu->vcpu_id); | ||
1317 | return -EINVAL; | ||
1318 | } | ||
1288 | 1319 | ||
1289 | switch (kvm_run->exit_reason) { | 1320 | switch (kvm_run->exit_reason) { |
1290 | case KVM_EXIT_S390_SIEIC: | 1321 | case KVM_EXIT_S390_SIEIC: |
@@ -1413,11 +1444,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
1413 | return kvm_s390_store_status_unloaded(vcpu, addr); | 1444 | return kvm_s390_store_status_unloaded(vcpu, addr); |
1414 | } | 1445 | } |
1415 | 1446 | ||
1416 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) | ||
1417 | { | ||
1418 | return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; | ||
1419 | } | ||
1420 | |||
1421 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | 1447 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
1422 | { | 1448 | { |
1423 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); | 1449 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); |
@@ -1451,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) | |||
1451 | 1477 | ||
1452 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); | 1478 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); |
1453 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | 1479 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
1454 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | 1480 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
1455 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | 1481 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
1456 | 1482 | ||
1457 | for (i = 0; i < online_vcpus; i++) { | 1483 | for (i = 0; i < online_vcpus; i++) { |
@@ -1477,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) | |||
1477 | * Let's play safe and flush the VCPU at startup. | 1503 | * Let's play safe and flush the VCPU at startup. |
1478 | */ | 1504 | */ |
1479 | vcpu->arch.sie_block->ihcpu = 0xffff; | 1505 | vcpu->arch.sie_block->ihcpu = 0xffff; |
1480 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | 1506 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
1481 | return; | 1507 | return; |
1482 | } | 1508 | } |
1483 | 1509 | ||
@@ -1491,10 +1517,18 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | |||
1491 | 1517 | ||
1492 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); | 1518 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); |
1493 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | 1519 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
1494 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | 1520 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
1495 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | 1521 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
1496 | 1522 | ||
1523 | /* Need to lock access to action_bits to avoid a SIGP race condition */ | ||
1524 | spin_lock(&vcpu->arch.local_int.lock); | ||
1497 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 1525 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); |
1526 | |||
1527 | /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ | ||
1528 | vcpu->arch.local_int.action_bits &= | ||
1529 | ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); | ||
1530 | spin_unlock(&vcpu->arch.local_int.lock); | ||
1531 | |||
1498 | __disable_ibs_on_vcpu(vcpu); | 1532 | __disable_ibs_on_vcpu(vcpu); |
1499 | 1533 | ||
1500 | for (i = 0; i < online_vcpus; i++) { | 1534 | for (i = 0; i < online_vcpus; i++) { |
@@ -1512,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | |||
1512 | __enable_ibs_on_vcpu(started_vcpu); | 1546 | __enable_ibs_on_vcpu(started_vcpu); |
1513 | } | 1547 | } |
1514 | 1548 | ||
1515 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | 1549 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
1516 | return; | 1550 | return; |
1517 | } | 1551 | } |
1518 | 1552 | ||
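With KVM_CAP_MP_STATE now advertised for s390, userspace can drive vcpu run states through the generic ioctls, and the first KVM_SET_MP_STATE call switches the VM to user-controlled CPU states. A minimal usage sketch (error handling elided; vcpu_fd is assumed to be an open vcpu file descriptor):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void stop_then_restart(int vcpu_fd)
	{
		struct kvm_mp_state st;

		st.mp_state = KVM_MP_STATE_STOPPED;	/* stop the vcpu */
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);

		ioctl(vcpu_fd, KVM_GET_MP_STATE, &st);	/* reads back STOPPED */

		st.mp_state = KVM_MP_STATE_OPERATING;	/* start it again */
		ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
	}

KVM_MP_STATE_LOAD and KVM_MP_STATE_CHECK_STOP are rejected with -ENXIO, as the handler above shows, and once userspace controls the states a stopped vcpu can no longer be entered via KVM_RUN.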
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a8655ed31616..3862fa2cefe0 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -45,9 +45,9 @@ do { \ | |||
45 | d_args); \ | 45 | d_args); \ |
46 | } while (0) | 46 | } while (0) |
47 | 47 | ||
48 | static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu) | 48 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) |
49 | { | 49 | { |
50 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT; | 50 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline int kvm_is_ucontrol(struct kvm *kvm) | 53 | static inline int kvm_is_ucontrol(struct kvm *kvm) |
@@ -129,9 +129,15 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
129 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; | 129 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* are cpu states controlled by user space */ | ||
133 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) | ||
134 | { | ||
135 | return kvm->arch.user_cpu_state_ctrl != 0; | ||
136 | } | ||
137 | |||
132 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); | 138 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); |
139 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu); | ||
133 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); | 140 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); |
134 | void kvm_s390_tasklet(unsigned long parm); | ||
135 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); | 141 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); |
136 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); | 142 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); |
137 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); | 143 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 43079a48cc98..cf243ba3d50f 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
125 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; | 125 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
126 | } | 126 | } |
127 | 127 | ||
128 | static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | 128 | static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) |
129 | { | 129 | { |
130 | struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; | ||
130 | struct kvm_s390_interrupt_info *inti; | 131 | struct kvm_s390_interrupt_info *inti; |
131 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 132 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
132 | 133 | ||
@@ -135,7 +136,13 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
135 | return -ENOMEM; | 136 | return -ENOMEM; |
136 | inti->type = KVM_S390_SIGP_STOP; | 137 | inti->type = KVM_S390_SIGP_STOP; |
137 | 138 | ||
138 | spin_lock_bh(&li->lock); | 139 | spin_lock(&li->lock); |
140 | if (li->action_bits & ACTION_STOP_ON_STOP) { | ||
141 | /* another SIGP STOP is pending */ | ||
142 | kfree(inti); | ||
143 | rc = SIGP_CC_BUSY; | ||
144 | goto out; | ||
145 | } | ||
139 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 146 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
140 | kfree(inti); | 147 | kfree(inti); |
141 | if ((action & ACTION_STORE_ON_STOP) != 0) | 148 | if ((action & ACTION_STORE_ON_STOP) != 0) |
@@ -144,19 +151,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
144 | } | 151 | } |
145 | list_add_tail(&inti->list, &li->list); | 152 | list_add_tail(&inti->list, &li->list); |
146 | atomic_set(&li->active, 1); | 153 | atomic_set(&li->active, 1); |
147 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | ||
148 | li->action_bits |= action; | 154 | li->action_bits |= action; |
149 | if (waitqueue_active(li->wq)) | 155 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
150 | wake_up_interruptible(li->wq); | 156 | kvm_s390_vcpu_wakeup(dst_vcpu); |
151 | out: | 157 | out: |
152 | spin_unlock_bh(&li->lock); | 158 | spin_unlock(&li->lock); |
153 | 159 | ||
154 | return rc; | 160 | return rc; |
155 | } | 161 | } |
156 | 162 | ||
157 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | 163 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) |
158 | { | 164 | { |
159 | struct kvm_s390_local_interrupt *li; | ||
160 | struct kvm_vcpu *dst_vcpu = NULL; | 165 | struct kvm_vcpu *dst_vcpu = NULL; |
161 | int rc; | 166 | int rc; |
162 | 167 | ||
@@ -166,9 +171,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | |||
166 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 171 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
167 | if (!dst_vcpu) | 172 | if (!dst_vcpu) |
168 | return SIGP_CC_NOT_OPERATIONAL; | 173 | return SIGP_CC_NOT_OPERATIONAL; |
169 | li = &dst_vcpu->arch.local_int; | ||
170 | 174 | ||
171 | rc = __inject_sigp_stop(li, action); | 175 | rc = __inject_sigp_stop(dst_vcpu, action); |
172 | 176 | ||
173 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); | 177 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); |
174 | 178 | ||
@@ -238,7 +242,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
238 | if (!inti) | 242 | if (!inti) |
239 | return SIGP_CC_BUSY; | 243 | return SIGP_CC_BUSY; |
240 | 244 | ||
241 | spin_lock_bh(&li->lock); | 245 | spin_lock(&li->lock); |
242 | /* cpu must be in stopped state */ | 246 | /* cpu must be in stopped state */ |
243 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 247 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
244 | *reg &= 0xffffffff00000000UL; | 248 | *reg &= 0xffffffff00000000UL; |
@@ -253,13 +257,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
253 | 257 | ||
254 | list_add_tail(&inti->list, &li->list); | 258 | list_add_tail(&inti->list, &li->list); |
255 | atomic_set(&li->active, 1); | 259 | atomic_set(&li->active, 1); |
256 | if (waitqueue_active(li->wq)) | 260 | kvm_s390_vcpu_wakeup(dst_vcpu); |
257 | wake_up_interruptible(li->wq); | ||
258 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 261 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
259 | 262 | ||
260 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); | 263 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); |
261 | out_li: | 264 | out_li: |
262 | spin_unlock_bh(&li->lock); | 265 | spin_unlock(&li->lock); |
263 | return rc; | 266 | return rc; |
264 | } | 267 | } |
265 | 268 | ||
@@ -275,9 +278,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, | |||
275 | if (!dst_vcpu) | 278 | if (!dst_vcpu) |
276 | return SIGP_CC_NOT_OPERATIONAL; | 279 | return SIGP_CC_NOT_OPERATIONAL; |
277 | 280 | ||
278 | spin_lock_bh(&dst_vcpu->arch.local_int.lock); | 281 | spin_lock(&dst_vcpu->arch.local_int.lock); |
279 | flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); | 282 | flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); |
280 | spin_unlock_bh(&dst_vcpu->arch.local_int.lock); | 283 | spin_unlock(&dst_vcpu->arch.local_int.lock); |
281 | if (!(flags & CPUSTAT_STOPPED)) { | 284 | if (!(flags & CPUSTAT_STOPPED)) { |
282 | *reg &= 0xffffffff00000000UL; | 285 | *reg &= 0xffffffff00000000UL; |
283 | *reg |= SIGP_STATUS_INCORRECT_STATE; | 286 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
@@ -338,10 +341,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
338 | if (!dst_vcpu) | 341 | if (!dst_vcpu) |
339 | return SIGP_CC_NOT_OPERATIONAL; | 342 | return SIGP_CC_NOT_OPERATIONAL; |
340 | li = &dst_vcpu->arch.local_int; | 343 | li = &dst_vcpu->arch.local_int; |
341 | spin_lock_bh(&li->lock); | 344 | spin_lock(&li->lock); |
342 | if (li->action_bits & ACTION_STOP_ON_STOP) | 345 | if (li->action_bits & ACTION_STOP_ON_STOP) |
343 | rc = SIGP_CC_BUSY; | 346 | rc = SIGP_CC_BUSY; |
344 | spin_unlock_bh(&li->lock); | 347 | spin_unlock(&li->lock); |
345 | 348 | ||
346 | return rc; | 349 | return rc; |
347 | } | 350 | } |
@@ -461,12 +464,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
461 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 464 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
462 | BUG_ON(dest_vcpu == NULL); | 465 | BUG_ON(dest_vcpu == NULL); |
463 | 466 | ||
464 | spin_lock_bh(&dest_vcpu->arch.local_int.lock); | 467 | kvm_s390_vcpu_wakeup(dest_vcpu); |
465 | if (waitqueue_active(&dest_vcpu->wq)) | ||
466 | wake_up_interruptible(&dest_vcpu->wq); | ||
467 | dest_vcpu->preempted = true; | ||
468 | spin_unlock_bh(&dest_vcpu->arch.local_int.lock); | ||
469 | |||
470 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); | 468 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); |
471 | return 0; | 469 | return 0; |
472 | } | 470 | } |
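Several sites in this file used to open-code the wake-the-target sequence (check the wait queue, wake it, mark the vcpu preempted); they now all go through kvm_s390_vcpu_wakeup(), declared in kvm-s390.h above. A rough pthread analogue of what such a helper centralizes (illustrative, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

struct vcpu {
	pthread_mutex_t lock;
	pthread_cond_t wq;	/* stands in for the vcpu wait queue */
	bool preempted;
};

/* one helper instead of several open-coded wakeup sequences */
static void vcpu_wakeup(struct vcpu *v)
{
	pthread_mutex_lock(&v->lock);
	v->preempted = true;		/* hint consumed by the run loop */
	pthread_cond_signal(&v->wq);	/* wake_up_interruptible() analogue */
	pthread_mutex_unlock(&v->lock);
}

int main(void)
{
	struct vcpu v = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, false };

	vcpu_wakeup(&v);
	return v.preempted ? 0 : 1;
}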
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index a04fe4eb237d..eb181178fe0b 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -37,6 +37,7 @@ struct x86_instruction_info { | |||
37 | u8 modrm_reg; /* index of register used */ | 37 | u8 modrm_reg; /* index of register used */ |
38 | u8 modrm_rm; /* rm part of modrm */ | 38 | u8 modrm_rm; /* rm part of modrm */ |
39 | u64 src_val; /* value of source operand */ | 39 | u64 src_val; /* value of source operand */ |
40 | u64 dst_val; /* value of destination operand */ | ||
40 | u8 src_bytes; /* size of source operand */ | 41 | u8 src_bytes; /* size of source operand */ |
41 | u8 dst_bytes; /* size of destination operand */ | 42 | u8 dst_bytes; /* size of destination operand */ |
42 | u8 ad_bytes; /* size of src/dst address */ | 43 | u8 ad_bytes; /* size of src/dst address */ |
@@ -194,6 +195,7 @@ struct x86_emulate_ops { | |||
194 | int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); | 195 | int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); |
195 | int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); | 196 | int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); |
196 | int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); | 197 | int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); |
198 | int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc); | ||
197 | int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); | 199 | int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); |
198 | void (*halt)(struct x86_emulate_ctxt *ctxt); | 200 | void (*halt)(struct x86_emulate_ctxt *ctxt); |
199 | void (*wbinvd)(struct x86_emulate_ctxt *ctxt); | 201 | void (*wbinvd)(struct x86_emulate_ctxt *ctxt); |
@@ -231,7 +233,7 @@ struct operand { | |||
231 | union { | 233 | union { |
232 | unsigned long val; | 234 | unsigned long val; |
233 | u64 val64; | 235 | u64 val64; |
234 | char valptr[sizeof(unsigned long) + 2]; | 236 | char valptr[sizeof(sse128_t)]; |
235 | sse128_t vec_val; | 237 | sse128_t vec_val; |
236 | u64 mm_val; | 238 | u64 mm_val; |
237 | void *data; | 239 | void *data; |
@@ -240,8 +242,8 @@ struct operand { | |||
240 | 242 | ||
241 | struct fetch_cache { | 243 | struct fetch_cache { |
242 | u8 data[15]; | 244 | u8 data[15]; |
243 | unsigned long start; | 245 | u8 *ptr; |
244 | unsigned long end; | 246 | u8 *end; |
245 | }; | 247 | }; |
246 | 248 | ||
247 | struct read_cache { | 249 | struct read_cache { |
@@ -286,30 +288,36 @@ struct x86_emulate_ctxt { | |||
286 | u8 opcode_len; | 288 | u8 opcode_len; |
287 | u8 b; | 289 | u8 b; |
288 | u8 intercept; | 290 | u8 intercept; |
289 | u8 lock_prefix; | ||
290 | u8 rep_prefix; | ||
291 | u8 op_bytes; | 291 | u8 op_bytes; |
292 | u8 ad_bytes; | 292 | u8 ad_bytes; |
293 | u8 rex_prefix; | ||
294 | struct operand src; | 293 | struct operand src; |
295 | struct operand src2; | 294 | struct operand src2; |
296 | struct operand dst; | 295 | struct operand dst; |
297 | bool has_seg_override; | ||
298 | u8 seg_override; | ||
299 | u64 d; | ||
300 | int (*execute)(struct x86_emulate_ctxt *ctxt); | 296 | int (*execute)(struct x86_emulate_ctxt *ctxt); |
301 | int (*check_perm)(struct x86_emulate_ctxt *ctxt); | 297 | int (*check_perm)(struct x86_emulate_ctxt *ctxt); |
298 | /* | ||
299 | * The following six fields are cleared together, | ||
300 | * the rest are initialized unconditionally in x86_decode_insn | ||
301 | * or elsewhere | ||
302 | */ | ||
303 | bool rip_relative; | ||
304 | u8 rex_prefix; | ||
305 | u8 lock_prefix; | ||
306 | u8 rep_prefix; | ||
307 | /* bitmaps of registers in _regs[] that can be read */ | ||
308 | u32 regs_valid; | ||
309 | /* bitmaps of registers in _regs[] that have been written */ | ||
310 | u32 regs_dirty; | ||
302 | /* modrm */ | 311 | /* modrm */ |
303 | u8 modrm; | 312 | u8 modrm; |
304 | u8 modrm_mod; | 313 | u8 modrm_mod; |
305 | u8 modrm_reg; | 314 | u8 modrm_reg; |
306 | u8 modrm_rm; | 315 | u8 modrm_rm; |
307 | u8 modrm_seg; | 316 | u8 modrm_seg; |
308 | bool rip_relative; | 317 | u8 seg_override; |
318 | u64 d; | ||
309 | unsigned long _eip; | 319 | unsigned long _eip; |
310 | struct operand memop; | 320 | struct operand memop; |
311 | u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */ | ||
312 | u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */ | ||
313 | /* Fields above regs are cleared together. */ | 321 | /* Fields above regs are cleared together. */ |
314 | unsigned long _regs[NR_VCPU_REGS]; | 322 | unsigned long _regs[NR_VCPU_REGS]; |
315 | struct operand *memopp; | 323 | struct operand *memopp; |
@@ -407,6 +415,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); | |||
407 | #define EMULATION_OK 0 | 415 | #define EMULATION_OK 0 |
408 | #define EMULATION_RESTART 1 | 416 | #define EMULATION_RESTART 1 |
409 | #define EMULATION_INTERCEPTED 2 | 417 | #define EMULATION_INTERCEPTED 2 |
418 | void init_decode_cache(struct x86_emulate_ctxt *ctxt); | ||
410 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); | 419 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); |
411 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | 420 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, |
412 | u16 tss_selector, int idt_index, int reason, | 421 | u16 tss_selector, int idt_index, int reason, |
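The new fetch_cache drops the eip-relative (start, end) bookkeeping in favor of raw data pointers, so the common fetch becomes a pointer compare plus an increment. A self-contained sketch of the shape this header now describes (the refill is stubbed; the real one reads guest memory without crossing a page boundary):

#include <stdio.h>

struct fetch_cache {
	unsigned char data[15];	/* an x86 instruction is at most 15 bytes */
	unsigned char *ptr;	/* next byte to hand out */
	unsigned char *end;	/* one past the last cached byte */
};

/* stand-in for the slow path that refills from guest memory */
static int refill(struct fetch_cache *fc, unsigned int need)
{
	(void)fc; (void)need;
	return -1;		/* nothing more to read in this sketch */
}

static int fetch_u8(struct fetch_cache *fc, unsigned char *out)
{
	if (fc->end - fc->ptr < 1 && refill(fc, 1) < 0)
		return -1;
	*out = *fc->ptr++;	/* no eip arithmetic on the hot path */
	return 0;
}

int main(void)
{
	struct fetch_cache fc = { .data = { 0x90 } };	/* NOP */
	unsigned char b = 0;

	fc.ptr = fc.data;
	fc.end = fc.data + 1;	/* one byte pre-loaded, as in decode */
	printf("%d 0x%02x\n", fetch_u8(&fc, &b), b);	/* 0 0x90 */
	return 0;
}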
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 49205d01b9ad..572460175ba5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -152,14 +152,16 @@ enum { | |||
152 | 152 | ||
153 | #define DR6_BD (1 << 13) | 153 | #define DR6_BD (1 << 13) |
154 | #define DR6_BS (1 << 14) | 154 | #define DR6_BS (1 << 14) |
155 | #define DR6_FIXED_1 0xffff0ff0 | 155 | #define DR6_RTM (1 << 16) |
156 | #define DR6_VOLATILE 0x0000e00f | 156 | #define DR6_FIXED_1 0xfffe0ff0 |
157 | #define DR6_INIT 0xffff0ff0 | ||
158 | #define DR6_VOLATILE 0x0001e00f | ||
157 | 159 | ||
158 | #define DR7_BP_EN_MASK 0x000000ff | 160 | #define DR7_BP_EN_MASK 0x000000ff |
159 | #define DR7_GE (1 << 9) | 161 | #define DR7_GE (1 << 9) |
160 | #define DR7_GD (1 << 13) | 162 | #define DR7_GD (1 << 13) |
161 | #define DR7_FIXED_1 0x00000400 | 163 | #define DR7_FIXED_1 0x00000400 |
162 | #define DR7_VOLATILE 0xffff23ff | 164 | #define DR7_VOLATILE 0xffff2bff |
163 | 165 | ||
164 | /* apic attention bits */ | 166 | /* apic attention bits */ |
165 | #define KVM_APIC_CHECK_VAPIC 0 | 167 | #define KVM_APIC_CHECK_VAPIC 0 |
@@ -448,7 +450,7 @@ struct kvm_vcpu_arch { | |||
448 | u64 tsc_offset_adjustment; | 450 | u64 tsc_offset_adjustment; |
449 | u64 this_tsc_nsec; | 451 | u64 this_tsc_nsec; |
450 | u64 this_tsc_write; | 452 | u64 this_tsc_write; |
451 | u8 this_tsc_generation; | 453 | u64 this_tsc_generation; |
452 | bool tsc_catchup; | 454 | bool tsc_catchup; |
453 | bool tsc_always_catchup; | 455 | bool tsc_always_catchup; |
454 | s8 virtual_tsc_shift; | 456 | s8 virtual_tsc_shift; |
@@ -591,7 +593,7 @@ struct kvm_arch { | |||
591 | u64 cur_tsc_nsec; | 593 | u64 cur_tsc_nsec; |
592 | u64 cur_tsc_write; | 594 | u64 cur_tsc_write; |
593 | u64 cur_tsc_offset; | 595 | u64 cur_tsc_offset; |
594 | u8 cur_tsc_generation; | 596 | u64 cur_tsc_generation; |
595 | int nr_vcpus_matched_tsc; | 597 | int nr_vcpus_matched_tsc; |
596 | 598 | ||
597 | spinlock_t pvclock_gtod_sync_lock; | 599 | spinlock_t pvclock_gtod_sync_lock; |
@@ -717,7 +719,7 @@ struct kvm_x86_ops { | |||
717 | int (*handle_exit)(struct kvm_vcpu *vcpu); | 719 | int (*handle_exit)(struct kvm_vcpu *vcpu); |
718 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 720 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
719 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 721 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
720 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 722 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu); |
721 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, | 723 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, |
722 | unsigned char *hypercall_addr); | 724 | unsigned char *hypercall_addr); |
723 | void (*set_irq)(struct kvm_vcpu *vcpu); | 725 | void (*set_irq)(struct kvm_vcpu *vcpu); |
@@ -1070,6 +1072,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu); | |||
1070 | bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr); | 1072 | bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr); |
1071 | int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); | 1073 | int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); |
1072 | int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); | 1074 | int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); |
1075 | int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc); | ||
1073 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); | 1076 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); |
1074 | void kvm_handle_pmu_event(struct kvm_vcpu *vcpu); | 1077 | void kvm_handle_pmu_event(struct kvm_vcpu *vcpu); |
1075 | void kvm_deliver_pmi(struct kvm_vcpu *vcpu); | 1078 | void kvm_deliver_pmi(struct kvm_vcpu *vcpu); |
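Widening this_tsc_generation and cur_tsc_generation from u8 to u64 matters because an 8-bit generation counter wraps after 256 TSC writes, at which point a stale per-vcpu generation can compare equal to the current one and wrongly pass the match check. A toy demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned char gen8 = 0;		/* the old counter width */
	unsigned long long gen64 = 0;	/* the new one */
	int i;

	for (i = 0; i < 256; i++) {
		gen8++;
		gen64++;
	}

	/* the 8-bit counter is back at its starting value */
	printf("gen8=%u gen64=%llu stale-match=%d\n",
	       gen8, gen64, gen8 == 0);	/* gen8=0 gen64=256 stale-match=1 */
	return 0;
}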
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 7004d21e6219..bcbfade26d8d 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -51,6 +51,9 @@ | |||
51 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | 51 | #define CPU_BASED_MONITOR_EXITING 0x20000000 |
52 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | 52 | #define CPU_BASED_PAUSE_EXITING 0x40000000 |
53 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | 53 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 |
54 | |||
55 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 | ||
56 | |||
54 | /* | 57 | /* |
55 | * Definitions of Secondary Processor-Based VM-Execution Controls. | 58 | * Definitions of Secondary Processor-Based VM-Execution Controls. |
56 | */ | 59 | */ |
@@ -76,7 +79,7 @@ | |||
76 | 79 | ||
77 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 | 80 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 |
78 | 81 | ||
79 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002 | 82 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 |
80 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | 83 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 |
81 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 | 84 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 |
82 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | 85 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 |
@@ -89,7 +92,7 @@ | |||
89 | 92 | ||
90 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff | 93 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff |
91 | 94 | ||
92 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002 | 95 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 |
93 | #define VM_ENTRY_IA32E_MODE 0x00000200 | 96 | #define VM_ENTRY_IA32E_MODE 0x00000200 |
94 | #define VM_ENTRY_SMM 0x00000400 | 97 | #define VM_ENTRY_SMM 0x00000400 |
95 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | 98 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 |
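The debug-controls fix above moves VM_EXIT_SAVE_DEBUG_CONTROLS and VM_ENTRY_LOAD_DEBUG_CONTROLS from 0x2 to 0x4: both knobs live at bit 2 of their respective control fields, and the old constants named bit 1. A compile-time guard of the sort that would have caught the slip (hypothetical, not part of the patch):

#include <assert.h>

#define VM_EXIT_SAVE_DEBUG_CONTROLS	0x00000004
#define VM_ENTRY_LOAD_DEBUG_CONTROLS	0x00000004

/* the save/load debug-controls knobs are bit 2; 0x2 would be bit 1 */
static_assert(VM_EXIT_SAVE_DEBUG_CONTROLS == (1u << 2), "exit bit");
static_assert(VM_ENTRY_LOAD_DEBUG_CONTROLS == (1u << 2), "entry bit");

int main(void)
{
	return 0;
}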
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index d3a87780c70b..d7dcef58aefa 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h | |||
@@ -23,7 +23,10 @@ | |||
23 | #define GP_VECTOR 13 | 23 | #define GP_VECTOR 13 |
24 | #define PF_VECTOR 14 | 24 | #define PF_VECTOR 14 |
25 | #define MF_VECTOR 16 | 25 | #define MF_VECTOR 16 |
26 | #define AC_VECTOR 17 | ||
26 | #define MC_VECTOR 18 | 27 | #define MC_VECTOR 18 |
28 | #define XM_VECTOR 19 | ||
29 | #define VE_VECTOR 20 | ||
27 | 30 | ||
28 | /* Select x86 specific features in <linux/kvm.h> */ | 31 | /* Select x86 specific features in <linux/kvm.h> */ |
29 | #define __KVM_HAVE_PIT | 32 | #define __KVM_HAVE_PIT |
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index fcf2b3ae1bf0..eaefcc66c855 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h | |||
@@ -558,6 +558,7 @@ | |||
558 | 558 | ||
559 | /* VMX_BASIC bits and bitmasks */ | 559 | /* VMX_BASIC bits and bitmasks */ |
560 | #define VMX_BASIC_VMCS_SIZE_SHIFT 32 | 560 | #define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
561 | #define VMX_BASIC_TRUE_CTLS (1ULL << 55) | ||
561 | #define VMX_BASIC_64 0x0001000000000000LLU | 562 | #define VMX_BASIC_64 0x0001000000000000LLU |
562 | #define VMX_BASIC_MEM_TYPE_SHIFT 50 | 563 | #define VMX_BASIC_MEM_TYPE_SHIFT 50 |
563 | #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU | 564 | #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index f9087315e0cd..a5380590ab0e 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h | |||
@@ -95,4 +95,12 @@ static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) | |||
95 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 95 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
96 | return best && (best->edx & bit(X86_FEATURE_GBPAGES)); | 96 | return best && (best->edx & bit(X86_FEATURE_GBPAGES)); |
97 | } | 97 | } |
98 | |||
99 | static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | struct kvm_cpuid_entry2 *best; | ||
102 | |||
103 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | ||
104 | return best && (best->ebx & bit(X86_FEATURE_RTM)); | ||
105 | } | ||
98 | #endif | 106 | #endif |
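guest_cpuid_has_rtm() answers the question against the guest's CPUID tables; the same bit can be probed for the host from userspace. A small sketch using the GCC/Clang <cpuid.h> wrapper (assumes __get_cpuid_count is available, i.e. GCC 7+ or a recent Clang); RTM is reported in leaf 7, subleaf 0, EBX bit 11:

#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang helper, x86 only */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		printf("host RTM: %s\n",
		       (ebx & (1u << 11)) ? "yes" : "no");
	return 0;
}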
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e4e833d3d7d7..56657b0bb3bb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -162,6 +162,10 @@ | |||
162 | #define NoWrite ((u64)1 << 45) /* No writeback */ | 162 | #define NoWrite ((u64)1 << 45) /* No writeback */ |
163 | #define SrcWrite ((u64)1 << 46) /* Write back src operand */ | 163 | #define SrcWrite ((u64)1 << 46) /* Write back src operand */ |
164 | #define NoMod ((u64)1 << 47) /* Mod field is ignored */ | 164 | #define NoMod ((u64)1 << 47) /* Mod field is ignored */ |
165 | #define Intercept ((u64)1 << 48) /* Has valid intercept field */ | ||
166 | #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ | ||
167 | #define NoBigReal ((u64)1 << 50) /* No big real mode */ | ||
168 | #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ | ||
165 | 169 | ||
166 | #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) | 170 | #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) |
167 | 171 | ||
@@ -426,6 +430,7 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, | |||
426 | .modrm_reg = ctxt->modrm_reg, | 430 | .modrm_reg = ctxt->modrm_reg, |
427 | .modrm_rm = ctxt->modrm_rm, | 431 | .modrm_rm = ctxt->modrm_rm, |
428 | .src_val = ctxt->src.val64, | 432 | .src_val = ctxt->src.val64, |
433 | .dst_val = ctxt->dst.val64, | ||
429 | .src_bytes = ctxt->src.bytes, | 434 | .src_bytes = ctxt->src.bytes, |
430 | .dst_bytes = ctxt->dst.bytes, | 435 | .dst_bytes = ctxt->dst.bytes, |
431 | .ad_bytes = ctxt->ad_bytes, | 436 | .ad_bytes = ctxt->ad_bytes, |
@@ -511,12 +516,6 @@ static u32 desc_limit_scaled(struct desc_struct *desc) | |||
511 | return desc->g ? (limit << 12) | 0xfff : limit; | 516 | return desc->g ? (limit << 12) | 0xfff : limit; |
512 | } | 517 | } |
513 | 518 | ||
514 | static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg) | ||
515 | { | ||
516 | ctxt->has_seg_override = true; | ||
517 | ctxt->seg_override = seg; | ||
518 | } | ||
519 | |||
520 | static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) | 519 | static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) |
521 | { | 520 | { |
522 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) | 521 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) |
@@ -525,14 +524,6 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) | |||
525 | return ctxt->ops->get_cached_segment_base(ctxt, seg); | 524 | return ctxt->ops->get_cached_segment_base(ctxt, seg); |
526 | } | 525 | } |
527 | 526 | ||
528 | static unsigned seg_override(struct x86_emulate_ctxt *ctxt) | ||
529 | { | ||
530 | if (!ctxt->has_seg_override) | ||
531 | return 0; | ||
532 | |||
533 | return ctxt->seg_override; | ||
534 | } | ||
535 | |||
536 | static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, | 527 | static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, |
537 | u32 error, bool valid) | 528 | u32 error, bool valid) |
538 | { | 529 | { |
@@ -651,7 +642,12 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, | |||
651 | if (!fetch && (desc.type & 8) && !(desc.type & 2)) | 642 | if (!fetch && (desc.type & 8) && !(desc.type & 2)) |
652 | goto bad; | 643 | goto bad; |
653 | lim = desc_limit_scaled(&desc); | 644 | lim = desc_limit_scaled(&desc); |
654 | if ((desc.type & 8) || !(desc.type & 4)) { | 645 | if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && |
646 | (ctxt->d & NoBigReal)) { | ||
647 | /* la is between zero and 0xffff */ | ||
648 | if (la > 0xffff || (u32)(la + size - 1) > 0xffff) | ||
649 | goto bad; | ||
650 | } else if ((desc.type & 8) || !(desc.type & 4)) { | ||
655 | /* expand-up segment */ | 651 | /* expand-up segment */ |
656 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) | 652 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) |
657 | goto bad; | 653 | goto bad; |
@@ -716,68 +712,71 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, | |||
716 | } | 712 | } |
717 | 713 | ||
718 | /* | 714 | /* |
719 | * Fetch the next byte of the instruction being emulated which is pointed to | 715 | * Prefetch the remaining bytes of the instruction without crossing page |
720 | * by ctxt->_eip, then increment ctxt->_eip. | ||
721 | * | ||
722 | * Also prefetch the remaining bytes of the instruction without crossing page | ||
723 | * boundary if they are not in fetch_cache yet. | 716 | * boundary if they are not in fetch_cache yet. |
724 | */ | 717 | */ |
725 | static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest) | 718 | static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) |
726 | { | 719 | { |
727 | struct fetch_cache *fc = &ctxt->fetch; | ||
728 | int rc; | 720 | int rc; |
729 | int size, cur_size; | 721 | unsigned size; |
730 | 722 | unsigned long linear; | |
731 | if (ctxt->_eip == fc->end) { | 723 | int cur_size = ctxt->fetch.end - ctxt->fetch.data; |
732 | unsigned long linear; | 724 | struct segmented_address addr = { .seg = VCPU_SREG_CS, |
733 | struct segmented_address addr = { .seg = VCPU_SREG_CS, | 725 | .ea = ctxt->eip + cur_size }; |
734 | .ea = ctxt->_eip }; | 726 | |
735 | cur_size = fc->end - fc->start; | 727 | size = 15UL ^ cur_size; |
736 | size = min(15UL - cur_size, | 728 | rc = __linearize(ctxt, addr, size, false, true, &linear); |
737 | PAGE_SIZE - offset_in_page(ctxt->_eip)); | 729 | if (unlikely(rc != X86EMUL_CONTINUE)) |
738 | rc = __linearize(ctxt, addr, size, false, true, &linear); | 730 | return rc; |
739 | if (unlikely(rc != X86EMUL_CONTINUE)) | ||
740 | return rc; | ||
741 | rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size, | ||
742 | size, &ctxt->exception); | ||
743 | if (unlikely(rc != X86EMUL_CONTINUE)) | ||
744 | return rc; | ||
745 | fc->end += size; | ||
746 | } | ||
747 | *dest = fc->data[ctxt->_eip - fc->start]; | ||
748 | ctxt->_eip++; | ||
749 | return X86EMUL_CONTINUE; | ||
750 | } | ||
751 | 731 | ||
752 | static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, | 732 | size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); |
753 | void *dest, unsigned size) | ||
754 | { | ||
755 | int rc; | ||
756 | 733 | ||
757 | /* x86 instructions are limited to 15 bytes. */ | 734 | /* |
758 | if (unlikely(ctxt->_eip + size - ctxt->eip > 15)) | 735 | * One instruction can only straddle two pages, |
736 | * and one has been loaded at the beginning of | ||
737 | * x86_decode_insn. So, if we still do not have | ||
738 | * enough bytes, we must have hit the 15-byte boundary. | ||
739 | */ | ||
740 | if (unlikely(size < op_size)) | ||
759 | return X86EMUL_UNHANDLEABLE; | 741 | return X86EMUL_UNHANDLEABLE; |
760 | while (size--) { | 742 | rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, |
761 | rc = do_insn_fetch_byte(ctxt, dest++); | 743 | size, &ctxt->exception); |
762 | if (rc != X86EMUL_CONTINUE) | 744 | if (unlikely(rc != X86EMUL_CONTINUE)) |
763 | return rc; | 745 | return rc; |
764 | } | 746 | ctxt->fetch.end += size; |
765 | return X86EMUL_CONTINUE; | 747 | return X86EMUL_CONTINUE; |
766 | } | 748 | } |
767 | 749 | ||
750 | static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, | ||
751 | unsigned size) | ||
752 | { | ||
753 | if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) | ||
754 | return __do_insn_fetch_bytes(ctxt, size); | ||
755 | else | ||
756 | return X86EMUL_CONTINUE; | ||
757 | } | ||
758 | |||
768 | /* Fetch next part of the instruction being emulated. */ | 759 | /* Fetch next part of the instruction being emulated. */ |
769 | #define insn_fetch(_type, _ctxt) \ | 760 | #define insn_fetch(_type, _ctxt) \ |
770 | ({ unsigned long _x; \ | 761 | ({ _type _x; \ |
771 | rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \ | 762 | \ |
763 | rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ | ||
772 | if (rc != X86EMUL_CONTINUE) \ | 764 | if (rc != X86EMUL_CONTINUE) \ |
773 | goto done; \ | 765 | goto done; \ |
774 | (_type)_x; \ | 766 | ctxt->_eip += sizeof(_type); \ |
767 | _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ | ||
768 | ctxt->fetch.ptr += sizeof(_type); \ | ||
769 | _x; \ | ||
775 | }) | 770 | }) |
776 | 771 | ||
777 | #define insn_fetch_arr(_arr, _size, _ctxt) \ | 772 | #define insn_fetch_arr(_arr, _size, _ctxt) \ |
778 | ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \ | 773 | ({ \ |
774 | rc = do_insn_fetch_bytes(_ctxt, _size); \ | ||
779 | if (rc != X86EMUL_CONTINUE) \ | 775 | if (rc != X86EMUL_CONTINUE) \ |
780 | goto done; \ | 776 | goto done; \ |
777 | ctxt->_eip += (_size); \ | ||
778 | memcpy(_arr, ctxt->fetch.ptr, _size); \ | ||
779 | ctxt->fetch.ptr += (_size); \ | ||
781 | }) | 780 | }) |
782 | 781 | ||
783 | /* | 782 | /* |
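The rewritten insn_fetch macro reads a whole typed immediate through an unaligned pointer cast instead of looping byte by byte. A standalone sketch of the same idea in GNU C (statement expressions, as the kernel itself uses); memcpy is the portable spelling of the patch's __aligned(1) read, and the 0x1234 result assumes a little-endian host, which x86 guarantees:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* pull a typed value out of a byte cursor and advance it */
#define FETCH(type, pp) __extension__ ({	\
	type _x;				\
	memcpy(&_x, *(pp), sizeof(_x));		\
	*(pp) += sizeof(_x);			\
	_x;					\
})

int main(void)
{
	unsigned char buf[] = { 0x66, 0x34, 0x12 };	/* prefix + imm16 */
	const unsigned char *p = buf;
	unsigned char op;
	uint16_t imm;

	op = FETCH(unsigned char, &p);
	imm = FETCH(uint16_t, &p);
	printf("0x%02x 0x%04x\n", op, imm);	/* 0x66 0x1234 */
	return 0;
}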
@@ -1063,19 +1062,17 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1063 | struct operand *op) | 1062 | struct operand *op) |
1064 | { | 1063 | { |
1065 | u8 sib; | 1064 | u8 sib; |
1066 | int index_reg = 0, base_reg = 0, scale; | 1065 | int index_reg, base_reg, scale; |
1067 | int rc = X86EMUL_CONTINUE; | 1066 | int rc = X86EMUL_CONTINUE; |
1068 | ulong modrm_ea = 0; | 1067 | ulong modrm_ea = 0; |
1069 | 1068 | ||
1070 | if (ctxt->rex_prefix) { | 1069 | ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ |
1071 | ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */ | 1070 | index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ |
1072 | index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */ | 1071 | base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ |
1073 | ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */ | ||
1074 | } | ||
1075 | 1072 | ||
1076 | ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6; | 1073 | ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; |
1077 | ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; | 1074 | ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; |
1078 | ctxt->modrm_rm |= (ctxt->modrm & 0x07); | 1075 | ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); |
1079 | ctxt->modrm_seg = VCPU_SREG_DS; | 1076 | ctxt->modrm_seg = VCPU_SREG_DS; |
1080 | 1077 | ||
1081 | if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { | 1078 | if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { |
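decode_modrm now derives the REX register extensions without a branch: ctxt->rex_prefix is zero when no prefix was seen, so shifting REX.R, REX.X and REX.B straight into bit position 3 works unconditionally. The bit arithmetic in a checkable form:

#include <assert.h>

/* REX = 0100WRXB; widen reg/index/base to 4 bits without branching.
 * A rex byte of 0 (no prefix) naturally yields no extension. */
static void rex_extend(unsigned char rex, int *reg, int *index, int *base)
{
	*reg   |= (rex << 1) & 8;	/* REX.R becomes bit 3 of reg */
	*index |= (rex << 2) & 8;	/* REX.X becomes bit 3 of index */
	*base  |= (rex << 3) & 8;	/* REX.B becomes bit 3 of base */
}

int main(void)
{
	int reg = 1, index = 2, base = 3;

	rex_extend(0x45, &reg, &index, &base);	/* REX.R and REX.B set */
	assert(reg == 9 && index == 2 && base == 11);
	return 0;
}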
@@ -1093,7 +1090,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1093 | if (ctxt->d & Mmx) { | 1090 | if (ctxt->d & Mmx) { |
1094 | op->type = OP_MM; | 1091 | op->type = OP_MM; |
1095 | op->bytes = 8; | 1092 | op->bytes = 8; |
1096 | op->addr.xmm = ctxt->modrm_rm & 7; | 1093 | op->addr.mm = ctxt->modrm_rm & 7; |
1097 | return rc; | 1094 | return rc; |
1098 | } | 1095 | } |
1099 | fetch_register_operand(op); | 1096 | fetch_register_operand(op); |
@@ -1190,6 +1187,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1190 | } | 1187 | } |
1191 | } | 1188 | } |
1192 | op->addr.mem.ea = modrm_ea; | 1189 | op->addr.mem.ea = modrm_ea; |
1190 | if (ctxt->ad_bytes != 8) | ||
1191 | ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; | ||
1192 | |||
1193 | done: | 1193 | done: |
1194 | return rc; | 1194 | return rc; |
1195 | } | 1195 | } |
@@ -1220,12 +1220,14 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) | |||
1220 | long sv = 0, mask; | 1220 | long sv = 0, mask; |
1221 | 1221 | ||
1222 | if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { | 1222 | if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { |
1223 | mask = ~(ctxt->dst.bytes * 8 - 1); | 1223 | mask = ~((long)ctxt->dst.bytes * 8 - 1); |
1224 | 1224 | ||
1225 | if (ctxt->src.bytes == 2) | 1225 | if (ctxt->src.bytes == 2) |
1226 | sv = (s16)ctxt->src.val & (s16)mask; | 1226 | sv = (s16)ctxt->src.val & (s16)mask; |
1227 | else if (ctxt->src.bytes == 4) | 1227 | else if (ctxt->src.bytes == 4) |
1228 | sv = (s32)ctxt->src.val & (s32)mask; | 1228 | sv = (s32)ctxt->src.val & (s32)mask; |
1229 | else | ||
1230 | sv = (s64)ctxt->src.val & (s64)mask; | ||
1229 | 1231 | ||
1230 | ctxt->dst.addr.mem.ea += (sv >> 3); | 1232 | ctxt->dst.addr.mem.ea += (sv >> 3); |
1231 | } | 1233 | } |
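The cast added to fetch_bit_operand is a classic integer-promotion fix: ~(ctxt->dst.bytes * 8 - 1) is computed in 32-bit unsigned arithmetic, so once stored in a long it is missing the upper 32 mask bits, which breaks the 64-bit bit-offset case the new s64 branch handles. A minimal demonstration on an LP64 host:

#include <stdio.h>

int main(void)
{
	unsigned int bytes = 8;			/* 64-bit operand */
	long bad  = ~(bytes * 8 - 1);		/* ~ done in 32 bits */
	long good = ~((long)bytes * 8 - 1);	/* ~ done in 64 bits */

	/* bad = 0xffffffc0, good = 0xffffffffffffffc0 */
	printf("bad=%#lx good=%#lx\n",
	       (unsigned long)bad, (unsigned long)good);
	return 0;
}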
@@ -1315,8 +1317,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1315 | in_page = (ctxt->eflags & EFLG_DF) ? | 1317 | in_page = (ctxt->eflags & EFLG_DF) ? |
1316 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : | 1318 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : |
1317 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); | 1319 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); |
1318 | n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size, | 1320 | n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); |
1319 | count); | ||
1320 | if (n == 0) | 1321 | if (n == 0) |
1321 | n = 1; | 1322 | n = 1; |
1322 | rc->pos = rc->end = 0; | 1323 | rc->pos = rc->end = 0; |
@@ -1358,17 +1359,19 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, | |||
1358 | u16 selector, struct desc_ptr *dt) | 1359 | u16 selector, struct desc_ptr *dt) |
1359 | { | 1360 | { |
1360 | const struct x86_emulate_ops *ops = ctxt->ops; | 1361 | const struct x86_emulate_ops *ops = ctxt->ops; |
1362 | u32 base3 = 0; | ||
1361 | 1363 | ||
1362 | if (selector & 1 << 2) { | 1364 | if (selector & 1 << 2) { |
1363 | struct desc_struct desc; | 1365 | struct desc_struct desc; |
1364 | u16 sel; | 1366 | u16 sel; |
1365 | 1367 | ||
1366 | memset (dt, 0, sizeof *dt); | 1368 | memset (dt, 0, sizeof *dt); |
1367 | if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR)) | 1369 | if (!ops->get_segment(ctxt, &sel, &desc, &base3, |
1370 | VCPU_SREG_LDTR)) | ||
1368 | return; | 1371 | return; |
1369 | 1372 | ||
1370 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ | 1373 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ |
1371 | dt->address = get_desc_base(&desc); | 1374 | dt->address = get_desc_base(&desc) | ((u64)base3 << 32); |
1372 | } else | 1375 | } else |
1373 | ops->get_gdt(ctxt, dt); | 1376 | ops->get_gdt(ctxt, dt); |
1374 | } | 1377 | } |
@@ -1422,6 +1425,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1422 | ulong desc_addr; | 1425 | ulong desc_addr; |
1423 | int ret; | 1426 | int ret; |
1424 | u16 dummy; | 1427 | u16 dummy; |
1428 | u32 base3 = 0; | ||
1425 | 1429 | ||
1426 | memset(&seg_desc, 0, sizeof seg_desc); | 1430 | memset(&seg_desc, 0, sizeof seg_desc); |
1427 | 1431 | ||
@@ -1538,9 +1542,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1538 | ret = write_segment_descriptor(ctxt, selector, &seg_desc); | 1542 | ret = write_segment_descriptor(ctxt, selector, &seg_desc); |
1539 | if (ret != X86EMUL_CONTINUE) | 1543 | if (ret != X86EMUL_CONTINUE) |
1540 | return ret; | 1544 | return ret; |
1545 | } else if (ctxt->mode == X86EMUL_MODE_PROT64) { | ||
1546 | ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, | ||
1547 | sizeof(base3), &ctxt->exception); | ||
1548 | if (ret != X86EMUL_CONTINUE) | ||
1549 | return ret; | ||
1541 | } | 1550 | } |
1542 | load: | 1551 | load: |
1543 | ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg); | 1552 | ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); |
1544 | return X86EMUL_CONTINUE; | 1553 | return X86EMUL_CONTINUE; |
1545 | exception: | 1554 | exception: |
1546 | emulate_exception(ctxt, err_vec, err_code, true); | 1555 | emulate_exception(ctxt, err_vec, err_code, true); |
@@ -1575,34 +1584,28 @@ static void write_register_operand(struct operand *op) | |||
1575 | 1584 | ||
1576 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) | 1585 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) |
1577 | { | 1586 | { |
1578 | int rc; | ||
1579 | |||
1580 | switch (op->type) { | 1587 | switch (op->type) { |
1581 | case OP_REG: | 1588 | case OP_REG: |
1582 | write_register_operand(op); | 1589 | write_register_operand(op); |
1583 | break; | 1590 | break; |
1584 | case OP_MEM: | 1591 | case OP_MEM: |
1585 | if (ctxt->lock_prefix) | 1592 | if (ctxt->lock_prefix) |
1586 | rc = segmented_cmpxchg(ctxt, | 1593 | return segmented_cmpxchg(ctxt, |
1594 | op->addr.mem, | ||
1595 | &op->orig_val, | ||
1596 | &op->val, | ||
1597 | op->bytes); | ||
1598 | else | ||
1599 | return segmented_write(ctxt, | ||
1587 | op->addr.mem, | 1600 | op->addr.mem, |
1588 | &op->orig_val, | ||
1589 | &op->val, | 1601 | &op->val, |
1590 | op->bytes); | 1602 | op->bytes); |
1591 | else | ||
1592 | rc = segmented_write(ctxt, | ||
1593 | op->addr.mem, | ||
1594 | &op->val, | ||
1595 | op->bytes); | ||
1596 | if (rc != X86EMUL_CONTINUE) | ||
1597 | return rc; | ||
1598 | break; | 1603 | break; |
1599 | case OP_MEM_STR: | 1604 | case OP_MEM_STR: |
1600 | rc = segmented_write(ctxt, | 1605 | return segmented_write(ctxt, |
1601 | op->addr.mem, | 1606 | op->addr.mem, |
1602 | op->data, | 1607 | op->data, |
1603 | op->bytes * op->count); | 1608 | op->bytes * op->count); |
1604 | if (rc != X86EMUL_CONTINUE) | ||
1605 | return rc; | ||
1606 | break; | 1609 | break; |
1607 | case OP_XMM: | 1610 | case OP_XMM: |
1608 | write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); | 1611 | write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); |
@@ -1671,7 +1674,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1671 | return rc; | 1674 | return rc; |
1672 | 1675 | ||
1673 | change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | 1676 | change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF |
1674 | | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID; | 1677 | | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; |
1675 | 1678 | ||
1676 | switch(ctxt->mode) { | 1679 | switch(ctxt->mode) { |
1677 | case X86EMUL_MODE_PROT64: | 1680 | case X86EMUL_MODE_PROT64: |
@@ -1754,6 +1757,9 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) | |||
1754 | if (rc != X86EMUL_CONTINUE) | 1757 | if (rc != X86EMUL_CONTINUE) |
1755 | return rc; | 1758 | return rc; |
1756 | 1759 | ||
1760 | if (ctxt->modrm_reg == VCPU_SREG_SS) | ||
1761 | ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; | ||
1762 | |||
1757 | rc = load_segment_descriptor(ctxt, (u16)selector, seg); | 1763 | rc = load_segment_descriptor(ctxt, (u16)selector, seg); |
1758 | return rc; | 1764 | return rc; |
1759 | } | 1765 | } |
@@ -1991,6 +1997,9 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) | |||
1991 | { | 1997 | { |
1992 | u64 old = ctxt->dst.orig_val64; | 1998 | u64 old = ctxt->dst.orig_val64; |
1993 | 1999 | ||
2000 | if (ctxt->dst.bytes == 16) | ||
2001 | return X86EMUL_UNHANDLEABLE; | ||
2002 | |||
1994 | if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || | 2003 | if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || |
1995 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { | 2004 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { |
1996 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); | 2005 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); |
@@ -2017,6 +2026,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2017 | { | 2026 | { |
2018 | int rc; | 2027 | int rc; |
2019 | unsigned long cs; | 2028 | unsigned long cs; |
2029 | int cpl = ctxt->ops->cpl(ctxt); | ||
2020 | 2030 | ||
2021 | rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); | 2031 | rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); |
2022 | if (rc != X86EMUL_CONTINUE) | 2032 | if (rc != X86EMUL_CONTINUE) |
@@ -2026,6 +2036,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2026 | rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); | 2036 | rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); |
2027 | if (rc != X86EMUL_CONTINUE) | 2037 | if (rc != X86EMUL_CONTINUE) |
2028 | return rc; | 2038 | return rc; |
2039 | /* Outer-privilege level return is not implemented */ | ||
2040 | if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) | ||
2041 | return X86EMUL_UNHANDLEABLE; | ||
2029 | rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); | 2042 | rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); |
2030 | return rc; | 2043 | return rc; |
2031 | } | 2044 | } |
@@ -2044,8 +2057,10 @@ static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) | |||
2044 | static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) | 2057 | static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) |
2045 | { | 2058 | { |
2046 | /* Save real source value, then compare EAX against destination. */ | 2059 | /* Save real source value, then compare EAX against destination. */ |
2060 | ctxt->dst.orig_val = ctxt->dst.val; | ||
2061 | ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); | ||
2047 | ctxt->src.orig_val = ctxt->src.val; | 2062 | ctxt->src.orig_val = ctxt->src.val; |
2048 | ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX); | 2063 | ctxt->src.val = ctxt->dst.orig_val; |
2049 | fastop(ctxt, em_cmp); | 2064 | fastop(ctxt, em_cmp); |
2050 | 2065 | ||
2051 | if (ctxt->eflags & EFLG_ZF) { | 2066 | if (ctxt->eflags & EFLG_ZF) { |
@@ -2055,6 +2070,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) | |||
2055 | /* Failure: write the value we saw to EAX. */ | 2070 | /* Failure: write the value we saw to EAX. */ |
2056 | ctxt->dst.type = OP_REG; | 2071 | ctxt->dst.type = OP_REG; |
2057 | ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); | 2072 | ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); |
2073 | ctxt->dst.val = ctxt->dst.orig_val; | ||
2058 | } | 2074 | } |
2059 | return X86EMUL_CONTINUE; | 2075 | return X86EMUL_CONTINUE; |
2060 | } | 2076 | } |
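The em_cmpxchg change makes the failure path architectural: CMPXCHG compares the accumulator with the destination and, on a mismatch, must load the accumulator with the destination value that was actually read (hence saving it in dst.orig_val first). A standalone model of the semantics the patch implements:

#include <stdbool.h>
#include <stdio.h>

/* if (*dst == *rax) { ZF=1; *dst = src; }
 * else              { ZF=0; *rax = *dst; }  <- must update RAX */
static bool emulate_cmpxchg(unsigned long *dst, unsigned long *rax,
			    unsigned long src)
{
	unsigned long orig = *dst;	/* dst.orig_val in the patch */

	if (orig == *rax) {
		*dst = src;
		return true;
	}
	*rax = orig;			/* the value we actually saw */
	return false;
}

int main(void)
{
	unsigned long mem = 7, rax = 5;
	bool ok = emulate_cmpxchg(&mem, &rax, 9);

	printf("ok=%d mem=%lu rax=%lu\n", ok, mem, rax);
	/* ok=0 mem=7 rax=7 */
	return 0;
}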
@@ -2194,7 +2210,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2194 | *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; | 2210 | *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; |
2195 | if (efer & EFER_LMA) { | 2211 | if (efer & EFER_LMA) { |
2196 | #ifdef CONFIG_X86_64 | 2212 | #ifdef CONFIG_X86_64 |
2197 | *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF; | 2213 | *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; |
2198 | 2214 | ||
2199 | ops->get_msr(ctxt, | 2215 | ops->get_msr(ctxt, |
2200 | ctxt->mode == X86EMUL_MODE_PROT64 ? | 2216 | ctxt->mode == X86EMUL_MODE_PROT64 ? |
@@ -2202,14 +2218,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2202 | ctxt->_eip = msr_data; | 2218 | ctxt->_eip = msr_data; |
2203 | 2219 | ||
2204 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); | 2220 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); |
2205 | ctxt->eflags &= ~(msr_data | EFLG_RF); | 2221 | ctxt->eflags &= ~msr_data; |
2206 | #endif | 2222 | #endif |
2207 | } else { | 2223 | } else { |
2208 | /* legacy mode */ | 2224 | /* legacy mode */ |
2209 | ops->get_msr(ctxt, MSR_STAR, &msr_data); | 2225 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
2210 | ctxt->_eip = (u32)msr_data; | 2226 | ctxt->_eip = (u32)msr_data; |
2211 | 2227 | ||
2212 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); | 2228 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); |
2213 | } | 2229 | } |
2214 | 2230 | ||
2215 | return X86EMUL_CONTINUE; | 2231 | return X86EMUL_CONTINUE; |
@@ -2258,7 +2274,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) | |||
2258 | break; | 2274 | break; |
2259 | } | 2275 | } |
2260 | 2276 | ||
2261 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); | 2277 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); |
2262 | cs_sel = (u16)msr_data; | 2278 | cs_sel = (u16)msr_data; |
2263 | cs_sel &= ~SELECTOR_RPL_MASK; | 2279 | cs_sel &= ~SELECTOR_RPL_MASK; |
2264 | ss_sel = cs_sel + 8; | 2280 | ss_sel = cs_sel + 8; |
@@ -2964,7 +2980,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt) | |||
2964 | 2980 | ||
2965 | static int em_mov(struct x86_emulate_ctxt *ctxt) | 2981 | static int em_mov(struct x86_emulate_ctxt *ctxt) |
2966 | { | 2982 | { |
2967 | memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes); | 2983 | memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); |
2968 | return X86EMUL_CONTINUE; | 2984 | return X86EMUL_CONTINUE; |
2969 | } | 2985 | } |
2970 | 2986 | ||
@@ -3221,7 +3237,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt) | |||
3221 | 3237 | ||
3222 | static int em_smsw(struct x86_emulate_ctxt *ctxt) | 3238 | static int em_smsw(struct x86_emulate_ctxt *ctxt) |
3223 | { | 3239 | { |
3224 | ctxt->dst.bytes = 2; | 3240 | if (ctxt->dst.type == OP_MEM) |
3241 | ctxt->dst.bytes = 2; | ||
3225 | ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); | 3242 | ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); |
3226 | return X86EMUL_CONTINUE; | 3243 | return X86EMUL_CONTINUE; |
3227 | } | 3244 | } |
@@ -3496,7 +3513,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt) | |||
3496 | u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); | 3513 | u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); |
3497 | 3514 | ||
3498 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || | 3515 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || |
3499 | (rcx > 3)) | 3516 | ctxt->ops->check_pmc(ctxt, rcx)) |
3500 | return emulate_gp(ctxt, 0); | 3517 | return emulate_gp(ctxt, 0); |
3501 | 3518 | ||
3502 | return X86EMUL_CONTINUE; | 3519 | return X86EMUL_CONTINUE; |
@@ -3521,9 +3538,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) | |||
3521 | } | 3538 | } |
3522 | 3539 | ||
3523 | #define D(_y) { .flags = (_y) } | 3540 | #define D(_y) { .flags = (_y) } |
3524 | #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i } | 3541 | #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } |
3525 | #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \ | 3542 | #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ |
3526 | .check_perm = (_p) } | 3543 | .intercept = x86_intercept_##_i, .check_perm = (_p) } |
3527 | #define N D(NotImpl) | 3544 | #define N D(NotImpl) |
3528 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } | 3545 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } |
3529 | #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } | 3546 | #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } |
@@ -3532,10 +3549,10 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) | |||
3532 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } | 3549 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } |
3533 | #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } | 3550 | #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } |
3534 | #define II(_f, _e, _i) \ | 3551 | #define II(_f, _e, _i) \ |
3535 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } | 3552 | { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } |
3536 | #define IIP(_f, _e, _i, _p) \ | 3553 | #define IIP(_f, _e, _i, _p) \ |
3537 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \ | 3554 | { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ |
3538 | .check_perm = (_p) } | 3555 | .intercept = x86_intercept_##_i, .check_perm = (_p) } |
3539 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } | 3556 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } |
3540 | 3557 | ||
3541 | #define D2bv(_f) D((_f) | ByteOp), D(_f) | 3558 | #define D2bv(_f) D((_f) | ByteOp), D(_f) |
@@ -3634,8 +3651,8 @@ static const struct opcode group6[] = { | |||
3634 | }; | 3651 | }; |
3635 | 3652 | ||
3636 | static const struct group_dual group7 = { { | 3653 | static const struct group_dual group7 = { { |
3637 | II(Mov | DstMem | Priv, em_sgdt, sgdt), | 3654 | II(Mov | DstMem, em_sgdt, sgdt), |
3638 | II(Mov | DstMem | Priv, em_sidt, sidt), | 3655 | II(Mov | DstMem, em_sidt, sidt), |
3639 | II(SrcMem | Priv, em_lgdt, lgdt), | 3656 | II(SrcMem | Priv, em_lgdt, lgdt), |
3640 | II(SrcMem | Priv, em_lidt, lidt), | 3657 | II(SrcMem | Priv, em_lidt, lidt), |
3641 | II(SrcNone | DstMem | Mov, em_smsw, smsw), N, | 3658 | II(SrcNone | DstMem | Mov, em_smsw, smsw), N, |
@@ -3899,7 +3916,7 @@ static const struct opcode twobyte_table[256] = { | |||
3899 | N, N, | 3916 | N, N, |
3900 | N, N, N, N, N, N, N, N, | 3917 | N, N, N, N, N, N, N, N, |
3901 | /* 0x40 - 0x4F */ | 3918 | /* 0x40 - 0x4F */ |
3902 | X16(D(DstReg | SrcMem | ModRM | Mov)), | 3919 | X16(D(DstReg | SrcMem | ModRM)), |
3903 | /* 0x50 - 0x5F */ | 3920 | /* 0x50 - 0x5F */ |
3904 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | 3921 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, |
3905 | /* 0x60 - 0x6F */ | 3922 | /* 0x60 - 0x6F */ |
@@ -4061,12 +4078,12 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4061 | mem_common: | 4078 | mem_common: |
4062 | *op = ctxt->memop; | 4079 | *op = ctxt->memop; |
4063 | ctxt->memopp = op; | 4080 | ctxt->memopp = op; |
4064 | if ((ctxt->d & BitOp) && op == &ctxt->dst) | 4081 | if (ctxt->d & BitOp) |
4065 | fetch_bit_operand(ctxt); | 4082 | fetch_bit_operand(ctxt); |
4066 | op->orig_val = op->val; | 4083 | op->orig_val = op->val; |
4067 | break; | 4084 | break; |
4068 | case OpMem64: | 4085 | case OpMem64: |
4069 | ctxt->memop.bytes = 8; | 4086 | ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; |
4070 | goto mem_common; | 4087 | goto mem_common; |
4071 | case OpAcc: | 4088 | case OpAcc: |
4072 | op->type = OP_REG; | 4089 | op->type = OP_REG; |
@@ -4150,7 +4167,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4150 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; | 4167 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
4151 | op->addr.mem.ea = | 4168 | op->addr.mem.ea = |
4152 | register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); | 4169 | register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); |
4153 | op->addr.mem.seg = seg_override(ctxt); | 4170 | op->addr.mem.seg = ctxt->seg_override; |
4154 | op->val = 0; | 4171 | op->val = 0; |
4155 | op->count = 1; | 4172 | op->count = 1; |
4156 | break; | 4173 | break; |
@@ -4161,7 +4178,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4161 | register_address(ctxt, | 4178 | register_address(ctxt, |
4162 | reg_read(ctxt, VCPU_REGS_RBX) + | 4179 | reg_read(ctxt, VCPU_REGS_RBX) + |
4163 | (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); | 4180 | (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); |
4164 | op->addr.mem.seg = seg_override(ctxt); | 4181 | op->addr.mem.seg = ctxt->seg_override; |
4165 | op->val = 0; | 4182 | op->val = 0; |
4166 | break; | 4183 | break; |
4167 | case OpImmFAddr: | 4184 | case OpImmFAddr: |
@@ -4208,16 +4225,22 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4208 | int mode = ctxt->mode; | 4225 | int mode = ctxt->mode; |
4209 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; | 4226 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; |
4210 | bool op_prefix = false; | 4227 | bool op_prefix = false; |
4228 | bool has_seg_override = false; | ||
4211 | struct opcode opcode; | 4229 | struct opcode opcode; |
4212 | 4230 | ||
4213 | ctxt->memop.type = OP_NONE; | 4231 | ctxt->memop.type = OP_NONE; |
4214 | ctxt->memopp = NULL; | 4232 | ctxt->memopp = NULL; |
4215 | ctxt->_eip = ctxt->eip; | 4233 | ctxt->_eip = ctxt->eip; |
4216 | ctxt->fetch.start = ctxt->_eip; | 4234 | ctxt->fetch.ptr = ctxt->fetch.data; |
4217 | ctxt->fetch.end = ctxt->fetch.start + insn_len; | 4235 | ctxt->fetch.end = ctxt->fetch.data + insn_len; |
4218 | ctxt->opcode_len = 1; | 4236 | ctxt->opcode_len = 1; |
4219 | if (insn_len > 0) | 4237 | if (insn_len > 0) |
4220 | memcpy(ctxt->fetch.data, insn, insn_len); | 4238 | memcpy(ctxt->fetch.data, insn, insn_len); |
4239 | else { | ||
4240 | rc = __do_insn_fetch_bytes(ctxt, 1); | ||
4241 | if (rc != X86EMUL_CONTINUE) | ||
4242 | return rc; | ||
4243 | } | ||
4221 | 4244 | ||
4222 | switch (mode) { | 4245 | switch (mode) { |
4223 | case X86EMUL_MODE_REAL: | 4246 | case X86EMUL_MODE_REAL: |
@@ -4261,11 +4284,13 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4261 | case 0x2e: /* CS override */ | 4284 | case 0x2e: /* CS override */ |
4262 | case 0x36: /* SS override */ | 4285 | case 0x36: /* SS override */ |
4263 | case 0x3e: /* DS override */ | 4286 | case 0x3e: /* DS override */ |
4264 | set_seg_override(ctxt, (ctxt->b >> 3) & 3); | 4287 | has_seg_override = true; |
4288 | ctxt->seg_override = (ctxt->b >> 3) & 3; | ||
4265 | break; | 4289 | break; |
4266 | case 0x64: /* FS override */ | 4290 | case 0x64: /* FS override */ |
4267 | case 0x65: /* GS override */ | 4291 | case 0x65: /* GS override */ |
4268 | set_seg_override(ctxt, ctxt->b & 7); | 4292 | has_seg_override = true; |
4293 | ctxt->seg_override = ctxt->b & 7; | ||
4269 | break; | 4294 | break; |
4270 | case 0x40 ... 0x4f: /* REX */ | 4295 | case 0x40 ... 0x4f: /* REX */ |
4271 | if (mode != X86EMUL_MODE_PROT64) | 4296 | if (mode != X86EMUL_MODE_PROT64) |
@@ -4314,6 +4339,13 @@ done_prefixes: | |||
4314 | if (ctxt->d & ModRM) | 4339 | if (ctxt->d & ModRM) |
4315 | ctxt->modrm = insn_fetch(u8, ctxt); | 4340 | ctxt->modrm = insn_fetch(u8, ctxt); |
4316 | 4341 | ||
4342 | /* vex-prefix instructions are not implemented */ | ||
4343 | if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && | ||
4344 | (mode == X86EMUL_MODE_PROT64 || | ||
4345 | (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { | ||
4346 | ctxt->d = NotImpl; | ||
4347 | } | ||
4348 | |||
4317 | while (ctxt->d & GroupMask) { | 4349 | while (ctxt->d & GroupMask) { |
4318 | switch (ctxt->d & GroupMask) { | 4350 | switch (ctxt->d & GroupMask) { |
4319 | case Group: | 4351 | case Group: |
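The VEX bail-out added above leans on how 0xc4/0xc5 are disambiguated: in 64-bit mode they are always VEX prefixes, while in legacy protected mode they encode LES/LDS, which require a memory operand, so a following byte with its top bit set (a register-form ModRM) is read as VEX instead. The predicate in isolation, as the patch applies it:

#include <stdbool.h>
#include <stdio.h>

static bool is_vex(unsigned char opc, unsigned char next, bool long_mode)
{
	if (opc != 0xc4 && opc != 0xc5)
		return false;
	/* legacy modes: mod=11 cannot be LES/LDS, so treat as VEX */
	return long_mode || (next & 0x80);
}

int main(void)
{
	printf("%d %d\n",
	       is_vex(0xc5, 0xf8, false),	/* 1: VEX (e.g. vzeroupper) */
	       is_vex(0xc5, 0x18, false));	/* 0: LDS with memory operand */
	return 0;
}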
@@ -4356,49 +4388,59 @@ done_prefixes: | |||
4356 | ctxt->d |= opcode.flags; | 4388 | ctxt->d |= opcode.flags; |
4357 | } | 4389 | } |
4358 | 4390 | ||
4359 | ctxt->execute = opcode.u.execute; | ||
4360 | ctxt->check_perm = opcode.check_perm; | ||
4361 | ctxt->intercept = opcode.intercept; | ||
4362 | |||
4363 | /* Unrecognised? */ | 4391 | /* Unrecognised? */ |
4364 | if (ctxt->d == 0 || (ctxt->d & NotImpl)) | 4392 | if (ctxt->d == 0) |
4365 | return EMULATION_FAILED; | 4393 | return EMULATION_FAILED; |
4366 | 4394 | ||
4367 | if (!(ctxt->d & EmulateOnUD) && ctxt->ud) | 4395 | ctxt->execute = opcode.u.execute; |
4368 | return EMULATION_FAILED; | ||
4369 | 4396 | ||
4370 | if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) | 4397 | if (unlikely(ctxt->d & |
4371 | ctxt->op_bytes = 8; | 4398 | (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { |
4399 | /* | ||
4400 | * These are copied unconditionally here, and checked unconditionally | ||
4401 | * in x86_emulate_insn. | ||
4402 | */ | ||
4403 | ctxt->check_perm = opcode.check_perm; | ||
4404 | ctxt->intercept = opcode.intercept; | ||
4405 | |||
4406 | if (ctxt->d & NotImpl) | ||
4407 | return EMULATION_FAILED; | ||
4408 | |||
4409 | if (!(ctxt->d & EmulateOnUD) && ctxt->ud) | ||
4410 | return EMULATION_FAILED; | ||
4372 | 4411 | ||
4373 | if (ctxt->d & Op3264) { | 4412 | if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) |
4374 | if (mode == X86EMUL_MODE_PROT64) | ||
4375 | ctxt->op_bytes = 8; | 4413 | ctxt->op_bytes = 8; |
4376 | else | ||
4377 | ctxt->op_bytes = 4; | ||
4378 | } | ||
4379 | 4414 | ||
4380 | if (ctxt->d & Sse) | 4415 | if (ctxt->d & Op3264) { |
4381 | ctxt->op_bytes = 16; | 4416 | if (mode == X86EMUL_MODE_PROT64) |
4382 | else if (ctxt->d & Mmx) | 4417 | ctxt->op_bytes = 8; |
4383 | ctxt->op_bytes = 8; | 4418 | else |
4419 | ctxt->op_bytes = 4; | ||
4420 | } | ||
4421 | |||
4422 | if (ctxt->d & Sse) | ||
4423 | ctxt->op_bytes = 16; | ||
4424 | else if (ctxt->d & Mmx) | ||
4425 | ctxt->op_bytes = 8; | ||
4426 | } | ||
4384 | 4427 | ||
4385 | /* ModRM and SIB bytes. */ | 4428 | /* ModRM and SIB bytes. */ |
4386 | if (ctxt->d & ModRM) { | 4429 | if (ctxt->d & ModRM) { |
4387 | rc = decode_modrm(ctxt, &ctxt->memop); | 4430 | rc = decode_modrm(ctxt, &ctxt->memop); |
4388 | if (!ctxt->has_seg_override) | 4431 | if (!has_seg_override) { |
4389 | set_seg_override(ctxt, ctxt->modrm_seg); | 4432 | has_seg_override = true; |
4433 | ctxt->seg_override = ctxt->modrm_seg; | ||
4434 | } | ||
4390 | } else if (ctxt->d & MemAbs) | 4435 | } else if (ctxt->d & MemAbs) |
4391 | rc = decode_abs(ctxt, &ctxt->memop); | 4436 | rc = decode_abs(ctxt, &ctxt->memop); |
4392 | if (rc != X86EMUL_CONTINUE) | 4437 | if (rc != X86EMUL_CONTINUE) |
4393 | goto done; | 4438 | goto done; |
4394 | 4439 | ||
4395 | if (!ctxt->has_seg_override) | 4440 | if (!has_seg_override) |
4396 | set_seg_override(ctxt, VCPU_SREG_DS); | 4441 | ctxt->seg_override = VCPU_SREG_DS; |
4397 | |||
4398 | ctxt->memop.addr.mem.seg = seg_override(ctxt); | ||
4399 | 4442 | ||
4400 | if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8) | 4443 | ctxt->memop.addr.mem.seg = ctxt->seg_override; |
4401 | ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; | ||
4402 | 4444 | ||
4403 | /* | 4445 | /* |
4404 | * Decode and fetch the source operand: register, memory | 4446 | * Decode and fetch the source operand: register, memory |
@@ -4420,7 +4462,7 @@ done_prefixes: | |||
4420 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); | 4462 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); |
4421 | 4463 | ||
4422 | done: | 4464 | done: |
4423 | if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative) | 4465 | if (ctxt->rip_relative) |
4424 | ctxt->memopp->addr.mem.ea += ctxt->_eip; | 4466 | ctxt->memopp->addr.mem.ea += ctxt->_eip; |
4425 | 4467 | ||
4426 | return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; | 4468 | return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; |
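The restructuring in this hunk is a performance pattern more than a behavior change: several individually rare decode flags are folded into a single mask, so the common path pays one predicted-untaken branch and the per-flag handling moves inside the unlikely() block. A generic illustration of the idiom (flag names invented for the sketch):

#define F_SLOW_A   (1u << 0)
#define F_SLOW_B   (1u << 1)
#define F_SLOW_C   (1u << 2)
#define F_SLOW_ALL (F_SLOW_A | F_SLOW_B | F_SLOW_C)

static void handle_flags(unsigned int d)
{
        if (__builtin_expect(d & F_SLOW_ALL, 0)) {      /* one test for all rare bits */
                if (d & F_SLOW_A) { /* ... slow-path work ... */ }
                if (d & F_SLOW_B) { /* ... */ }
                if (d & F_SLOW_C) { /* ... */ }
        }
}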
@@ -4495,6 +4537,16 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) | |||
4495 | return X86EMUL_CONTINUE; | 4537 | return X86EMUL_CONTINUE; |
4496 | } | 4538 | } |
4497 | 4539 | ||
4540 | void init_decode_cache(struct x86_emulate_ctxt *ctxt) | ||
4541 | { | ||
4542 | memset(&ctxt->rip_relative, 0, | ||
4543 | (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); | ||
4544 | |||
4545 | ctxt->io_read.pos = 0; | ||
4546 | ctxt->io_read.end = 0; | ||
4547 | ctxt->mem_read.end = 0; | ||
4548 | } | ||
4549 | |||
4498 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | 4550 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) |
4499 | { | 4551 | { |
4500 | const struct x86_emulate_ops *ops = ctxt->ops; | 4552 | const struct x86_emulate_ops *ops = ctxt->ops; |
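init_decode_cache(), added above, clears a contiguous run of per-instruction fields with a single memset bounded by two member addresses; this relies on declaration order, so the fields from rip_relative up to (but not including) modrm must stay adjacent in the struct. The idiom in isolation, with invented struct and field names:

#include <string.h>

struct cache {
        int keep;               /* survives a reset */
        int first;              /* first field cleared */
        int mid;
        int last;               /* last field cleared */
        int boundary;           /* first field NOT cleared */
};

static void reset(struct cache *c)
{
        /* zero [first, boundary): depends on member layout */
        memset(&c->first, 0, (char *)&c->boundary - (char *)&c->first);
}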
@@ -4503,12 +4555,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4503 | 4555 | ||
4504 | ctxt->mem_read.pos = 0; | 4556 | ctxt->mem_read.pos = 0; |
4505 | 4557 | ||
4506 | if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || | ||
4507 | (ctxt->d & Undefined)) { | ||
4508 | rc = emulate_ud(ctxt); | ||
4509 | goto done; | ||
4510 | } | ||
4511 | |||
4512 | /* LOCK prefix is allowed only with some instructions */ | 4558 | /* LOCK prefix is allowed only with some instructions */ |
4513 | if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { | 4559 | if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { |
4514 | rc = emulate_ud(ctxt); | 4560 | rc = emulate_ud(ctxt); |
@@ -4520,69 +4566,82 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4520 | goto done; | 4566 | goto done; |
4521 | } | 4567 | } |
4522 | 4568 | ||
4523 | if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) | 4569 | if (unlikely(ctxt->d & |
4524 | || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { | 4570 | (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { |
4525 | rc = emulate_ud(ctxt); | 4571 | if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || |
4526 | goto done; | 4572 | (ctxt->d & Undefined)) { |
4527 | } | 4573 | rc = emulate_ud(ctxt); |
4528 | 4574 | goto done; | |
4529 | if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { | 4575 | } |
4530 | rc = emulate_nm(ctxt); | ||
4531 | goto done; | ||
4532 | } | ||
4533 | 4576 | ||
4534 | if (ctxt->d & Mmx) { | 4577 | if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) |
4535 | rc = flush_pending_x87_faults(ctxt); | 4578 | || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { |
4536 | if (rc != X86EMUL_CONTINUE) | 4579 | rc = emulate_ud(ctxt); |
4537 | goto done; | 4580 | goto done; |
4538 | /* | 4581 | } |
4539 | * Now that we know the fpu is exception safe, we can fetch | ||
4540 | * operands from it. | ||
4541 | */ | ||
4542 | fetch_possible_mmx_operand(ctxt, &ctxt->src); | ||
4543 | fetch_possible_mmx_operand(ctxt, &ctxt->src2); | ||
4544 | if (!(ctxt->d & Mov)) | ||
4545 | fetch_possible_mmx_operand(ctxt, &ctxt->dst); | ||
4546 | } | ||
4547 | 4582 | ||
4548 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4583 | if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { |
4549 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4584 | rc = emulate_nm(ctxt); |
4550 | X86_ICPT_PRE_EXCEPT); | ||
4551 | if (rc != X86EMUL_CONTINUE) | ||
4552 | goto done; | 4585 | goto done; |
4553 | } | 4586 | } |
4554 | 4587 | ||
4555 | /* Privileged instruction can be executed only in CPL=0 */ | 4588 | if (ctxt->d & Mmx) { |
4556 | if ((ctxt->d & Priv) && ops->cpl(ctxt)) { | 4589 | rc = flush_pending_x87_faults(ctxt); |
4557 | rc = emulate_gp(ctxt, 0); | 4590 | if (rc != X86EMUL_CONTINUE) |
4558 | goto done; | 4591 | goto done; |
4559 | } | 4592 | /* |
4593 | * Now that we know the fpu is exception safe, we can fetch | ||
4594 | * operands from it. | ||
4595 | */ | ||
4596 | fetch_possible_mmx_operand(ctxt, &ctxt->src); | ||
4597 | fetch_possible_mmx_operand(ctxt, &ctxt->src2); | ||
4598 | if (!(ctxt->d & Mov)) | ||
4599 | fetch_possible_mmx_operand(ctxt, &ctxt->dst); | ||
4600 | } | ||
4560 | 4601 | ||
4561 | /* Instruction can only be executed in protected mode */ | 4602 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { |
4562 | if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { | 4603 | rc = emulator_check_intercept(ctxt, ctxt->intercept, |
4563 | rc = emulate_ud(ctxt); | 4604 | X86_ICPT_PRE_EXCEPT); |
4564 | goto done; | 4605 | if (rc != X86EMUL_CONTINUE) |
4565 | } | 4606 | goto done; |
4607 | } | ||
4566 | 4608 | ||
4567 | /* Do instruction specific permission checks */ | 4609 | /* Privileged instruction can be executed only in CPL=0 */ |
4568 | if (ctxt->check_perm) { | 4610 | if ((ctxt->d & Priv) && ops->cpl(ctxt)) { |
4569 | rc = ctxt->check_perm(ctxt); | 4611 | if (ctxt->d & PrivUD) |
4570 | if (rc != X86EMUL_CONTINUE) | 4612 | rc = emulate_ud(ctxt); |
4613 | else | ||
4614 | rc = emulate_gp(ctxt, 0); | ||
4571 | goto done; | 4615 | goto done; |
4572 | } | 4616 | } |
4573 | 4617 | ||
4574 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4618 | /* Instruction can only be executed in protected mode */ |
4575 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4619 | if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { |
4576 | X86_ICPT_POST_EXCEPT); | 4620 | rc = emulate_ud(ctxt); |
4577 | if (rc != X86EMUL_CONTINUE) | ||
4578 | goto done; | 4621 | goto done; |
4579 | } | 4622 | } |
4580 | 4623 | ||
4581 | if (ctxt->rep_prefix && (ctxt->d & String)) { | 4624 | /* Do instruction specific permission checks */ |
4582 | /* All REP prefixes have the same first termination condition */ | 4625 | if (ctxt->d & CheckPerm) { |
4583 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { | 4626 | rc = ctxt->check_perm(ctxt); |
4584 | ctxt->eip = ctxt->_eip; | 4627 | if (rc != X86EMUL_CONTINUE) |
4585 | goto done; | 4628 | goto done; |
4629 | } | ||
4630 | |||
4631 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { | ||
4632 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | ||
4633 | X86_ICPT_POST_EXCEPT); | ||
4634 | if (rc != X86EMUL_CONTINUE) | ||
4635 | goto done; | ||
4636 | } | ||
4637 | |||
4638 | if (ctxt->rep_prefix && (ctxt->d & String)) { | ||
4639 | /* All REP prefixes have the same first termination condition */ | ||
4640 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { | ||
4641 | ctxt->eip = ctxt->_eip; | ||
4642 | ctxt->eflags &= ~EFLG_RF; | ||
4643 | goto done; | ||
4644 | } | ||
4586 | } | 4645 | } |
4587 | } | 4646 | } |
4588 | 4647 | ||
@@ -4616,13 +4675,18 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4616 | 4675 | ||
4617 | special_insn: | 4676 | special_insn: |
4618 | 4677 | ||
4619 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4678 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { |
4620 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4679 | rc = emulator_check_intercept(ctxt, ctxt->intercept, |
4621 | X86_ICPT_POST_MEMACCESS); | 4680 | X86_ICPT_POST_MEMACCESS); |
4622 | if (rc != X86EMUL_CONTINUE) | 4681 | if (rc != X86EMUL_CONTINUE) |
4623 | goto done; | 4682 | goto done; |
4624 | } | 4683 | } |
4625 | 4684 | ||
4685 | if (ctxt->rep_prefix && (ctxt->d & String)) | ||
4686 | ctxt->eflags |= EFLG_RF; | ||
4687 | else | ||
4688 | ctxt->eflags &= ~EFLG_RF; | ||
4689 | |||
4626 | if (ctxt->execute) { | 4690 | if (ctxt->execute) { |
4627 | if (ctxt->d & Fastop) { | 4691 | if (ctxt->d & Fastop) { |
4628 | void (*fop)(struct fastop *) = (void *)ctxt->execute; | 4692 | void (*fop)(struct fastop *) = (void *)ctxt->execute; |
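The new EFLG_RF updates above implement the architectural resume flag (EFLAGS bit 16): it is held set while a REP string instruction may still be restarted, so instruction breakpoints do not re-fire on every resumed iteration, and it is cleared once the instruction completes. Reduced to its essentials (bit value per the SDM; names invented):

#include <stdint.h>

#define EFLAGS_RF (1u << 16)    /* resume flag */

static void update_rf(uint32_t *eflags, int rep_string_in_progress)
{
        if (rep_string_in_progress)
                *eflags |= EFLAGS_RF;   /* suppress #DB on restart */
        else
                *eflags &= ~EFLAGS_RF;
}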
@@ -4657,8 +4721,9 @@ special_insn: | |||
4657 | break; | 4721 | break; |
4658 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ | 4722 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ |
4659 | if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) | 4723 | if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) |
4660 | break; | 4724 | ctxt->dst.type = OP_NONE; |
4661 | rc = em_xchg(ctxt); | 4725 | else |
4726 | rc = em_xchg(ctxt); | ||
4662 | break; | 4727 | break; |
4663 | case 0x98: /* cbw/cwde/cdqe */ | 4728 | case 0x98: /* cbw/cwde/cdqe */ |
4664 | switch (ctxt->op_bytes) { | 4729 | switch (ctxt->op_bytes) { |
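The 0x90 change above is subtle: when the encoded register is RAX itself the instruction is the canonical NOP and must write nothing, since in 64-bit mode a spurious 32-bit register writeback would clear bits 63:32 of RAX. Setting dst.type to OP_NONE suppresses the writeback instead of merely skipping em_xchg(). Modeled on plain values (an interpretation of the hunk, not kernel code):

#include <stdint.h>

static uint64_t xchg_with_rax(uint64_t rax, uint64_t *reg, int reg_is_rax)
{
        uint64_t tmp;

        if (reg_is_rax)
                return rax;     /* true NOP: no register write at all */
        tmp = *reg;
        *reg = rax;
        return tmp;             /* value that lands in RAX */
}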
@@ -4709,17 +4774,17 @@ special_insn: | |||
4709 | goto done; | 4774 | goto done; |
4710 | 4775 | ||
4711 | writeback: | 4776 | writeback: |
4712 | if (!(ctxt->d & NoWrite)) { | ||
4713 | rc = writeback(ctxt, &ctxt->dst); | ||
4714 | if (rc != X86EMUL_CONTINUE) | ||
4715 | goto done; | ||
4716 | } | ||
4717 | if (ctxt->d & SrcWrite) { | 4777 | if (ctxt->d & SrcWrite) { |
4718 | BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); | 4778 | BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); |
4719 | rc = writeback(ctxt, &ctxt->src); | 4779 | rc = writeback(ctxt, &ctxt->src); |
4720 | if (rc != X86EMUL_CONTINUE) | 4780 | if (rc != X86EMUL_CONTINUE) |
4721 | goto done; | 4781 | goto done; |
4722 | } | 4782 | } |
4783 | if (!(ctxt->d & NoWrite)) { | ||
4784 | rc = writeback(ctxt, &ctxt->dst); | ||
4785 | if (rc != X86EMUL_CONTINUE) | ||
4786 | goto done; | ||
4787 | } | ||
4723 | 4788 | ||
4724 | /* | 4789 | /* |
4725 | * restore dst type in case the decoding will be reused | 4790 | * restore dst type in case the decoding will be reused |
@@ -4761,6 +4826,7 @@ writeback: | |||
4761 | } | 4826 | } |
4762 | goto done; /* skip rip writeback */ | 4827 | goto done; /* skip rip writeback */ |
4763 | } | 4828 | } |
4829 | ctxt->eflags &= ~EFLG_RF; | ||
4764 | } | 4830 | } |
4765 | 4831 | ||
4766 | ctxt->eip = ctxt->_eip; | 4832 | ctxt->eip = ctxt->_eip; |
@@ -4793,8 +4859,10 @@ twobyte_insn: | |||
4793 | ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); | 4859 | ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); |
4794 | break; | 4860 | break; |
4795 | case 0x40 ... 0x4f: /* cmov */ | 4861 | case 0x40 ... 0x4f: /* cmov */ |
4796 | ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val; | 4862 | if (test_cc(ctxt->b, ctxt->eflags)) |
4797 | if (!test_cc(ctxt->b, ctxt->eflags)) | 4863 | ctxt->dst.val = ctxt->src.val; |
4864 | else if (ctxt->mode != X86EMUL_MODE_PROT64 || | ||
4865 | ctxt->op_bytes != 4) | ||
4798 | ctxt->dst.type = OP_NONE; /* no writeback */ | 4866 | ctxt->dst.type = OP_NONE; /* no writeback */ |
4799 | break; | 4867 | break; |
4800 | case 0x80 ... 0x8f: /* jnz rel, etc*/ | 4868 | case 0x80 ... 0x8f: /* jnz rel, etc*/ |
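The CMOVcc rewrite above fixes a real architectural corner: with a 32-bit operand size in 64-bit mode, the destination is written even when the condition is false, because every 32-bit GPR write zero-extends into the full 64-bit register. Only outside that case may the writeback be skipped. As a pure function:

#include <stdint.h>

/* 32-bit CMOVcc in 64-bit mode: the destination register is always
 * (re)written, clearing its upper half when the condition fails. */
static uint64_t cmov32(uint64_t dst, uint32_t src, int cond)
{
        return (uint64_t)(cond ? src : (uint32_t)dst);
}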
@@ -4818,8 +4886,8 @@ twobyte_insn: | |||
4818 | break; | 4886 | break; |
4819 | case 0xc3: /* movnti */ | 4887 | case 0xc3: /* movnti */ |
4820 | ctxt->dst.bytes = ctxt->op_bytes; | 4888 | ctxt->dst.bytes = ctxt->op_bytes; |
4821 | ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val : | 4889 | ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : |
4822 | (u64) ctxt->src.val; | 4890 | (u32) ctxt->src.val; |
4823 | break; | 4891 | break; |
4824 | default: | 4892 | default: |
4825 | goto cannot_emulate; | 4893 | goto cannot_emulate; |
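The movnti change flips the size test for a reason: movnti has only 32- and 64-bit forms, so any operand size other than 8 bytes (including a bogus 16-bit override) must store 32 bits, whereas the old expression defaulted to 64. Equivalent to:

#include <stdint.h>

static uint64_t movnti_store_value(unsigned int op_bytes, uint64_t src)
{
        return (op_bytes == 8) ? src : (uint32_t)src;
}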
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 006911858174..3855103f71fd 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1451,7 +1451,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) | |||
1451 | vcpu->arch.apic_arb_prio = 0; | 1451 | vcpu->arch.apic_arb_prio = 0; |
1452 | vcpu->arch.apic_attention = 0; | 1452 | vcpu->arch.apic_attention = 0; |
1453 | 1453 | ||
1454 | apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr=" | 1454 | apic_debug("%s: vcpu=%p, id=%d, base_msr=" |
1455 | "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, | 1455 | "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, |
1456 | vcpu, kvm_apic_id(apic), | 1456 | vcpu, kvm_apic_id(apic), |
1457 | vcpu->arch.apic_base, apic->base_address); | 1457 | vcpu->arch.apic_base, apic->base_address); |
@@ -1895,7 +1895,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1895 | /* evaluate pending_events before reading the vector */ | 1895 | /* evaluate pending_events before reading the vector */ |
1896 | smp_rmb(); | 1896 | smp_rmb(); |
1897 | sipi_vector = apic->sipi_vector; | 1897 | sipi_vector = apic->sipi_vector; |
1898 | pr_debug("vcpu %d received sipi with vector # %x\n", | 1898 | apic_debug("vcpu %d received sipi with vector # %x\n", |
1899 | vcpu->vcpu_id, sipi_vector); | 1899 | vcpu->vcpu_id, sipi_vector); |
1900 | kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); | 1900 | kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); |
1901 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 1901 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 2e5652b62fd6..5aaf35641768 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h | |||
@@ -22,7 +22,7 @@ | |||
22 | __entry->unsync = sp->unsync; | 22 | __entry->unsync = sp->unsync; |
23 | 23 | ||
24 | #define KVM_MMU_PAGE_PRINTK() ({ \ | 24 | #define KVM_MMU_PAGE_PRINTK() ({ \ |
25 | const char *ret = trace_seq_buffer_ptr(p); \ | 25 | const u32 saved_len = p->len; \ |
26 | static const char *access_str[] = { \ | 26 | static const char *access_str[] = { \ |
27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ | 27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ |
28 | }; \ | 28 | }; \ |
@@ -41,7 +41,7 @@ | |||
41 | role.nxe ? "" : "!", \ | 41 | role.nxe ? "" : "!", \ |
42 | __entry->root_count, \ | 42 | __entry->root_count, \ |
43 | __entry->unsync ? "unsync" : "sync", 0); \ | 43 | __entry->unsync ? "unsync" : "sync", 0); \ |
44 | ret; \ | 44 | p->buffer + saved_len; \ |
45 | }) | 45 | }) |
46 | 46 | ||
47 | #define kvm_mmu_trace_pferr_flags \ | 47 | #define kvm_mmu_trace_pferr_flags \ |
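The mmutrace change reverts to an older trace_seq idiom: record the buffer length before printing, then return a pointer just past the previously accumulated text, so the caller sees only what this event appended. A plain-C model of the pattern (trace_seq replaced by a toy struct):

#include <stdio.h>

struct seq { char buffer[256]; unsigned int len; };

static const char *seq_printf_ptr(struct seq *p, const char *s)
{
        const unsigned int saved_len = p->len;  /* position before we print */

        p->len += snprintf(p->buffer + p->len,
                           sizeof(p->buffer) - p->len, "%s", s);
        return p->buffer + saved_len;           /* start of our output */
}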
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index cbecaa90399c..3dd6accb64ec 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
@@ -428,6 +428,15 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
428 | return 1; | 428 | return 1; |
429 | } | 429 | } |
430 | 430 | ||
431 | int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc) | ||
432 | { | ||
433 | struct kvm_pmu *pmu = &vcpu->arch.pmu; | ||
434 | bool fixed = pmc & (1u << 30); | ||
435 | pmc &= ~(3u << 30); | ||
436 | return (!fixed && pmc >= pmu->nr_arch_gp_counters) || | ||
437 | (fixed && pmc >= pmu->nr_arch_fixed_counters); | ||
438 | } | ||
439 | |||
431 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) | 440 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) |
432 | { | 441 | { |
433 | struct kvm_pmu *pmu = &vcpu->arch.pmu; | 442 | struct kvm_pmu *pmu = &vcpu->arch.pmu; |
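kvm_pmu_check_pmc() above validates the RDPMC index encoding: bit 30 of ECX selects the fixed-counter bank, bits 31:30 are then stripped, and the remainder must fall below the size of the selected bank. The same logic with the counter counts passed in directly:

static int pmc_is_invalid(unsigned int pmc,
                          unsigned int nr_gp, unsigned int nr_fixed)
{
        int fixed = pmc & (1u << 30);   /* fixed-counter bank selector */

        pmc &= ~(3u << 30);             /* strip the type bits */
        return (!fixed && pmc >= nr_gp) ||
               (fixed && pmc >= nr_fixed);
}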
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b5e994ad0135..ddf742768ecf 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info) | |||
486 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); | 486 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); |
487 | } | 487 | } |
488 | 488 | ||
489 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 489 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
490 | { | 490 | { |
491 | struct vcpu_svm *svm = to_svm(vcpu); | 491 | struct vcpu_svm *svm = to_svm(vcpu); |
492 | u32 ret = 0; | 492 | u32 ret = 0; |
493 | 493 | ||
494 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) | 494 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) |
495 | ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; | 495 | ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; |
496 | return ret & mask; | 496 | return ret; |
497 | } | 497 | } |
498 | 498 | ||
499 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 499 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
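The svm_get_interrupt_shadow() change above reflects a hardware limitation: the VMCB keeps a single interrupt-shadow bit with no way to tell an STI shadow from a MOV SS shadow, so the helper now reports both candidate causes and the dropped mask parameter leaves any filtering to the callers. In miniature (a reading of the hunk, with invented flag names):

#define SHADOW_STI    (1u << 0)
#define SHADOW_MOV_SS (1u << 1)

static unsigned int interrupt_shadow(int vmcb_shadow_bit_set)
{
        /* one hardware bit -> report both possible causes */
        return vmcb_shadow_bit_set ? (SHADOW_STI | SHADOW_MOV_SS) : 0;
}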
@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1415 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; | 1415 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
1416 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; | 1416 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
1417 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; | 1417 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
1418 | var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; | 1418 | |
1419 | /* | ||
1420 | * AMD CPUs circa 2014 track the G bit for all segments except CS. | ||
1421 | * However, the SVM spec states that the G bit is not observed by the | ||
1422 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. | ||
1423 | * So let's synthesize a legal G bit for all segments, this helps | ||
1424 | * running KVM nested. It also helps cross-vendor migration, because | ||
1425 | * Intel's vmentry has a check on the 'G' bit. | ||
1426 | */ | ||
1427 | var->g = s->limit > 0xfffff; | ||
1419 | 1428 | ||
1420 | /* | 1429 | /* |
1421 | * AMD's VMCB does not have an explicit unusable field, so emulate it | 1430 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1424 | var->unusable = !var->present || (var->type == 0); | 1433 | var->unusable = !var->present || (var->type == 0); |
1425 | 1434 | ||
1426 | switch (seg) { | 1435 | switch (seg) { |
1427 | case VCPU_SREG_CS: | ||
1428 | /* | ||
1429 | * SVM always stores 0 for the 'G' bit in the CS selector in | ||
1430 | * the VMCB on a VMEXIT. This hurts cross-vendor migration: | ||
1431 | * Intel's VMENTRY has a check on the 'G' bit. | ||
1432 | */ | ||
1433 | var->g = s->limit > 0xfffff; | ||
1434 | break; | ||
1435 | case VCPU_SREG_TR: | 1436 | case VCPU_SREG_TR: |
1436 | /* | 1437 | /* |
1437 | * Work around a bug where the busy flag in the tr selector | 1438 | * Work around a bug where the busy flag in the tr selector |
@@ -2116,22 +2117,27 @@ static void nested_svm_unmap(struct page *page) | |||
2116 | 2117 | ||
2117 | static int nested_svm_intercept_ioio(struct vcpu_svm *svm) | 2118 | static int nested_svm_intercept_ioio(struct vcpu_svm *svm) |
2118 | { | 2119 | { |
2119 | unsigned port; | 2120 | unsigned port, size, iopm_len; |
2120 | u8 val, bit; | 2121 | u16 val, mask; |
2122 | u8 start_bit; | ||
2121 | u64 gpa; | 2123 | u64 gpa; |
2122 | 2124 | ||
2123 | if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) | 2125 | if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) |
2124 | return NESTED_EXIT_HOST; | 2126 | return NESTED_EXIT_HOST; |
2125 | 2127 | ||
2126 | port = svm->vmcb->control.exit_info_1 >> 16; | 2128 | port = svm->vmcb->control.exit_info_1 >> 16; |
2129 | size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> | ||
2130 | SVM_IOIO_SIZE_SHIFT; | ||
2127 | gpa = svm->nested.vmcb_iopm + (port / 8); | 2131 | gpa = svm->nested.vmcb_iopm + (port / 8); |
2128 | bit = port % 8; | 2132 | start_bit = port % 8; |
2129 | val = 0; | 2133 | iopm_len = (start_bit + size > 8) ? 2 : 1; |
2134 | mask = (0xf >> (4 - size)) << start_bit; | ||
2135 | val = 0; | ||
2130 | 2136 | ||
2131 | if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1)) | 2137 | if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len)) |
2132 | val &= (1 << bit); | 2138 | return NESTED_EXIT_DONE; |
2133 | 2139 | ||
2134 | return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; | 2140 | return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; |
2135 | } | 2141 | } |
2136 | 2142 | ||
2137 | static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) | 2143 | static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) |
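The nested IOIO fix above handles accesses that straddle a byte of the I/O permission bitmap: a size-byte access at a given port covers size consecutive bits starting at port % 8, so up to two bitmap bytes must be fetched and tested. Standalone, with the guest-memory read replaced by an array lookup:

#include <stdint.h>

static int io_intercepted(const uint8_t *iopm, unsigned int port,
                          unsigned int size)   /* access size: 1, 2 or 4 */
{
        unsigned int start_bit = port % 8;
        uint16_t mask = ((1u << size) - 1) << start_bit;
        uint16_t val = iopm[port / 8];

        if (start_bit + size > 8)       /* bits spill into the next byte */
                val |= (uint16_t)iopm[port / 8 + 1] << 8;

        return (val & mask) != 0;
}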
@@ -4205,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, | |||
4205 | if (info->intercept == x86_intercept_cr_write) | 4211 | if (info->intercept == x86_intercept_cr_write) |
4206 | icpt_info.exit_code += info->modrm_reg; | 4212 | icpt_info.exit_code += info->modrm_reg; |
4207 | 4213 | ||
4208 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0) | 4214 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
4215 | info->intercept == x86_intercept_clts) | ||
4209 | break; | 4216 | break; |
4210 | 4217 | ||
4211 | intercept = svm->nested.intercept; | 4218 | intercept = svm->nested.intercept; |
@@ -4250,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, | |||
4250 | u64 exit_info; | 4257 | u64 exit_info; |
4251 | u32 bytes; | 4258 | u32 bytes; |
4252 | 4259 | ||
4253 | exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16; | ||
4254 | |||
4255 | if (info->intercept == x86_intercept_in || | 4260 | if (info->intercept == x86_intercept_in || |
4256 | info->intercept == x86_intercept_ins) { | 4261 | info->intercept == x86_intercept_ins) { |
4257 | exit_info |= SVM_IOIO_TYPE_MASK; | 4262 | exit_info = ((info->src_val & 0xffff) << 16) | |
4258 | bytes = info->src_bytes; | 4263 | SVM_IOIO_TYPE_MASK; |
4259 | } else { | ||
4260 | bytes = info->dst_bytes; | 4264 | bytes = info->dst_bytes; |
4265 | } else { | ||
4266 | exit_info = (info->dst_val & 0xffff) << 16; | ||
4267 | bytes = info->src_bytes; | ||
4261 | } | 4268 | } |
4262 | 4269 | ||
4263 | if (info->intercept == x86_intercept_outs || | 4270 | if (info->intercept == x86_intercept_outs || |
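The exit_info_1 rework above restores the operand asymmetry of port I/O: for IN the port number is the source operand (DX) and for OUT it is the destination, and SVM encodes the port in bits 31:16 of EXITINFO1 with a type bit marking the IN direction. A sketch under those assumptions (bit position inferred from the diff's SVM_IOIO_TYPE_MASK usage):

#include <stdint.h>

#define IOIO_TYPE_IN 1u         /* stand-in for SVM_IOIO_TYPE_MASK */

static uint64_t ioio_exit_info(int is_in, uint64_t src_val, uint64_t dst_val)
{
        uint64_t port = (is_in ? src_val : dst_val) & 0xffff;

        return (port << 16) | (is_in ? IOIO_TYPE_IN : 0);
}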
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 33574c95220d..e850a7d332be 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
@@ -721,10 +721,10 @@ TRACE_EVENT(kvm_emulate_insn, | |||
721 | ), | 721 | ), |
722 | 722 | ||
723 | TP_fast_assign( | 723 | TP_fast_assign( |
724 | __entry->rip = vcpu->arch.emulate_ctxt.fetch.start; | ||
725 | __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); | 724 | __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); |
726 | __entry->len = vcpu->arch.emulate_ctxt._eip | 725 | __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr |
727 | - vcpu->arch.emulate_ctxt.fetch.start; | 726 | - vcpu->arch.emulate_ctxt.fetch.data; |
727 | __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len; | ||
728 | memcpy(__entry->insn, | 728 | memcpy(__entry->insn, |
729 | vcpu->arch.emulate_ctxt.fetch.data, | 729 | vcpu->arch.emulate_ctxt.fetch.data, |
730 | 15); | 730 | 15); |
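The trace fix above recovers the instruction start from the reworked fetch cache: the bytes consumed so far are fetch.ptr - fetch.data, and subtracting that length from the post-decode _eip yields the RIP at which decoding began. As arithmetic:

#include <stdint.h>

struct fetch_cache { uint8_t data[15]; uint8_t *ptr; };

static uint64_t insn_start(uint64_t eip_after_decode,
                           const struct fetch_cache *fetch,
                           unsigned int *len)
{
        *len = fetch->ptr - fetch->data;        /* bytes fetched so far */
        return eip_after_decode - *len;         /* where decoding began */
}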
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 801332edefc3..e618f34bde2d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -383,6 +383,9 @@ struct nested_vmx { | |||
383 | 383 | ||
384 | struct hrtimer preemption_timer; | 384 | struct hrtimer preemption_timer; |
385 | bool preemption_timer_expired; | 385 | bool preemption_timer_expired; |
386 | |||
387 | /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ | ||
388 | u64 vmcs01_debugctl; | ||
386 | }; | 389 | }; |
387 | 390 | ||
388 | #define POSTED_INTR_ON 0 | 391 | #define POSTED_INTR_ON 0 |
@@ -740,7 +743,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var); | |||
740 | static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); | 743 | static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); |
741 | static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); | 744 | static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); |
742 | static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); | 745 | static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); |
743 | static bool vmx_mpx_supported(void); | ||
744 | 746 | ||
745 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | 747 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
746 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | 748 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); |
@@ -820,7 +822,6 @@ static const u32 vmx_msr_index[] = { | |||
820 | #endif | 822 | #endif |
821 | MSR_EFER, MSR_TSC_AUX, MSR_STAR, | 823 | MSR_EFER, MSR_TSC_AUX, MSR_STAR, |
822 | }; | 824 | }; |
823 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | ||
824 | 825 | ||
825 | static inline bool is_page_fault(u32 intr_info) | 826 | static inline bool is_page_fault(u32 intr_info) |
826 | { | 827 | { |
@@ -1940,7 +1941,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | |||
1940 | vmcs_writel(GUEST_RFLAGS, rflags); | 1941 | vmcs_writel(GUEST_RFLAGS, rflags); |
1941 | } | 1942 | } |
1942 | 1943 | ||
1943 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 1944 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
1944 | { | 1945 | { |
1945 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | 1946 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
1946 | int ret = 0; | 1947 | int ret = 0; |
@@ -1950,7 +1951,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | |||
1950 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) | 1951 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) |
1951 | ret |= KVM_X86_SHADOW_INT_MOV_SS; | 1952 | ret |= KVM_X86_SHADOW_INT_MOV_SS; |
1952 | 1953 | ||
1953 | return ret & mask; | 1954 | return ret; |
1954 | } | 1955 | } |
1955 | 1956 | ||
1956 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 1957 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
@@ -2239,10 +2240,13 @@ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) | |||
2239 | * or other means. | 2240 | * or other means. |
2240 | */ | 2241 | */ |
2241 | static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; | 2242 | static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; |
2243 | static u32 nested_vmx_true_procbased_ctls_low; | ||
2242 | static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; | 2244 | static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; |
2243 | static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; | 2245 | static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; |
2244 | static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; | 2246 | static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; |
2247 | static u32 nested_vmx_true_exit_ctls_low; | ||
2245 | static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; | 2248 | static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; |
2249 | static u32 nested_vmx_true_entry_ctls_low; | ||
2246 | static u32 nested_vmx_misc_low, nested_vmx_misc_high; | 2250 | static u32 nested_vmx_misc_low, nested_vmx_misc_high; |
2247 | static u32 nested_vmx_ept_caps; | 2251 | static u32 nested_vmx_ept_caps; |
2248 | static __init void nested_vmx_setup_ctls_msrs(void) | 2252 | static __init void nested_vmx_setup_ctls_msrs(void) |
@@ -2265,21 +2269,13 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2265 | /* pin-based controls */ | 2269 | /* pin-based controls */ |
2266 | rdmsr(MSR_IA32_VMX_PINBASED_CTLS, | 2270 | rdmsr(MSR_IA32_VMX_PINBASED_CTLS, |
2267 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); | 2271 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); |
2268 | /* | ||
2269 | * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is | ||
2270 | * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR. | ||
2271 | */ | ||
2272 | nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; | 2272 | nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; |
2273 | nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | | 2273 | nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | |
2274 | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; | 2274 | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; |
2275 | nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | | 2275 | nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | |
2276 | PIN_BASED_VMX_PREEMPTION_TIMER; | 2276 | PIN_BASED_VMX_PREEMPTION_TIMER; |
2277 | 2277 | ||
2278 | /* | 2278 | /* exit controls */ |
2279 | * Exit controls | ||
2280 | * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and | ||
2281 | * 17 must be 1. | ||
2282 | */ | ||
2283 | rdmsr(MSR_IA32_VMX_EXIT_CTLS, | 2279 | rdmsr(MSR_IA32_VMX_EXIT_CTLS, |
2284 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); | 2280 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); |
2285 | nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; | 2281 | nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; |
@@ -2296,10 +2292,13 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2296 | if (vmx_mpx_supported()) | 2292 | if (vmx_mpx_supported()) |
2297 | nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; | 2293 | nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; |
2298 | 2294 | ||
2295 | /* We support free control of debug control saving. */ | ||
2296 | nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low & | ||
2297 | ~VM_EXIT_SAVE_DEBUG_CONTROLS; | ||
2298 | |||
2299 | /* entry controls */ | 2299 | /* entry controls */ |
2300 | rdmsr(MSR_IA32_VMX_ENTRY_CTLS, | 2300 | rdmsr(MSR_IA32_VMX_ENTRY_CTLS, |
2301 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); | 2301 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); |
2302 | /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */ | ||
2303 | nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; | 2302 | nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; |
2304 | nested_vmx_entry_ctls_high &= | 2303 | nested_vmx_entry_ctls_high &= |
2305 | #ifdef CONFIG_X86_64 | 2304 | #ifdef CONFIG_X86_64 |
@@ -2311,10 +2310,14 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2311 | if (vmx_mpx_supported()) | 2310 | if (vmx_mpx_supported()) |
2312 | nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; | 2311 | nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; |
2313 | 2312 | ||
2313 | /* We support free control of debug control loading. */ | ||
2314 | nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low & | ||
2315 | ~VM_ENTRY_LOAD_DEBUG_CONTROLS; | ||
2316 | |||
2314 | /* cpu-based controls */ | 2317 | /* cpu-based controls */ |
2315 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, | 2318 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, |
2316 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); | 2319 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); |
2317 | nested_vmx_procbased_ctls_low = 0; | 2320 | nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; |
2318 | nested_vmx_procbased_ctls_high &= | 2321 | nested_vmx_procbased_ctls_high &= |
2319 | CPU_BASED_VIRTUAL_INTR_PENDING | | 2322 | CPU_BASED_VIRTUAL_INTR_PENDING | |
2320 | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | | 2323 | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | |
@@ -2335,7 +2338,12 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2335 | * can use it to avoid exits to L1 - even when L0 runs L2 | 2338 | * can use it to avoid exits to L1 - even when L0 runs L2 |
2336 | * without MSR bitmaps. | 2339 | * without MSR bitmaps. |
2337 | */ | 2340 | */ |
2338 | nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS; | 2341 | nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | |
2342 | CPU_BASED_USE_MSR_BITMAPS; | ||
2343 | |||
2344 | /* We support free control of CR3 access interception. */ | ||
2345 | nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low & | ||
2346 | ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); | ||
2339 | 2347 | ||
2340 | /* secondary cpu-based controls */ | 2348 | /* secondary cpu-based controls */ |
2341 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 2349 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
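Some background on the nested_vmx_true_* variables threaded through these hunks: each VMX capability MSR packs the allowed-0 settings in its low 32 bits and the allowed-1 settings in its high 32 bits, and once VMX_BASIC bit 55 is advertised, the TRUE_* MSRs may additionally clear default1 bits the hypervisor lets the guest turn off (here: debug-control load/save and CR3-access exiting). A sketch of how such an MSR value is assembled:

#include <stdint.h>

static uint64_t control_msr(uint32_t allowed0, uint32_t allowed1)
{
        return allowed0 | ((uint64_t)allowed1 << 32);
}

static uint64_t true_control_msr(uint32_t allowed0, uint32_t allowed1,
                                 uint32_t freely_clearable)
{
        /* drop default1 bits the guest may clear */
        return control_msr(allowed0 & ~freely_clearable, allowed1);
}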
@@ -2394,7 +2402,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2394 | * guest, and the VMCS structure we give it - not about the | 2402 | * guest, and the VMCS structure we give it - not about the |
2395 | * VMX support of the underlying hardware. | 2403 | * VMX support of the underlying hardware. |
2396 | */ | 2404 | */ |
2397 | *pdata = VMCS12_REVISION | | 2405 | *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | |
2398 | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | | 2406 | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | |
2399 | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); | 2407 | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); |
2400 | break; | 2408 | break; |
@@ -2404,16 +2412,25 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2404 | nested_vmx_pinbased_ctls_high); | 2412 | nested_vmx_pinbased_ctls_high); |
2405 | break; | 2413 | break; |
2406 | case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: | 2414 | case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: |
2415 | *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low, | ||
2416 | nested_vmx_procbased_ctls_high); | ||
2417 | break; | ||
2407 | case MSR_IA32_VMX_PROCBASED_CTLS: | 2418 | case MSR_IA32_VMX_PROCBASED_CTLS: |
2408 | *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, | 2419 | *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, |
2409 | nested_vmx_procbased_ctls_high); | 2420 | nested_vmx_procbased_ctls_high); |
2410 | break; | 2421 | break; |
2411 | case MSR_IA32_VMX_TRUE_EXIT_CTLS: | 2422 | case MSR_IA32_VMX_TRUE_EXIT_CTLS: |
2423 | *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low, | ||
2424 | nested_vmx_exit_ctls_high); | ||
2425 | break; | ||
2412 | case MSR_IA32_VMX_EXIT_CTLS: | 2426 | case MSR_IA32_VMX_EXIT_CTLS: |
2413 | *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, | 2427 | *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, |
2414 | nested_vmx_exit_ctls_high); | 2428 | nested_vmx_exit_ctls_high); |
2415 | break; | 2429 | break; |
2416 | case MSR_IA32_VMX_TRUE_ENTRY_CTLS: | 2430 | case MSR_IA32_VMX_TRUE_ENTRY_CTLS: |
2431 | *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low, | ||
2432 | nested_vmx_entry_ctls_high); | ||
2433 | break; | ||
2417 | case MSR_IA32_VMX_ENTRY_CTLS: | 2434 | case MSR_IA32_VMX_ENTRY_CTLS: |
2418 | *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, | 2435 | *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, |
2419 | nested_vmx_entry_ctls_high); | 2436 | nested_vmx_entry_ctls_high); |
@@ -2442,7 +2459,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2442 | *pdata = -1ULL; | 2459 | *pdata = -1ULL; |
2443 | break; | 2460 | break; |
2444 | case MSR_IA32_VMX_VMCS_ENUM: | 2461 | case MSR_IA32_VMX_VMCS_ENUM: |
2445 | *pdata = 0x1f; | 2462 | *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */ |
2446 | break; | 2463 | break; |
2447 | case MSR_IA32_VMX_PROCBASED_CTLS2: | 2464 | case MSR_IA32_VMX_PROCBASED_CTLS2: |
2448 | *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, | 2465 | *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, |
@@ -3653,7 +3670,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, | |||
3653 | vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); | 3670 | vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); |
3654 | 3671 | ||
3655 | out: | 3672 | out: |
3656 | vmx->emulation_required |= emulation_required(vcpu); | 3673 | vmx->emulation_required = emulation_required(vcpu); |
3657 | } | 3674 | } |
3658 | 3675 | ||
3659 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | 3676 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) |
@@ -4422,7 +4439,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
4422 | vmx->vcpu.arch.pat = host_pat; | 4439 | vmx->vcpu.arch.pat = host_pat; |
4423 | } | 4440 | } |
4424 | 4441 | ||
4425 | for (i = 0; i < NR_VMX_MSR; ++i) { | 4442 | for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { |
4426 | u32 index = vmx_msr_index[i]; | 4443 | u32 index = vmx_msr_index[i]; |
4427 | u32 data_low, data_high; | 4444 | u32 data_low, data_high; |
4428 | int j = vmx->nmsrs; | 4445 | int j = vmx->nmsrs; |
@@ -4873,7 +4890,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
4873 | if (!(vcpu->guest_debug & | 4890 | if (!(vcpu->guest_debug & |
4874 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { | 4891 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { |
4875 | vcpu->arch.dr6 &= ~15; | 4892 | vcpu->arch.dr6 &= ~15; |
4876 | vcpu->arch.dr6 |= dr6; | 4893 | vcpu->arch.dr6 |= dr6 | DR6_RTM; |
4877 | if (!(dr6 & ~DR6_RESERVED)) /* icebp */ | 4894 | if (!(dr6 & ~DR6_RESERVED)) /* icebp */ |
4878 | skip_emulated_instruction(vcpu); | 4895 | skip_emulated_instruction(vcpu); |
4879 | 4896 | ||
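Both DR6 hunks OR in DR6_RTM for the same reason: on processors supporting (or advertising) TSX, DR6 bit 16 reads as 1 except when a debug exception is raised inside a transactional region, so any DR6 value KVM synthesizes must keep it set. Roughly:

#include <stdint.h>

#define DR6_RTM (1ull << 16)    /* reads as 1 outside an RTM #DB */

static uint64_t synthesize_dr6(uint64_t dr6, uint64_t new_status_bits)
{
        dr6 &= ~15ull;          /* clear stale B0-B3, as the #DB path does */
        return dr6 | new_status_bits | DR6_RTM;
}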
@@ -5039,7 +5056,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
5039 | reg = (exit_qualification >> 8) & 15; | 5056 | reg = (exit_qualification >> 8) & 15; |
5040 | switch ((exit_qualification >> 4) & 3) { | 5057 | switch ((exit_qualification >> 4) & 3) { |
5041 | case 0: /* mov to cr */ | 5058 | case 0: /* mov to cr */ |
5042 | val = kvm_register_read(vcpu, reg); | 5059 | val = kvm_register_readl(vcpu, reg); |
5043 | trace_kvm_cr_write(cr, val); | 5060 | trace_kvm_cr_write(cr, val); |
5044 | switch (cr) { | 5061 | switch (cr) { |
5045 | case 0: | 5062 | case 0: |
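The kvm_register_read -> kvm_register_readl substitutions in this and the following hunks share one rationale: outside 64-bit mode only the low 32 bits of a general-purpose register are architecturally meaningful, so values feeding CR/DR writes and VMX-instruction decoding are truncated first. The helper presumably reduces to something like this (inferred from the call sites, not quoted from the header):

static unsigned long register_readl(int is_64_bit_mode, unsigned long raw)
{
        return is_64_bit_mode ? raw : (unsigned int)raw;
}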
@@ -5056,7 +5073,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
5056 | return 1; | 5073 | return 1; |
5057 | case 8: { | 5074 | case 8: { |
5058 | u8 cr8_prev = kvm_get_cr8(vcpu); | 5075 | u8 cr8_prev = kvm_get_cr8(vcpu); |
5059 | u8 cr8 = kvm_register_read(vcpu, reg); | 5076 | u8 cr8 = (u8)val; |
5060 | err = kvm_set_cr8(vcpu, cr8); | 5077 | err = kvm_set_cr8(vcpu, cr8); |
5061 | kvm_complete_insn_gp(vcpu, err); | 5078 | kvm_complete_insn_gp(vcpu, err); |
5062 | if (irqchip_in_kernel(vcpu->kvm)) | 5079 | if (irqchip_in_kernel(vcpu->kvm)) |
@@ -5132,7 +5149,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
5132 | return 0; | 5149 | return 0; |
5133 | } else { | 5150 | } else { |
5134 | vcpu->arch.dr7 &= ~DR7_GD; | 5151 | vcpu->arch.dr7 &= ~DR7_GD; |
5135 | vcpu->arch.dr6 |= DR6_BD; | 5152 | vcpu->arch.dr6 |= DR6_BD | DR6_RTM; |
5136 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | 5153 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); |
5137 | kvm_queue_exception(vcpu, DB_VECTOR); | 5154 | kvm_queue_exception(vcpu, DB_VECTOR); |
5138 | return 1; | 5155 | return 1; |
@@ -5165,7 +5182,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
5165 | return 1; | 5182 | return 1; |
5166 | kvm_register_write(vcpu, reg, val); | 5183 | kvm_register_write(vcpu, reg, val); |
5167 | } else | 5184 | } else |
5168 | if (kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg))) | 5185 | if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) |
5169 | return 1; | 5186 | return 1; |
5170 | 5187 | ||
5171 | skip_emulated_instruction(vcpu); | 5188 | skip_emulated_instruction(vcpu); |
@@ -5621,7 +5638,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
5621 | cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 5638 | cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
5622 | intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; | 5639 | intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; |
5623 | 5640 | ||
5624 | while (!guest_state_valid(vcpu) && count-- != 0) { | 5641 | while (vmx->emulation_required && count-- != 0) { |
5625 | if (intr_window_requested && vmx_interrupt_allowed(vcpu)) | 5642 | if (intr_window_requested && vmx_interrupt_allowed(vcpu)) |
5626 | return handle_interrupt_window(&vmx->vcpu); | 5643 | return handle_interrupt_window(&vmx->vcpu); |
5627 | 5644 | ||
@@ -5655,7 +5672,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
5655 | schedule(); | 5672 | schedule(); |
5656 | } | 5673 | } |
5657 | 5674 | ||
5658 | vmx->emulation_required = emulation_required(vcpu); | ||
5659 | out: | 5675 | out: |
5660 | return ret; | 5676 | return ret; |
5661 | } | 5677 | } |
@@ -5754,22 +5770,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) | |||
5754 | 5770 | ||
5755 | /* | 5771 | /* |
5756 | * Free all VMCSs saved for this vcpu, except the one pointed by | 5772 | * Free all VMCSs saved for this vcpu, except the one pointed by |
5757 | * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one | 5773 | * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs |
5758 | * currently used, if running L2), and vmcs01 when running L2. | 5774 | * must be &vmx->vmcs01. |
5759 | */ | 5775 | */ |
5760 | static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) | 5776 | static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) |
5761 | { | 5777 | { |
5762 | struct vmcs02_list *item, *n; | 5778 | struct vmcs02_list *item, *n; |
5779 | |||
5780 | WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); | ||
5763 | list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { | 5781 | list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { |
5764 | if (vmx->loaded_vmcs != &item->vmcs02) | 5782 | /* |
5765 | free_loaded_vmcs(&item->vmcs02); | 5783 | * Something will leak if the above WARN triggers. Better than |
5784 | * a use-after-free. | ||
5785 | */ | ||
5786 | if (vmx->loaded_vmcs == &item->vmcs02) | ||
5787 | continue; | ||
5788 | |||
5789 | free_loaded_vmcs(&item->vmcs02); | ||
5766 | list_del(&item->list); | 5790 | list_del(&item->list); |
5767 | kfree(item); | 5791 | kfree(item); |
5792 | vmx->nested.vmcs02_num--; | ||
5768 | } | 5793 | } |
5769 | vmx->nested.vmcs02_num = 0; | ||
5770 | |||
5771 | if (vmx->loaded_vmcs != &vmx->vmcs01) | ||
5772 | free_loaded_vmcs(&vmx->vmcs01); | ||
5773 | } | 5794 | } |
5774 | 5795 | ||
5775 | /* | 5796 | /* |
@@ -5918,7 +5939,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5918 | * which replaces physical address width with 32 | 5939 | * which replaces physical address width with 32 |
5919 | * | 5940 | * |
5920 | */ | 5941 | */ |
5921 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5942 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5922 | nested_vmx_failInvalid(vcpu); | 5943 | nested_vmx_failInvalid(vcpu); |
5923 | skip_emulated_instruction(vcpu); | 5944 | skip_emulated_instruction(vcpu); |
5924 | return 1; | 5945 | return 1; |
@@ -5936,7 +5957,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5936 | vmx->nested.vmxon_ptr = vmptr; | 5957 | vmx->nested.vmxon_ptr = vmptr; |
5937 | break; | 5958 | break; |
5938 | case EXIT_REASON_VMCLEAR: | 5959 | case EXIT_REASON_VMCLEAR: |
5939 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5960 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5940 | nested_vmx_failValid(vcpu, | 5961 | nested_vmx_failValid(vcpu, |
5941 | VMXERR_VMCLEAR_INVALID_ADDRESS); | 5962 | VMXERR_VMCLEAR_INVALID_ADDRESS); |
5942 | skip_emulated_instruction(vcpu); | 5963 | skip_emulated_instruction(vcpu); |
@@ -5951,7 +5972,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5951 | } | 5972 | } |
5952 | break; | 5973 | break; |
5953 | case EXIT_REASON_VMPTRLD: | 5974 | case EXIT_REASON_VMPTRLD: |
5954 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5975 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5955 | nested_vmx_failValid(vcpu, | 5976 | nested_vmx_failValid(vcpu, |
5956 | VMXERR_VMPTRLD_INVALID_ADDRESS); | 5977 | VMXERR_VMPTRLD_INVALID_ADDRESS); |
5957 | skip_emulated_instruction(vcpu); | 5978 | skip_emulated_instruction(vcpu); |
@@ -6086,20 +6107,27 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) | |||
6086 | static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) | 6107 | static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) |
6087 | { | 6108 | { |
6088 | u32 exec_control; | 6109 | u32 exec_control; |
6110 | if (vmx->nested.current_vmptr == -1ull) | ||
6111 | return; | ||
6112 | |||
6113 | /* current_vmptr and current_vmcs12 are always set/reset together */ | ||
6114 | if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) | ||
6115 | return; | ||
6116 | |||
6089 | if (enable_shadow_vmcs) { | 6117 | if (enable_shadow_vmcs) { |
6090 | if (vmx->nested.current_vmcs12 != NULL) { | 6118 | /* copy to memory all shadowed fields in case |
6091 | /* copy to memory all shadowed fields in case | 6119 | they were modified */ |
6092 | they were modified */ | 6120 | copy_shadow_to_vmcs12(vmx); |
6093 | copy_shadow_to_vmcs12(vmx); | 6121 | vmx->nested.sync_shadow_vmcs = false; |
6094 | vmx->nested.sync_shadow_vmcs = false; | 6122 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); |
6095 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); | 6123 | exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; |
6096 | exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; | 6124 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); |
6097 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | 6125 | vmcs_write64(VMCS_LINK_POINTER, -1ull); |
6098 | vmcs_write64(VMCS_LINK_POINTER, -1ull); | ||
6099 | } | ||
6100 | } | 6126 | } |
6101 | kunmap(vmx->nested.current_vmcs12_page); | 6127 | kunmap(vmx->nested.current_vmcs12_page); |
6102 | nested_release_page(vmx->nested.current_vmcs12_page); | 6128 | nested_release_page(vmx->nested.current_vmcs12_page); |
6129 | vmx->nested.current_vmptr = -1ull; | ||
6130 | vmx->nested.current_vmcs12 = NULL; | ||
6103 | } | 6131 | } |
6104 | 6132 | ||
6105 | /* | 6133 | /* |
@@ -6110,12 +6138,9 @@ static void free_nested(struct vcpu_vmx *vmx) | |||
6110 | { | 6138 | { |
6111 | if (!vmx->nested.vmxon) | 6139 | if (!vmx->nested.vmxon) |
6112 | return; | 6140 | return; |
6141 | |||
6113 | vmx->nested.vmxon = false; | 6142 | vmx->nested.vmxon = false; |
6114 | if (vmx->nested.current_vmptr != -1ull) { | 6143 | nested_release_vmcs12(vmx); |
6115 | nested_release_vmcs12(vmx); | ||
6116 | vmx->nested.current_vmptr = -1ull; | ||
6117 | vmx->nested.current_vmcs12 = NULL; | ||
6118 | } | ||
6119 | if (enable_shadow_vmcs) | 6144 | if (enable_shadow_vmcs) |
6120 | free_vmcs(vmx->nested.current_shadow_vmcs); | 6145 | free_vmcs(vmx->nested.current_shadow_vmcs); |
6121 | /* Unpin physical memory we referred to in current vmcs02 */ | 6146 | /* Unpin physical memory we referred to in current vmcs02 */ |
@@ -6152,11 +6177,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) | |||
6152 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) | 6177 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) |
6153 | return 1; | 6178 | return 1; |
6154 | 6179 | ||
6155 | if (vmptr == vmx->nested.current_vmptr) { | 6180 | if (vmptr == vmx->nested.current_vmptr) |
6156 | nested_release_vmcs12(vmx); | 6181 | nested_release_vmcs12(vmx); |
6157 | vmx->nested.current_vmptr = -1ull; | ||
6158 | vmx->nested.current_vmcs12 = NULL; | ||
6159 | } | ||
6160 | 6182 | ||
6161 | page = nested_get_page(vcpu, vmptr); | 6183 | page = nested_get_page(vcpu, vmptr); |
6162 | if (page == NULL) { | 6184 | if (page == NULL) { |
@@ -6384,7 +6406,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
6384 | return 1; | 6406 | return 1; |
6385 | 6407 | ||
6386 | /* Decode instruction info and find the field to read */ | 6408 | /* Decode instruction info and find the field to read */ |
6387 | field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | 6409 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); |
6388 | /* Read the field, zero-extended to a u64 field_value */ | 6410 | /* Read the field, zero-extended to a u64 field_value */ |
6389 | if (!vmcs12_read_any(vcpu, field, &field_value)) { | 6411 | if (!vmcs12_read_any(vcpu, field, &field_value)) { |
6390 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); | 6412 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); |
@@ -6397,7 +6419,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
6397 | * on the guest's mode (32 or 64 bit), not on the given field's length. | 6419 | * on the guest's mode (32 or 64 bit), not on the given field's length. |
6398 | */ | 6420 | */ |
6399 | if (vmx_instruction_info & (1u << 10)) { | 6421 | if (vmx_instruction_info & (1u << 10)) { |
6400 | kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf), | 6422 | kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), |
6401 | field_value); | 6423 | field_value); |
6402 | } else { | 6424 | } else { |
6403 | if (get_vmx_mem_address(vcpu, exit_qualification, | 6425 | if (get_vmx_mem_address(vcpu, exit_qualification, |
@@ -6434,21 +6456,21 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) | |||
6434 | return 1; | 6456 | return 1; |
6435 | 6457 | ||
6436 | if (vmx_instruction_info & (1u << 10)) | 6458 | if (vmx_instruction_info & (1u << 10)) |
6437 | field_value = kvm_register_read(vcpu, | 6459 | field_value = kvm_register_readl(vcpu, |
6438 | (((vmx_instruction_info) >> 3) & 0xf)); | 6460 | (((vmx_instruction_info) >> 3) & 0xf)); |
6439 | else { | 6461 | else { |
6440 | if (get_vmx_mem_address(vcpu, exit_qualification, | 6462 | if (get_vmx_mem_address(vcpu, exit_qualification, |
6441 | vmx_instruction_info, &gva)) | 6463 | vmx_instruction_info, &gva)) |
6442 | return 1; | 6464 | return 1; |
6443 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, | 6465 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, |
6444 | &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) { | 6466 | &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { |
6445 | kvm_inject_page_fault(vcpu, &e); | 6467 | kvm_inject_page_fault(vcpu, &e); |
6446 | return 1; | 6468 | return 1; |
6447 | } | 6469 | } |
6448 | } | 6470 | } |
6449 | 6471 | ||
6450 | 6472 | ||
6451 | field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | 6473 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); |
6452 | if (vmcs_field_readonly(field)) { | 6474 | if (vmcs_field_readonly(field)) { |
6453 | nested_vmx_failValid(vcpu, | 6475 | nested_vmx_failValid(vcpu, |
6454 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); | 6476 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); |
@@ -6498,9 +6520,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
6498 | skip_emulated_instruction(vcpu); | 6520 | skip_emulated_instruction(vcpu); |
6499 | return 1; | 6521 | return 1; |
6500 | } | 6522 | } |
6501 | if (vmx->nested.current_vmptr != -1ull) | ||
6502 | nested_release_vmcs12(vmx); | ||
6503 | 6523 | ||
6524 | nested_release_vmcs12(vmx); | ||
6504 | vmx->nested.current_vmptr = vmptr; | 6525 | vmx->nested.current_vmptr = vmptr; |
6505 | vmx->nested.current_vmcs12 = new_vmcs12; | 6526 | vmx->nested.current_vmcs12 = new_vmcs12; |
6506 | vmx->nested.current_vmcs12_page = page; | 6527 | vmx->nested.current_vmcs12_page = page; |
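After the reshuffle above, nested_release_vmcs12() is safe to call unconditionally: it returns early on the current_vmptr == -1ull sentinel and resets the pointer and sentinel together, so handle_vmclear(), handle_vmptrld() and free_nested() no longer repeat that bookkeeping themselves. The shape of the pattern, with invented names:

#include <stddef.h>

struct nested { unsigned long long vmptr; void *vmcs12; };

static void release_current(struct nested *n)
{
        if (n->vmptr == -1ull)  /* nothing loaded: calling again is a no-op */
                return;

        /* ... flush shadow state and unmap n->vmcs12 here ... */

        n->vmptr = -1ull;       /* sentinel and pointer reset together */
        n->vmcs12 = NULL;
}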
@@ -6571,7 +6592,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) | |||
6571 | } | 6592 | } |
6572 | 6593 | ||
6573 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | 6594 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); |
6574 | type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); | 6595 | type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); |
6575 | 6596 | ||
6576 | types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; | 6597 | types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; |
6577 | 6598 | ||
@@ -6751,7 +6772,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
6751 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 6772 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6752 | int cr = exit_qualification & 15; | 6773 | int cr = exit_qualification & 15; |
6753 | int reg = (exit_qualification >> 8) & 15; | 6774 | int reg = (exit_qualification >> 8) & 15; |
6754 | unsigned long val = kvm_register_read(vcpu, reg); | 6775 | unsigned long val = kvm_register_readl(vcpu, reg); |
6755 | 6776 | ||
6756 | switch ((exit_qualification >> 4) & 3) { | 6777 | switch ((exit_qualification >> 4) & 3) { |
6757 | case 0: /* mov to cr */ | 6778 | case 0: /* mov to cr */ |
@@ -7112,7 +7133,26 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) | |||
7112 | if (max_irr == -1) | 7133 | if (max_irr == -1) |
7113 | return; | 7134 | return; |
7114 | 7135 | ||
7115 | vmx_set_rvi(max_irr); | 7136 | /* |
7137 | * If a vmexit is needed, vmx_check_nested_events handles it. | ||
7138 | */ | ||
7139 | if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) | ||
7140 | return; | ||
7141 | |||
7142 | if (!is_guest_mode(vcpu)) { | ||
7143 | vmx_set_rvi(max_irr); | ||
7144 | return; | ||
7145 | } | ||
7146 | |||
7147 | /* | ||
7148 | * Fall back to pre-APICv interrupt injection since L2 | ||
7149 | * is run without virtual interrupt delivery. | ||
7150 | */ | ||
7151 | if (!kvm_event_needs_reinjection(vcpu) && | ||
7152 | vmx_interrupt_allowed(vcpu)) { | ||
7153 | kvm_queue_interrupt(vcpu, max_irr, false); | ||
7154 | vmx_inject_irq(vcpu); | ||
7155 | } | ||
7116 | } | 7156 | } |
7117 | 7157 | ||
7118 | static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) | 7158 | static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) |
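vmx_hwapic_irr_update() above now dispatches on nesting state; as a decision ladder (predicate names taken from the hunk, bodies elided):

enum irr_action {
        IRR_DEFER,      /* L1 wants the exit: vmx_check_nested_events acts */
        IRR_SET_RVI,    /* not in guest mode: virtual interrupt delivery */
        IRR_INJECT,     /* L2 without VID: legacy injection */
        IRR_NOTHING,
};

static enum irr_action irr_update(int guest_mode, int nested_exit_on_intr,
                                  int can_inject_now)
{
        if (guest_mode && nested_exit_on_intr)
                return IRR_DEFER;
        if (!guest_mode)
                return IRR_SET_RVI;
        return can_inject_now ? IRR_INJECT : IRR_NOTHING;
}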
@@ -7520,13 +7560,31 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
7520 | vmx_complete_interrupts(vmx); | 7560 | vmx_complete_interrupts(vmx); |
7521 | } | 7561 | } |
7522 | 7562 | ||
7563 | static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) | ||
7564 | { | ||
7565 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
7566 | int cpu; | ||
7567 | |||
7568 | if (vmx->loaded_vmcs == &vmx->vmcs01) | ||
7569 | return; | ||
7570 | |||
7571 | cpu = get_cpu(); | ||
7572 | vmx->loaded_vmcs = &vmx->vmcs01; | ||
7573 | vmx_vcpu_put(vcpu); | ||
7574 | vmx_vcpu_load(vcpu, cpu); | ||
7575 | vcpu->cpu = cpu; | ||
7576 | put_cpu(); | ||
7577 | } | ||
7578 | |||
7523 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | 7579 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
7524 | { | 7580 | { |
7525 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 7581 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
7526 | 7582 | ||
7527 | free_vpid(vmx); | 7583 | free_vpid(vmx); |
7528 | free_loaded_vmcs(vmx->loaded_vmcs); | 7584 | leave_guest_mode(vcpu); |
7585 | vmx_load_vmcs01(vcpu); | ||
7529 | free_nested(vmx); | 7586 | free_nested(vmx); |
7587 | free_loaded_vmcs(vmx->loaded_vmcs); | ||
7530 | kfree(vmx->guest_msrs); | 7588 | kfree(vmx->guest_msrs); |
7531 | kvm_vcpu_uninit(vcpu); | 7589 | kvm_vcpu_uninit(vcpu); |
7532 | kmem_cache_free(kvm_vcpu_cache, vmx); | 7590 | kmem_cache_free(kvm_vcpu_cache, vmx); |
@@ -7548,6 +7606,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
7548 | goto free_vcpu; | 7606 | goto free_vcpu; |
7549 | 7607 | ||
7550 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | 7608 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
7609 | BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) | ||
7610 | > PAGE_SIZE); | ||
7611 | |||
7551 | err = -ENOMEM; | 7612 | err = -ENOMEM; |
7552 | if (!vmx->guest_msrs) { | 7613 | if (!vmx->guest_msrs) { |
7553 | goto uninit_vcpu; | 7614 | goto uninit_vcpu; |
@@ -7836,7 +7897,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7836 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); | 7897 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); |
7837 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); | 7898 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); |
7838 | 7899 | ||
7839 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); | 7900 | if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { |
7901 | kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); | ||
7902 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); | ||
7903 | } else { | ||
7904 | kvm_set_dr(vcpu, 7, vcpu->arch.dr7); | ||
7905 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); | ||
7906 | } | ||
7840 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 7907 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
7841 | vmcs12->vm_entry_intr_info_field); | 7908 | vmcs12->vm_entry_intr_info_field); |
7842 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | 7909 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, |
@@ -7846,7 +7913,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7846 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | 7913 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, |
7847 | vmcs12->guest_interruptibility_info); | 7914 | vmcs12->guest_interruptibility_info); |
7848 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); | 7915 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); |
7849 | kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); | ||
7850 | vmx_set_rflags(vcpu, vmcs12->guest_rflags); | 7916 | vmx_set_rflags(vcpu, vmcs12->guest_rflags); |
7851 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, | 7917 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, |
7852 | vmcs12->guest_pending_dbg_exceptions); | 7918 | vmcs12->guest_pending_dbg_exceptions); |
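DR7 and IA32_DEBUGCTL are architecturally loaded from the VMCS on VM-entry only when the "load debug controls" VM-entry control is set; otherwise they keep their current values. prepare_vmcs02 now honors that instead of always taking vmcs12's values, falling back to the DR7 that L0 tracks and to the DEBUGCTL snapshot taken in nested_vmx_run (the vmcs01_debugctl hunk below). The rule, restated as a pure function:

    #include <stdint.h>
    #include <stdio.h>

    #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x4   /* bit 2 of VM-entry controls */

    struct dbg { uint64_t dr7, debugctl; };

    /* What the guest (L2) observes right after VM-entry. */
    static struct dbg entry_debug_state(uint32_t entry_controls,
                                        struct dbg vmcs12, struct dbg current)
    {
        return (entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) ? vmcs12 : current;
    }

    int main(void)
    {
        struct dbg from_l1 = { 0x455, 0x1 }, live = { 0x400, 0x0 };
        struct dbg s = entry_debug_state(0, from_l1, live);
        printf("dr7=%#llx debugctl=%#llx\n",
               (unsigned long long)s.dr7, (unsigned long long)s.debugctl);
        return 0;
    }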
@@ -8113,14 +8179,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8113 | } | 8179 | } |
8114 | 8180 | ||
8115 | if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && | 8181 | if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && |
8116 | !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) { | 8182 | !PAGE_ALIGNED(vmcs12->msr_bitmap)) { |
8117 | /*TODO: Also verify bits beyond physical address width are 0*/ | 8183 | /*TODO: Also verify bits beyond physical address width are 0*/ |
8118 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8184 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8119 | return 1; | 8185 | return 1; |
8120 | } | 8186 | } |
8121 | 8187 | ||
8122 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && | 8188 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && |
8123 | !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) { | 8189 | !PAGE_ALIGNED(vmcs12->apic_access_addr)) { |
8124 | /*TODO: Also verify bits beyond physical address width are 0*/ | 8190 | /*TODO: Also verify bits beyond physical address width are 0*/ |
8125 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8191 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8126 | return 1; | 8192 | return 1; |
@@ -8136,15 +8202,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8136 | } | 8202 | } |
8137 | 8203 | ||
8138 | if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, | 8204 | if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, |
8139 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) || | 8205 | nested_vmx_true_procbased_ctls_low, |
8206 | nested_vmx_procbased_ctls_high) || | ||
8140 | !vmx_control_verify(vmcs12->secondary_vm_exec_control, | 8207 | !vmx_control_verify(vmcs12->secondary_vm_exec_control, |
8141 | nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || | 8208 | nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || |
8142 | !vmx_control_verify(vmcs12->pin_based_vm_exec_control, | 8209 | !vmx_control_verify(vmcs12->pin_based_vm_exec_control, |
8143 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || | 8210 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || |
8144 | !vmx_control_verify(vmcs12->vm_exit_controls, | 8211 | !vmx_control_verify(vmcs12->vm_exit_controls, |
8145 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) || | 8212 | nested_vmx_true_exit_ctls_low, |
8213 | nested_vmx_exit_ctls_high) || | ||
8146 | !vmx_control_verify(vmcs12->vm_entry_controls, | 8214 | !vmx_control_verify(vmcs12->vm_entry_controls, |
8147 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high)) | 8215 | nested_vmx_true_entry_ctls_low, |
8216 | nested_vmx_entry_ctls_high)) | ||
8148 | { | 8217 | { |
8149 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8218 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8150 | return 1; | 8219 | return 1; |
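vmx_control_verify checks a vmcs12 control word against the allowed-0/allowed-1 settings advertised by the VMX capability MSRs, and the switch to the nested_vmx_true_* values bases the check on the TRUE_*_CTLS MSRs, which additionally permit clearing certain default-1 bits. A stand-alone restatement of the predicate (equivalent to the kernel's formulation in the usual case where every required-1 bit is also an allowed-1 bit):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* low: allowed-0 settings (must be 1); high: allowed-1 (may be 1). */
    static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
    {
        return (control & low) == low && (control & ~high) == 0;
    }

    int main(void)
    {
        assert(control_verify(0x0000001e, 0x00000016, 0x0000401e));
        assert(!control_verify(0x80000000, 0x00000016, 0x0000401e));
        return 0;
    }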
@@ -8221,6 +8290,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8221 | 8290 | ||
8222 | vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); | 8291 | vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); |
8223 | 8292 | ||
8293 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) | ||
8294 | vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8295 | |||
8224 | cpu = get_cpu(); | 8296 | cpu = get_cpu(); |
8225 | vmx->loaded_vmcs = vmcs02; | 8297 | vmx->loaded_vmcs = vmcs02; |
8226 | vmx_vcpu_put(vcpu); | 8298 | vmx_vcpu_put(vcpu); |
@@ -8398,7 +8470,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
8398 | vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); | 8470 | vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); |
8399 | vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); | 8471 | vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); |
8400 | 8472 | ||
8401 | kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); | ||
8402 | vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); | 8473 | vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); |
8403 | vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); | 8474 | vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); |
8404 | vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); | 8475 | vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); |
@@ -8477,9 +8548,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
8477 | (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | | 8548 | (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | |
8478 | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); | 8549 | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); |
8479 | 8550 | ||
8551 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { | ||
8552 | kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); | ||
8553 | vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8554 | } | ||
8555 | |||
8480 | /* TODO: These cannot have changed unless we have MSR bitmaps and | 8556 | /* TODO: These cannot have changed unless we have MSR bitmaps and |
8481 | * the relevant bit asks not to trap the change */ | 8557 | * the relevant bit asks not to trap the change */ |
8482 | vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8483 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) | 8558 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) |
8484 | vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); | 8559 | vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); |
8485 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) | 8560 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) |
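prepare_vmcs12 is the exit-side mirror of the entry change above: DR7 and DEBUGCTL are written back into vmcs12 only when L1 set the "save debug controls" VM-exit control, and the old unconditional DEBUGCTL save under the TODO comment is dropped. Together with the vmcs01_debugctl capture in nested_vmx_run, L0's view stays consistent whether or not L1 manages debug state. Condensed into one function (simplified types as before):

    #include <stdint.h>

    #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x4    /* bit 2 of VM-exit controls */

    struct dbg { uint64_t dr7, debugctl; };

    /* On emulated vmexit: write debug state back into vmcs12 only on request. */
    static void save_debug_state(uint32_t exit_controls, struct dbg *vmcs12,
                                 struct dbg live)
    {
        if (exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
            *vmcs12 = live;          /* kvm_get_dr(7) + GUEST_IA32_DEBUGCTL */
    }

    int main(void) { struct dbg d = {0, 0}; save_debug_state(0x4, &d, d); return 0; }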
@@ -8670,7 +8745,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
8670 | unsigned long exit_qualification) | 8745 | unsigned long exit_qualification) |
8671 | { | 8746 | { |
8672 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 8747 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
8673 | int cpu; | ||
8674 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 8748 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
8675 | 8749 | ||
8676 | /* trying to cancel vmlaunch/vmresume is a bug */ | 8750 | /* trying to cancel vmlaunch/vmresume is a bug */ |
@@ -8695,12 +8769,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
8695 | vmcs12->vm_exit_intr_error_code, | 8769 | vmcs12->vm_exit_intr_error_code, |
8696 | KVM_ISA_VMX); | 8770 | KVM_ISA_VMX); |
8697 | 8771 | ||
8698 | cpu = get_cpu(); | 8772 | vmx_load_vmcs01(vcpu); |
8699 | vmx->loaded_vmcs = &vmx->vmcs01; | ||
8700 | vmx_vcpu_put(vcpu); | ||
8701 | vmx_vcpu_load(vcpu, cpu); | ||
8702 | vcpu->cpu = cpu; | ||
8703 | put_cpu(); | ||
8704 | 8773 | ||
8705 | vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); | 8774 | vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); |
8706 | vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); | 8775 | vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); |
@@ -8890,7 +8959,7 @@ static int __init vmx_init(void) | |||
8890 | 8959 | ||
8891 | rdmsrl_safe(MSR_EFER, &host_efer); | 8960 | rdmsrl_safe(MSR_EFER, &host_efer); |
8892 | 8961 | ||
8893 | for (i = 0; i < NR_VMX_MSR; ++i) | 8962 | for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) |
8894 | kvm_define_shared_msr(i, vmx_msr_index[i]); | 8963 | kvm_define_shared_msr(i, vmx_msr_index[i]); |
8895 | 8964 | ||
8896 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); | 8965 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ef432f891d30..b86d329b953a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); | |||
87 | 87 | ||
88 | static void update_cr8_intercept(struct kvm_vcpu *vcpu); | 88 | static void update_cr8_intercept(struct kvm_vcpu *vcpu); |
89 | static void process_nmi(struct kvm_vcpu *vcpu); | 89 | static void process_nmi(struct kvm_vcpu *vcpu); |
90 | static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
90 | 91 | ||
91 | struct kvm_x86_ops *kvm_x86_ops; | 92 | struct kvm_x86_ops *kvm_x86_ops; |
92 | EXPORT_SYMBOL_GPL(kvm_x86_ops); | 93 | EXPORT_SYMBOL_GPL(kvm_x86_ops); |
@@ -211,6 +212,7 @@ static void shared_msr_update(unsigned slot, u32 msr) | |||
211 | 212 | ||
212 | void kvm_define_shared_msr(unsigned slot, u32 msr) | 213 | void kvm_define_shared_msr(unsigned slot, u32 msr) |
213 | { | 214 | { |
215 | BUG_ON(slot >= KVM_NR_SHARED_MSRS); | ||
214 | if (slot >= shared_msrs_global.nr) | 216 | if (slot >= shared_msrs_global.nr) |
215 | shared_msrs_global.nr = slot + 1; | 217 | shared_msrs_global.nr = slot + 1; |
216 | shared_msrs_global.msrs[slot] = msr; | 218 | shared_msrs_global.msrs[slot] = msr; |
@@ -310,6 +312,31 @@ static int exception_class(int vector) | |||
310 | return EXCPT_BENIGN; | 312 | return EXCPT_BENIGN; |
311 | } | 313 | } |
312 | 314 | ||
315 | #define EXCPT_FAULT 0 | ||
316 | #define EXCPT_TRAP 1 | ||
317 | #define EXCPT_ABORT 2 | ||
318 | #define EXCPT_INTERRUPT 3 | ||
319 | |||
320 | static int exception_type(int vector) | ||
321 | { | ||
322 | unsigned int mask; | ||
323 | |||
324 | if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) | ||
325 | return EXCPT_INTERRUPT; | ||
326 | |||
327 | mask = 1 << vector; | ||
328 | |||
329 | /* #DB is trap, as instruction watchpoints are handled elsewhere */ | ||
330 | if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) | ||
331 | return EXCPT_TRAP; | ||
332 | |||
333 | if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) | ||
334 | return EXCPT_ABORT; | ||
335 | |||
336 | /* Reserved exceptions will result in fault */ | ||
337 | return EXCPT_FAULT; | ||
338 | } | ||
339 | |||
313 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | 340 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, |
314 | unsigned nr, bool has_error, u32 error_code, | 341 | unsigned nr, bool has_error, u32 error_code, |
315 | bool reinject) | 342 | bool reinject) |
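exception_type complements the existing exception_class: class drives contributory/double-fault merging, while type drives the new RFLAGS.RF handling during injection (faults set RF, traps do not; see the inject_pending_event hunk below). A user-space copy of the classification with the vector numbers written out:

    #include <stdio.h>

    enum { EXCPT_FAULT, EXCPT_TRAP, EXCPT_ABORT, EXCPT_INTERRUPT };

    static int exception_type(int vector)
    {
        unsigned int mask;

        if (vector > 31 || vector == 2)                 /* 2 = NMI_VECTOR */
            return EXCPT_INTERRUPT;

        mask = 1u << vector;
        if (mask & ((1u << 1) | (1u << 3) | (1u << 4))) /* #DB, #BP, #OF */
            return EXCPT_TRAP;
        if (mask & ((1u << 8) | (1u << 18)))            /* #DF, #MC */
            return EXCPT_ABORT;
        return EXCPT_FAULT;               /* reserved vectors fault too */
    }

    int main(void)
    {
        printf("#PF=%d #BP=%d #DF=%d\n",
               exception_type(14), exception_type(3), exception_type(8));
        return 0;
    }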
@@ -758,6 +785,15 @@ static void kvm_update_dr7(struct kvm_vcpu *vcpu) | |||
758 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; | 785 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; |
759 | } | 786 | } |
760 | 787 | ||
788 | static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) | ||
789 | { | ||
790 | u64 fixed = DR6_FIXED_1; | ||
791 | |||
792 | if (!guest_cpuid_has_rtm(vcpu)) | ||
793 | fixed |= DR6_RTM; | ||
794 | return fixed; | ||
795 | } | ||
796 | |||
761 | static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) | 797 | static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) |
762 | { | 798 | { |
763 | switch (dr) { | 799 | switch (dr) { |
@@ -773,7 +809,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) | |||
773 | case 6: | 809 | case 6: |
774 | if (val & 0xffffffff00000000ULL) | 810 | if (val & 0xffffffff00000000ULL) |
775 | return -1; /* #GP */ | 811 | return -1; /* #GP */ |
776 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | 812 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); |
777 | kvm_update_dr6(vcpu); | 813 | kvm_update_dr6(vcpu); |
778 | break; | 814 | break; |
779 | case 5: | 815 | case 5: |
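DR6 bit 16 (DR6_RTM) has inverted polarity: it reads as 1 except when a debug exception is taken inside an Intel TSX transaction, and on CPUs without TSX it is always 1. kvm_dr6_fixed therefore folds DR6_RTM into the always-set mask only when the guest's CPUID lacks RTM. A sketch; the DR6_FIXED_1 value shown is assumed to be the post-series definition with bit 16 carved out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DR6_FIXED_1 0xfffe0ff0ull   /* always-1 bits, bit 16 excluded */
    #define DR6_RTM     (1ull << 16)

    static uint64_t dr6_fixed(bool guest_has_rtm)
    {
        uint64_t fixed = DR6_FIXED_1;

        if (!guest_has_rtm)
            fixed |= DR6_RTM;        /* no TSX: bit 16 always reads as 1 */
        return fixed;
    }

    int main(void)
    {
        printf("%#llx / %#llx\n", (unsigned long long)dr6_fixed(false),
               (unsigned long long)dr6_fixed(true));
        return 0;
    }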
@@ -1215,6 +1251,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1215 | unsigned long flags; | 1251 | unsigned long flags; |
1216 | s64 usdiff; | 1252 | s64 usdiff; |
1217 | bool matched; | 1253 | bool matched; |
1254 | bool already_matched; | ||
1218 | u64 data = msr->data; | 1255 | u64 data = msr->data; |
1219 | 1256 | ||
1220 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); | 1257 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); |
@@ -1279,6 +1316,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1279 | pr_debug("kvm: adjusted tsc offset by %llu\n", delta); | 1316 | pr_debug("kvm: adjusted tsc offset by %llu\n", delta); |
1280 | } | 1317 | } |
1281 | matched = true; | 1318 | matched = true; |
1319 | already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); | ||
1282 | } else { | 1320 | } else { |
1283 | /* | 1321 | /* |
1284 | * We split periods of matched TSC writes into generations. | 1322 | * We split periods of matched TSC writes into generations. |
@@ -1294,7 +1332,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1294 | kvm->arch.cur_tsc_write = data; | 1332 | kvm->arch.cur_tsc_write = data; |
1295 | kvm->arch.cur_tsc_offset = offset; | 1333 | kvm->arch.cur_tsc_offset = offset; |
1296 | matched = false; | 1334 | matched = false; |
1297 | pr_debug("kvm: new tsc generation %u, clock %llu\n", | 1335 | pr_debug("kvm: new tsc generation %llu, clock %llu\n", |
1298 | kvm->arch.cur_tsc_generation, data); | 1336 | kvm->arch.cur_tsc_generation, data); |
1299 | } | 1337 | } |
1300 | 1338 | ||
@@ -1319,10 +1357,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1319 | raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); | 1357 | raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); |
1320 | 1358 | ||
1321 | spin_lock(&kvm->arch.pvclock_gtod_sync_lock); | 1359 | spin_lock(&kvm->arch.pvclock_gtod_sync_lock); |
1322 | if (matched) | 1360 | if (!matched) { |
1323 | kvm->arch.nr_vcpus_matched_tsc++; | ||
1324 | else | ||
1325 | kvm->arch.nr_vcpus_matched_tsc = 0; | 1361 | kvm->arch.nr_vcpus_matched_tsc = 0; |
1362 | } else if (!already_matched) { | ||
1363 | kvm->arch.nr_vcpus_matched_tsc++; | ||
1364 | } | ||
1326 | 1365 | ||
1327 | kvm_track_tsc_matching(vcpu); | 1366 | kvm_track_tsc_matching(vcpu); |
1328 | spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); | 1367 | spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); |
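The already_matched flag fixes an accounting bug: a vcpu writing a matching TSC value more than once used to bump nr_vcpus_matched_tsc each time, overstating how many distinct vcpus had joined the current generation and confusing the masterclock heuristics. A toy model of the corrected counting (field names simplified):

    #include <assert.h>
    #include <stdbool.h>

    struct kvm_state { unsigned gen, matched; };
    struct vcpu_state { unsigned gen; };

    static void tsc_write(struct kvm_state *k, struct vcpu_state *v, bool matched)
    {
        bool already = matched && v->gen == k->gen;

        if (!matched) {
            k->gen++;                 /* start a new generation */
            k->matched = 0;
        } else if (!already) {
            k->matched++;             /* count each vcpu only once */
        }
        v->gen = k->gen;
    }

    int main(void)
    {
        struct kvm_state k = { 1, 0 };
        struct vcpu_state a = { 0 }, b = { 0 };

        tsc_write(&k, &a, true);
        tsc_write(&k, &a, true);      /* same vcpu again: no double count */
        tsc_write(&k, &b, true);
        assert(k.matched == 2);
        return 0;
    }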
@@ -2032,6 +2071,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2032 | data &= ~(u64)0x40; /* ignore flush filter disable */ | 2071 | data &= ~(u64)0x40; /* ignore flush filter disable */ |
2033 | data &= ~(u64)0x100; /* ignore ignne emulation enable */ | 2072 | data &= ~(u64)0x100; /* ignore ignne emulation enable */ |
2034 | data &= ~(u64)0x8; /* ignore TLB cache disable */ | 2073 | data &= ~(u64)0x8; /* ignore TLB cache disable */ |
2074 | data &= ~(u64)0x40000; /* ignore MC status write enable */ | ||
2035 | if (data != 0) { | 2075 | if (data != 0) { |
2036 | vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", | 2076 | vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", |
2037 | data); | 2077 | data); |
@@ -2974,9 +3014,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2974 | vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; | 3014 | vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; |
2975 | events->interrupt.nr = vcpu->arch.interrupt.nr; | 3015 | events->interrupt.nr = vcpu->arch.interrupt.nr; |
2976 | events->interrupt.soft = 0; | 3016 | events->interrupt.soft = 0; |
2977 | events->interrupt.shadow = | 3017 | events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); |
2978 | kvm_x86_ops->get_interrupt_shadow(vcpu, | ||
2979 | KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); | ||
2980 | 3018 | ||
2981 | events->nmi.injected = vcpu->arch.nmi_injected; | 3019 | events->nmi.injected = vcpu->arch.nmi_injected; |
2982 | events->nmi.pending = vcpu->arch.nmi_pending != 0; | 3020 | events->nmi.pending = vcpu->arch.nmi_pending != 0; |
@@ -4082,7 +4120,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, | |||
4082 | 4120 | ||
4083 | if (gpa == UNMAPPED_GVA) | 4121 | if (gpa == UNMAPPED_GVA) |
4084 | return X86EMUL_PROPAGATE_FAULT; | 4122 | return X86EMUL_PROPAGATE_FAULT; |
4085 | ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); | 4123 | ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, |
4124 | offset, toread); | ||
4086 | if (ret < 0) { | 4125 | if (ret < 0) { |
4087 | r = X86EMUL_IO_NEEDED; | 4126 | r = X86EMUL_IO_NEEDED; |
4088 | goto out; | 4127 | goto out; |
@@ -4103,10 +4142,24 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, | |||
4103 | { | 4142 | { |
4104 | struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); | 4143 | struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); |
4105 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; | 4144 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; |
4145 | unsigned offset; | ||
4146 | int ret; | ||
4106 | 4147 | ||
4107 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, | 4148 | /* Inline kvm_read_guest_virt_helper for speed. */ |
4108 | access | PFERR_FETCH_MASK, | 4149 | gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, |
4109 | exception); | 4150 | exception); |
4151 | if (unlikely(gpa == UNMAPPED_GVA)) | ||
4152 | return X86EMUL_PROPAGATE_FAULT; | ||
4153 | |||
4154 | offset = addr & (PAGE_SIZE-1); | ||
4155 | if (WARN_ON(offset + bytes > PAGE_SIZE)) | ||
4156 | bytes = (unsigned)PAGE_SIZE - offset; | ||
4157 | ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, | ||
4158 | offset, bytes); | ||
4159 | if (unlikely(ret < 0)) | ||
4160 | return X86EMUL_IO_NEEDED; | ||
4161 | |||
4162 | return X86EMUL_CONTINUE; | ||
4110 | } | 4163 | } |
4111 | 4164 | ||
4112 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, | 4165 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, |
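Instruction fetch is the hottest path through the emulator, so kvm_fetch_guest_virt now translates the GVA once and reads straight from the one guest page instead of looping through the generic helper; the WARN_ON clamp encodes the assumption that a fetch never spans a page boundary, since the emulator's fetch cache is refilled per page. The clamp arithmetic in isolation:

    #include <assert.h>

    #define PAGE_SIZE 4096u

    /* Clamp a read at 'addr' so it never crosses the end of the page. */
    static unsigned clamp_to_page(unsigned long addr, unsigned bytes)
    {
        unsigned offset = addr & (PAGE_SIZE - 1);

        if (offset + bytes > PAGE_SIZE)      /* the WARN_ON() case */
            bytes = PAGE_SIZE - offset;
        return bytes;
    }

    int main(void)
    {
        assert(clamp_to_page(0x1ff8, 15) == 8);   /* 8 bytes left in page */
        assert(clamp_to_page(0x2000, 15) == 15);  /* page-aligned: as-is */
        return 0;
    }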
@@ -4730,7 +4783,6 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, | |||
4730 | if (desc->g) | 4783 | if (desc->g) |
4731 | var.limit = (var.limit << 12) | 0xfff; | 4784 | var.limit = (var.limit << 12) | 0xfff; |
4732 | var.type = desc->type; | 4785 | var.type = desc->type; |
4733 | var.present = desc->p; | ||
4734 | var.dpl = desc->dpl; | 4786 | var.dpl = desc->dpl; |
4735 | var.db = desc->d; | 4787 | var.db = desc->d; |
4736 | var.s = desc->s; | 4788 | var.s = desc->s; |
@@ -4762,6 +4814,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, | |||
4762 | return kvm_set_msr(emul_to_vcpu(ctxt), &msr); | 4814 | return kvm_set_msr(emul_to_vcpu(ctxt), &msr); |
4763 | } | 4815 | } |
4764 | 4816 | ||
4817 | static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, | ||
4818 | u32 pmc) | ||
4819 | { | ||
4820 | return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc); | ||
4821 | } | ||
4822 | |||
4765 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, | 4823 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, |
4766 | u32 pmc, u64 *pdata) | 4824 | u32 pmc, u64 *pdata) |
4767 | { | 4825 | { |
@@ -4838,6 +4896,7 @@ static const struct x86_emulate_ops emulate_ops = { | |||
4838 | .set_dr = emulator_set_dr, | 4896 | .set_dr = emulator_set_dr, |
4839 | .set_msr = emulator_set_msr, | 4897 | .set_msr = emulator_set_msr, |
4840 | .get_msr = emulator_get_msr, | 4898 | .get_msr = emulator_get_msr, |
4899 | .check_pmc = emulator_check_pmc, | ||
4841 | .read_pmc = emulator_read_pmc, | 4900 | .read_pmc = emulator_read_pmc, |
4842 | .halt = emulator_halt, | 4901 | .halt = emulator_halt, |
4843 | .wbinvd = emulator_wbinvd, | 4902 | .wbinvd = emulator_wbinvd, |
@@ -4850,7 +4909,7 @@ static const struct x86_emulate_ops emulate_ops = { | |||
4850 | 4909 | ||
4851 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) | 4910 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) |
4852 | { | 4911 | { |
4853 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); | 4912 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); |
4854 | /* | 4913 | /* |
4855 | * an sti; sti; sequence only disable interrupts for the first | 4914 | * an sti; sti; sequence only disable interrupts for the first |
4856 | * instruction. So, if the last instruction, be it emulated or | 4915 | * instruction. So, if the last instruction, be it emulated or |
@@ -4858,8 +4917,13 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) | |||
4858 | * means that the last instruction is an sti. We should not | 4917 | * means that the last instruction is an sti. We should not |
4859 | * leave the flag on in this case. The same goes for mov ss | 4918 | * leave the flag on in this case. The same goes for mov ss |
4860 | */ | 4919 | */ |
4861 | if (!(int_shadow & mask)) | 4920 | if (int_shadow & mask) |
4921 | mask = 0; | ||
4922 | if (unlikely(int_shadow || mask)) { | ||
4862 | kvm_x86_ops->set_interrupt_shadow(vcpu, mask); | 4923 | kvm_x86_ops->set_interrupt_shadow(vcpu, mask); |
4924 | if (!mask) | ||
4925 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
4926 | } | ||
4863 | } | 4927 | } |
4864 | 4928 | ||
4865 | static void inject_emulated_exception(struct kvm_vcpu *vcpu) | 4929 | static void inject_emulated_exception(struct kvm_vcpu *vcpu) |
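The rewritten toggle_interruptibility saves work in the common case: the interrupt-shadow field is only written when the old or new state is nonzero, and KVM_REQ_EVENT is raised only when an existing shadow is dropped; the unconditional request in x86_emulate_instruction's writeback path is removed in a later hunk. Its decision table, as a stand-alone function:

    #include <assert.h>
    #include <stdbool.h>

    struct result { bool write_vmcs; unsigned new_shadow; bool req_event; };

    static struct result toggle(unsigned shadow, unsigned mask)
    {
        struct result r = { false, shadow, false };

        if (shadow & mask)
            mask = 0;                  /* sti; sti: don't extend the shadow */
        if (shadow || mask) {
            r.write_vmcs = true;       /* VMCS write only on a real change */
            r.new_shadow = mask;
            r.req_event = (mask == 0); /* shadow expired: re-check events */
        }
        return r;
    }

    int main(void)
    {
        assert(!toggle(0, 0).write_vmcs);   /* common case: nothing to do */
        assert(toggle(1, 0).req_event);     /* pending shadow expires */
        assert(!toggle(0, 1).req_event);    /* fresh shadow, no event needed */
        return 0;
    }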
@@ -4874,19 +4938,6 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu) | |||
4874 | kvm_queue_exception(vcpu, ctxt->exception.vector); | 4938 | kvm_queue_exception(vcpu, ctxt->exception.vector); |
4875 | } | 4939 | } |
4876 | 4940 | ||
4877 | static void init_decode_cache(struct x86_emulate_ctxt *ctxt) | ||
4878 | { | ||
4879 | memset(&ctxt->opcode_len, 0, | ||
4880 | (void *)&ctxt->_regs - (void *)&ctxt->opcode_len); | ||
4881 | |||
4882 | ctxt->fetch.start = 0; | ||
4883 | ctxt->fetch.end = 0; | ||
4884 | ctxt->io_read.pos = 0; | ||
4885 | ctxt->io_read.end = 0; | ||
4886 | ctxt->mem_read.pos = 0; | ||
4887 | ctxt->mem_read.end = 0; | ||
4888 | } | ||
4889 | |||
4890 | static void init_emulate_ctxt(struct kvm_vcpu *vcpu) | 4941 | static void init_emulate_ctxt(struct kvm_vcpu *vcpu) |
4891 | { | 4942 | { |
4892 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; | 4943 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; |
@@ -5085,23 +5136,22 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, | |||
5085 | return dr6; | 5136 | return dr6; |
5086 | } | 5137 | } |
5087 | 5138 | ||
5088 | static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) | 5139 | static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) |
5089 | { | 5140 | { |
5090 | struct kvm_run *kvm_run = vcpu->run; | 5141 | struct kvm_run *kvm_run = vcpu->run; |
5091 | 5142 | ||
5092 | /* | 5143 | /* |
5093 | * Use the "raw" value to see if TF was passed to the processor. | 5144 | * rflags is the old, "raw" value of the flags. The new value has |
5094 | * Note that the new value of the flags has not been saved yet. | 5145 | * not been saved yet. |
5095 | * | 5146 | * |
5096 | * This is correct even for TF set by the guest, because "the | 5147 | * This is correct even for TF set by the guest, because "the |
5097 | * processor will not generate this exception after the instruction | 5148 | * processor will not generate this exception after the instruction |
5098 | * that sets the TF flag". | 5149 | * that sets the TF flag". |
5099 | */ | 5150 | */ |
5100 | unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); | ||
5101 | |||
5102 | if (unlikely(rflags & X86_EFLAGS_TF)) { | 5151 | if (unlikely(rflags & X86_EFLAGS_TF)) { |
5103 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { | 5152 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
5104 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; | 5153 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | |
5154 | DR6_RTM; | ||
5105 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; | 5155 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; |
5106 | kvm_run->debug.arch.exception = DB_VECTOR; | 5156 | kvm_run->debug.arch.exception = DB_VECTOR; |
5107 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 5157 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
@@ -5114,7 +5164,7 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) | |||
5114 | * cleared by the processor". | 5164 | * cleared by the processor". |
5115 | */ | 5165 | */ |
5116 | vcpu->arch.dr6 &= ~15; | 5166 | vcpu->arch.dr6 &= ~15; |
5117 | vcpu->arch.dr6 |= DR6_BS; | 5167 | vcpu->arch.dr6 |= DR6_BS | DR6_RTM; |
5118 | kvm_queue_exception(vcpu, DB_VECTOR); | 5168 | kvm_queue_exception(vcpu, DB_VECTOR); |
5119 | } | 5169 | } |
5120 | } | 5170 | } |
@@ -5133,7 +5183,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) | |||
5133 | vcpu->arch.eff_db); | 5183 | vcpu->arch.eff_db); |
5134 | 5184 | ||
5135 | if (dr6 != 0) { | 5185 | if (dr6 != 0) { |
5136 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; | 5186 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; |
5137 | kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + | 5187 | kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + |
5138 | get_segment_base(vcpu, VCPU_SREG_CS); | 5188 | get_segment_base(vcpu, VCPU_SREG_CS); |
5139 | 5189 | ||
@@ -5144,14 +5194,15 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) | |||
5144 | } | 5194 | } |
5145 | } | 5195 | } |
5146 | 5196 | ||
5147 | if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { | 5197 | if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && |
5198 | !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { | ||
5148 | dr6 = kvm_vcpu_check_hw_bp(eip, 0, | 5199 | dr6 = kvm_vcpu_check_hw_bp(eip, 0, |
5149 | vcpu->arch.dr7, | 5200 | vcpu->arch.dr7, |
5150 | vcpu->arch.db); | 5201 | vcpu->arch.db); |
5151 | 5202 | ||
5152 | if (dr6 != 0) { | 5203 | if (dr6 != 0) { |
5153 | vcpu->arch.dr6 &= ~15; | 5204 | vcpu->arch.dr6 &= ~15; |
5154 | vcpu->arch.dr6 |= dr6; | 5205 | vcpu->arch.dr6 |= dr6 | DR6_RTM; |
5155 | kvm_queue_exception(vcpu, DB_VECTOR); | 5206 | kvm_queue_exception(vcpu, DB_VECTOR); |
5156 | *r = EMULATE_DONE; | 5207 | *r = EMULATE_DONE; |
5157 | return true; | 5208 | return true; |
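Two architectural details recur in these hunks: every DR6 value reported onward now carries DR6_RTM (bit 16 reads as 1 outside a transaction), and code breakpoints are suppressed while EFLAGS.RF is set, which is why kvm_vcpu_check_breakpoint now tests RF before consulting DR7. Condensed:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFLAGS_RF (1ul << 16)
    #define DR6_RTM       (1ull << 16)

    /* Code (execute) breakpoints are ignored for one instruction when RF=1. */
    static bool code_bp_fires(bool addr_matches, unsigned long rflags)
    {
        return addr_matches && !(rflags & X86_EFLAGS_RF);
    }

    static uint64_t report_dr6(uint64_t dr6_bits)
    {
        return dr6_bits | DR6_RTM;    /* "not in a transaction" reads as 1 */
    }

    int main(void)
    {
        assert(!code_bp_fires(true, X86_EFLAGS_RF));
        assert(report_dr6(0x1) & DR6_RTM);
        return 0;
    }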
@@ -5215,6 +5266,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, | |||
5215 | 5266 | ||
5216 | if (emulation_type & EMULTYPE_SKIP) { | 5267 | if (emulation_type & EMULTYPE_SKIP) { |
5217 | kvm_rip_write(vcpu, ctxt->_eip); | 5268 | kvm_rip_write(vcpu, ctxt->_eip); |
5269 | if (ctxt->eflags & X86_EFLAGS_RF) | ||
5270 | kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); | ||
5218 | return EMULATE_DONE; | 5271 | return EMULATE_DONE; |
5219 | } | 5272 | } |
5220 | 5273 | ||
@@ -5265,13 +5318,22 @@ restart: | |||
5265 | r = EMULATE_DONE; | 5318 | r = EMULATE_DONE; |
5266 | 5319 | ||
5267 | if (writeback) { | 5320 | if (writeback) { |
5321 | unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); | ||
5268 | toggle_interruptibility(vcpu, ctxt->interruptibility); | 5322 | toggle_interruptibility(vcpu, ctxt->interruptibility); |
5269 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
5270 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; | 5323 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; |
5271 | kvm_rip_write(vcpu, ctxt->eip); | 5324 | kvm_rip_write(vcpu, ctxt->eip); |
5272 | if (r == EMULATE_DONE) | 5325 | if (r == EMULATE_DONE) |
5273 | kvm_vcpu_check_singlestep(vcpu, &r); | 5326 | kvm_vcpu_check_singlestep(vcpu, rflags, &r); |
5274 | kvm_set_rflags(vcpu, ctxt->eflags); | 5327 | __kvm_set_rflags(vcpu, ctxt->eflags); |
5328 | |||
5329 | /* | ||
5330 | * For STI, interrupts are shadowed; so KVM_REQ_EVENT will | ||
5331 | * do nothing, and it will be requested again as soon as | ||
5332 | * the shadow expires. But we still need to check here, | ||
5333 | * because POPF has no interrupt shadow. | ||
5334 | */ | ||
5335 | if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) | ||
5336 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
5275 | } else | 5337 | } else |
5276 | vcpu->arch.emulate_regs_need_sync_to_vcpu = true; | 5338 | vcpu->arch.emulate_regs_need_sync_to_vcpu = true; |
5277 | 5339 | ||
@@ -5662,7 +5724,6 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
5662 | u64 param, ingpa, outgpa, ret; | 5724 | u64 param, ingpa, outgpa, ret; |
5663 | uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; | 5725 | uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; |
5664 | bool fast, longmode; | 5726 | bool fast, longmode; |
5665 | int cs_db, cs_l; | ||
5666 | 5727 | ||
5667 | /* | 5728 | /* |
5668 | * hypercall generates UD from non zero cpl and real mode | 5729 | * hypercall generates UD from non zero cpl and real mode |
@@ -5673,8 +5734,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
5673 | return 0; | 5734 | return 0; |
5674 | } | 5735 | } |
5675 | 5736 | ||
5676 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 5737 | longmode = is_64_bit_mode(vcpu); |
5677 | longmode = is_long_mode(vcpu) && cs_l == 1; | ||
5678 | 5738 | ||
5679 | if (!longmode) { | 5739 | if (!longmode) { |
5680 | param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | | 5740 | param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | |
@@ -5739,7 +5799,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) | |||
5739 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 5799 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
5740 | { | 5800 | { |
5741 | unsigned long nr, a0, a1, a2, a3, ret; | 5801 | unsigned long nr, a0, a1, a2, a3, ret; |
5742 | int r = 1; | 5802 | int op_64_bit, r = 1; |
5743 | 5803 | ||
5744 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 5804 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
5745 | return kvm_hv_hypercall(vcpu); | 5805 | return kvm_hv_hypercall(vcpu); |
@@ -5752,7 +5812,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5752 | 5812 | ||
5753 | trace_kvm_hypercall(nr, a0, a1, a2, a3); | 5813 | trace_kvm_hypercall(nr, a0, a1, a2, a3); |
5754 | 5814 | ||
5755 | if (!is_long_mode(vcpu)) { | 5815 | op_64_bit = is_64_bit_mode(vcpu); |
5816 | if (!op_64_bit) { | ||
5756 | nr &= 0xFFFFFFFF; | 5817 | nr &= 0xFFFFFFFF; |
5757 | a0 &= 0xFFFFFFFF; | 5818 | a0 &= 0xFFFFFFFF; |
5758 | a1 &= 0xFFFFFFFF; | 5819 | a1 &= 0xFFFFFFFF; |
@@ -5778,6 +5839,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5778 | break; | 5839 | break; |
5779 | } | 5840 | } |
5780 | out: | 5841 | out: |
5842 | if (!op_64_bit) | ||
5843 | ret = (u32)ret; | ||
5781 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); | 5844 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); |
5782 | ++vcpu->stat.hypercalls; | 5845 | ++vcpu->stat.hypercalls; |
5783 | return r; | 5846 | return r; |
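is_64_bit_mode (added in x86.h below) distinguishes true 64-bit mode from compatibility mode under a long-mode guest, where is_long_mode alone gives the wrong answer; besides masking the inputs, the hypercall return value is now truncated so a 32-bit caller cannot observe stale upper RAX bits. The truncation rule:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t hypercall_ret(uint64_t ret, int op_64_bit)
    {
        return op_64_bit ? ret : (uint32_t)ret;  /* compat guests see 32 bits */
    }

    int main(void)
    {
        assert(hypercall_ret(0xdeadbeefcafef00dull, 0) == 0xcafef00dull);
        assert(hypercall_ret(0xdeadbeefcafef00dull, 1) == 0xdeadbeefcafef00dull);
        return 0;
    }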
@@ -5856,6 +5919,11 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) | |||
5856 | trace_kvm_inj_exception(vcpu->arch.exception.nr, | 5919 | trace_kvm_inj_exception(vcpu->arch.exception.nr, |
5857 | vcpu->arch.exception.has_error_code, | 5920 | vcpu->arch.exception.has_error_code, |
5858 | vcpu->arch.exception.error_code); | 5921 | vcpu->arch.exception.error_code); |
5922 | |||
5923 | if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) | ||
5924 | __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | | ||
5925 | X86_EFLAGS_RF); | ||
5926 | |||
5859 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, | 5927 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, |
5860 | vcpu->arch.exception.has_error_code, | 5928 | vcpu->arch.exception.has_error_code, |
5861 | vcpu->arch.exception.error_code, | 5929 | vcpu->arch.exception.error_code, |
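Here exception_type pays off: injecting a fault-class exception first sets RF in the guest RFLAGS, matching the SDM rule that the RFLAGS image saved for a fault has RF=1, so the restarted instruction does not immediately re-trigger an instruction breakpoint (see the code-breakpoint sketch above); trap-class exceptions (#DB, #BP, #OF) leave RF alone. In isolation:

    #define X86_EFLAGS_RF (1ul << 16)

    /* Applied before kvm_x86_ops->queue_exception() for fault-class vectors. */
    static unsigned long rflags_for_fault_injection(unsigned long rflags)
    {
        return rflags | X86_EFLAGS_RF;   /* restart won't re-hit a code bp */
    }

    int main(void) { return rflags_for_fault_injection(0x2) == 0x10002 ? 0 : 1; }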
@@ -6847,9 +6915,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu) | |||
6847 | atomic_set(&vcpu->arch.nmi_queued, 0); | 6915 | atomic_set(&vcpu->arch.nmi_queued, 0); |
6848 | vcpu->arch.nmi_pending = 0; | 6916 | vcpu->arch.nmi_pending = 0; |
6849 | vcpu->arch.nmi_injected = false; | 6917 | vcpu->arch.nmi_injected = false; |
6918 | kvm_clear_interrupt_queue(vcpu); | ||
6919 | kvm_clear_exception_queue(vcpu); | ||
6850 | 6920 | ||
6851 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); | 6921 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); |
6852 | vcpu->arch.dr6 = DR6_FIXED_1; | 6922 | vcpu->arch.dr6 = DR6_INIT; |
6853 | kvm_update_dr6(vcpu); | 6923 | kvm_update_dr6(vcpu); |
6854 | vcpu->arch.dr7 = DR7_FIXED_1; | 6924 | vcpu->arch.dr7 = DR7_FIXED_1; |
6855 | kvm_update_dr7(vcpu); | 6925 | kvm_update_dr7(vcpu); |
@@ -7405,12 +7475,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) | |||
7405 | } | 7475 | } |
7406 | EXPORT_SYMBOL_GPL(kvm_get_rflags); | 7476 | EXPORT_SYMBOL_GPL(kvm_get_rflags); |
7407 | 7477 | ||
7408 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | 7478 | static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
7409 | { | 7479 | { |
7410 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && | 7480 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && |
7411 | kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) | 7481 | kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) |
7412 | rflags |= X86_EFLAGS_TF; | 7482 | rflags |= X86_EFLAGS_TF; |
7413 | kvm_x86_ops->set_rflags(vcpu, rflags); | 7483 | kvm_x86_ops->set_rflags(vcpu, rflags); |
7484 | } | ||
7485 | |||
7486 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
7487 | { | ||
7488 | __kvm_set_rflags(vcpu, rflags); | ||
7414 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 7489 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
7415 | } | 7490 | } |
7416 | EXPORT_SYMBOL_GPL(kvm_set_rflags); | 7491 | EXPORT_SYMBOL_GPL(kvm_set_rflags); |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 8c97bac9a895..306a1b77581f 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -47,6 +47,16 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu) | |||
47 | #endif | 47 | #endif |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) | ||
51 | { | ||
52 | int cs_db, cs_l; | ||
53 | |||
54 | if (!is_long_mode(vcpu)) | ||
55 | return false; | ||
56 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | ||
57 | return cs_l; | ||
58 | } | ||
59 | |||
50 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) | 60 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) |
51 | { | 61 | { |
52 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; | 62 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; |
@@ -108,6 +118,23 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) | |||
108 | return false; | 118 | return false; |
109 | } | 119 | } |
110 | 120 | ||
121 | static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, | ||
122 | enum kvm_reg reg) | ||
123 | { | ||
124 | unsigned long val = kvm_register_read(vcpu, reg); | ||
125 | |||
126 | return is_64_bit_mode(vcpu) ? val : (u32)val; | ||
127 | } | ||
128 | |||
129 | static inline void kvm_register_writel(struct kvm_vcpu *vcpu, | ||
130 | enum kvm_reg reg, | ||
131 | unsigned long val) | ||
132 | { | ||
133 | if (!is_64_bit_mode(vcpu)) | ||
134 | val = (u32)val; | ||
135 | return kvm_register_write(vcpu, reg, val); | ||
136 | } | ||
137 | |||
111 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | 138 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
112 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 139 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
113 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); | 140 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); |
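kvm_register_readl and kvm_register_writel centralize the "operands are 32 bits outside 64-bit mode" rule so exit handlers stop open-coding (u32) casts. The observable behavior:

    #include <assert.h>
    #include <stdint.h>

    /* What kvm_register_readl returns for a raw GPR value. */
    static uint64_t register_readl(uint64_t raw, int is_64_bit_mode)
    {
        return is_64_bit_mode ? raw : (uint32_t)raw;  /* upper half hidden */
    }

    int main(void)
    {
        assert(register_readl(0xffffffff00000001ull, 0) == 1);
        assert(register_readl(0xffffffff00000001ull, 1) == 0xffffffff00000001ull);
        return 0;
    }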
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e11d8f170a62..9b744af871d7 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -399,13 +399,18 @@ struct kvm_vapic_addr { | |||
399 | __u64 vapic_addr; | 399 | __u64 vapic_addr; |
400 | }; | 400 | }; |
401 | 401 | ||
402 | /* for KVM_SET_MPSTATE */ | 402 | /* for KVM_SET_MP_STATE */ |
403 | 403 | ||
404 | /* not all states are valid on all architectures */ | ||
404 | #define KVM_MP_STATE_RUNNABLE 0 | 405 | #define KVM_MP_STATE_RUNNABLE 0 |
405 | #define KVM_MP_STATE_UNINITIALIZED 1 | 406 | #define KVM_MP_STATE_UNINITIALIZED 1 |
406 | #define KVM_MP_STATE_INIT_RECEIVED 2 | 407 | #define KVM_MP_STATE_INIT_RECEIVED 2 |
407 | #define KVM_MP_STATE_HALTED 3 | 408 | #define KVM_MP_STATE_HALTED 3 |
408 | #define KVM_MP_STATE_SIPI_RECEIVED 4 | 409 | #define KVM_MP_STATE_SIPI_RECEIVED 4 |
410 | #define KVM_MP_STATE_STOPPED 5 | ||
411 | #define KVM_MP_STATE_CHECK_STOP 6 | ||
412 | #define KVM_MP_STATE_OPERATING 7 | ||
413 | #define KVM_MP_STATE_LOAD 8 | ||
409 | 414 | ||
410 | struct kvm_mp_state { | 415 | struct kvm_mp_state { |
411 | __u32 mp_state; | 416 | __u32 mp_state; |
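The four new states exist for s390, whose SIGP architecture distinguishes stopped, check-stop, operating and load states; the new comment makes explicit that each architecture accepts only its own subset. From userspace they travel through the existing ioctls; a minimal sketch (assumes a vcpu fd from KVM_CREATE_VCPU, kernel headers at least as new as this series, and elides error handling):

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* vcpu_fd must come from KVM_CREATE_VCPU; KVM_CAP_MP_STATE assumed. */
    static int stop_vcpu(int vcpu_fd)
    {
        struct kvm_mp_state mp;

        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
            return -1;
        printf("current mp_state: %u\n", mp.mp_state);

        mp.mp_state = KVM_MP_STATE_STOPPED;    /* valid on s390 only */
        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
    }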
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 2458a1dc2ba9..e8ce34c9db32 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap, | |||
254 | spin_lock(&ioapic->lock); | 254 | spin_lock(&ioapic->lock); |
255 | for (index = 0; index < IOAPIC_NUM_PINS; index++) { | 255 | for (index = 0; index < IOAPIC_NUM_PINS; index++) { |
256 | e = &ioapic->redirtbl[index]; | 256 | e = &ioapic->redirtbl[index]; |
257 | if (!e->fields.mask && | 257 | if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || |
258 | (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || | 258 | kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) || |
259 | kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, | 259 | index == RTC_GSI) { |
260 | index) || index == RTC_GSI)) { | ||
261 | if (kvm_apic_match_dest(vcpu, NULL, 0, | 260 | if (kvm_apic_match_dest(vcpu, NULL, 0, |
262 | e->fields.dest_id, e->fields.dest_mode)) { | 261 | e->fields.dest_id, e->fields.dest_mode)) { |
263 | __set_bit(e->fields.vector, | 262 | __set_bit(e->fields.vector, |
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index ced4a542a031..a228ee82bad2 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
@@ -323,13 +323,13 @@ out: | |||
323 | 323 | ||
324 | #define IOAPIC_ROUTING_ENTRY(irq) \ | 324 | #define IOAPIC_ROUTING_ENTRY(irq) \ |
325 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ | 325 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ |
326 | .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } | 326 | .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } } |
327 | #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) | 327 | #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) |
328 | 328 | ||
329 | #ifdef CONFIG_X86 | 329 | #ifdef CONFIG_X86 |
330 | # define PIC_ROUTING_ENTRY(irq) \ | 330 | # define PIC_ROUTING_ENTRY(irq) \ |
331 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ | 331 | { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ |
332 | .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 } | 332 | .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } } |
333 | # define ROUTING_ENTRY2(irq) \ | 333 | # define ROUTING_ENTRY2(irq) \ |
334 | IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) | 334 | IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) |
335 | #else | 335 | #else |