author	Mohammed Gamal <m.gamal005@gmail.com>	2008-08-17 09:38:32 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:15:20 -0400
commit	648dfaa7df2d3692db4e63dcb18dccb275d9c5a7 (patch)
tree	c7e52aa54420df759d3ae2dcdf0fc239f831024f /arch/x86/kvm/vmx.c
parent	6762b7299aa115e11815decd1fd982d015f09615 (diff)
KVM: VMX: Add Guest State Validity Checks
This patch adds functions to check whether guest state is VMX compliant.

Signed-off-by: Mohammed Gamal <m.gamal005@gmail.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	180
1 file changed, 180 insertions, 0 deletions
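For context, the real-mode rule that the new rmode_segment_valid() enforces (base = selector * 16, a 64 KiB limit, and access rights 0xf3) can be tried outside the kernel with a small standalone sketch; struct seg_state and rmode_seg_ok() below are illustrative names only, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the few fields rmode_segment_valid() inspects. */
struct seg_state {
	uint16_t selector;
	uint32_t base;
	uint32_t limit;
	uint32_t ar;		/* packed access rights, as stored in the VMCS */
};

/* Same rule as the patch: in virtual-8086 mode every segment must have
 * base = selector * 16, a 64 KiB limit and access rights 0xf3. */
static bool rmode_seg_ok(const struct seg_state *s)
{
	return s->base == (uint32_t)s->selector << 4 &&
	       s->limit == 0xffff &&
	       s->ar == 0xf3;
}

int main(void)
{
	struct seg_state cs  = { .selector = 0xf000, .base = 0xf0000,
				 .limit = 0xffff, .ar = 0xf3 };
	struct seg_state bad = { .selector = 0x1000, .base = 0x0,
				 .limit = 0xffff, .ar = 0xf3 };

	printf("cs valid: %d, bad valid: %d\n",
	       rmode_seg_ok(&cs), rmode_seg_ok(&bad));
	return 0;
}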
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81db7d48ab80..e889b768c751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1721,6 +1721,186 @@ static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 	vmcs_writel(GUEST_GDTR_BASE, dt->base);
 }
 
+static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
+{
+	struct kvm_segment var;
+	u32 ar;
+
+	vmx_get_segment(vcpu, &var, seg);
+	ar = vmx_segment_access_rights(&var);
+
+	if (var.base != (var.selector << 4))
+		return false;
+	if (var.limit != 0xffff)
+		return false;
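+	/* 0xf3 = present, DPL 3, S=1, type 3 (read/write accessed data):
+	 * the access rights every segment must have in virtual-8086 mode */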
+	if (ar != 0xf3)
+		return false;
+
+	return true;
+}
+
+static bool code_segment_valid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment cs;
+	unsigned int cs_rpl;
+
+	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+	cs_rpl = cs.selector & SELECTOR_RPL_MASK;
+
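+	/* CS must be an accessed code segment (code and accessed type bits set) */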
+	if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
+		return false;
+	if (!cs.s)
+		return false;
+	if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) {
+		if (cs.dpl > cs_rpl)
+			return false;
+	} else if (cs.type & AR_TYPE_CODE_MASK) {
+		if (cs.dpl != cs_rpl)
+			return false;
+	}
+	if (!cs.present)
+		return false;
+
+	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
+	return true;
+}
+
+static bool stack_segment_valid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment ss;
+	unsigned int ss_rpl;
+
+	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
+	ss_rpl = ss.selector & SELECTOR_RPL_MASK;
+
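+	/* SS must be a writable, accessed data segment: type 3 (expand-up) or 7 (expand-down) */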
+	if (ss.type != 3 && ss.type != 7)
+		return false;
+	if (!ss.s)
+		return false;
+	if (ss.dpl != ss_rpl) /* DPL != RPL */
+		return false;
+	if (!ss.present)
+		return false;
+
+	return true;
+}
+
+static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
+{
+	struct kvm_segment var;
+	unsigned int rpl;
+
+	vmx_get_segment(vcpu, &var, seg);
+	rpl = var.selector & SELECTOR_RPL_MASK;
+
+	if (!var.s)
+		return false;
+	if (!var.present)
+		return false;
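+	/* DPL must not be below RPL unless this is a readable code segment */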
+	if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
+		if (var.dpl < rpl) /* DPL < RPL */
+			return false;
+	}
+
+	/* TODO: Add other members to kvm_segment_field to allow checking for other access
+	 * rights flags
+	 */
+	return true;
+}
+
+static bool tr_valid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment tr;
+
+	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
+
+	if (tr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
+		return false;
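+	/* TR must reference a busy TSS: type 3 (16-bit) or 11 (32-bit) */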
+	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
+		return false;
+	if (!tr.present)
+		return false;
+
+	return true;
+}
+
+static bool ldtr_valid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment ldtr;
+
+	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
+
+	if (ldtr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
+		return false;
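+	/* LDTR must reference an LDT descriptor (system type 2) */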
+	if (ldtr.type != 2)
+		return false;
+	if (!ldtr.present)
+		return false;
+
+	return true;
+}
+
+static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment cs, ss;
+
+	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
+
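+	/* in protected mode the RPL of SS must equal the RPL of CS */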
+	return ((cs.selector & SELECTOR_RPL_MASK) ==
+		 (ss.selector & SELECTOR_RPL_MASK));
+}
+
+/*
+ * Check if guest state is valid. Returns true if valid, false if
+ * not.
+ * We assume that the segment registers are always usable (the
+ * "unusable" access-rights bit is not checked).
+ */
+static bool guest_state_valid(struct kvm_vcpu *vcpu)
+{
+	/* real mode guest state checks */
+	if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
+			return false;
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
+			return false;
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
+			return false;
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
+			return false;
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
+			return false;
+		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
+			return false;
+	} else {
+		/* protected mode guest state checks */
+		if (!cs_ss_rpl_check(vcpu))
+			return false;
+		if (!code_segment_valid(vcpu))
+			return false;
+		if (!stack_segment_valid(vcpu))
+			return false;
+		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
+			return false;
+		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
+			return false;
+		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
+			return false;
+		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
+			return false;
+		if (!tr_valid(vcpu))
+			return false;
+		if (!ldtr_valid(vcpu))
+			return false;
+	}
+	/* TODO:
+	 * - Add checks on RIP
+	 * - Add checks on RFLAGS
+	 */
+
+	return true;
+}
+
 static int init_rmode_tss(struct kvm *kvm)
 {
 	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;