Diffstat (limited to 'arch/x86/kvm/mmu.h')
 arch/x86/kvm/mmu.h | 44 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 292615274358..3842e70bdb7c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,11 +44,17 @@
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
@@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly);
+void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+		bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
  * Will a fault with a given page-fault error code (pfec) cause a permission
  * fault with the given access (in ACC_* format)?
  */
-static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
-				    unsigned pfec)
+static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				    unsigned pte_access, unsigned pfec)
 {
-	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+	int cpl = kvm_x86_ops->get_cpl(vcpu);
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+	/*
+	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
+	 *
+	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
+	 * (these are implicit supervisor accesses) regardless of the value
+	 * of EFLAGS.AC.
+	 *
+	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
+	 * the result in X86_EFLAGS_AC. We then insert it in place of
+	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
+	 * but it will be one in index if SMAP checks are being overridden.
+	 * It is important to keep this branchless.
+	 */
+	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
+	int index = (pfec >> 1) +
+		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
+	return (mmu->permissions[index] >> pte_access) & 1;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
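
To see the branchless SMAP math concretely: EFLAGS.AC is bit 18 and PFERR_RSVD is bit 3, so shifting right by 18 - 3 + 1 = 16 drops the surviving AC bit onto bit 2 of the index, exactly where PFERR_RSVD lands after pfec >> 1. The stand-alone sketch below is not part of the patch; smap_index is a hypothetical helper, with the kvm_x86_ops->get_cpl()/get_rflags() lookups replaced by plain parameters, and only the PFERR constants it needs are reproduced.

#include <stdio.h>

#define X86_EFLAGS_AC_BIT	18
#define X86_EFLAGS_AC		(1UL << X86_EFLAGS_AC_BIT)

#define PFERR_PRESENT_BIT	0
#define PFERR_WRITE_BIT		1
#define PFERR_RSVD_BIT		3

/*
 * Hypothetical user-space mirror of the index computation in
 * permission_fault() above; cpl and rflags are passed in directly
 * instead of being fetched from the vcpu.
 */
static int smap_index(int cpl, unsigned long rflags, unsigned pfec)
{
	/*
	 * (cpl - 3) is negative for CPL 0..2, so after conversion to
	 * unsigned long its high bits (including bit 18) are set; for
	 * CPL 3 it is zero. ANDing with EFLAGS.AC therefore keeps the
	 * AC bit only when a CPL < 3 access may override SMAP, without
	 * taking any branch.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/*
	 * Shift the surviving AC bit (bit 18) down to bit 2, where
	 * PFERR_RSVD lands after pfec >> 1: 18 - 3 + 1 == 16.
	 */
	return (pfec >> 1) + (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned pfec = (1U << PFERR_PRESENT_BIT) | (1U << PFERR_WRITE_BIT);

	/* CPL 0 with EFLAGS.AC set: SMAP override, index gains bit 2. */
	printf("%#x\n", smap_index(0, X86_EFLAGS_AC, pfec));	/* 0x5 */

	/* CPL 3: (cpl - 3) masks AC away, index is just pfec >> 1. */
	printf("%#x\n", smap_index(3, X86_EFLAGS_AC, pfec));	/* 0x1 */
	return 0;
}

With the AC override folded into bit 2 of the index, a single permissions table covers both cases; the entries reached via that extra bit are presumably populated by update_permission_bitmask() in mmu.c, which would explain why the patch also exports its declaration above.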