diff options
Diffstat (limited to 'arch/x86/kvm/mmu.h')
-rw-r--r-- | arch/x86/kvm/mmu.h | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 7086ca85d3e7..e374db9af021 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h | |||
@@ -49,6 +49,8 @@ | |||
49 | #define PFERR_FETCH_MASK (1U << 4) | 49 | #define PFERR_FETCH_MASK (1U << 4) |
50 | 50 | ||
51 | int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); | 51 | int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); |
52 | void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask); | ||
53 | int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); | ||
52 | int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); | 54 | int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); |
53 | 55 | ||
54 | static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) | 56 | static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) |
@@ -76,4 +78,27 @@ static inline int is_present_gpte(unsigned long pte) | |||
76 | return pte & PT_PRESENT_MASK; | 78 | return pte & PT_PRESENT_MASK; |
77 | } | 79 | } |
78 | 80 | ||
81 | static inline int is_writable_pte(unsigned long pte) | ||
82 | { | ||
83 | return pte & PT_WRITABLE_MASK; | ||
84 | } | ||
85 | |||
86 | static inline bool is_write_protection(struct kvm_vcpu *vcpu) | ||
87 | { | ||
88 | return kvm_read_cr0_bits(vcpu, X86_CR0_WP); | ||
89 | } | ||
90 | |||
91 | static inline bool check_write_user_access(struct kvm_vcpu *vcpu, | ||
92 | bool write_fault, bool user_fault, | ||
93 | unsigned long pte) | ||
94 | { | ||
95 | if (unlikely(write_fault && !is_writable_pte(pte) | ||
96 | && (user_fault || is_write_protection(vcpu)))) | ||
97 | return false; | ||
98 | |||
99 | if (unlikely(user_fault && !(pte & PT_USER_MASK))) | ||
100 | return false; | ||
101 | |||
102 | return true; | ||
103 | } | ||
79 | #endif | 104 | #endif |