author     Tiejun Chen <tiejun.chen@intel.com>      2014-09-01 06:44:04 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>      2014-09-03 04:04:10 -0400
commit     d143148383d0395539073dd6c2f25ddf6656bdcc
tree       122881b4567d4a8279cb27793ce5542b976268df
parent     56f17dd3fbc44adcdbc3340fe3988ddb833a47a7
KVM: mmio: cleanup kvm_set_mmio_spte_mask
Just reuse rsvd_bits() inside kvm_set_mmio_spte_mask()
for slightly better code.
Signed-off-by: Tiejun Chen <tiejun.chen@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu.c | 5 -----
 arch/x86/kvm/mmu.h | 5 +++++
 arch/x86/kvm/x86.c | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1cd2a5fbde07..6b6df0c5be3d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -295,11 +295,6 @@ static bool check_mmio_spte(struct kvm *kvm, u64 spte)
 	return likely(kvm_gen == spte_gen);
 }
 
-static inline u64 rsvd_bits(int s, int e)
-{
-	return ((1ULL << (e - s + 1)) - 1) << s;
-}
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index b982112d2ca5..bde8ee725754 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -56,6 +56,11 @@
 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 
+static inline u64 rsvd_bits(int s, int e)
+{
+	return ((1ULL << (e - s + 1)) - 1) << s;
+}
+
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 916e89515210..e4ed85e07a01 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5569,7 +5569,7 @@ static void kvm_set_mmio_spte_mask(void)
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
 	/* Mask the reserved physical address bits. */
-	mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
+	mask = rsvd_bits(maxphyaddr, 51);
 
 	/* Bit 62 is always reserved for 32bit host. */
 	mask |= 0x3ull << 62;
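
For reference, rsvd_bits(s, e) builds a 64-bit mask with bits s through e
(inclusive) set, so rsvd_bits(maxphyaddr, 51) is exactly the open-coded
expression it replaces in kvm_set_mmio_spte_mask(). A minimal userspace
sketch checking that equivalence (maxphyaddr = 48 is an assumed,
representative value here; the kernel derives the real width from CPUID):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* rsvd_bits() as moved into arch/x86/kvm/mmu.h by this patch:
 * a mask with bits s..e (inclusive) set. */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int maxphyaddr = 48;	/* assumed value for illustration only */

	/* The open-coded mask removed from kvm_set_mmio_spte_mask() ... */
	uint64_t open_coded = ((1ULL << (51 - maxphyaddr + 1)) - 1) << maxphyaddr;
	/* ... and its replacement. */
	uint64_t helper = rsvd_bits(maxphyaddr, 51);

	assert(open_coded == helper);
	printf("mask = %#llx\n", (unsigned long long)helper);	/* 0xf000000000000 */
	return 0;
}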