author		Avi Kivity <avi@redhat.com>	2012-09-12 13:46:56 -0400
committer	Avi Kivity <avi@redhat.com>	2012-09-20 06:00:09 -0400
commit		6fd01b711bee96ce3356f7b6f370ab708e37504b (patch)
tree		c277c3daf5ba0ec856e582025cbc27ccab4e6ec9 /arch/x86
parent		13d22b6aebb000aeaf137862c6c0e0c4d138d798 (diff)
KVM: MMU: Optimize is_last_gpte()
Instead of branchy code depending on level, gpte.ps, and mmu configuration,
prepare everything in a bitmap during mode changes and look it up during
runtime.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
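The walker only needs to know, for each (level, PS-bit) combination it can
encounter, whether the entry it just read terminates the walk. With at most
four levels and one PS bit that is eight cases, which fit in a single byte
filled in at mode-change time. A minimal standalone sketch of the lookup side,
using the 64-bit-paging bitmap value derived further below (names and
constants here are illustrative, not the kernel's):

	#include <stdio.h>

	/* index bits 0-1: zero-based walk level; index bit 2: the entry's PS bit */
	static int is_last_entry(unsigned char bitmap, unsigned level, int ps)
	{
		unsigned index = (level - 1) | (ps ? (1u << 2) : 0);

		return (bitmap >> index) & 1;
	}

	int main(void)
	{
		unsigned char bitmap = 0x71;	/* illustrative: 64-bit paging value */

		printf("%d\n", is_last_entry(bitmap, 1, 0));	/* 4K PTE             -> 1 */
		printf("%d\n", is_last_entry(bitmap, 2, 1));	/* 2M PDE, PS set     -> 1 */
		printf("%d\n", is_last_entry(bitmap, 2, 0));	/* PDE pointing to PT -> 0 */
		return 0;
	}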
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	7
-rw-r--r--	arch/x86/kvm/mmu.c			31
-rw-r--r--	arch/x86/kvm/mmu.h			3
-rw-r--r--	arch/x86/kvm/paging_tmpl.h		20
4 files changed, 41 insertions, 20 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3318bde206a5..43aeb9422839 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -298,6 +298,13 @@ struct kvm_mmu {
 	u64 *lm_root;
 	u64 rsvd_bits_mask[2][4];
 
+	/*
+	 * Bitmap: bit set = last pte in walk
+	 * index[0:1]: level (zero-based)
+	 * index[2]: pte.ps
+	 */
+	u8 last_pte_bitmap;
+
 	bool nx;
 
 	u64 pdptrs[4]; /* pae */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9c6188931f87..d289fee1ffb8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3447,6 +3447,15 @@ static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
 	return access;
 }
 
+static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
+{
+	unsigned index;
+
+	index = level - 1;
+	index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
+	return mmu->last_pte_bitmap & (1 << index);
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
@@ -3548,6 +3557,24 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
 	}
 }
 
+static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+	u8 map;
+	unsigned level, root_level = mmu->root_level;
+	const unsigned ps_set_index = 1 << 2;  /* bit 2 of index: ps */
+
+	if (root_level == PT32E_ROOT_LEVEL)
+		--root_level;
+	/* PT_PAGE_TABLE_LEVEL always terminates */
+	map = 1 | (1 << ps_set_index);
+	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
+		if (level <= PT_PDPE_LEVEL
+		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
+			map |= 1 << (ps_set_index | (level - 1));
+	}
+	mmu->last_pte_bitmap = map;
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 					struct kvm_mmu *context,
 					int level)
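For reference, working the loop through each paging mode by hand gives the
following bitmap values (derived from the code above, not stated in the patch;
bits 0 and 4, the level-1 slots, are set unconditionally because a 4K PTE
always terminates regardless of bit 7, which is PAT at that level):

	64-bit (root_level 4):   0x71  - 4K PTE, plus 2M PDE and 1G PDPTE with PS
	PAE    (root_level 3):   0x31  - 4K PTE, plus 2M PDE with PS; PDPTEs never terminate
	32-bit, CR4.PSE set:     0x31  - 4K PTE, plus 4M PDE with PS
	32-bit, CR4.PSE clear:   0x11  - only the PTE level terminates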
@@ -3557,6 +3584,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
@@ -3586,6 +3614,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
 
 	reset_rsvds_bits_mask(vcpu, context);
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging32_page_fault;
@@ -3647,6 +3676,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	}
 
 	update_permission_bitmask(vcpu, context);
+	update_last_pte_bitmap(vcpu, context);
 
 	return 0;
 }
@@ -3724,6 +3754,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 	}
 
 	update_permission_bitmask(vcpu, g_context);
+	update_last_pte_bitmap(vcpu, g_context);
 
 	return 0;
 }
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 584660775d08..69871080e866 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -20,7 +20,8 @@
 #define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
 #define PT_DIRTY_SHIFT 6
 #define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAGE_SIZE_SHIFT 7
+#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
 #define PT64_NX_SHIFT 63
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1b4c14d235a0..134ea7b1c585 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -103,24 +103,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static bool FNAME(is_last_gpte)(struct guest_walker *walker,
-				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				pt_element_t gpte)
-{
-	if (walker->level == PT_PAGE_TABLE_LEVEL)
-		return true;
-
-	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
-	    (PTTYPE == 64 || is_pse(vcpu)))
-		return true;
-
-	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
-	    (mmu->root_level == PT64_ROOT_LEVEL))
-		return true;
-
-	return false;
-}
-
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 					     struct kvm_mmu *mmu,
 					     struct guest_walker *walker,
@@ -247,7 +229,7 @@ retry_walk:
 		pte_access = pt_access & gpte_access(vcpu, pte);
 
 		walker->ptes[walker->level - 1] = pte;
-	} while (!FNAME(is_last_gpte)(walker, vcpu, mmu, pte));
+	} while (!is_last_gpte(mmu, walker->level, pte));
 
 	eperm |= permission_fault(mmu, pte_access, access);
 	if (unlikely(eperm)) {