aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2008-12-25 07:39:47 -0500
committerAvi Kivity <avi@redhat.com>2009-03-24 05:02:52 -0400
commit2d11123a77e54d5cea262c958e8498f4a08bce3d (patch)
tree4068b8f6c35f424c32ad0c160d8719d8463bfe28 /arch/x86/kvm/mmu.c
parent971cc3dcbc0e020b82f568e61a47b72be03307dd (diff)
KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to walk_shadow()
Using a for_each loop style removes the need to write callbacks and nasty casts. Implement walk_shadow() using for_each_shadow_entry(). Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c69
1 file changed, 49 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5f03ec324e35..c669f2af1d12 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,20 @@ struct kvm_shadow_walk {
150 u64 addr, u64 *spte, int level); 150 u64 addr, u64 *spte, int level);
151}; 151};
152 152
/*
 * Cursor state for an in-place walk of the shadow page tables, from the
 * root down toward the leaf entry that maps @addr.  Filled in by
 * shadow_walk_init() and advanced by shadow_walk_next(); the current
 * entry is exposed through @sptep/@level after shadow_walk_okay().
 */
struct kvm_shadow_walk_iterator {
	u64 addr;		/* guest address being translated */
	hpa_t shadow_addr;	/* host-physical address of current table */
	int level;		/* paging level of the current table */
	u64 *sptep;		/* pointer to the current shadow PTE */
	unsigned index;		/* index of @sptep within its table */
};

/*
 * Iterate over every shadow PTE on the walk from the root to @_addr's
 * leaf.  Replaces the callback-based walk_shadow() interface: the loop
 * body sees the current entry via _walker.sptep / _walker.level.
 */
#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))
/*
 * Callback wrapper for walking unsynchronized shadow pages; a nonzero
 * return from @entry aborts the walk.
 */
struct kvm_unsync_walk {
	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};
@@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1254 return sp; 1268 return sp;
1255} 1269}
1256 1270
1271static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1272 struct kvm_vcpu *vcpu, u64 addr)
1273{
1274 iterator->addr = addr;
1275 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1276 iterator->level = vcpu->arch.mmu.shadow_root_level;
1277 if (iterator->level == PT32E_ROOT_LEVEL) {
1278 iterator->shadow_addr
1279 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1280 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1281 --iterator->level;
1282 if (!iterator->shadow_addr)
1283 iterator->level = 0;
1284 }
1285}
1286
1287static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1288{
1289 if (iterator->level < PT_PAGE_TABLE_LEVEL)
1290 return false;
1291 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1292 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1293 return true;
1294}
1295
1296static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1297{
1298 iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1299 --iterator->level;
1300}
1301
1257static int walk_shadow(struct kvm_shadow_walk *walker, 1302static int walk_shadow(struct kvm_shadow_walk *walker,
1258 struct kvm_vcpu *vcpu, u64 addr) 1303 struct kvm_vcpu *vcpu, u64 addr)
1259{ 1304{
1260 hpa_t shadow_addr; 1305 struct kvm_shadow_walk_iterator iterator;
1261 int level;
1262 int r; 1306 int r;
1263 u64 *sptep;
1264 unsigned index;
1265
1266 shadow_addr = vcpu->arch.mmu.root_hpa;
1267 level = vcpu->arch.mmu.shadow_root_level;
1268 if (level == PT32E_ROOT_LEVEL) {
1269 shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1270 shadow_addr &= PT64_BASE_ADDR_MASK;
1271 if (!shadow_addr)
1272 return 1;
1273 --level;
1274 }
1275 1307
1276 while (level >= PT_PAGE_TABLE_LEVEL) { 1308 for_each_shadow_entry(vcpu, addr, iterator) {
1277 index = SHADOW_PT_INDEX(addr, level); 1309 r = walker->entry(walker, vcpu, addr,
1278 sptep = ((u64 *)__va(shadow_addr)) + index; 1310 iterator.sptep, iterator.level);
1279 r = walker->entry(walker, vcpu, addr, sptep, level);
1280 if (r) 1311 if (r)
1281 return r; 1312 return r;
1282 shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
1283 --level;
1284 } 1313 }
1285 return 0; 1314 return 0;
1286} 1315}