 arch/x86/kvm/mmu.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 49 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5f03ec324e35..c669f2af1d12 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -150,6 +150,20 @@ struct kvm_shadow_walk {
 		     u64 addr, u64 *spte, int level);
 };
 
+struct kvm_shadow_walk_iterator {
+	u64 addr;
+	hpa_t shadow_addr;
+	int level;
+	u64 *sptep;
+	unsigned index;
+};
+
+#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
+	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
+	     shadow_walk_okay(&(_walker));			\
+	     shadow_walk_next(&(_walker)))
+
+
 struct kvm_unsync_walk {
 	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
 };
@@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+			     struct kvm_vcpu *vcpu, u64 addr)
+{
+	iterator->addr = addr;
+	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	if (iterator->level == PT32E_ROOT_LEVEL) {
+		iterator->shadow_addr
+			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
+		--iterator->level;
+		if (!iterator->shadow_addr)
+			iterator->level = 0;
+	}
+}
+
+static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
+{
+	if (iterator->level < PT_PAGE_TABLE_LEVEL)
+		return false;
+	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
+	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
+	return true;
+}
+
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+	--iterator->level;
+}
+
 static int walk_shadow(struct kvm_shadow_walk *walker,
 		       struct kvm_vcpu *vcpu, u64 addr)
 {
-	hpa_t shadow_addr;
-	int level;
+	struct kvm_shadow_walk_iterator iterator;
 	int r;
-	u64 *sptep;
-	unsigned index;
-
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		if (!shadow_addr)
-			return 1;
-		--level;
-	}
 
-	while (level >= PT_PAGE_TABLE_LEVEL) {
-		index = SHADOW_PT_INDEX(addr, level);
-		sptep = ((u64 *)__va(shadow_addr)) + index;
-		r = walker->entry(walker, vcpu, addr, sptep, level);
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		r = walker->entry(walker, vcpu, addr,
+				  iterator.sptep, iterator.level);
 		if (r)
 			return r;
-		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
-		--level;
 	}
 	return 0;
 }
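
What the patch does: shadow_walk_init() seeds a cursor at the shadow root (for the four-entry PAE root, it indexes pae_root[] and steps down one level immediately), shadow_walk_okay() computes index and sptep for the current level and stops once the walk drops below PT_PAGE_TABLE_LEVEL, and shadow_walk_next() follows the physical address in the current entry one level down. Callers can now loop over shadow page-table entries in place instead of packaging their state into a struct kvm_shadow_walk callback. A minimal caller-side sketch of that pattern (find_leaf_spte() is hypothetical, not part of this patch; it assumes the is_shadow_present_pte() helper already present in mmu.c):

/*
 * Hypothetical example, not from this patch: walk the shadow page
 * table for @addr and return the deepest present entry, roughly the
 * shape a fault handler takes with this iterator.
 */
static u64 *find_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 *leaf = NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		/*
		 * A non-present entry has no valid child table, so
		 * shadow_walk_next() must not chase it.
		 */
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
		leaf = iterator.sptep;	/* deepest present entry so far */
	}
	return leaf;
}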
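
Two details worth noting. First, the PAE case: with a 3-level PAE shadow, (addr >> 30) & 3 picks one of the four 1GiB slots in pae_root[] (e.g. addr = 0xC0000000 gives index 3), and the walk starts one level below the root. Second, the init/okay split subtly changes walk_shadow()'s behavior: the old code returned 1 when the selected pae_root[] entry was absent, while the new code sets iterator->level to 0 so that shadow_walk_okay() fails immediately and walk_shadow() returns 0.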