Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	195
1 file changed, 195 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b18e65ce3683..c438224cca34 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1215,6 +1215,60 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 	return flush;
 }
 
+static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
+{
+	u64 spte = *sptep;
+
+	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
+
+	spte &= ~shadow_dirty_mask;
+
+	return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+	u64 *sptep;
+	struct rmap_iterator iter;
+	bool flush = false;
+
+	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+		BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+		flush |= spte_clear_dirty(kvm, sptep);
+		sptep = rmap_get_next(&iter);
+	}
+
+	return flush;
+}
+
+static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
+{
+	u64 spte = *sptep;
+
+	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
+
+	spte |= shadow_dirty_mask;
+
+	return mmu_spte_update(sptep, spte);
+}
+
+static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
+{
+	u64 *sptep;
+	struct rmap_iterator iter;
+	bool flush = false;
+
+	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+		BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+		flush |= spte_set_dirty(kvm, sptep);
+		sptep = rmap_get_next(&iter);
+	}
+
+	return flush;
+}
+
 /**
  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
  * @kvm: kvm instance
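Note on the hunk above: the spte_clear_dirty()/spte_set_dirty() helpers only build a new SPTE value and hand it to mmu_spte_update(), which reports whether a TLB flush is needed; the __rmap_* walkers then OR those results together. The stand-alone sketch below only illustrates the bit manipulation itself. The mask value is an assumption (bit 9, the EPT dirty bit when A/D bits are enabled) and the sample SPTE value is made up.

#include <stdint.h>
#include <stdio.h>

/* Assumption for illustration: EPT dirty bit (bit 9) with A/D bits enabled. */
#define EXAMPLE_SHADOW_DIRTY_MASK (1ULL << 9)

int main(void)
{
	uint64_t spte = 0x0000000123456677ULL;	/* made-up leaf SPTE value */

	/* What spte_clear_dirty() computes before calling mmu_spte_update(). */
	uint64_t cleared = spte & ~EXAMPLE_SHADOW_DIRTY_MASK;
	/* What spte_set_dirty() computes before calling mmu_spte_update(). */
	uint64_t set = spte | EXAMPLE_SHADOW_DIRTY_MASK;

	printf("orig    %#llx\ncleared %#llx\nset     %#llx\n",
	       (unsigned long long)spte,
	       (unsigned long long)cleared,
	       (unsigned long long)set);
	return 0;
}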
@@ -1242,6 +1296,32 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 }
 
 /**
+ * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to clear D-bit
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should clear D-bit
+ *
+ * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
+ */
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+				   struct kvm_memory_slot *slot,
+				   gfn_t gfn_offset, unsigned long mask)
+{
+	unsigned long *rmapp;
+
+	while (mask) {
+		rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+				      PT_PAGE_TABLE_LEVEL, slot);
+		__rmap_clear_dirty(kvm, rmapp);
+
+		/* clear the first set bit */
+		mask &= mask - 1;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
+
+/**
  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
  * PT level pages.
  *
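As the kernel-doc in the hunk above says, kvm_mmu_clear_dirty_pt_masked() visits only the set bits of the caller's dirty mask: __ffs(mask) yields the lowest set bit (the next dirty page's offset within the BITS_PER_LONG-page window) and `mask &= mask - 1` clears that bit. A minimal user-space sketch of the same bit walk, with __builtin_ctzl standing in for the kernel's __ffs() and printf standing in for the rmap lookup:

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x29;	/* example: page offsets 0, 3 and 5 are dirty */

	while (mask) {
		unsigned long gfn_offset = __builtin_ctzl(mask);

		printf("would clear D-bits for gfn_offset %lu\n", gfn_offset);
		mask &= mask - 1;	/* clear the lowest set bit */
	}
	return 0;
}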
@@ -4368,6 +4448,121 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 		kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	unsigned long *rmapp;
+	unsigned long last_index, index;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
+	last_index = gfn_to_index(last_gfn, memslot->base_gfn,
+				  PT_PAGE_TABLE_LEVEL);
+
+	for (index = 0; index <= last_index; ++index, ++rmapp) {
+		if (*rmapp)
+			flush |= __rmap_clear_dirty(kvm, rmapp);
+
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+			cond_resched_lock(&kvm->mmu_lock);
+	}
+
+	spin_unlock(&kvm->mmu_lock);
+
+	lockdep_assert_held(&kvm->slots_lock);
+
+	/*
+	 * It's also safe to flush TLBs out of mmu lock here as currently this
+	 * function is only used for dirty logging, in which case flushing TLB
+	 * out of mmu lock also guarantees no dirty pages will be lost in
+	 * dirty_bitmap.
+	 */
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
+
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	int i;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		unsigned long *rmapp;
+		unsigned long last_index, index;
+
+		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+		for (index = 0; index <= last_index; ++index, ++rmapp) {
+			if (*rmapp)
+				flush |= __rmap_write_protect(kvm, rmapp,
+						false);
+
+			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+				cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+	spin_unlock(&kvm->mmu_lock);
+
+	/* see kvm_mmu_slot_remove_write_access */
+	lockdep_assert_held(&kvm->slots_lock);
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+			    struct kvm_memory_slot *memslot)
+{
+	gfn_t last_gfn;
+	int i;
+	bool flush = false;
+
+	last_gfn = memslot->base_gfn + memslot->npages - 1;
+
+	spin_lock(&kvm->mmu_lock);
+
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		unsigned long *rmapp;
+		unsigned long last_index, index;
+
+		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
+		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
+
+		for (index = 0; index <= last_index; ++index, ++rmapp) {
+			if (*rmapp)
+				flush |= __rmap_set_dirty(kvm, rmapp);
+
+			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
+				cond_resched_lock(&kvm->mmu_lock);
+		}
+	}
+
+	spin_unlock(&kvm->mmu_lock);
+
+	lockdep_assert_held(&kvm->slots_lock);
+
+	/* see kvm_mmu_slot_leaf_clear_dirty */
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
+
 #define BATCH_ZAP_PAGES 10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
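Taken together, the slot-wide walkers added in the hunk above give an arch/vendor hook what it needs to switch a memslot's dirty logging between write protection and D-bit/PML tracking. The kernel-context fragment below is an illustrative sketch only, not part of this patch: the function name and the way it would be wired up are assumptions. The idea is that with PML enabled, an enable-dirty-logging hook could clear the D-bit on 4K mappings so hardware re-logs writes to them, while still write-protecting large-page mappings so they are split on the next write fault.

/*
 * Illustrative sketch only: a hypothetical PML enable hook built from the
 * helpers added above; requires the KVM MMU declarations from this patch.
 */
static void example_slot_enable_log_dirty(struct kvm *kvm,
					  struct kvm_memory_slot *memslot)
{
	/* Let the D-bit (and PML) track writes through 4K mappings... */
	kvm_mmu_slot_leaf_clear_dirty(kvm, memslot);
	/* ...but keep large pages write-protected so they get split. */
	kvm_mmu_slot_largepage_remove_write_access(kvm, memslot);
}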