author    Avi Kivity <avi@qumranet.com>  2007-05-06 08:50:58 -0400
committer Avi Kivity <avi@qumranet.com>  2007-07-16 05:05:40 -0400
commit    47ad8e689b4f94f9fc3b2588a7aaa65e4eca667c
tree      ee050d3f0a86f937f10bbc2f6e1893201de323af /drivers
parent    4b02d6daa12465b209ec4f50c363f9553a51f45b
KVM: MMU: Store shadow page tables as kernel virtual addresses, not physical
Simplifies things a bit.
Signed-off-by: Avi Kivity <avi@qumranet.com>
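In short: struct kvm_mmu_page now caches the kernel virtual address of its shadow table, so the common paths that read and write shadow PTEs can dereference page->spt directly instead of calling __va() first; only the few places that compose a hardware-visible entry (root pointers and parent PTEs) convert back with __pa(). A minimal before/after sketch, with fields abbreviated for illustration (zap_pte_old/zap_pte_new are hypothetical helpers, not functions from the patch):

/* Illustrative sketch only; not the real kvm_mmu_page definition. */
struct mmu_page_old { hpa_t page_hpa; };	/* physical address */
struct mmu_page_new { u64 *spt; };		/* kernel virtual address */

static void zap_pte_old(struct mmu_page_old *page, unsigned index)
{
	u64 *pt = __va(page->page_hpa);		/* translate on every access */

	pt[index] = 0;
}

static void zap_pte_new(struct mmu_page_new *page, unsigned index)
{
	page->spt[index] = 0;			/* dereference directly */
}

/* Entries the hardware walks still need the physical address, e.g.:
 *	table[index] = __pa(new_table->spt) | PT_PRESENT_MASK | ...;
 */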
Diffstat (limited to 'drivers')
 drivers/kvm/kvm.h         |  2 +-
 drivers/kvm/mmu.c         | 32 +++++++++++++++-----------------
 drivers/kvm/paging_tmpl.h |  2 +-
 3 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5e6dac5a3c00..fc4a6c1235f0 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -139,7 +139,7 @@ struct kvm_mmu_page {
 	gfn_t gfn;
 	union kvm_mmu_page_role role;
 
-	hpa_t page_hpa;
+	u64 *spt;
 	unsigned long slot_bitmap; /* One bit set per slot which has memory
 				    * in this shadow page.
 				    */
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index a96c9ae54f3c..c85c6649280e 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -439,13 +439,12 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 }
 
 #ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
 {
 	u64 *pos;
 	u64 *end;
 
-	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
-	     pos != end; pos++)
+	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 		if (*pos != 0) {
 			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
 			       pos, *pos);
@@ -458,7 +457,7 @@ static int is_empty_shadow_page(hpa_t page_hpa)
 static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page_head)
 {
-	ASSERT(is_empty_shadow_page(page_head->page_hpa));
+	ASSERT(is_empty_shadow_page(page_head->spt));
 	list_move(&page_head->link, &vcpu->free_pages);
 	++vcpu->kvm->n_free_mmu_pages;
 }
@@ -478,7 +477,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 
 	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
 	list_move(&page->link, &vcpu->kvm->active_mmu_pages);
-	ASSERT(is_empty_shadow_page(page->page_hpa));
+	ASSERT(is_empty_shadow_page(page->spt));
 	page->slot_bitmap = 0;
 	page->multimapped = 0;
 	page->parent_pte = parent_pte;
@@ -636,7 +635,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
 	u64 *pt;
 	u64 ent;
 
-	pt = __va(page->page_hpa);
+	pt = page->spt;
 
 	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -803,7 +802,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 				return -ENOMEM;
 			}
 
-			table[index] = new_table->page_hpa | PT_PRESENT_MASK
+			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
 				| PT_WRITABLE_MASK | PT_USER_MASK;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -855,7 +854,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
 					PT64_ROOT_LEVEL, 0, 0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
 		++page->root_count;
 		vcpu->mmu.root_hpa = root;
 		return;
@@ -876,7 +875,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 					PT32_ROOT_LEVEL, !is_paging(vcpu),
 					0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
 		++page->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
@@ -1220,8 +1219,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			if (quadrant != page->role.quadrant)
 				continue;
 		}
-		spte = __va(page->page_hpa);
-		spte += page_offset / sizeof(*spte);
+		spte = &page->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
 			mmu_pte_write_zap_pte(vcpu, page, spte);
 			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
@@ -1262,8 +1260,8 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		page = list_entry(vcpu->free_pages.next,
 				  struct kvm_mmu_page, link);
 		list_del(&page->link);
-		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
-		page->page_hpa = INVALID_PAGE;
+		free_page((unsigned long)page->spt);
+		page->spt = NULL;
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1282,8 +1280,8 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 		if ((page = alloc_page(GFP_KERNEL)) == NULL)
 			goto error_1;
 		set_page_private(page, (unsigned long)page_header);
-		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
-		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
+		page_header->spt = page_address(page);
+		memset(page_header->spt, 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
 		++vcpu->kvm->n_free_mmu_pages;
 	}
@@ -1346,7 +1344,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
 		if (!test_bit(slot, &page->slot_bitmap))
 			continue;
 
-		pt = __va(page->page_hpa);
+		pt = page->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK) {
@@ -1497,7 +1495,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
 	int i;
 
 	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		u64 *pt = __va(page->page_hpa);
+		u64 *pt = page->spt;
 
 		if (page->role.level != PT_PAGE_TABLE_LEVEL)
 			continue;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 10ba0a80ce59..6dd0da9a5d15 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -304,7 +304,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, hugepage_access,
 					       shadow_ent);
-		shadow_addr = shadow_page->page_hpa;
+		shadow_addr = __pa(shadow_page->spt);
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
 		*shadow_ent = shadow_pte;
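A closing note on why the swap is safe: shadow pages are allocated with alloc_page(GFP_KERNEL) and so live in the kernel's direct mapping, where __pa() and __va() are exact linear inverses. An illustrative sketch of the invariant the patch relies on, assuming a direct-mapped page (this check is not part of the patch):

/* Illustrative: the old and new representations name the same memory. */
struct page *page = alloc_page(GFP_KERNEL);
u64 *spt = page_address(page);			/* new: kernel virtual address */
hpa_t hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; /* old: physical address */

BUG_ON(__pa(spt) != hpa);		/* virtual -> physical */
BUG_ON(__va(hpa) != (void *)spt);	/* physical -> virtual */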