author     Avi Kivity <avi@qumranet.com>              2007-01-05 19:36:42 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2007-01-06 02:55:24 -0500
commit     25c0de2cc6c26cb99553c2444936a7951c120c09
tree       c2b5bd7ff189a1232ed3a89b50caa2ebbad67634
parent     aef3d3fe1314f2a130f5ccc7114df20865ba784f
[PATCH] KVM: MMU: Make kvm_mmu_alloc_page() return a kvm_mmu_page pointer
This allows further manipulation on the shadow page table.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
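
[Editor's note] The payoff is on the caller side: with the full struct
kvm_mmu_page returned instead of a bare hpa_t, per-page metadata stays
reachable after allocation. A minimal sketch of the resulting calling
convention, assuming a generic caller (the caller itself is illustrative
and not part of this patch; the fields used are the ones visible in the
hunks below):

	struct kvm_mmu_page *page;

	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)	/* failure is now a NULL pointer, not INVALID_PAGE */
		return -ENOMEM;

	/* the host-physical address is still available ... */
	table[index] = page->page_hpa | PT_PRESENT_MASK;
	/* ... but so is the rest of the descriptor (illustrative) */
	page->global = 0;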
-rw-r--r--  drivers/kvm/mmu.c          | 24 +++++++++++-------------
-rw-r--r--  drivers/kvm/paging_tmpl.h  |  6 ++++--

2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1dcbbd511660..da4d7ddb9bdc 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -292,12 +292,13 @@ static int is_empty_shadow_page(hpa_t page_hpa)
 	return 1;
 }
 
-static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+					       u64 *parent_pte)
 {
 	struct kvm_mmu_page *page;
 
 	if (list_empty(&vcpu->free_pages))
-		return INVALID_PAGE;
+		return NULL;
 
 	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
 	list_del(&page->link);
@@ -306,7 +307,7 @@ static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
 	page->slot_bitmap = 0;
 	page->global = 1;
 	page->parent_pte = parent_pte;
-	return page->page_hpa;
+	return page;
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
@@ -402,19 +403,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 		}
 
 		if (table[index] == 0) {
-			hpa_t new_table = kvm_mmu_alloc_page(vcpu,
-							     &table[index]);
+			struct kvm_mmu_page *new_table;
 
-			if (!VALID_PAGE(new_table)) {
+			new_table = kvm_mmu_alloc_page(vcpu, &table[index]);
+			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
 				return -ENOMEM;
 			}
 
-			if (level == PT32E_ROOT_LEVEL)
-				table[index] = new_table | PT_PRESENT_MASK;
-			else
-				table[index] = new_table | PT_PRESENT_MASK |
-					PT_WRITABLE_MASK | PT_USER_MASK;
+			table[index] = new_table->page_hpa | PT_PRESENT_MASK
+				| PT_WRITABLE_MASK | PT_USER_MASK;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -454,7 +452,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
-		root = kvm_mmu_alloc_page(vcpu, NULL);
+		root = kvm_mmu_alloc_page(vcpu, NULL)->page_hpa;
 		vcpu->mmu.root_hpa = root;
 		return;
 	}
@@ -463,7 +461,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->mmu.pae_root[i];
 
 		ASSERT(!VALID_PAGE(root));
-		root = kvm_mmu_alloc_page(vcpu, NULL);
+		root = kvm_mmu_alloc_page(vcpu, NULL)->page_hpa;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 7af49ae80e5a..11cac9ddf26a 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -179,6 +179,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	for (; ; level--) {
 		u32 index = SHADOW_PT_INDEX(addr, level);
 		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
+		struct kvm_mmu_page *shadow_page;
 		u64 shadow_pte;
 
 		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
@@ -204,9 +205,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			return shadow_ent;
 		}
 
-		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
-		if (!VALID_PAGE(shadow_addr))
+		shadow_page = kvm_mmu_alloc_page(vcpu, shadow_ent);
+		if (!shadow_page)
 			return ERR_PTR(-ENOMEM);
+		shadow_addr = shadow_page->page_hpa;
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
 		*shadow_ent = shadow_pte;
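
[Editor's note] For reference, this is how kvm_mmu_alloc_page() reads with
the patch applied, stitched together from the two mmu.c hunks above; the
context lines that fall between the hunks are not part of this diff and are
elided rather than guessed:

	static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
						       u64 *parent_pte)
	{
		struct kvm_mmu_page *page;

		if (list_empty(&vcpu->free_pages))
			return NULL;	/* callers map this to -ENOMEM */

		page = list_entry(vcpu->free_pages.next,
				  struct kvm_mmu_page, link);
		list_del(&page->link);
		/* ... lines not shown in the hunks above ... */
		page->slot_bitmap = 0;
		page->global = 1;
		page->parent_pte = parent_pte;
		return page;	/* whole descriptor, not just page->page_hpa */
	}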