author     Joerg Roedel <joerg.roedel@amd.com>    2009-07-27 10:30:43 -0400
committer  Avi Kivity <avi@redhat.com>            2009-09-10 01:33:18 -0400
commit     d25797b24c0ff2efc2b2fabaebb0ec0cafc0d3e3
tree       2c73de073f169e66cc50141a604a1b72498d96d7 /arch/x86/kvm/mmu.c
parent     44ad9944f151390363fc6edaba466de8dfef050f
KVM: MMU: rename is_largepage_backed to mapping_level
With the new name and the corresponding backend changes, this function
can now support multiple hugepage sizes.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
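[Editor's note] For orientation, here is a minimal standalone sketch (not part of the patch) of the level-to-page-size relation the new code relies on: each x86 paging level maps 512 times as much memory as the one below it, so levels 1, 2 and 3 correspond to 4 KiB, 2 MiB and 1 GiB pages. The PAGE_SHIFT, KVM_NR_PAGE_SIZES and KVM_HPAGE_SIZE() definitions below are assumptions mirroring the headers introduced earlier in this series; they are not shown in this diff. The helper mirrors the selection loop added to host_mapping_level().

/* Standalone sketch, not kernel code; constants are assumed to match the
 * definitions from this patch series (PT_PAGE_TABLE_LEVEL == 1, three page
 * sizes, 9 address bits per paging level). */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PT_PAGE_TABLE_LEVEL	1
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + ((x) - 1) * 9)
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))

/* Pick the largest level whose page size still fits inside the host
 * mapping, as the loop added to host_mapping_level() does. */
static int level_for_host_page_size(unsigned long page_size)
{
	int i, level = PT_PAGE_TABLE_LEVEL;

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			level = i;
		else
			break;
	}
	return level;
}

int main(void)
{
	/* 4 KiB, 2 MiB and 1 GiB host mappings resolve to levels 1, 2, 3. */
	unsigned long sizes[] = { 1UL << 12, 1UL << 21, 1UL << 30 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("host page size %10lu -> level %d\n",
		       sizes[i], level_for_host_page_size(sizes[i]));
	return 0;
}

mapping_level() then clamps this host-derived level further by the slot's dirty-logging state and by has_wrprotected_page().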
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c | 100
 1 file changed, 67 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b93ad2c79c15..c707936b2414 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -393,37 +393,52 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
  * Return the pointer to the largepage write count for a given
  * gfn, handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
+static int *slot_largepage_idx(gfn_t gfn,
+			       struct kvm_memory_slot *slot,
+			       int level)
 {
 	unsigned long idx;
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
-	return &slot->lpage_info[0][idx].write_count;
+	idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+	return &slot->lpage_info[level - 2][idx].write_count;
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	int *write_count;
+	int i;
 
 	gfn = unalias_gfn(kvm, gfn);
-	write_count = slot_largepage_idx(gfn,
-					 gfn_to_memslot_unaliased(kvm, gfn));
-	*write_count += 1;
+
+	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	for (i = PT_DIRECTORY_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		write_count = slot_largepage_idx(gfn, slot, i);
+		*write_count += 1;
+	}
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
+	struct kvm_memory_slot *slot;
 	int *write_count;
+	int i;
 
 	gfn = unalias_gfn(kvm, gfn);
-	write_count = slot_largepage_idx(gfn,
-					 gfn_to_memslot_unaliased(kvm, gfn));
-	*write_count -= 1;
-	WARN_ON(*write_count < 0);
+	for (i = PT_DIRECTORY_LEVEL;
+	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+		slot = gfn_to_memslot_unaliased(kvm, gfn);
+		write_count = slot_largepage_idx(gfn, slot, i);
+		*write_count -= 1;
+		WARN_ON(*write_count < 0);
+	}
 }
 
-static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+static int has_wrprotected_page(struct kvm *kvm,
+				gfn_t gfn,
+				int level)
 {
 	struct kvm_memory_slot *slot;
 	int *largepage_idx;
@@ -431,47 +446,67 @@ static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
 	gfn = unalias_gfn(kvm, gfn);
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (slot) {
-		largepage_idx = slot_largepage_idx(gfn, slot);
+		largepage_idx = slot_largepage_idx(gfn, slot, level);
 		return *largepage_idx;
 	}
 
 	return 1;
 }
 
-static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
+static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 {
+	unsigned long page_size = PAGE_SIZE;
 	struct vm_area_struct *vma;
 	unsigned long addr;
-	int ret = 0;
+	int i, ret = 0;
 
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
-		return ret;
+		return page_size;
 
 	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, addr);
-	if (vma && is_vm_hugetlb_page(vma))
-		ret = 1;
+	if (!vma)
+		goto out;
+
+	page_size = vma_kernel_pagesize(vma);
+
+out:
 	up_read(&current->mm->mmap_sem);
 
+	for (i = PT_PAGE_TABLE_LEVEL;
+	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
+		if (page_size >= KVM_HPAGE_SIZE(i))
+			ret = i;
+		else
+			break;
+	}
+
 	return ret;
 }
 
-static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
 	struct kvm_memory_slot *slot;
-
-	if (has_wrprotected_page(vcpu->kvm, large_gfn))
-		return 0;
-
-	if (!host_largepage_backed(vcpu->kvm, large_gfn))
-		return 0;
+	int host_level;
+	int level = PT_PAGE_TABLE_LEVEL;
 
 	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
 	if (slot && slot->dirty_bitmap)
-		return 0;
+		return PT_PAGE_TABLE_LEVEL;
 
-	return 1;
+	host_level = host_mapping_level(vcpu->kvm, large_gfn);
+
+	if (host_level == PT_PAGE_TABLE_LEVEL)
+		return host_level;
+
+	for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
+
+		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
+			break;
+	}
+
+	return level - 1;
 }
 
 /*
@@ -1733,7 +1768,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if ((pte_access & ACC_WRITE_MASK)
 	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
 
-		if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
+		if (largepage && has_wrprotected_page(vcpu->kvm, gfn, 1)) {
 			ret = 1;
 			spte = shadow_trap_nonpresent_pte;
 			goto set_pte;
@@ -1884,8 +1919,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn_t pfn;
 	unsigned long mmu_seq;
 
-	if (is_largepage_backed(vcpu, gfn &
-			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+	if (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
@@ -2091,8 +2125,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	if (r)
 		return r;
 
-	if (is_largepage_backed(vcpu, gfn &
-			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+	if (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
@@ -2494,7 +2527,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
-	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
+	if (is_large_pte(gpte) &&
+	    (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL)) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		vcpu->arch.update_pte.largepage = 1;
 	}