author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>   2010-12-06 22:59:07 -0500
committer Avi Kivity <avi@redhat.com>                         2011-01-12 04:30:47 -0500
commit    d4dbf470096c51cb4785167ea59fdbdea87ccbe4
tree      fbc50ca20c1ed53ce431c5e923fb46e7faeef986  /arch/x86/kvm
parent    443381a828910efa3d71ba4491d180f2d0bb4212
KVM: MMU: Make the way of accessing lpage_info more generic
Large page information has two elements, but only one of them, write_count,
is accessed through a helper function.
This patch replaces that helper with a more generic one that returns a pointer
to the newly named kvm_lpage_info structure, and uses it to access the other
element, rmap_pde, as well.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
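For orientation, here is a minimal sketch of the two-element structure the message refers to and of the access pattern before and after the change. It is not part of this diff: the layout of struct kvm_lpage_info is inferred from the accesses in mmu.c (its real definition lives outside arch/x86/kvm and may differ), and the old_style()/new_style() wrappers exist only to frame the example; slot_largepage_idx() and lpage_info_slot() are the helpers actually touched by the patch.

/*
 * Illustrative sketch only -- not part of this patch.  The layout of
 * struct kvm_lpage_info is inferred from the accesses in mmu.c below;
 * the real definition lives outside arch/x86/kvm and may differ.
 */
struct kvm_lpage_info {
        unsigned long rmap_pde;         /* rmap head for this large page      */
        int write_count;                /* write-protected small pages in it  */
};

/* Before: the helper handed back only one field of the entry. */
static void old_style(gfn_t gfn, struct kvm_memory_slot *slot, int level)
{
        int *write_count = slot_largepage_idx(gfn, slot, level);

        *write_count += 1;              /* rmap_pde needed open-coded index math */
}

/* After: one helper returns the whole entry, so both fields are reachable. */
static void new_style(gfn_t gfn, struct kvm_memory_slot *slot, int level)
{
        struct kvm_lpage_info *linfo = lpage_info_slot(gfn, slot, level);

        linfo->write_count += 1;
        /* ...and gfn_to_rmap() can simply return &linfo->rmap_pde. */
}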
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/mmu.c   54
1 file changed, 25 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index abda57fac659..475a1225f6ec 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -477,46 +477,46 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-                               struct kvm_memory_slot *slot,
-                               int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+                                              struct kvm_memory_slot *slot,
+                                              int level)
 {
         unsigned long idx;
 
         idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-        return &slot->lpage_info[level - 2][idx].write_count;
+        return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_memory_slot *slot;
-        int *write_count;
+        struct kvm_lpage_info *linfo;
         int i;
 
         slot = gfn_to_memslot(kvm, gfn);
         for (i = PT_DIRECTORY_LEVEL;
              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-                write_count = slot_largepage_idx(gfn, slot, i);
-                *write_count += 1;
+                linfo = lpage_info_slot(gfn, slot, i);
+                linfo->write_count += 1;
         }
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
         struct kvm_memory_slot *slot;
-        int *write_count;
+        struct kvm_lpage_info *linfo;
         int i;
 
         slot = gfn_to_memslot(kvm, gfn);
         for (i = PT_DIRECTORY_LEVEL;
              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-                write_count = slot_largepage_idx(gfn, slot, i);
-                *write_count -= 1;
-                WARN_ON(*write_count < 0);
+                linfo = lpage_info_slot(gfn, slot, i);
+                linfo->write_count -= 1;
+                WARN_ON(linfo->write_count < 0);
         }
 }
 
@@ -525,12 +525,12 @@ static int has_wrprotected_page(struct kvm *kvm,
                             int level)
 {
         struct kvm_memory_slot *slot;
-        int *largepage_idx;
+        struct kvm_lpage_info *linfo;
 
         slot = gfn_to_memslot(kvm, gfn);
         if (slot) {
-                largepage_idx = slot_largepage_idx(gfn, slot, level);
-                return *largepage_idx;
+                linfo = lpage_info_slot(gfn, slot, level);
+                return linfo->write_count;
         }
 
         return 1;
@@ -585,16 +585,15 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
         struct kvm_memory_slot *slot;
-        unsigned long idx;
+        struct kvm_lpage_info *linfo;
 
         slot = gfn_to_memslot(kvm, gfn);
         if (likely(level == PT_PAGE_TABLE_LEVEL))
                 return &slot->rmap[gfn - slot->base_gfn];
 
-        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-              (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+        linfo = lpage_info_slot(gfn, slot, level);
 
-        return &slot->lpage_info[level - 2][idx].rmap_pde;
+        return &linfo->rmap_pde;
 }
 
 /*
@@ -882,19 +881,16 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                 end = start + (memslot->npages << PAGE_SHIFT);
                 if (hva >= start && hva < end) {
                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+                        gfn_t gfn = memslot->base_gfn + gfn_offset;
 
                         ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-                                unsigned long idx;
-                                int sh;
-
-                                sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-                                idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-                                        (memslot->base_gfn >> sh);
-                                ret |= handler(kvm,
-                                        &memslot->lpage_info[j][idx].rmap_pde,
-                                        data);
+                                struct kvm_lpage_info *linfo;
+
+                                linfo = lpage_info_slot(gfn, memslot,
+                                                        PT_DIRECTORY_LEVEL + j);
+                                ret |= handler(kvm, &linfo->rmap_pde, data);
                         }
                         trace_kvm_age_page(hva, memslot, ret);
                         retval |= ret;