Diffstat (limited to 'drivers/kvm'):

 drivers/kvm/kvm.h      |  7 ++++++-
 drivers/kvm/kvm_main.c | 47 +++++++++++++++++++++++++++++++++++++++
 drivers/kvm/mmu.c      | 40 ++++++++++++++++++++++++++++++++++---
 3 files changed, 91 insertions(+), 3 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 1965438f18cc..9f10c373b74c 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -40,6 +40,8 @@
 #define KVM_MAX_VCPUS 4
 #define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 8
+#define KVM_PERMILLE_MMU_PAGES 20
+#define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_NUM_MMU_PAGES 1024
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
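For a sense of scale (the arithmetic is mine, not part of the patch): KVM_PERMILLE_MMU_PAGES is a per-mille ratio, so the default shadow-page budget is 2% of the guest's memory pages, with KVM_MIN_ALLOC_MMU_PAGES acting as a floor on the slot-removal path further down. A minimal sketch of the sizing rule, assuming 4 KB pages and the constants above:

/* Illustrative only: the default shadow-page budget for a guest size.
 * A 256 MB guest has 65536 pages, giving 65536 * 20 / 1000 = 1310
 * shadow pages; the 64-page floor only matters for very small guests. */
static unsigned int default_mmu_pages(unsigned long guest_npages)
{
	return guest_npages * KVM_PERMILLE_MMU_PAGES / 1000;
}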
@@ -418,7 +420,9 @@ struct kvm {
 	 * Hash table of struct kvm_mmu_page.
 	 */
 	struct list_head active_mmu_pages;
-	int n_free_mmu_pages;
+	unsigned int n_free_mmu_pages;
+	unsigned int n_requested_mmu_pages;
+	unsigned int n_alloc_mmu_pages;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	unsigned long rmap_overflow;
@@ -547,6 +551,7 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index cac328b8421c..2bb1f2f66efa 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -743,6 +743,24 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 	if (mem->slot >= kvm->nmemslots)
 		kvm->nmemslots = mem->slot + 1;
 
+	if (!kvm->n_requested_mmu_pages) {
+		unsigned int n_pages;
+
+		if (npages) {
+			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
+						 n_pages);
+		} else {
+			unsigned int nr_mmu_pages;
+
+			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
+			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+			nr_mmu_pages = max(nr_mmu_pages,
+					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+		}
+	}
+
 	*memslot = new;
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
@@ -760,6 +778,26 @@ out:
 	return r;
 }
 
+static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
+					 u32 kvm_nr_mmu_pages)
+{
+	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
+		return -EINVAL;
+
+	mutex_lock(&kvm->lock);
+
+	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
+	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+
+	mutex_unlock(&kvm->lock);
+	return 0;
+}
+
+static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+{
+	return kvm->n_alloc_mmu_pages;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -3071,6 +3109,14 @@ static long kvm_vm_ioctl(struct file *filp,
 			goto out;
 		break;
 	}
+	case KVM_SET_NR_MMU_PAGES:
+		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
+		if (r)
+			goto out;
+		break;
+	case KVM_GET_NR_MMU_PAGES:
+		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
+		break;
 	case KVM_GET_DIRTY_LOG: {
 		struct kvm_dirty_log log;
 
@@ -3278,6 +3324,7 @@ static long kvm_dev_ioctl(struct file *filp,
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_HLT:
+	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
 		r = 1;
 		break;
 	default:
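Taken together, the kvm_main.c changes expose the feature to userspace: a capability bit to probe on /dev/kvm and a pair of VM ioctls. A minimal usage sketch, assuming the KVM_CAP_MMU_SHADOW_CACHE_CONTROL, KVM_SET_NR_MMU_PAGES and KVM_GET_NR_MMU_PAGES constants from this era's <linux/kvm.h> (their definitions are outside this diff):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: pin a VM's shadow-page budget to 1024 pages.
 * kvm_fd is an open fd on /dev/kvm; vm_fd comes from KVM_CREATE_VM. */
int cap_shadow_cache(int kvm_fd, int vm_fd)
{
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL) <= 0)
		return -1;	/* kernel predates this patch */

	if (ioctl(vm_fd, KVM_SET_NR_MMU_PAGES, 1024UL) < 0)
		return -1;	/* e.g. EINVAL below the 64-page floor */

	return ioctl(vm_fd, KVM_GET_NR_MMU_PAGES);	/* echoes n_alloc_mmu_pages */
}

Note the asymmetry in the handlers above: the set path takes kvm->lock around kvm_mmu_change_mmu_pages(), while the get path returns n_alloc_mmu_pages unlocked, an acceptably racy snapshot of a single integer.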
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 72757db15065..6cda1feb9a95 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -747,6 +747,40 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
+/*
+ * Change the number of mmu pages allocated to the vm.
+ * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
+ */
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+{
+	/*
+	 * If we set the number of mmu pages to be smaller than the
+	 * number of active pages, we must free some mmu pages before we
+	 * can change the value.
+	 */
+
+	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
+	    kvm_nr_mmu_pages) {
+		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
+				       - kvm->n_free_mmu_pages;
+
+		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
+			struct kvm_mmu_page *page;
+
+			page = container_of(kvm->active_mmu_pages.prev,
+					    struct kvm_mmu_page, link);
+			kvm_mmu_zap_page(kvm, page);
+			n_used_mmu_pages--;
+		}
+		kvm->n_free_mmu_pages = 0;
+	}
+	else
+		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
+					 - kvm->n_alloc_mmu_pages;
+
+	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
+}
+
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	unsigned index;
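To make the bookkeeping above concrete, here is a standalone toy model of the n_alloc/n_free accounting (my own code, not from the patch): shrinking below the in-use count zaps pages from the tail of active_mmu_pages, the least-recently-used end, and leaves zero free pages; growing simply credits the difference to the free counter.

#include <stdio.h>

/* Toy model of kvm_mmu_change_mmu_pages()'s counters (illustrative only). */
struct counters { unsigned int alloc, free; };

static void change_mmu_pages(struct counters *c, unsigned int target)
{
	unsigned int used = c->alloc - c->free;

	if (used > target) {
		/* kernel: zap pages off active_mmu_pages.prev until used == target */
		c->free = 0;
	} else {
		/* unsigned wraparound also covers a shrink that free pages absorb */
		c->free += target - c->alloc;
	}
	c->alloc = target;
}

int main(void)
{
	struct counters c = { .alloc = 1024, .free = 200 };	/* 824 in use */

	change_mmu_pages(&c, 512);	/* shrink: 312 pages would be zapped */
	printf("alloc=%u free=%u used=%u\n", c.alloc, c.free, c.alloc - c.free);

	change_mmu_pages(&c, 2048);	/* grow: in-use count is untouched */
	printf("alloc=%u free=%u used=%u\n", c.alloc, c.free, c.alloc - c.free);
	return 0;
}

This also illustrates the deadlock warning in the function's comment: a budget below the guest's working set forces continuous zap-and-refill.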
@@ -1297,8 +1331,10 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 
 	ASSERT(vcpu);
 
-	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
-
+	if (vcpu->kvm->n_requested_mmu_pages)
+		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
+	else
+		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
 	/*
 	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
 	 * Therefore we need to allocate shadow page tables in the first
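One consequence of this last hunk worth noting: the free-page count a vcpu starts with depends on whether userspace already requested an explicit budget. A sketch of the intended call order, assuming this era's KVM_SET_MEMORY_REGION ioctl and struct kvm_memory_region (the helper name is mine):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: request the shadow-cache budget before creating vcpus, so
 * alloc_mmu_pages() seeds n_free_mmu_pages from n_requested_mmu_pages
 * instead of the slot-derived n_alloc_mmu_pages. */
static int create_vcpu_with_budget(int vm_fd, struct kvm_memory_region *region)
{
	if (ioctl(vm_fd, KVM_SET_MEMORY_REGION, region) < 0)
		return -1;			/* sizes n_alloc_mmu_pages */
	if (ioctl(vm_fd, KVM_SET_NR_MMU_PAGES, 2048UL) < 0)
		return -1;			/* sets n_requested_mmu_pages */
	return ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu fd on success */
}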