author    Avi Kivity <avi@redhat.com>  2010-06-21 04:44:20 -0400
committer Avi Kivity <avi@redhat.com>  2010-08-01 03:47:00 -0400
commit    a1f4d39500ad8ed61825eff061debff42386ab5b
tree      58b72188b1356329e78951773f4be41e66b11d21
parent    fc34531db3cf8c422f2ff7cf4ef507a3ca672cd2
KVM: Remove memory alias support
As advertised in feature-removal-schedule.txt. Equivalent support is
provided by overlapping memory regions.

Signed-off-by: Avi Kivity <avi@redhat.com>
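For illustration only (not part of this patch): a VMM that previously used
KVM_SET_MEMORY_ALIAS can get the same effect with ordinary memory slots by
pointing two slots at the same host backing store. The helper name, slot
numbers, and vm_fd below are hypothetical, and error handling is
abbreviated; this is a sketch of the idea, not reference usage.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Hypothetical sketch: map the same host memory at two guest physical
 * addresses, e.g. to expose a VGA window at a second location.  Writes
 * through either range land in the same host pages, which is what the
 * removed alias mechanism provided.
 */
static int alias_with_slots(int vm_fd, void *host_mem,
			    __u64 gpa, __u64 alias_gpa, __u64 size)
{
	struct kvm_userspace_memory_region main_slot = {
		.slot            = 0,
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};
	struct kvm_userspace_memory_region alias_slot = {
		.slot            = 1,	/* distinct slot, same backing */
		.guest_phys_addr = alias_gpa,
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};

	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &main_slot) < 0)
		return -1;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &alias_slot);
}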
Diffstat (limited to 'arch')
 arch/ia64/kvm/kvm-ia64.c        |   5
 arch/powerpc/kvm/powerpc.c      |   5
 arch/s390/kvm/kvm-s390.c        |   5
 arch/x86/include/asm/kvm_host.h |  21
 arch/x86/kvm/mmu.c              |  17
 arch/x86/kvm/paging_tmpl.h      |   3
 arch/x86/kvm/x86.c              | 125
 arch/x86/kvm/x86.h              |   7
 8 files changed, 5 insertions(+), 183 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 91760e80e268..bd510beb43af 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1946,11 +1946,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return vcpu->arch.timer_fired;
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index b5ebdfbed20b..72a4ad86ee91 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -36,11 +36,6 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 08a3b35d30be..4fe68650535c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -723,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	return gfn;
-}
-
 static int __init kvm_s390_init(void)
 {
 	int ret;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2ec2e27a403e..a57cdeacc4d2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -69,8 +69,6 @@
 
 #define IOPL_SHIFT 12
 
-#define KVM_ALIAS_SLOTS 4
-
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_MMU_HASH_SHIFT 10
@@ -362,24 +360,7 @@ struct kvm_vcpu_arch {
 	u64 hv_vapic;
 };
 
-struct kvm_mem_alias {
-	gfn_t base_gfn;
-	unsigned long npages;
-	gfn_t target_gfn;
-#define KVM_ALIAS_INVALID 1UL
-	unsigned long flags;
-};
-
-#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
-
-struct kvm_mem_aliases {
-	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
-	int naliases;
-};
-
 struct kvm_arch {
-	struct kvm_mem_aliases *aliases;
-
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
@@ -655,8 +636,6 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
-
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8c2f580956d9..c5501bc10106 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -434,9 +434,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -450,8 +448,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -467,8 +464,7 @@ static int has_wrprotected_page(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot, level);
 		return *largepage_idx;
@@ -521,7 +517,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 
 /*
  * Take gfn and return the reverse mapping to it.
- * Note: gfn must be unaliased before this function get called
  */
 
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
@@ -561,7 +556,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	if (!is_rmap_spte(*spte))
 		return count;
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
@@ -698,7 +692,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	u64 *spte;
 	int i, write_protected = 0;
 
-	gfn = unalias_gfn(kvm, gfn);
 	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 
 	spte = rmap_next(kvm, rmapp, NULL);
@@ -885,7 +878,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
@@ -3510,8 +3502,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (sp->unsync)
 			continue;
 
-		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
-		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 
 		spte = rmap_next(vcpu->kvm, rmapp, NULL);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 863920f649fb..a21a86ef9e20 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -576,7 +576,6 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
- * - Alias changes zap the entire shadow cache.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			    bool clear_unsync)
@@ -611,7 +610,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] ||
+		if (gfn != sp->gfns[i] ||
 		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8e60b6c9c0b0..62596d373a49 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2740,115 +2740,6 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 	return kvm->arch.n_alloc_mmu_pages;
 }
 
-gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
-{
-	int i;
-	struct kvm_mem_alias *alias;
-	struct kvm_mem_aliases *aliases;
-
-	aliases = kvm_aliases(kvm);
-
-	for (i = 0; i < aliases->naliases; ++i) {
-		alias = &aliases->aliases[i];
-		if (alias->flags & KVM_ALIAS_INVALID)
-			continue;
-		if (gfn >= alias->base_gfn
-		    && gfn < alias->base_gfn + alias->npages)
-			return alias->target_gfn + gfn - alias->base_gfn;
-	}
-	return gfn;
-}
-
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
-{
-	int i;
-	struct kvm_mem_alias *alias;
-	struct kvm_mem_aliases *aliases;
-
-	aliases = kvm_aliases(kvm);
-
-	for (i = 0; i < aliases->naliases; ++i) {
-		alias = &aliases->aliases[i];
-		if (gfn >= alias->base_gfn
-		    && gfn < alias->base_gfn + alias->npages)
-			return alias->target_gfn + gfn - alias->base_gfn;
-	}
-	return gfn;
-}
-
-/*
- * Set a new alias region. Aliases map a portion of physical memory into
- * another portion. This is useful for memory windows, for example the PC
- * VGA region.
- */
-static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
-					 struct kvm_memory_alias *alias)
-{
-	int r, n;
-	struct kvm_mem_alias *p;
-	struct kvm_mem_aliases *aliases, *old_aliases;
-
-	r = -EINVAL;
-	/* General sanity checks */
-	if (alias->memory_size & (PAGE_SIZE - 1))
-		goto out;
-	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
-		goto out;
-	if (alias->slot >= KVM_ALIAS_SLOTS)
-		goto out;
-	if (alias->guest_phys_addr + alias->memory_size
-	    < alias->guest_phys_addr)
-		goto out;
-	if (alias->target_phys_addr + alias->memory_size
-	    < alias->target_phys_addr)
-		goto out;
-
-	r = -ENOMEM;
-	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!aliases)
-		goto out;
-
-	mutex_lock(&kvm->slots_lock);
-
-	/* invalidate any gfn reference in case of deletion/shrinking */
-	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
-	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
-	old_aliases = kvm->arch.aliases;
-	rcu_assign_pointer(kvm->arch.aliases, aliases);
-	synchronize_srcu_expedited(&kvm->srcu);
-	kvm_mmu_zap_all(kvm);
-	kfree(old_aliases);
-
-	r = -ENOMEM;
-	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!aliases)
-		goto out_unlock;
-
-	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
-
-	p = &aliases->aliases[alias->slot];
-	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
-	p->npages = alias->memory_size >> PAGE_SHIFT;
-	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
-	p->flags &= ~(KVM_ALIAS_INVALID);
-
-	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
-		if (aliases->aliases[n - 1].npages)
-			break;
-	aliases->naliases = n;
-
-	old_aliases = kvm->arch.aliases;
-	rcu_assign_pointer(kvm->arch.aliases, aliases);
-	synchronize_srcu_expedited(&kvm->srcu);
-	kfree(old_aliases);
-	r = 0;
-
-out_unlock:
-	mutex_unlock(&kvm->slots_lock);
-out:
-	return r;
-}
-
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 {
 	int r;
@@ -3056,7 +2947,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	union {
 		struct kvm_pit_state ps;
 		struct kvm_pit_state2 ps2;
-		struct kvm_memory_alias alias;
 		struct kvm_pit_config pit_config;
 	} u;
 
@@ -3101,14 +2991,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	case KVM_GET_NR_MMU_PAGES:
 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
 		break;
-	case KVM_SET_MEMORY_ALIAS:
-		r = -EFAULT;
-		if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
-			goto out;
-		r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
-		if (r)
-			goto out;
-		break;
 	case KVM_CREATE_IRQCHIP: {
 		struct kvm_pic *vpic;
 
@@ -5559,12 +5441,6 @@ struct kvm *kvm_arch_create_vm(void)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-	kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
-	if (!kvm->arch.aliases) {
-		kfree(kvm);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5622,7 +5498,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	if (kvm->arch.ept_identity_pagetable)
 		put_page(kvm->arch.ept_identity_pagetable);
 	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm->arch.aliases);
 	kfree(kvm);
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f4b54458285b..b7a404722d2b 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -65,13 +65,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
-static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
-{
-	return rcu_dereference_check(kvm->arch.aliases,
-			srcu_read_lock_held(&kvm->srcu)
-			|| lockdep_is_held(&kvm->slots_lock));
-}
-
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 