diff options
-rw-r--r-- | Documentation/feature-removal-schedule.txt | 11 | ||||
-rw-r--r-- | Documentation/kvm/api.txt | 12 | ||||
-rw-r--r-- | arch/ia64/kvm/kvm-ia64.c | 5 | ||||
-rw-r--r-- | arch/powerpc/kvm/powerpc.c | 5 | ||||
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 5 | ||||
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 21 | ||||
-rw-r--r-- | arch/x86/kvm/mmu.c | 17 | ||||
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 3 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 125 | ||||
-rw-r--r-- | arch/x86/kvm/x86.h | 7 | ||||
-rw-r--r-- | include/linux/kvm.h | 1 | ||||
-rw-r--r-- | include/linux/kvm_host.h | 6 | ||||
-rw-r--r-- | virt/kvm/kvm_main.c | 18 |
13 files changed, 11 insertions(+), 225 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 1571c0c83dba..ad1e90dd2780 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -538,17 +538,6 @@ Who: Jan Kiszka <jan.kiszka@web.de> | |||
538 | 538 | ||
539 | ---------------------------- | 539 | ---------------------------- |
540 | 540 | ||
541 | What: KVM memory aliases support | ||
542 | When: July 2010 | ||
543 | Why: Memory aliasing support is used for speeding up guest vga access | ||
544 | through the vga windows. | ||
545 | |||
546 | Modern userspace no longer uses this feature, so it's just bitrotted | ||
547 | code and can be removed with no impact. | ||
548 | Who: Avi Kivity <avi@redhat.com> | ||
549 | |||
550 | ---------------------------- | ||
551 | |||
552 | What: xtime, wall_to_monotonic | 541 | What: xtime, wall_to_monotonic |
553 | When: 2.6.36+ | 542 | When: 2.6.36+ |
554 | Files: kernel/time/timekeeping.c include/linux/time.h | 543 | Files: kernel/time/timekeeping.c include/linux/time.h |
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt index ffba03f55bdf..7e415943a11e 100644 --- a/Documentation/kvm/api.txt +++ b/Documentation/kvm/api.txt | |||
@@ -226,17 +226,7 @@ Type: vm ioctl | |||
226 | Parameters: struct kvm_memory_alias (in) | 226 | Parameters: struct kvm_memory_alias (in) |
227 | Returns: 0 (success), -1 (error) | 227 | Returns: 0 (success), -1 (error) |
228 | 228 | ||
229 | struct kvm_memory_alias { | 229 | This ioctl is obsolete and has been removed. |
230 | __u32 slot; /* this has a different namespace than memory slots */ | ||
231 | __u32 flags; | ||
232 | __u64 guest_phys_addr; | ||
233 | __u64 memory_size; | ||
234 | __u64 target_phys_addr; | ||
235 | }; | ||
236 | |||
237 | Defines a guest physical address space region as an alias to another | ||
238 | region. Useful for aliased address, for example the VGA low memory | ||
239 | window. Should not be used with userspace memory. | ||
240 | 230 | ||
241 | 4.9 KVM_RUN | 231 | 4.9 KVM_RUN |
242 | 232 | ||
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 91760e80e268..bd510beb43af 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -1946,11 +1946,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
1946 | return vcpu->arch.timer_fired; | 1946 | return vcpu->arch.timer_fired; |
1947 | } | 1947 | } |
1948 | 1948 | ||
1949 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
1950 | { | ||
1951 | return gfn; | ||
1952 | } | ||
1953 | |||
1954 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 1949 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
1955 | { | 1950 | { |
1956 | return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || | 1951 | return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) || |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index b5ebdfbed20b..72a4ad86ee91 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -36,11 +36,6 @@ | |||
36 | #define CREATE_TRACE_POINTS | 36 | #define CREATE_TRACE_POINTS |
37 | #include "trace.h" | 37 | #include "trace.h" |
38 | 38 | ||
39 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
40 | { | ||
41 | return gfn; | ||
42 | } | ||
43 | |||
44 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
45 | { | 40 | { |
46 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); | 41 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 08a3b35d30be..4fe68650535c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -723,11 +723,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
723 | { | 723 | { |
724 | } | 724 | } |
725 | 725 | ||
726 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
727 | { | ||
728 | return gfn; | ||
729 | } | ||
730 | |||
731 | static int __init kvm_s390_init(void) | 726 | static int __init kvm_s390_init(void) |
732 | { | 727 | { |
733 | int ret; | 728 | int ret; |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2ec2e27a403e..a57cdeacc4d2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -69,8 +69,6 @@ | |||
69 | 69 | ||
70 | #define IOPL_SHIFT 12 | 70 | #define IOPL_SHIFT 12 |
71 | 71 | ||
72 | #define KVM_ALIAS_SLOTS 4 | ||
73 | |||
74 | #define KVM_PERMILLE_MMU_PAGES 20 | 72 | #define KVM_PERMILLE_MMU_PAGES 20 |
75 | #define KVM_MIN_ALLOC_MMU_PAGES 64 | 73 | #define KVM_MIN_ALLOC_MMU_PAGES 64 |
76 | #define KVM_MMU_HASH_SHIFT 10 | 74 | #define KVM_MMU_HASH_SHIFT 10 |
@@ -362,24 +360,7 @@ struct kvm_vcpu_arch { | |||
362 | u64 hv_vapic; | 360 | u64 hv_vapic; |
363 | }; | 361 | }; |
364 | 362 | ||
365 | struct kvm_mem_alias { | ||
366 | gfn_t base_gfn; | ||
367 | unsigned long npages; | ||
368 | gfn_t target_gfn; | ||
369 | #define KVM_ALIAS_INVALID 1UL | ||
370 | unsigned long flags; | ||
371 | }; | ||
372 | |||
373 | #define KVM_ARCH_HAS_UNALIAS_INSTANTIATION | ||
374 | |||
375 | struct kvm_mem_aliases { | ||
376 | struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS]; | ||
377 | int naliases; | ||
378 | }; | ||
379 | |||
380 | struct kvm_arch { | 363 | struct kvm_arch { |
381 | struct kvm_mem_aliases *aliases; | ||
382 | |||
383 | unsigned int n_free_mmu_pages; | 364 | unsigned int n_free_mmu_pages; |
384 | unsigned int n_requested_mmu_pages; | 365 | unsigned int n_requested_mmu_pages; |
385 | unsigned int n_alloc_mmu_pages; | 366 | unsigned int n_alloc_mmu_pages; |
@@ -655,8 +636,6 @@ void kvm_disable_tdp(void); | |||
655 | int complete_pio(struct kvm_vcpu *vcpu); | 636 | int complete_pio(struct kvm_vcpu *vcpu); |
656 | bool kvm_check_iopl(struct kvm_vcpu *vcpu); | 637 | bool kvm_check_iopl(struct kvm_vcpu *vcpu); |
657 | 638 | ||
658 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); | ||
659 | |||
660 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | 639 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) |
661 | { | 640 | { |
662 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); | 641 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8c2f580956d9..c5501bc10106 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -434,9 +434,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) | |||
434 | int *write_count; | 434 | int *write_count; |
435 | int i; | 435 | int i; |
436 | 436 | ||
437 | gfn = unalias_gfn(kvm, gfn); | 437 | slot = gfn_to_memslot(kvm, gfn); |
438 | |||
439 | slot = gfn_to_memslot_unaliased(kvm, gfn); | ||
440 | for (i = PT_DIRECTORY_LEVEL; | 438 | for (i = PT_DIRECTORY_LEVEL; |
441 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { | 439 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { |
442 | write_count = slot_largepage_idx(gfn, slot, i); | 440 | write_count = slot_largepage_idx(gfn, slot, i); |
@@ -450,8 +448,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) | |||
450 | int *write_count; | 448 | int *write_count; |
451 | int i; | 449 | int i; |
452 | 450 | ||
453 | gfn = unalias_gfn(kvm, gfn); | 451 | slot = gfn_to_memslot(kvm, gfn); |
454 | slot = gfn_to_memslot_unaliased(kvm, gfn); | ||
455 | for (i = PT_DIRECTORY_LEVEL; | 452 | for (i = PT_DIRECTORY_LEVEL; |
456 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { | 453 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { |
457 | write_count = slot_largepage_idx(gfn, slot, i); | 454 | write_count = slot_largepage_idx(gfn, slot, i); |
@@ -467,8 +464,7 @@ static int has_wrprotected_page(struct kvm *kvm, | |||
467 | struct kvm_memory_slot *slot; | 464 | struct kvm_memory_slot *slot; |
468 | int *largepage_idx; | 465 | int *largepage_idx; |
469 | 466 | ||
470 | gfn = unalias_gfn(kvm, gfn); | 467 | slot = gfn_to_memslot(kvm, gfn); |
471 | slot = gfn_to_memslot_unaliased(kvm, gfn); | ||
472 | if (slot) { | 468 | if (slot) { |
473 | largepage_idx = slot_largepage_idx(gfn, slot, level); | 469 | largepage_idx = slot_largepage_idx(gfn, slot, level); |
474 | return *largepage_idx; | 470 | return *largepage_idx; |
@@ -521,7 +517,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) | |||
521 | 517 | ||
522 | /* | 518 | /* |
523 | * Take gfn and return the reverse mapping to it. | 519 | * Take gfn and return the reverse mapping to it. |
524 | * Note: gfn must be unaliased before this function get called | ||
525 | */ | 520 | */ |
526 | 521 | ||
527 | static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) | 522 | static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) |
@@ -561,7 +556,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) | |||
561 | 556 | ||
562 | if (!is_rmap_spte(*spte)) | 557 | if (!is_rmap_spte(*spte)) |
563 | return count; | 558 | return count; |
564 | gfn = unalias_gfn(vcpu->kvm, gfn); | ||
565 | sp = page_header(__pa(spte)); | 559 | sp = page_header(__pa(spte)); |
566 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); | 560 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
567 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); | 561 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
@@ -698,7 +692,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) | |||
698 | u64 *spte; | 692 | u64 *spte; |
699 | int i, write_protected = 0; | 693 | int i, write_protected = 0; |
700 | 694 | ||
701 | gfn = unalias_gfn(kvm, gfn); | ||
702 | rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL); | 695 | rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL); |
703 | 696 | ||
704 | spte = rmap_next(kvm, rmapp, NULL); | 697 | spte = rmap_next(kvm, rmapp, NULL); |
@@ -885,7 +878,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) | |||
885 | 878 | ||
886 | sp = page_header(__pa(spte)); | 879 | sp = page_header(__pa(spte)); |
887 | 880 | ||
888 | gfn = unalias_gfn(vcpu->kvm, gfn); | ||
889 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); | 881 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
890 | 882 | ||
891 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); | 883 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); |
@@ -3510,8 +3502,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) | |||
3510 | if (sp->unsync) | 3502 | if (sp->unsync) |
3511 | continue; | 3503 | continue; |
3512 | 3504 | ||
3513 | gfn = unalias_gfn(vcpu->kvm, sp->gfn); | 3505 | slot = gfn_to_memslot(vcpu->kvm, sp->gfn); |
3514 | slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn); | ||
3515 | rmapp = &slot->rmap[gfn - slot->base_gfn]; | 3506 | rmapp = &slot->rmap[gfn - slot->base_gfn]; |
3516 | 3507 | ||
3517 | spte = rmap_next(vcpu->kvm, rmapp, NULL); | 3508 | spte = rmap_next(vcpu->kvm, rmapp, NULL); |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 863920f649fb..a21a86ef9e20 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -576,7 +576,6 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, | |||
576 | * Using the cached information from sp->gfns is safe because: | 576 | * Using the cached information from sp->gfns is safe because: |
577 | * - The spte has a reference to the struct page, so the pfn for a given gfn | 577 | * - The spte has a reference to the struct page, so the pfn for a given gfn |
578 | * can't change unless all sptes pointing to it are nuked first. | 578 | * can't change unless all sptes pointing to it are nuked first. |
579 | * - Alias changes zap the entire shadow cache. | ||
580 | */ | 579 | */ |
581 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | 580 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
582 | bool clear_unsync) | 581 | bool clear_unsync) |
@@ -611,7 +610,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
611 | return -EINVAL; | 610 | return -EINVAL; |
612 | 611 | ||
613 | gfn = gpte_to_gfn(gpte); | 612 | gfn = gpte_to_gfn(gpte); |
614 | if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] || | 613 | if (gfn != sp->gfns[i] || |
615 | !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) { | 614 | !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) { |
616 | u64 nonpresent; | 615 | u64 nonpresent; |
617 | 616 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8e60b6c9c0b0..62596d373a49 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2740,115 +2740,6 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) | |||
2740 | return kvm->arch.n_alloc_mmu_pages; | 2740 | return kvm->arch.n_alloc_mmu_pages; |
2741 | } | 2741 | } |
2742 | 2742 | ||
2743 | gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn) | ||
2744 | { | ||
2745 | int i; | ||
2746 | struct kvm_mem_alias *alias; | ||
2747 | struct kvm_mem_aliases *aliases; | ||
2748 | |||
2749 | aliases = kvm_aliases(kvm); | ||
2750 | |||
2751 | for (i = 0; i < aliases->naliases; ++i) { | ||
2752 | alias = &aliases->aliases[i]; | ||
2753 | if (alias->flags & KVM_ALIAS_INVALID) | ||
2754 | continue; | ||
2755 | if (gfn >= alias->base_gfn | ||
2756 | && gfn < alias->base_gfn + alias->npages) | ||
2757 | return alias->target_gfn + gfn - alias->base_gfn; | ||
2758 | } | ||
2759 | return gfn; | ||
2760 | } | ||
2761 | |||
2762 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | ||
2763 | { | ||
2764 | int i; | ||
2765 | struct kvm_mem_alias *alias; | ||
2766 | struct kvm_mem_aliases *aliases; | ||
2767 | |||
2768 | aliases = kvm_aliases(kvm); | ||
2769 | |||
2770 | for (i = 0; i < aliases->naliases; ++i) { | ||
2771 | alias = &aliases->aliases[i]; | ||
2772 | if (gfn >= alias->base_gfn | ||
2773 | && gfn < alias->base_gfn + alias->npages) | ||
2774 | return alias->target_gfn + gfn - alias->base_gfn; | ||
2775 | } | ||
2776 | return gfn; | ||
2777 | } | ||
2778 | |||
2779 | /* | ||
2780 | * Set a new alias region. Aliases map a portion of physical memory into | ||
2781 | * another portion. This is useful for memory windows, for example the PC | ||
2782 | * VGA region. | ||
2783 | */ | ||
2784 | static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, | ||
2785 | struct kvm_memory_alias *alias) | ||
2786 | { | ||
2787 | int r, n; | ||
2788 | struct kvm_mem_alias *p; | ||
2789 | struct kvm_mem_aliases *aliases, *old_aliases; | ||
2790 | |||
2791 | r = -EINVAL; | ||
2792 | /* General sanity checks */ | ||
2793 | if (alias->memory_size & (PAGE_SIZE - 1)) | ||
2794 | goto out; | ||
2795 | if (alias->guest_phys_addr & (PAGE_SIZE - 1)) | ||
2796 | goto out; | ||
2797 | if (alias->slot >= KVM_ALIAS_SLOTS) | ||
2798 | goto out; | ||
2799 | if (alias->guest_phys_addr + alias->memory_size | ||
2800 | < alias->guest_phys_addr) | ||
2801 | goto out; | ||
2802 | if (alias->target_phys_addr + alias->memory_size | ||
2803 | < alias->target_phys_addr) | ||
2804 | goto out; | ||
2805 | |||
2806 | r = -ENOMEM; | ||
2807 | aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); | ||
2808 | if (!aliases) | ||
2809 | goto out; | ||
2810 | |||
2811 | mutex_lock(&kvm->slots_lock); | ||
2812 | |||
2813 | /* invalidate any gfn reference in case of deletion/shrinking */ | ||
2814 | memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); | ||
2815 | aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID; | ||
2816 | old_aliases = kvm->arch.aliases; | ||
2817 | rcu_assign_pointer(kvm->arch.aliases, aliases); | ||
2818 | synchronize_srcu_expedited(&kvm->srcu); | ||
2819 | kvm_mmu_zap_all(kvm); | ||
2820 | kfree(old_aliases); | ||
2821 | |||
2822 | r = -ENOMEM; | ||
2823 | aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); | ||
2824 | if (!aliases) | ||
2825 | goto out_unlock; | ||
2826 | |||
2827 | memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases)); | ||
2828 | |||
2829 | p = &aliases->aliases[alias->slot]; | ||
2830 | p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; | ||
2831 | p->npages = alias->memory_size >> PAGE_SHIFT; | ||
2832 | p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; | ||
2833 | p->flags &= ~(KVM_ALIAS_INVALID); | ||
2834 | |||
2835 | for (n = KVM_ALIAS_SLOTS; n > 0; --n) | ||
2836 | if (aliases->aliases[n - 1].npages) | ||
2837 | break; | ||
2838 | aliases->naliases = n; | ||
2839 | |||
2840 | old_aliases = kvm->arch.aliases; | ||
2841 | rcu_assign_pointer(kvm->arch.aliases, aliases); | ||
2842 | synchronize_srcu_expedited(&kvm->srcu); | ||
2843 | kfree(old_aliases); | ||
2844 | r = 0; | ||
2845 | |||
2846 | out_unlock: | ||
2847 | mutex_unlock(&kvm->slots_lock); | ||
2848 | out: | ||
2849 | return r; | ||
2850 | } | ||
2851 | |||
2852 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | 2743 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) |
2853 | { | 2744 | { |
2854 | int r; | 2745 | int r; |
@@ -3056,7 +2947,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
3056 | union { | 2947 | union { |
3057 | struct kvm_pit_state ps; | 2948 | struct kvm_pit_state ps; |
3058 | struct kvm_pit_state2 ps2; | 2949 | struct kvm_pit_state2 ps2; |
3059 | struct kvm_memory_alias alias; | ||
3060 | struct kvm_pit_config pit_config; | 2950 | struct kvm_pit_config pit_config; |
3061 | } u; | 2951 | } u; |
3062 | 2952 | ||
@@ -3101,14 +2991,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
3101 | case KVM_GET_NR_MMU_PAGES: | 2991 | case KVM_GET_NR_MMU_PAGES: |
3102 | r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); | 2992 | r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); |
3103 | break; | 2993 | break; |
3104 | case KVM_SET_MEMORY_ALIAS: | ||
3105 | r = -EFAULT; | ||
3106 | if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias))) | ||
3107 | goto out; | ||
3108 | r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias); | ||
3109 | if (r) | ||
3110 | goto out; | ||
3111 | break; | ||
3112 | case KVM_CREATE_IRQCHIP: { | 2994 | case KVM_CREATE_IRQCHIP: { |
3113 | struct kvm_pic *vpic; | 2995 | struct kvm_pic *vpic; |
3114 | 2996 | ||
@@ -5559,12 +5441,6 @@ struct kvm *kvm_arch_create_vm(void) | |||
5559 | if (!kvm) | 5441 | if (!kvm) |
5560 | return ERR_PTR(-ENOMEM); | 5442 | return ERR_PTR(-ENOMEM); |
5561 | 5443 | ||
5562 | kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL); | ||
5563 | if (!kvm->arch.aliases) { | ||
5564 | kfree(kvm); | ||
5565 | return ERR_PTR(-ENOMEM); | ||
5566 | } | ||
5567 | |||
5568 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); | 5444 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); |
5569 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | 5445 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
5570 | 5446 | ||
@@ -5622,7 +5498,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
5622 | if (kvm->arch.ept_identity_pagetable) | 5498 | if (kvm->arch.ept_identity_pagetable) |
5623 | put_page(kvm->arch.ept_identity_pagetable); | 5499 | put_page(kvm->arch.ept_identity_pagetable); |
5624 | cleanup_srcu_struct(&kvm->srcu); | 5500 | cleanup_srcu_struct(&kvm->srcu); |
5625 | kfree(kvm->arch.aliases); | ||
5626 | kfree(kvm); | 5501 | kfree(kvm); |
5627 | } | 5502 | } |
5628 | 5503 | ||
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f4b54458285b..b7a404722d2b 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -65,13 +65,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu) | |||
65 | return kvm_read_cr0_bits(vcpu, X86_CR0_PG); | 65 | return kvm_read_cr0_bits(vcpu, X86_CR0_PG); |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm) | ||
69 | { | ||
70 | return rcu_dereference_check(kvm->arch.aliases, | ||
71 | srcu_read_lock_held(&kvm->srcu) | ||
72 | || lockdep_is_held(&kvm->slots_lock)); | ||
73 | } | ||
74 | |||
75 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | 68 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
76 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 69 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
77 | 70 | ||
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 6fd40f540a8e..636fc381c897 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -619,6 +619,7 @@ struct kvm_clock_data { | |||
619 | */ | 619 | */ |
620 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) | 620 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) |
621 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) | 621 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) |
622 | /* KVM_SET_MEMORY_ALIAS is obsolete: */ | ||
622 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) | 623 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) |
623 | #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) | 624 | #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) |
624 | #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) | 625 | #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2d96555cd4ed..240e460777bc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -286,8 +286,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
286 | int user_alloc); | 286 | int user_alloc); |
287 | void kvm_disable_largepages(void); | 287 | void kvm_disable_largepages(void); |
288 | void kvm_arch_flush_shadow(struct kvm *kvm); | 288 | void kvm_arch_flush_shadow(struct kvm *kvm); |
289 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); | ||
290 | gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn); | ||
291 | 289 | ||
292 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 290 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
293 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 291 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
@@ -564,10 +562,6 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se | |||
564 | } | 562 | } |
565 | #endif | 563 | #endif |
566 | 564 | ||
567 | #ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION | ||
568 | #define unalias_gfn_instantiation unalias_gfn | ||
569 | #endif | ||
570 | |||
571 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 565 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
572 | 566 | ||
573 | #define KVM_MAX_IRQ_ROUTES 1024 | 567 | #define KVM_MAX_IRQ_ROUTES 1024 |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 84a090644d9d..65417e3d8462 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -841,7 +841,7 @@ int kvm_is_error_hva(unsigned long addr) | |||
841 | } | 841 | } |
842 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); | 842 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); |
843 | 843 | ||
844 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) | 844 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
845 | { | 845 | { |
846 | int i; | 846 | int i; |
847 | struct kvm_memslots *slots = kvm_memslots(kvm); | 847 | struct kvm_memslots *slots = kvm_memslots(kvm); |
@@ -855,20 +855,13 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) | |||
855 | } | 855 | } |
856 | return NULL; | 856 | return NULL; |
857 | } | 857 | } |
858 | EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); | 858 | EXPORT_SYMBOL_GPL(gfn_to_memslot); |
859 | |||
860 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | ||
861 | { | ||
862 | gfn = unalias_gfn(kvm, gfn); | ||
863 | return gfn_to_memslot_unaliased(kvm, gfn); | ||
864 | } | ||
865 | 859 | ||
866 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) | 860 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
867 | { | 861 | { |
868 | int i; | 862 | int i; |
869 | struct kvm_memslots *slots = kvm_memslots(kvm); | 863 | struct kvm_memslots *slots = kvm_memslots(kvm); |
870 | 864 | ||
871 | gfn = unalias_gfn_instantiation(kvm, gfn); | ||
872 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | 865 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { |
873 | struct kvm_memory_slot *memslot = &slots->memslots[i]; | 866 | struct kvm_memory_slot *memslot = &slots->memslots[i]; |
874 | 867 | ||
@@ -913,7 +906,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn) | |||
913 | struct kvm_memslots *slots = kvm_memslots(kvm); | 906 | struct kvm_memslots *slots = kvm_memslots(kvm); |
914 | struct kvm_memory_slot *memslot = NULL; | 907 | struct kvm_memory_slot *memslot = NULL; |
915 | 908 | ||
916 | gfn = unalias_gfn(kvm, gfn); | ||
917 | for (i = 0; i < slots->nmemslots; ++i) { | 909 | for (i = 0; i < slots->nmemslots; ++i) { |
918 | memslot = &slots->memslots[i]; | 910 | memslot = &slots->memslots[i]; |
919 | 911 | ||
@@ -934,8 +926,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) | |||
934 | { | 926 | { |
935 | struct kvm_memory_slot *slot; | 927 | struct kvm_memory_slot *slot; |
936 | 928 | ||
937 | gfn = unalias_gfn_instantiation(kvm, gfn); | 929 | slot = gfn_to_memslot(kvm, gfn); |
938 | slot = gfn_to_memslot_unaliased(kvm, gfn); | ||
939 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) | 930 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID) |
940 | return bad_hva(); | 931 | return bad_hva(); |
941 | return gfn_to_hva_memslot(slot, gfn); | 932 | return gfn_to_hva_memslot(slot, gfn); |
@@ -1202,8 +1193,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
1202 | { | 1193 | { |
1203 | struct kvm_memory_slot *memslot; | 1194 | struct kvm_memory_slot *memslot; |
1204 | 1195 | ||
1205 | gfn = unalias_gfn(kvm, gfn); | 1196 | memslot = gfn_to_memslot(kvm, gfn); |
1206 | memslot = gfn_to_memslot_unaliased(kvm, gfn); | ||
1207 | if (memslot && memslot->dirty_bitmap) { | 1197 | if (memslot && memslot->dirty_bitmap) { |
1208 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1198 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
1209 | 1199 | ||