path: root/virt/kvm/kvm_main.c
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	133
1 file changed, 58 insertions(+), 75 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3cee7b167052..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
 
 	if (mutex_lock_killable(&vcpu->mutex))
 		return -EINTR;
-	if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
-		/* The thread running this VCPU changed. */
-		struct pid *oldpid = vcpu->pid;
-		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
-		rcu_assign_pointer(vcpu->pid, newpid);
-		if (oldpid)
-			synchronize_rcu();
-		put_pid(oldpid);
-	}
 	cpu = get_cpu();
 	preempt_notifier_register(&vcpu->preempt_notifier);
 	kvm_arch_vcpu_load(vcpu, cpu);
@@ -468,9 +459,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err_no_disable;
 
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
-#endif
 #ifdef CONFIG_HAVE_KVM_IRQFD
 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
 #endif
@@ -668,48 +656,46 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
-	struct kvm_memory_slot *s1, *s2;
-
-	s1 = (struct kvm_memory_slot *)slot1;
-	s2 = (struct kvm_memory_slot *)slot2;
-
-	if (s1->npages < s2->npages)
-		return 1;
-	if (s1->npages > s2->npages)
-		return -1;
-
-	return 0;
-}
-
 /*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert memslot and re-sort memslots based on their GFN,
+ * so binary search could be used to lookup GFN.
+ * Sorting algorithm takes advantage of having initially
+ * sorted array and known changed memslot position.
  */
-static void sort_memslots(struct kvm_memslots *slots)
-{
-	int i;
-
-	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
-	      sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
-
-	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
-		slots->id_to_index[slots->memslots[i].id] = i;
-}
-
 static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *new)
 {
-	if (new) {
-		int id = new->id;
-		struct kvm_memory_slot *old = id_to_memslot(slots, id);
-		unsigned long npages = old->npages;
+	int id = new->id;
+	int i = slots->id_to_index[id];
+	struct kvm_memory_slot *mslots = slots->memslots;
 
-		*old = *new;
-		if (new->npages != npages)
-			sort_memslots(slots);
+	WARN_ON(mslots[i].id != id);
+	if (!new->npages) {
+		new->base_gfn = 0;
+		if (mslots[i].npages)
+			slots->used_slots--;
+	} else {
+		if (!mslots[i].npages)
+			slots->used_slots++;
 	}
+
+	while (i < KVM_MEM_SLOTS_NUM - 1 &&
+	       new->base_gfn <= mslots[i + 1].base_gfn) {
+		if (!mslots[i + 1].npages)
+			break;
+		mslots[i] = mslots[i + 1];
+		slots->id_to_index[mslots[i].id] = i;
+		i++;
+	}
+	while (i > 0 &&
+	       new->base_gfn > mslots[i - 1].base_gfn) {
+		mslots[i] = mslots[i - 1];
+		slots->id_to_index[mslots[i].id] = i;
+		i--;
+	}
+
+	mslots[i] = *new;
+	slots->id_to_index[mslots[i].id] = i;
 }
 
 static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
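The new update_memslots() relies on the array being already sorted (descending by base_gfn) and on id_to_index giving the changed slot's position, so the one changed slot is simply shifted until it fits: O(n) worst case instead of a full sort of KVM_MEM_SLOTS_NUM entries, and id_to_index stays in step with every intermediate move rather than being rebuilt from scratch as sort_memslots() did. A minimal standalone sketch of that single insertion-sort pass; the struct and names here are illustrative, not KVM's, and the kernel's empty-slot handling is omitted:

/*
 * One changed element in an otherwise sorted array is bubbled to its
 * new position. Sketch only; not the kernel's data structures.
 */
#include <stdio.h>

struct slot {
	int id;
	unsigned long base_gfn;
};

static void reinsert(struct slot *a, int n, int i, struct slot new)
{
	/* Shift right while the next slot should sort before us. */
	while (i < n - 1 && new.base_gfn <= a[i + 1].base_gfn) {
		a[i] = a[i + 1];
		i++;
	}
	/* Shift left while the previous slot should sort after us. */
	while (i > 0 && new.base_gfn > a[i - 1].base_gfn) {
		a[i] = a[i - 1];
		i--;
	}
	a[i] = new;
}

int main(void)
{
	/* Sorted by descending base_gfn, as the loops above expect. */
	struct slot a[] = { {0, 0x400}, {1, 0x200}, {2, 0x100} };
	struct slot moved = { .id = 2, .base_gfn = 0x300 };

	reinsert(a, 3, 2, moved);	/* slot 2 moves from index 2 to 1 */
	for (int i = 0; i < 3; i++)
		printf("id=%d base_gfn=0x%lx\n", a[i].id, a[i].base_gfn);
	return 0;
}
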
@@ -727,7 +713,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 }
 
 static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
-		struct kvm_memslots *slots, struct kvm_memory_slot *new)
+		struct kvm_memslots *slots)
 {
 	struct kvm_memslots *old_memslots = kvm->memslots;
 
@@ -738,7 +724,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	WARN_ON(old_memslots->generation & 1);
 	slots->generation = old_memslots->generation + 1;
 
-	update_memslots(slots, new);
 	rcu_assign_pointer(kvm->memslots, slots);
 	synchronize_srcu_expedited(&kvm->srcu);
 
@@ -760,7 +745,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
  *
- * Must be called holding mmap_sem for write.
+ * Must be called holding kvm->slots_lock for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem)
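The comment fix documents the actual locking contract: writers are serialized by kvm->slots_lock, not mmap_sem. A sketch of the caller-side pattern the updated comment implies; the exact wrapper shape is an assumption, not a quote from this file:

/* Sketch of the usual lock/call/unlock wrapper around the __ variant. */
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem);
	mutex_unlock(&kvm->slots_lock);
	return r;
}
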
@@ -866,15 +851,16 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
+	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+			GFP_KERNEL);
+	if (!slots)
+		goto out_free;
+
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
-		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-				GFP_KERNEL);
-		if (!slots)
-			goto out_free;
 		slot = id_to_memslot(slots, mem->slot);
 		slot->flags |= KVM_MEMSLOT_INVALID;
 
-		old_memslots = install_new_memslots(kvm, slots, NULL);
+		old_memslots = install_new_memslots(kvm, slots);
 
 		/* slot was deleted or moved, clear iommu mapping */
 		kvm_iommu_unmap_pages(kvm, &old);
@@ -886,6 +872,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	 *	- kvm_is_visible_gfn (mmu_check_roots)
 	 */
 	kvm_arch_flush_shadow_memslot(kvm, slot);
+
+	/*
+	 * We can re-use the old_memslots from above, the only difference
+	 * from the currently installed memslots is the invalid flag. This
+	 * will get overwritten by update_memslots anyway.
+	 */
 	slots = old_memslots;
 }
 
@@ -893,26 +885,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_slots;
 
-	r = -ENOMEM;
-	/*
-	 * We can re-use the old_memslots from above, the only difference
-	 * from the currently installed memslots is the invalid flag. This
-	 * will get overwritten by update_memslots anyway.
-	 */
-	if (!slots) {
-		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
-				GFP_KERNEL);
-		if (!slots)
-			goto out_free;
-	}
-
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (change == KVM_MR_DELETE) {
 		new.dirty_bitmap = NULL;
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
 
-	old_memslots = install_new_memslots(kvm, slots, &new);
+	update_memslots(slots, &new);
+	old_memslots = install_new_memslots(kvm, slots);
 
 	kvm_arch_commit_memory_region(kvm, mem, &old, change);
 
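Taken together, the hunks above hoist the kmemdup() in front of the DELETE/MOVE special case and split updating from installing, so every path through __kvm_set_memory_region() now follows the same shape: copy the current memslots, mutate the private copy with update_memslots(), then publish it with install_new_memslots(), which swaps the pointer and waits out SRCU readers. A self-contained userspace analogue of that copy/update/publish pattern, using C11 atomics in place of SRCU; the names are illustrative, and the old copy is deliberately leaked where the kernel would synchronize and free:

/*
 * Writers mutate a private copy and publish it with one pointer store;
 * readers always see either the old or the new array, never a
 * half-updated one. Sketch only, not KVM's mechanism.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slots {
	int nr;
	unsigned long base_gfn[8];
};

static _Atomic(struct slots *) current_slots;

static void publish_with_new_gfn(unsigned long gfn)
{
	struct slots *old = atomic_load(&current_slots);
	struct slots *copy = malloc(sizeof(*copy));

	memcpy(copy, old, sizeof(*copy));	/* kmemdup() step */
	copy->base_gfn[copy->nr++] = gfn;	/* update_memslots() step */
	atomic_store(&current_slots, copy);	/* install_new_memslots() step */
	/* Real code would wait for readers (SRCU), then free 'old'. */
}

int main(void)
{
	struct slots *init = calloc(1, sizeof(*init));

	atomic_store(&current_slots, init);
	publish_with_new_gfn(0x100);
	printf("slots in use: %d\n", atomic_load(&current_slots)->nr);
	return 0;
}
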
@@ -1799,10 +1779,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
 	rcu_read_unlock();
 	if (!task)
 		return ret;
-	if (task->flags & PF_VCPU) {
-		put_task_struct(task);
-		return ret;
-	}
 	ret = yield_to(task, 1);
 	put_task_struct(task);
 
@@ -2065,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		r = -EINVAL;
 		if (arg)
 			goto out;
+		if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+			/* The thread running this VCPU changed. */
+			struct pid *oldpid = vcpu->pid;
+			struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+			rcu_assign_pointer(vcpu->pid, newpid);
+			if (oldpid)
+				synchronize_rcu();
+			put_pid(oldpid);
+		}
 		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 		break;
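This is the pid-tracking block removed from vcpu_load() in the first hunk; it now runs only on KVM_RUN, where the owning thread can actually change, instead of on every vcpu ioctl. The idiom is publish-then-wait: rcu_assign_pointer() the new pid, let a grace period pass with synchronize_rcu(), and only then put_pid() the old one, so concurrent readers never see a freed pid. A minimal userspace sketch of the same pattern with liburcu (assumed available, linked with -lurcu; the names are illustrative):

/* Publish-then-wait pointer replacement, userspace RCU flavor. */
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct pid_info {
	int nr;
};

static struct pid_info *owner;	/* read under rcu_read_lock() */

static void set_owner(int nr)
{
	struct pid_info *newp = malloc(sizeof(*newp));
	struct pid_info *oldp = owner;

	newp->nr = nr;
	rcu_assign_pointer(owner, newp);	/* readers may see newp now */
	if (oldp) {
		synchronize_rcu();		/* wait for all readers */
		free(oldp);			/* put_pid() equivalent */
	}
}

int main(void)
{
	rcu_register_thread();
	set_owner(42);
	rcu_read_lock();
	printf("owner pid: %d\n", rcu_dereference(owner)->nr);
	rcu_read_unlock();
	set_owner(43);
	rcu_unregister_thread();
	return 0;
}
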
@@ -2599,8 +2584,6 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
-		if (r == -ENOTTY)
-			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
 	}
 out:
 	return r;