Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	106
1 file changed, 75 insertions(+), 31 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5e709ebb7c40..2e93630b4add 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -217,6 +217,11 @@ void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 	make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
+void kvm_make_update_eoibitmap_request(struct kvm *kvm)
+{
+	make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
+}
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
 	struct page *page;
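The new helper follows the same pattern as kvm_make_mclock_inprogress_request() just above it: set a request bit on every vCPU and kick them out of guest mode, so each one refreshes its EOI exit bitmap before re-entering the guest. A rough sketch of how the vCPU side consumes such a bit — the real consumer lives in the arch entry path, not in this file, and update_eoi_exitmap() is a stand-in name:

	/* Sketch: arch vcpu entry path draining a remotely raised request bit. */
	static void service_pending_requests(struct kvm_vcpu *vcpu)
	{
		if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
			update_eoi_exitmap(vcpu);	/* hypothetical arch hook */
	}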
@@ -714,6 +719,24 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 }
 
 /*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ *   -- move it in the guest physical memory space
+ *   -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+	KVM_MR_CREATE,
+	KVM_MR_DELETE,
+	KVM_MR_MOVE,
+	KVM_MR_FLAGS_ONLY,
+};
+
+/*
  * Allocate some memory and give it an address in the guest physical address
  * space.
  *
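The new comment maps one-to-one onto what userspace can request through the KVM_SET_USER_MEMORY_REGION ioctl. A minimal userspace sketch of the four operations, assuming vm_fd is a VM file descriptor and backing points at a page-aligned mmap() area (error handling elided):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static void exercise_slot_ops(int vm_fd, void *backing)
	{
		struct kvm_userspace_memory_region region = {
			.slot            = 0,
			.flags           = 0,
			.guest_phys_addr = 0x100000,
			.memory_size     = 0x200000,		/* KVM_MR_CREATE */
			.userspace_addr  = (unsigned long)backing,
		};

		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		region.flags = KVM_MEM_LOG_DIRTY_PAGES;		/* KVM_MR_FLAGS_ONLY */
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		region.guest_phys_addr = 0x400000;		/* KVM_MR_MOVE */
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);

		region.memory_size = 0;				/* KVM_MR_DELETE */
		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}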
@@ -731,6 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	struct kvm_memory_slot old, new;
 	struct kvm_memslots *slots = NULL, *old_memslots;
+	enum kvm_mr_change change;
 
 	r = check_memory_region_flags(mem);
 	if (r)
@@ -772,17 +796,31 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.npages = npages;
 	new.flags = mem->flags;
 
-	/*
-	 * Disallow changing a memory slot's size or changing anything about
-	 * zero sized slots that doesn't involve making them non-zero.
-	 */
 	r = -EINVAL;
-	if (npages && old.npages && npages != old.npages)
-		goto out;
-	if (!npages && !old.npages)
+	if (npages) {
+		if (!old.npages)
+			change = KVM_MR_CREATE;
+		else { /* Modify an existing slot. */
+			if ((mem->userspace_addr != old.userspace_addr) ||
+			    (npages != old.npages) ||
+			    ((new.flags ^ old.flags) & KVM_MEM_READONLY))
+				goto out;
+
+			if (base_gfn != old.base_gfn)
+				change = KVM_MR_MOVE;
+			else if (new.flags != old.flags)
+				change = KVM_MR_FLAGS_ONLY;
+			else { /* Nothing to change. */
+				r = 0;
+				goto out;
+			}
+		}
+	} else if (old.npages) {
+		change = KVM_MR_DELETE;
+	} else /* Modify a non-existent slot: disallowed. */
 		goto out;
 
-	if ((npages && !old.npages) || (base_gfn != old.base_gfn)) {
+	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		/* Check for overlaps */
 		r = -EEXIST;
 		kvm_for_each_memslot(slot, kvm->memslots) {
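The new classification reduces to a small decision table; everything outside it (resizing a live slot, changing its userspace_addr, or toggling KVM_MEM_READONLY in place) now fails early with -EINVAL before any state is touched:

	npages   old.npages   what differs        resulting change
	> 0      == 0         n/a                 KVM_MR_CREATE
	> 0      > 0          base_gfn            KVM_MR_MOVE
	> 0      > 0          other flags only    KVM_MR_FLAGS_ONLY
	> 0      > 0          nothing             no-op, return 0
	== 0     > 0          n/a                 KVM_MR_DELETE
	== 0     == 0         n/a                 -EINVAL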
@@ -800,20 +838,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new.dirty_bitmap = NULL;
 
 	r = -ENOMEM;
-
-	/*
-	 * Allocate if a slot is being created. If modifying a slot,
-	 * the userspace_addr cannot change.
-	 */
-	if (!old.npages) {
+	if (change == KVM_MR_CREATE) {
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 
 		if (kvm_arch_create_memslot(&new, npages))
 			goto out_free;
-	} else if (npages && mem->userspace_addr != old.userspace_addr) {
-		r = -EINVAL;
-		goto out_free;
 	}
 
 	/* Allocate page dirty bitmap if needed */
@@ -822,7 +852,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
-	if (!npages || base_gfn != old.base_gfn) {
+	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
 		r = -ENOMEM;
 		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 				GFP_KERNEL);
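The kmemdup() is the first half of the copy-and-publish scheme this file uses for memslots: readers walk the array under SRCU, so DELETE and MOVE build a complete copy, mark the victim slot invalid in that copy, and swap the pointer rather than editing the live array. Roughly, assuming the surrounding code continues as in this era of the file (a sketch, not the literal continuation of the hunk):

	slot = id_to_memslot(slots, mem->slot);
	slot->flags |= KVM_MEMSLOT_INVALID;	/* hide the slot from new faults */

	old_memslots = install_new_memslots(kvm, slots, NULL);
	/* install_new_memslots() publishes the copy and waits out SRCU
	 * readers of the old array before handing it back for cleanup. */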
@@ -863,15 +893,23 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		goto out_free;
 	}
 
-	/* map new memory slot into the iommu */
-	if (npages) {
+	/*
+	 * IOMMU mapping:  New slots need to be mapped.  Old slots need to be
+	 * un-mapped and re-mapped if their base changes.  Since base change
+	 * unmapping is handled above with slot deletion, mapping alone is
+	 * needed here.  Anything else the iommu might care about for existing
+	 * slots (size changes, userspace addr changes and read-only flag
+	 * changes) is disallowed above, so any other attribute changes getting
+	 * here can be skipped.
+	 */
+	if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_slots;
 	}
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
-	if (!npages) {
+	if (change == KVM_MR_DELETE) {
 		new.dirty_bitmap = NULL;
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
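Condensed, the rule the new comment spells out:

	/*
	 * change              IOMMU work needed here
	 * KVM_MR_CREATE       map the new slot
	 * KVM_MR_MOVE         map only (the old range was already unmapped
	 *                     through the deletion/invalidation path above)
	 * KVM_MR_DELETE       none (unmap happened with the memslot swap)
	 * KVM_MR_FLAGS_ONLY   none (no attribute the IOMMU sees can change)
	 */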
@@ -1669,6 +1707,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct pid *pid;
 	struct task_struct *task = NULL;
+	int ret = 0;
 
 	rcu_read_lock();
 	pid = rcu_dereference(target->pid);
@@ -1676,17 +1715,15 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
 		task = get_pid_task(target->pid, PIDTYPE_PID);
 	rcu_read_unlock();
 	if (!task)
-		return false;
+		return ret;
 	if (task->flags & PF_VCPU) {
 		put_task_struct(task);
-		return false;
-	}
-	if (yield_to(task, 1)) {
-		put_task_struct(task);
-		return true;
+		return ret;
 	}
+	ret = yield_to(task, 1);
 	put_task_struct(task);
-	return false;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
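The point of routing every exit through ret is that yield_to() is no longer treated as boolean here: at this point in the scheduler it returns 1 when it actually yielded, 0 when the target could not be yielded to, and a negative value (-ESRCH) when source and target runqueues each hold a single task, i.e. the undercommit case. Sketched as consumed by kvm_vcpu_on_spin() below:

	/*
	 * kvm_vcpu_yield_to() return contract (sketch):
	 *    1  -> yielded; record this vCPU and stop scanning
	 *    0  -> target not preemptible; try the next candidate
	 *  < 0  -> e.g. -ESRCH (undercommit); burn one retry and
	 *          bail out early once the budget is exhausted
	 */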
@@ -1727,12 +1764,14 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 	return eligible;
 }
 #endif
+
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
 	int yielded = 0;
+	int try = 3;
 	int pass;
 	int i;
 
@@ -1744,7 +1783,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 	 * VCPU is holding the lock that we need and will release it.
 	 * We approximate round-robin by starting at the last boosted VCPU.
 	 */
-	for (pass = 0; pass < 2 && !yielded; pass++) {
+	for (pass = 0; pass < 2 && !yielded && try; pass++) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (!pass && i <= last_boosted_vcpu) {
 				i = last_boosted_vcpu;
@@ -1757,10 +1796,15 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 				continue;
-			if (kvm_vcpu_yield_to(vcpu)) {
+
+			yielded = kvm_vcpu_yield_to(vcpu);
+			if (yielded > 0) {
 				kvm->last_boosted_vcpu = i;
-				yielded = 1;
 				break;
+			} else if (yielded < 0) {
+				try--;
+				if (!try)
+					break;
 			}
 		}
 	}
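Taken together, the try budget keeps an undercommitted host from wasting time in the double loop: after three hard failures from yield_to() the spinning vCPU gives up instead of scanning every candidate twice. A standalone model of the control flow (a simplified stand-in, not the kernel code; fake_yield_to() mimics the 1/0/negative contract):

	#include <stdio.h>

	/* Stand-in for kvm_vcpu_yield_to(): fails twice, then succeeds. */
	static int fake_yield_to(int i)
	{
		return (i == 2) ? 1 : -1;
	}

	int main(void)
	{
		int try = 3;		/* tolerated hard failures */
		int yielded = 0;

		for (int pass = 0; pass < 2 && !yielded && try; pass++) {
			for (int i = 0; i < 8; i++) {
				yielded = fake_yield_to(i);
				if (yielded > 0) {
					printf("boosted vcpu %d\n", i);
					break;
				} else if (yielded < 0 && !--try) {
					break;	/* undercommit: stop early */
				}
			}
		}
		return 0;
	}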