diff options
author | Christian Borntraeger <borntraeger@de.ibm.com> | 2017-12-22 04:54:20 -0500 |
---|---|---|
committer | Christian Borntraeger <borntraeger@de.ibm.com> | 2018-01-24 09:22:51 -0500 |
commit | 1de1ea7efeb9e8543212210e34518b4049ccd285 (patch) | |
tree | 0057ad8dc61f521d48a02caa2708e2e7a76cb6a7 | |
parent | c2cf265d860882b51a200e4a7553c17827f2b730 (diff) |
KVM: s390: add proper locking for CMMA migration bitmap
Some parts of the cmma migration bitmap are already protected
with the kvm->lock (e.g. the migration start). On the other
hand the read of the cmma bits is not protected against a
concurrent free, neither is the emulation of the ESSA instruction.
Let's extend the locking to all related ioctls by using
the slots lock for
- kvm_s390_vm_start_migration
- kvm_s390_vm_stop_migration
- kvm_s390_set_cmma_bits
- kvm_s390_get_cmma_bits
In addition to that, we use synchronize_srcu before freeing
the migration structure as all users hold kvm->srcu for read.
(e.g. the ESSA handler).
Reported-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: stable@vger.kernel.org # 4.13+
Fixes: 190df4a212a7 (KVM: s390: CMMA tracking, ESSA emulation, migration mode)
Reviewed-by: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
-rw-r--r-- | arch/s390/kvm/kvm-s390.c | 18 |
1 files changed, 11 insertions, 7 deletions
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index abcd24fdde3f..52880e980a33 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -766,7 +766,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) | |||
766 | 766 | ||
767 | /* | 767 | /* |
768 | * Must be called with kvm->srcu held to avoid races on memslots, and with | 768 | * Must be called with kvm->srcu held to avoid races on memslots, and with |
769 | * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration. | 769 | * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration. |
770 | */ | 770 | */ |
771 | static int kvm_s390_vm_start_migration(struct kvm *kvm) | 771 | static int kvm_s390_vm_start_migration(struct kvm *kvm) |
772 | { | 772 | { |
@@ -822,7 +822,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) | |||
822 | } | 822 | } |
823 | 823 | ||
824 | /* | 824 | /* |
825 | * Must be called with kvm->lock to avoid races with ourselves and | 825 | * Must be called with kvm->slots_lock to avoid races with ourselves and |
826 | * kvm_s390_vm_start_migration. | 826 | * kvm_s390_vm_start_migration. |
827 | */ | 827 | */ |
828 | static int kvm_s390_vm_stop_migration(struct kvm *kvm) | 828 | static int kvm_s390_vm_stop_migration(struct kvm *kvm) |
@@ -837,6 +837,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm) | |||
837 | 837 | ||
838 | if (kvm->arch.use_cmma) { | 838 | if (kvm->arch.use_cmma) { |
839 | kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); | 839 | kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); |
840 | /* We have to wait for the essa emulation to finish */ | ||
841 | synchronize_srcu(&kvm->srcu); | ||
840 | vfree(mgs->pgste_bitmap); | 842 | vfree(mgs->pgste_bitmap); |
841 | } | 843 | } |
842 | kfree(mgs); | 844 | kfree(mgs); |
@@ -846,14 +848,12 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm) | |||
846 | static int kvm_s390_vm_set_migration(struct kvm *kvm, | 848 | static int kvm_s390_vm_set_migration(struct kvm *kvm, |
847 | struct kvm_device_attr *attr) | 849 | struct kvm_device_attr *attr) |
848 | { | 850 | { |
849 | int idx, res = -ENXIO; | 851 | int res = -ENXIO; |
850 | 852 | ||
851 | mutex_lock(&kvm->lock); | 853 | mutex_lock(&kvm->slots_lock); |
852 | switch (attr->attr) { | 854 | switch (attr->attr) { |
853 | case KVM_S390_VM_MIGRATION_START: | 855 | case KVM_S390_VM_MIGRATION_START: |
854 | idx = srcu_read_lock(&kvm->srcu); | ||
855 | res = kvm_s390_vm_start_migration(kvm); | 856 | res = kvm_s390_vm_start_migration(kvm); |
856 | srcu_read_unlock(&kvm->srcu, idx); | ||
857 | break; | 857 | break; |
858 | case KVM_S390_VM_MIGRATION_STOP: | 858 | case KVM_S390_VM_MIGRATION_STOP: |
859 | res = kvm_s390_vm_stop_migration(kvm); | 859 | res = kvm_s390_vm_stop_migration(kvm); |
@@ -861,7 +861,7 @@ static int kvm_s390_vm_set_migration(struct kvm *kvm, | |||
861 | default: | 861 | default: |
862 | break; | 862 | break; |
863 | } | 863 | } |
864 | mutex_unlock(&kvm->lock); | 864 | mutex_unlock(&kvm->slots_lock); |
865 | 865 | ||
866 | return res; | 866 | return res; |
867 | } | 867 | } |
@@ -1751,7 +1751,9 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1751 | r = -EFAULT; | 1751 | r = -EFAULT; |
1752 | if (copy_from_user(&args, argp, sizeof(args))) | 1752 | if (copy_from_user(&args, argp, sizeof(args))) |
1753 | break; | 1753 | break; |
1754 | mutex_lock(&kvm->slots_lock); | ||
1754 | r = kvm_s390_get_cmma_bits(kvm, &args); | 1755 | r = kvm_s390_get_cmma_bits(kvm, &args); |
1756 | mutex_unlock(&kvm->slots_lock); | ||
1755 | if (!r) { | 1757 | if (!r) { |
1756 | r = copy_to_user(argp, &args, sizeof(args)); | 1758 | r = copy_to_user(argp, &args, sizeof(args)); |
1757 | if (r) | 1759 | if (r) |
@@ -1765,7 +1767,9 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
1765 | r = -EFAULT; | 1767 | r = -EFAULT; |
1766 | if (copy_from_user(&args, argp, sizeof(args))) | 1768 | if (copy_from_user(&args, argp, sizeof(args))) |
1767 | break; | 1769 | break; |
1770 | mutex_lock(&kvm->slots_lock); | ||
1768 | r = kvm_s390_set_cmma_bits(kvm, &args); | 1771 | r = kvm_s390_set_cmma_bits(kvm, &args); |
1772 | mutex_unlock(&kvm->slots_lock); | ||
1769 | break; | 1773 | break; |
1770 | } | 1774 | } |
1771 | default: | 1775 | default: |