 arch/arm/include/asm/kvm_mmu.h   | 16 ++++++++++++++++
 arch/arm64/include/asm/kvm_mmu.h | 16 ++++++++++++++++
 virt/kvm/arm/vgic/vgic-its.c     | 15 ++++++++-------
 3 files changed, 40 insertions(+), 7 deletions(-)
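In short: kvm_read_guest() expects to run inside a kvm->srcu read-side critical section, but, as the comment in the new helper notes, these VGIC/ITS paths are not in that critical section most of the time. The patch therefore adds a kvm_read_guest_lock() wrapper to both the arm and arm64 kvm_mmu.h headers, which takes the SRCU read lock just for the duration of the copy, and converts the four ITS emulation call sites over to it.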
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..f675162663f0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 082110993647..6128992c2ded 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 51a80b600632..7cb060e01a76 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -281,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
@@ -444,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		 * this very same byte in the last iteration. Reuse that.
 		 */
 		if (byte_offset != last_byte_offset) {
-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-					     &pendmask, 1);
+			ret = kvm_read_guest_lock(vcpu->kvm,
+						  pendbase + byte_offset,
+						  &pendmask, 1);
 			if (ret) {
 				kfree(intids);
 				return ret;
@@ -789,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1370,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going
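
For reference, a minimal sketch of how a caller would use the new helper. The function below is hypothetical (it is not part of this patch) and is modelled on the vgic_its_process_commands() conversion above; only kvm_read_guest_lock() itself comes from the change:

/*
 * Hypothetical caller, for illustration only: read one ITS command
 * from the guest's command queue. Paths like this can run from a VM
 * ioctl rather than from a vcpu thread, i.e. outside the kvm->srcu
 * read-side critical section, which is exactly the case the new
 * wrapper handles.
 */
static int read_one_its_command(struct kvm *kvm, gpa_t cbaser, u32 creadr)
{
	u64 cmd_buf[4];		/* one 32-byte ITS command */

	/* Enters and leaves kvm->srcu around the copy from guest memory. */
	return kvm_read_guest_lock(kvm, cbaser + creadr, cmd_buf,
				   sizeof(cmd_buf));
}

The alternative would have been to open-code the SRCU lock/unlock in every caller; wrapping it once keeps the lock scope minimal (the data is copied out of the guest page before the lock is dropped again) and leaves the call sites nearly identical to the original kvm_read_guest() calls.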
