 Documentation/virtual/kvm/cpuid.txt               |  6
 arch/arm/include/asm/kvm_mmu.h                    | 16
 arch/arm64/include/asm/kvm_mmu.h                  | 16
 arch/s390/kvm/vsie.c                              |  2
 arch/x86/include/uapi/asm/kvm_para.h              |  2
 arch/x86/kernel/kvm.c                             |  8
 arch/x86/kvm/hyperv.c                             |  6
 arch/x86/kvm/vmx.c                                | 28
 arch/x86/kvm/x86.c                                | 26
 include/linux/kvm_host.h                          |  8
 tools/testing/selftests/kvm/Makefile              |  2
 tools/testing/selftests/kvm/include/test_util.h   |  1
 tools/testing/selftests/kvm/lib/kvm_util.c        | 16
 tools/testing/selftests/kvm/sync_regs_test.c      | 40
 tools/testing/selftests/kvm/vmx_tsc_adjust_test.c |  4
 virt/kvm/arm/vgic/vgic-debug.c                    |  5
 virt/kvm/arm/vgic/vgic-its.c                      | 34
 virt/kvm/arm/vgic/vgic-v3.c                       |  4
 virt/kvm/arm/vgic/vgic.c                          | 22
 19 files changed, 158 insertions(+), 88 deletions(-)
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index d4f33eb805dd..ab022dcd0911 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 
 flag                               || value || meaning
 ==================================================================================
-KVM_HINTS_DEDICATED                ||     0 || guest checks this feature bit to
-                                   ||       || determine if there is vCPU pinning
-                                   ||       || and there is no vCPU over-commitment,
+KVM_HINTS_REALTIME                 ||     0 || guest checks this feature bit to
+                                   ||       || determine that vCPUs are never
+                                   ||       || preempted for an unlimited time,
                                    ||       || allowing optimizations
 ----------------------------------------------------------------------------------
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..f675162663f0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 082110993647..6128992c2ded 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+				      gpa_t gpa, void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_read_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
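[ Note: the two kvm_read_guest_lock() helpers above wrap kvm_read_guest(),
  which must run inside a kvm->srcu read-side critical section because it
  dereferences the SRCU-protected memslot array. The vGIC/ITS table walkers
  converted later in this series run outside the vcpu-run path, where that
  lock is not already held. A minimal caller-side sketch; the function name
  and table address below are illustrative, not from this patch:

        static int walk_guest_table(struct kvm *kvm, gpa_t table_gpa)
        {
                u64 entry;
                int ret;

                /* kvm_read_guest_lock() enters and leaves kvm->srcu itself */
                ret = kvm_read_guest_lock(kvm, table_gpa, &entry, sizeof(entry));
                if (ret)
                        return ret;     /* e.g. guest programmed a bad address */

                /* ... decode and act on the entry ... */
                return 0;
        }
]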
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 8961e3970901..969882b54266 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
 	if (gpa && (scb_s->ecb & ECB_TE)) {
-		if (!(gpa & ~0x1fffU)) {
+		if (!(gpa & ~0x1fffUL)) {
 			rc = set_validity_icpt(scb_s, 0x0080U);
 			goto unpin;
 		}
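[ Note: the two-character vsie.c fix is worth spelling out. ~0x1fffU is an
  unsigned int; when it meets the 64-bit gpa it zero-extends, so the test
  silently masks off the upper 32 bits of the address. A standalone
  demonstration (ordinary user-space C, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                unsigned long gpa = 0x100000000UL;  /* valid address above 4 GiB */

                /* 32-bit mask: ~0x1fffU == 0xffffe000, zero-extended */
                printf("buggy: %d\n", !(gpa & ~0x1fffU));   /* 1: falsely "low" */
                /* 64-bit mask: ~0x1fffUL == 0xffffffffffffe000 */
                printf("fixed: %d\n", !(gpa & ~0x1fffUL));  /* 0: correct */
                return 0;
        }
]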
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 4c851ebb3ceb..0ede697c3961 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -29,7 +29,7 @@
 #define KVM_FEATURE_PV_TLB_FLUSH	9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT	10
 
-#define KVM_HINTS_DEDICATED		0
+#define KVM_HINTS_REALTIME		0
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7867417cfaff..5b2300b818af 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
 	native_smp_prepare_cpus(max_cpus);
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		static_branch_disable(&virt_spin_lock_key);
 }
 
@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
 	}
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
 		pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
 
@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
 	int cpu;
 
 	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
 		for_each_possible_cpu(cpu) {
 			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
 		return;
 
 	__pv_init_lock_hash();
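[ Note: for reference, the renamed hint is consumed by the guest through
  CPUID leaf KVM_CPUID_FEATURES, with hints reported in EDX (features live
  in EAX). A simplified user-space-flavored sketch of what
  kvm_para_has_hint() boils down to -- the hypervisor-signature check and
  non-default CPUID base handling are omitted, so treat this as
  illustrative only:

        #include <cpuid.h>      /* __cpuid() macro, GCC/clang */
        #include <stdbool.h>

        #define KVM_CPUID_FEATURES  0x40000001  /* assumes default KVM base */
        #define KVM_HINTS_REALTIME  0

        static bool guest_has_realtime_hint(void)
        {
                unsigned int eax, ebx, ecx, edx;

                /* hints are in EDX of the features leaf, not EAX */
                __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
                return edx & (1u << KVM_HINTS_REALTIME);
        }
]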
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 98618e397342..5708e951a5c6 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 
 	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
@@ -1296,8 +1296,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
 	if (param & ~KVM_HYPERV_CONN_ID_MASK)
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-	/* conn_to_evt is protected by vcpu->kvm->srcu */
+	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
+	rcu_read_lock();
 	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
+	rcu_read_unlock();
 	if (!eventfd)
 		return HV_STATUS_INVALID_PORT_ID;
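[ Note: the second hyperv.c hunk narrows the protection claim: SRCU keeps
  the looked-up eventfd object alive, but idr_find() walks radix-tree nodes
  that are freed under plain RCU, so the lookup itself needs
  rcu_read_lock(). The shape of the pattern in isolation (hypothetical
  helper, not code from this patch):

        #include <linux/eventfd.h>
        #include <linux/idr.h>
        #include <linux/rcupdate.h>

        static struct eventfd_ctx *find_conn_eventfd(struct idr *conn_idr,
                                                     u32 conn_id)
        {
                struct eventfd_ctx *eventfd;

                rcu_read_lock();        /* protects the IDR's internal nodes */
                eventfd = idr_find(conn_idr, conn_id);
                rcu_read_unlock();

                /* object lifetime must be guaranteed separately, e.g. SRCU */
                return eventfd;
        }
]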
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c7668806163f..3f1696570b41 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
 		SECONDARY_EXEC_ENABLE_VMFUNC;
 }
 
+static bool vmx_umip_emulated(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_DESC;
+}
+
 static inline bool report_flexpriority(void)
 {
 	return flexpriority_enabled;
@@ -4761,14 +4767,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	else
 		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
 
-	if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
-		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-			      SECONDARY_EXEC_DESC);
-		hw_cr4 &= ~X86_CR4_UMIP;
-	} else if (!is_guest_mode(vcpu) ||
-		   !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
-		vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
-				SECONDARY_EXEC_DESC);
+	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+		if (cr4 & X86_CR4_UMIP) {
+			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+				SECONDARY_EXEC_DESC);
+			hw_cr4 &= ~X86_CR4_UMIP;
+		} else if (!is_guest_mode(vcpu) ||
+			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+					SECONDARY_EXEC_DESC);
+	}
 
 	if (cr4 & X86_CR4_VMXE) {
 		/*
@@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
 		SECONDARY_EXEC_XSAVES;
 }
 
-static bool vmx_umip_emulated(void)
-{
-	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_DESC;
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
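[ Note: background for the vmx.c change: on hardware without UMIP, KVM can
  emulate it by enabling descriptor-table exiting (SECONDARY_EXEC_DESC) and
  hiding CR4.UMIP from the real CR4; the fix stops vmx_set_cr4() from
  touching that exec control when emulation isn't available, and moves
  vmx_umip_emulated() above its new caller. From inside a guest the effect
  can be probed from user space; a rough sketch (x86-64, GCC inline asm,
  the #GP arrives as SIGSEGV). STR is used rather than SGDT because some
  kernels transparently emulate SGDT for user space when UMIP is active:

        #include <setjmp.h>
        #include <signal.h>
        #include <stdint.h>
        #include <stdio.h>

        static sigjmp_buf env;

        static void on_sigsegv(int sig)
        {
                (void)sig;
                siglongjmp(env, 1);
        }

        int main(void)
        {
                uint16_t tr;

                signal(SIGSEGV, on_sigsegv);
                if (sigsetjmp(env, 1)) {
                        puts("STR faulted: UMIP (native or emulated) active");
                        return 0;
                }
                asm volatile("str %0" : "=m" (tr));
                puts("STR succeeded: UMIP not enforced");
                return 0;
        }
]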
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51ecd381793b..59371de5d722 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 static bool __read_mostly report_ignored_msrs = true;
 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
 
-unsigned int min_timer_period_us = 500;
+unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
 
 static bool __read_mostly kvmclock_periodic_sync = true;
@@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 #ifdef CONFIG_X86_64
-	cr3 &= ~CR3_PCID_INVD;
+	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+	if (pcid_enabled)
+		cr3 &= ~CR3_PCID_INVD;
 #endif
 
 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
@@ -6671,12 +6674,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit, r;
-
-	r = kvm_skip_emulated_instruction(vcpu);
+	int op_64_bit;
 
-	if (kvm_hv_hypercall_enabled(vcpu->kvm))
-		return kvm_hv_hypercall(vcpu);
+	if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
+		if (!kvm_hv_hypercall(vcpu))
+			return 0;
+		goto out;
+	}
 
 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6697,7 +6701,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
 		ret = -KVM_EPERM;
-		goto out;
+		goto out_error;
 	}
 
 	switch (nr) {
@@ -6717,12 +6721,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-out:
+out_error:
 	if (!op_64_bit)
 		ret = (u32)ret;
 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+
+out:
 	++vcpu->stat.hypercalls;
-	return r;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 
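[ Note: the kvm_emulate_hypercall() rework skips the guest instruction
  after handling rather than before, so a hypercall that has to exit to
  userspace (the Hyper-V path returning 0) no longer advances RIP
  prematurely, and a failing hypercall is reported at the hypercall
  instruction itself. For reference, the guest side of the ABI these
  handlers decode -- a minimal sketch mirroring kvm_hypercall1() in
  kvm_para.h; on AMD the kernel patches vmcall to vmmcall at runtime:

        /* nr in RAX, args in RBX/RCX/RDX/RSI, result back in RAX */
        static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
        {
                long ret;

                asm volatile("vmcall"
                             : "=a" (ret)
                             : "a" (nr), "b" (p1)
                             : "memory");
                return ret;
        }
]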
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6930c63126c7..6d6e79c59e68 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 
-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
 
 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
 int kvm_set_irq_routing(struct kvm *kvm,
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2ddcc96ae456..d9d00319b07c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D)
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 7ab98e41324f..ac53730b30aa 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include "kselftest.h"
 
 ssize_t test_write(int fd, const void *buf, size_t count);
 ssize_t test_read(int fd, void *buf, size_t count);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 2cedfda181d4..37e2a787d2fc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -50,8 +50,8 @@ int kvm_check_cap(long cap)
 	int kvm_fd;
 
 	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
 	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
@@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 
 	vm->mode = mode;
 	kvm_fd = open(KVM_DEV_PATH, perm);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	/* Create VM. */
 	vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL);
@@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
 
 	cpuid = allocate_kvm_cpuid2();
 	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-		KVM_DEV_PATH, kvm_fd, errno);
+	if (kvm_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
 	TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
@@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void)
 	int dev_fd, ret;
 
 	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
-	TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i",
-		__func__, KVM_DEV_PATH, dev_fd, errno);
+	if (dev_fd < 0)
+		exit(KSFT_SKIP);
 
 	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 	TEST_ASSERT(ret >= sizeof(struct kvm_run),
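[ Note: all four kvm_util.c hunks replace a hard assert with the kselftest
  skip convention: exiting with KSFT_SKIP (4, defined in kselftest.h) tells
  the harness the environment lacks a prerequisite -- here, a usable
  /dev/kvm -- rather than that the test failed. Boiled down to a
  standalone illustration:

        #include <fcntl.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <unistd.h>
        #include "kselftest.h"          /* defines KSFT_SKIP == 4 */

        int main(void)
        {
                int fd = open("/dev/kvm", O_RDONLY);

                if (fd < 0) {
                        fprintf(stderr, "/dev/kvm not available, skipping\n");
                        exit(KSFT_SKIP);  /* harness reports SKIP, not FAIL */
                }
                /* ... actual test body would go here ... */
                close(fd);
                return 0;               /* KSFT_PASS */
        }
]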
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/sync_regs_test.c
index 428e9473f5e2..eae1ece3c31b 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/sync_regs_test.c
@@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
 {
 }
 
+#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
+#define INVALID_SYNC_FIELD 0x80000000
+
 int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
@@ -98,9 +101,14 @@ int main(int argc, char *argv[])
 	setbuf(stdout, NULL);
 
 	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
-	TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS,
-		    "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n",
-		    cap, KVM_SYNC_X86_VALID_FIELDS);
+	if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
+		fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
+		exit(KSFT_SKIP);
+	}
+	if ((cap & INVALID_SYNC_FIELD) != 0) {
+		fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
+		exit(KSFT_SKIP);
+	}
 
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, guest_code);
@@ -108,7 +116,14 @@ int main(int argc, char *argv[])
 	run = vcpu_state(vm, VCPU_ID);
 
 	/* Request reading invalid register set from VCPU. */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+	run->kvm_valid_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+
+	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
@@ -116,7 +131,14 @@ int main(int argc, char *argv[])
 	vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
 
 	/* Request setting invalid register set into VCPU. */
-	run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
+	rv = _vcpu_run(vm, VCPU_ID);
+	TEST_ASSERT(rv < 0 && errno == EINVAL,
+		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+		    rv);
+	vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+
+	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(rv < 0 && errno == EINVAL,
 		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
@@ -125,7 +147,7 @@ int main(int argc, char *argv[])
 
 	/* Request and verify all valid register sets. */
 	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 		    "Unexpected exit reason: %u (%s),\n",
@@ -146,7 +168,7 @@ int main(int argc, char *argv[])
 	run->s.regs.sregs.apic_base = 1 << 11;
 	/* TODO run->s.regs.events.XYZ = ABC; */
 
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -172,7 +194,7 @@ int main(int argc, char *argv[])
 	/* Clear kvm_dirty_regs bits, verify new s.regs values are
 	 * overwritten with existing guest values.
 	 */
-	run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_valid_regs = TEST_SYNC_FIELDS;
 	run->kvm_dirty_regs = 0;
 	run->s.regs.regs.r11 = 0xDEADBEEF;
 	rv = _vcpu_run(vm, VCPU_ID);
@@ -211,7 +233,7 @@ int main(int argc, char *argv[])
 	 * with kvm_sync_regs values.
 	 */
 	run->kvm_valid_regs = 0;
-	run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS;
+	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
 	run->s.regs.regs.r11 = 0xBBBB;
 	rv = _vcpu_run(vm, VCPU_ID);
 	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
index 8f7f62093add..aaa633263b2c 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
@@ -189,8 +189,8 @@ int main(int argc, char *argv[])
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
 	if (!(entry->ecx & CPUID_VMX)) {
-		printf("nested VMX not enabled, skipping test");
-		return 0;
+		fprintf(stderr, "nested VMX not enabled, skipping test\n");
+		exit(KSFT_SKIP);
 	}
 
 	vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 10b38178cff2..4ffc0b5e6105 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
 	struct vgic_irq *irq;
 	struct kvm_vcpu *vcpu = NULL;
+	unsigned long flags;
 
 	if (iter->dist_id == 0) {
 		print_dist_state(s, &kvm->arch.vgic);
@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
 	}
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return 0;
 }
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index a8f07243aa9f..4ed79c939fb4 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+	unsigned long flags;
 	int ret;
 
 	/* In this case there is no put, since we keep the reference. */
@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->intid = intid;
 	irq->target_vcpu = vcpu;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	int ret;
 	unsigned long flags;
 
-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-			     &prop, 1);
+	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+				  &prop, 1);
 
 	if (ret)
 		return ret;
@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
+	unsigned long flags;
 	u32 *intids;
 	int irq_count, i = 0;
 
@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;
@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
 	int ret = 0;
+	unsigned long flags;
 
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		struct its_vlpi_map map;
@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	 * this very same byte in the last iteration. Reuse that.
 	 */
 	if (byte_offset != last_byte_offset) {
-		ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-				     &pendmask, 1);
+		ret = kvm_read_guest_lock(vcpu->kvm,
+					  pendbase + byte_offset,
+					  &pendmask, 1);
 		if (ret) {
 			kfree(intids);
 			return ret;
@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		return false;
 
 	/* Each 1st level entry is represented by a 64-bit value. */
-	if (kvm_read_guest(its->dev->kvm,
+	if (kvm_read_guest_lock(its->dev->kvm,
 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
 			   &indirect_ptr, sizeof(indirect_ptr)))
 		return false;
@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
 	cbaser = CBASER_ADDRESS(its->cbaser);
 
 	while (its->cwriter != its->creadr) {
-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-					 cmd_buf, ITS_CMD_SIZE);
+		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+					      cmd_buf, ITS_CMD_SIZE);
 		/*
 		 * If kvm_read_guest() fails, this could be due to the guest
 		 * programming a bogus value in CBASER or something else going
@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
 	int next_offset;
 	size_t byte_offset;
 
-	ret = kvm_read_guest(kvm, gpa, entry, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
 	if (ret)
 		return ret;
 
@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	int ret;
 
 	BUG_ON(esz > sizeof(val));
-	ret = kvm_read_guest(kvm, gpa, &val, esz);
+	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
 	if (ret)
 		return ret;
 	val = le64_to_cpu(val);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c7423f3768e5..bdcf8e7a6161 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -344,7 +344,7 @@ retry:
 	bit_nr = irq->intid % BITS_PER_BYTE;
 	ptr = pendbase + byte_offset;
 
-	ret = kvm_read_guest(kvm, ptr, &val, 1);
+	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 	if (ret)
 		return ret;
 
@@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	ptr = pendbase + byte_offset;
 
 	if (byte_offset != last_byte_offset) {
-		ret = kvm_read_guest(kvm, ptr, &val, 1);
+		ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 		last_byte_offset = byte_offset;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 97bfba8d9a59..33c8325c8f35 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * kvm->lock (mutex)
  *   its->cmd_lock (mutex)
  *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *         kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct vgic_irq *irq = NULL;
+	unsigned long flags;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 		irq = NULL;
 
 out_unlock:
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
 }
@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	unsigned long flags;
 
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	spin_lock(&dist->lpi_list_lock);
+	spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock(&dist->lpi_list_lock);
+		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock(&dist->lpi_list_lock);
+	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	kfree(irq);
 }
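[ Note: the rule the updated vgic.c comment states -- and that every
  spin_lock() to spin_lock_irqsave() conversion in this series enforces --
  is the standard one: if a lock can be taken from hard-IRQ context (here,
  the ap_list_lock from the timer interrupt handler), every acquisition
  must disable local interrupts, or the handler can interrupt a holder on
  the same CPU and deadlock on the held lock. The generic shape,
  illustrative rather than vGIC code:

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(demo_lock);

        /* called from both process context and an interrupt handler */
        static void touch_shared_state(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&demo_lock, flags);   /* masks local IRQs */
                /* ... critical section safe against local IRQ recursion ... */
                spin_unlock_irqrestore(&demo_lock, flags); /* restores state */
        }
]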