| author | Paolo Bonzini <pbonzini@redhat.com> | 2014-09-27 05:03:33 -0400 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-09-27 05:03:33 -0400 |
| commit | e77d99d4a4ec761ad061f1ec890c71040a92efe3 | |
| tree | aea6fe2ee5bb6e699045a3629b48208f3e2a26b6 | |
| parent | bb0ca6acd466af55c95b7ce508f29e23a24cabd9 | |
| parent | 0496daa5cf99741ce8db82686b4c7446a37feabb | |
Merge tag 'kvm-arm-for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next
Changes for KVM for arm/arm64 for 3.18
This includes a bunch of changes:
- Support read-only memory slots on arm/arm64
- Various changes to fix Sparse warnings
- Correctly detect write vs. read Stage-2 faults
- Various VGIC cleanups and fixes
- Dynamic VGIC data structure sizing
- Fix SGI set_clear_pend offset bug
- Fix VTTBR_BADDR Mask
- Correctly report the FSC on Stage-2 faults
Conflicts:
virt/kvm/eventfd.c
[the same change arrived as two different patches; the kvm-arm version broke x86,
so the kvm tree's version is kept]
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | Documentation/virtual/kvm/devices/arm-vgic.txt | 10 |
| -rw-r--r-- | arch/arm/include/asm/kvm_emulate.h | 5 |
| -rw-r--r-- | arch/arm/include/asm/kvm_host.h | 2 |
| -rw-r--r-- | arch/arm/include/asm/kvm_mmu.h | 11 |
| -rw-r--r-- | arch/arm/include/uapi/asm/kvm.h | 2 |
| -rw-r--r-- | arch/arm/kvm/arm.c | 17 |
| -rw-r--r-- | arch/arm/kvm/coproc.c | 2 |
| -rw-r--r-- | arch/arm/kvm/guest.c | 2 |
| -rw-r--r-- | arch/arm/kvm/mmu.c | 40 |
| -rw-r--r-- | arch/arm64/include/asm/kvm_arm.h | 13 |
| -rw-r--r-- | arch/arm64/include/asm/kvm_emulate.h | 5 |
| -rw-r--r-- | arch/arm64/include/asm/kvm_host.h | 4 |
| -rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h | 18 |
| -rw-r--r-- | arch/arm64/include/uapi/asm/kvm.h | 2 |
| -rw-r--r-- | arch/arm64/kvm/guest.c | 2 |
| -rw-r--r-- | arch/arm64/kvm/sys_regs.c | 2 |
| -rw-r--r-- | include/kvm/arm_vgic.h | 112 |
| -rw-r--r-- | include/linux/kvm_host.h | 2 |
| -rw-r--r-- | virt/kvm/arm/vgic.c | 631 |
| -rw-r--r-- | virt/kvm/kvm_main.c | 11 |
20 files changed, 678 insertions, 215 deletions
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt index 7f4e91b1316b..df8b0c7540b6 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic.txt | |||
| @@ -71,3 +71,13 @@ Groups: | |||
| 71 | Errors: | 71 | Errors: |
| 72 | -ENODEV: Getting or setting this register is not yet supported | 72 | -ENODEV: Getting or setting this register is not yet supported |
| 73 | -EBUSY: One or more VCPUs are running | 73 | -EBUSY: One or more VCPUs are running |
| 74 | |||
| 75 | KVM_DEV_ARM_VGIC_GRP_NR_IRQS | ||
| 76 | Attributes: | ||
| 77 | A value describing the number of interrupts (SGI, PPI and SPI) for | ||
| 78 | this GIC instance, ranging from 64 to 1024, in increments of 32. | ||
| 79 | |||
| 80 | Errors: | ||
| 81 | -EINVAL: Value set is out of the expected range | ||
| 82 | -EBUSY: Value has already be set, or GIC has already been initialized | ||
| 83 | with default values. | ||
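For context, a minimal userspace sketch (not part of the patch) of how this attribute might be programmed: the group constant is the KVM_DEV_ARM_VGIC_GRP_NR_IRQS value added to the uapi headers further down, and `vgic_fd` is assumed to be the file descriptor returned by KVM_CREATE_DEVICE for the VGIC device.

```c
/* Hedged sketch: configure a VGIC with 128 IRQs before the first VCPU runs.
 * Assumes vgic_fd came from KVM_CREATE_DEVICE(KVM_DEV_TYPE_ARM_VGIC_V2). */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_set_nr_irqs(int vgic_fd, uint32_t nr_irqs)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
		.attr  = 0,
		.addr  = (uint64_t)(unsigned long)&nr_irqs,
	};

	/* nr_irqs must be a multiple of 32 in [64, 1024], per the text above. */
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```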
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 69b746955fca..b9db269c6e61 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
| @@ -149,6 +149,11 @@ static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) | |||
| 149 | 149 | ||
| 150 | static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) | 150 | static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) |
| 151 | { | 151 | { |
| 152 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu) | ||
| 156 | { | ||
| 152 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; | 157 | return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; |
| 153 | } | 158 | } |
| 154 | 159 | ||
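The hunk above splits the accessor in two: kvm_vcpu_trap_get_fault() now returns the full fault status code (for reporting), while the new kvm_vcpu_trap_get_fault_type() keeps the old behaviour of returning only the fault class used for dispatch (see the matching arch/arm/kvm/mmu.c change below). An illustrative, self-contained sketch of the distinction, where the mask and FSC values are assumptions about the HSR layout rather than copied from the kernel headers:

```c
/* Illustrative only: HSR_FSC/HSR_FSC_TYPE/FSC_* values below are assumptions;
 * the kernel's definitions in asm/kvm_arm.h are authoritative. */
#include <stdio.h>

#define HSR_FSC       0x3fUL   /* full fault status code, used for reporting      */
#define HSR_FSC_TYPE  0x3cUL   /* fault class only (translation, permission, ...) */
#define FSC_FAULT     0x04UL   /* translation fault */
#define FSC_PERM      0x0cUL   /* permission fault  */

static int handle_abort(unsigned long hsr)
{
	unsigned long fault_type = hsr & HSR_FSC_TYPE;  /* drives the dispatch decision */
	unsigned long fault_full = hsr & HSR_FSC;       /* precise code worth logging   */

	if (fault_type != FSC_FAULT && fault_type != FSC_PERM) {
		fprintf(stderr, "Unsupported FSC: %#lx\n", fault_full);
		return -1;
	}
	return 0;
}
```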
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 155497c2b4da..53036e21756b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
| @@ -43,7 +43,7 @@ | |||
| 43 | #include <kvm/arm_vgic.h> | 43 | #include <kvm/arm_vgic.h> |
| 44 | 44 | ||
| 45 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); | 45 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); |
| 46 | int kvm_target_cpu(void); | 46 | int __attribute_const__ kvm_target_cpu(void); |
| 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
| 48 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu); | 48 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu); |
| 49 | 49 | ||
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 5cc0b0f5f72f..3f688b458143 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
| @@ -78,17 +78,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) | |||
| 78 | flush_pmd_entry(pte); | 78 | flush_pmd_entry(pte); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static inline bool kvm_is_write_fault(unsigned long hsr) | ||
| 82 | { | ||
| 83 | unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; | ||
| 84 | if (hsr_ec == HSR_EC_IABT) | ||
| 85 | return false; | ||
| 86 | else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) | ||
| 87 | return false; | ||
| 88 | else | ||
| 89 | return true; | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline void kvm_clean_pgd(pgd_t *pgd) | 81 | static inline void kvm_clean_pgd(pgd_t *pgd) |
| 93 | { | 82 | { |
| 94 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); | 83 | clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); |
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index e6ebdd3471e5..09ee408c1a67 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #define __KVM_HAVE_GUEST_DEBUG | 26 | #define __KVM_HAVE_GUEST_DEBUG |
| 27 | #define __KVM_HAVE_IRQ_LINE | 27 | #define __KVM_HAVE_IRQ_LINE |
| 28 | #define __KVM_HAVE_READONLY_MEM | ||
| 28 | 29 | ||
| 29 | #define KVM_REG_SIZE(id) \ | 30 | #define KVM_REG_SIZE(id) \ |
| 30 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 31 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
| @@ -173,6 +174,7 @@ struct kvm_arch_memory_slot { | |||
| 173 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 174 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
| 174 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 | 175 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 |
| 175 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) | 176 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) |
| 177 | #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 | ||
| 176 | 178 | ||
| 177 | /* KVM_IRQ_LINE irq field index values */ | 179 | /* KVM_IRQ_LINE irq field index values */ |
| 178 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 180 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
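With __KVM_HAVE_READONLY_MEM advertised here (and KVM_CAP_READONLY_MEM enabled in arch/arm/kvm/arm.c below), userspace can register memslots with the KVM_MEM_READONLY flag. A hedged sketch of such a call, not taken from the patch; vm_fd, the slot index and the addresses are placeholders:

```c
/* Hedged sketch: map host memory into the guest as a read-only slot.
 * vm_fd, slot index and addresses are placeholders, not values from the patch. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int add_readonly_slot(int vm_fd, uint32_t slot, uint64_t gpa,
			     void *host_mem, uint64_t size)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot,
		.flags           = KVM_MEM_READONLY,   /* guest writes exit to userspace */
		.guest_phys_addr = gpa,
		.memory_size     = size,
		.userspace_addr  = (uint64_t)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
```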
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 005a7b5fd0aa..779605122f32 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
| @@ -82,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void) | |||
| 82 | /** | 82 | /** |
| 83 | * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. | 83 | * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. |
| 84 | */ | 84 | */ |
| 85 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void) | 85 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) |
| 86 | { | 86 | { |
| 87 | return &kvm_arm_running_vcpu; | 87 | return &kvm_arm_running_vcpu; |
| 88 | } | 88 | } |
| @@ -161,6 +161,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
| 161 | kvm->vcpus[i] = NULL; | 161 | kvm->vcpus[i] = NULL; |
| 162 | } | 162 | } |
| 163 | } | 163 | } |
| 164 | |||
| 165 | kvm_vgic_destroy(kvm); | ||
| 164 | } | 166 | } |
| 165 | 167 | ||
| 166 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | 168 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
| @@ -177,6 +179,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
| 177 | case KVM_CAP_ONE_REG: | 179 | case KVM_CAP_ONE_REG: |
| 178 | case KVM_CAP_ARM_PSCI: | 180 | case KVM_CAP_ARM_PSCI: |
| 179 | case KVM_CAP_ARM_PSCI_0_2: | 181 | case KVM_CAP_ARM_PSCI_0_2: |
| 182 | case KVM_CAP_READONLY_MEM: | ||
| 180 | r = 1; | 183 | r = 1; |
| 181 | break; | 184 | break; |
| 182 | case KVM_CAP_COALESCED_MMIO: | 185 | case KVM_CAP_COALESCED_MMIO: |
| @@ -242,6 +245,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | |||
| 242 | { | 245 | { |
| 243 | kvm_mmu_free_memory_caches(vcpu); | 246 | kvm_mmu_free_memory_caches(vcpu); |
| 244 | kvm_timer_vcpu_terminate(vcpu); | 247 | kvm_timer_vcpu_terminate(vcpu); |
| 248 | kvm_vgic_vcpu_destroy(vcpu); | ||
| 245 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 249 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
| 246 | } | 250 | } |
| 247 | 251 | ||
| @@ -257,16 +261,9 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
| 257 | 261 | ||
| 258 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 262 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
| 259 | { | 263 | { |
| 260 | int ret; | ||
| 261 | |||
| 262 | /* Force users to call KVM_ARM_VCPU_INIT */ | 264 | /* Force users to call KVM_ARM_VCPU_INIT */ |
| 263 | vcpu->arch.target = -1; | 265 | vcpu->arch.target = -1; |
| 264 | 266 | ||
| 265 | /* Set up VGIC */ | ||
| 266 | ret = kvm_vgic_vcpu_init(vcpu); | ||
| 267 | if (ret) | ||
| 268 | return ret; | ||
| 269 | |||
| 270 | /* Set up the timer */ | 267 | /* Set up the timer */ |
| 271 | kvm_timer_vcpu_init(vcpu); | 268 | kvm_timer_vcpu_init(vcpu); |
| 272 | 269 | ||
| @@ -413,9 +410,9 @@ static void update_vttbr(struct kvm *kvm) | |||
| 413 | 410 | ||
| 414 | /* update vttbr to be used with the new vmid */ | 411 | /* update vttbr to be used with the new vmid */ |
| 415 | pgd_phys = virt_to_phys(kvm->arch.pgd); | 412 | pgd_phys = virt_to_phys(kvm->arch.pgd); |
| 413 | BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); | ||
| 416 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; | 414 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; |
| 417 | kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; | 415 | kvm->arch.vttbr = pgd_phys | vmid; |
| 418 | kvm->arch.vttbr |= vmid; | ||
| 419 | 416 | ||
| 420 | spin_unlock(&kvm_vmid_lock); | 417 | spin_unlock(&kvm_vmid_lock); |
| 421 | } | 418 | } |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 37a0fe1bb9bb..7928dbdf2102 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
| @@ -791,7 +791,7 @@ static bool is_valid_cache(u32 val) | |||
| 791 | u32 level, ctype; | 791 | u32 level, ctype; |
| 792 | 792 | ||
| 793 | if (val >= CSSELR_MAX) | 793 | if (val >= CSSELR_MAX) |
| 794 | return -ENOENT; | 794 | return false; |
| 795 | 795 | ||
| 796 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | 796 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ |
| 797 | level = (val >> 1); | 797 | level = (val >> 1); |
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 813e49258690..cc0b78769bd8 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c | |||
| @@ -163,7 +163,7 @@ static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
| 163 | 163 | ||
| 164 | ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); | 164 | ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); |
| 165 | if (ret != 0) | 165 | if (ret != 0) |
| 166 | return ret; | 166 | return -EFAULT; |
| 167 | 167 | ||
| 168 | return kvm_arm_timer_set_reg(vcpu, reg->id, val); | 168 | return kvm_arm_timer_set_reg(vcpu, reg->id, val); |
| 169 | } | 169 | } |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 16e7994bf347..eea03069161b 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
| @@ -746,22 +746,29 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) | |||
| 746 | return false; | 746 | return false; |
| 747 | } | 747 | } |
| 748 | 748 | ||
| 749 | static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) | ||
| 750 | { | ||
| 751 | if (kvm_vcpu_trap_is_iabt(vcpu)) | ||
| 752 | return false; | ||
| 753 | |||
| 754 | return kvm_vcpu_dabt_iswrite(vcpu); | ||
| 755 | } | ||
| 756 | |||
| 749 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 757 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
| 750 | struct kvm_memory_slot *memslot, | 758 | struct kvm_memory_slot *memslot, unsigned long hva, |
| 751 | unsigned long fault_status) | 759 | unsigned long fault_status) |
| 752 | { | 760 | { |
| 753 | int ret; | 761 | int ret; |
| 754 | bool write_fault, writable, hugetlb = false, force_pte = false; | 762 | bool write_fault, writable, hugetlb = false, force_pte = false; |
| 755 | unsigned long mmu_seq; | 763 | unsigned long mmu_seq; |
| 756 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; | 764 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
| 757 | unsigned long hva = gfn_to_hva(vcpu->kvm, gfn); | ||
| 758 | struct kvm *kvm = vcpu->kvm; | 765 | struct kvm *kvm = vcpu->kvm; |
| 759 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | 766 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
| 760 | struct vm_area_struct *vma; | 767 | struct vm_area_struct *vma; |
| 761 | pfn_t pfn; | 768 | pfn_t pfn; |
| 762 | pgprot_t mem_type = PAGE_S2; | 769 | pgprot_t mem_type = PAGE_S2; |
| 763 | 770 | ||
| 764 | write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); | 771 | write_fault = kvm_is_write_fault(vcpu); |
| 765 | if (fault_status == FSC_PERM && !write_fault) { | 772 | if (fault_status == FSC_PERM && !write_fault) { |
| 766 | kvm_err("Unexpected L2 read permission error\n"); | 773 | kvm_err("Unexpected L2 read permission error\n"); |
| 767 | return -EFAULT; | 774 | return -EFAULT; |
| @@ -863,7 +870,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 863 | unsigned long fault_status; | 870 | unsigned long fault_status; |
| 864 | phys_addr_t fault_ipa; | 871 | phys_addr_t fault_ipa; |
| 865 | struct kvm_memory_slot *memslot; | 872 | struct kvm_memory_slot *memslot; |
| 866 | bool is_iabt; | 873 | unsigned long hva; |
| 874 | bool is_iabt, write_fault, writable; | ||
| 867 | gfn_t gfn; | 875 | gfn_t gfn; |
| 868 | int ret, idx; | 876 | int ret, idx; |
| 869 | 877 | ||
| @@ -874,17 +882,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 874 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | 882 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
| 875 | 883 | ||
| 876 | /* Check the stage-2 fault is trans. fault or write fault */ | 884 | /* Check the stage-2 fault is trans. fault or write fault */ |
| 877 | fault_status = kvm_vcpu_trap_get_fault(vcpu); | 885 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
| 878 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | 886 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
| 879 | kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n", | 887 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
| 880 | kvm_vcpu_trap_get_class(vcpu), fault_status); | 888 | kvm_vcpu_trap_get_class(vcpu), |
| 889 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | ||
| 890 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); | ||
| 881 | return -EFAULT; | 891 | return -EFAULT; |
| 882 | } | 892 | } |
| 883 | 893 | ||
| 884 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 894 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 885 | 895 | ||
| 886 | gfn = fault_ipa >> PAGE_SHIFT; | 896 | gfn = fault_ipa >> PAGE_SHIFT; |
| 887 | if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 897 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
| 898 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | ||
| 899 | write_fault = kvm_is_write_fault(vcpu); | ||
| 900 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { | ||
| 888 | if (is_iabt) { | 901 | if (is_iabt) { |
| 889 | /* Prefetch Abort on I/O address */ | 902 | /* Prefetch Abort on I/O address */ |
| 890 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | 903 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
| @@ -892,13 +905,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 892 | goto out_unlock; | 905 | goto out_unlock; |
| 893 | } | 906 | } |
| 894 | 907 | ||
| 895 | if (fault_status != FSC_FAULT) { | ||
| 896 | kvm_err("Unsupported fault status on io memory: %#lx\n", | ||
| 897 | fault_status); | ||
| 898 | ret = -EFAULT; | ||
| 899 | goto out_unlock; | ||
| 900 | } | ||
| 901 | |||
| 902 | /* | 908 | /* |
| 903 | * The IPA is reported as [MAX:12], so we need to | 909 | * The IPA is reported as [MAX:12], so we need to |
| 904 | * complement it with the bottom 12 bits from the | 910 | * complement it with the bottom 12 bits from the |
| @@ -910,9 +916,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 910 | goto out_unlock; | 916 | goto out_unlock; |
| 911 | } | 917 | } |
| 912 | 918 | ||
| 913 | memslot = gfn_to_memslot(vcpu->kvm, gfn); | 919 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
| 914 | |||
| 915 | ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status); | ||
| 916 | if (ret == 0) | 920 | if (ret == 0) |
| 917 | ret = 1; | 921 | ret = 1; |
| 918 | out_unlock: | 922 | out_unlock: |
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index cc83520459ed..7fd3e27e3ccc 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h | |||
| @@ -122,6 +122,17 @@ | |||
| 122 | #define VTCR_EL2_T0SZ_MASK 0x3f | 122 | #define VTCR_EL2_T0SZ_MASK 0x3f |
| 123 | #define VTCR_EL2_T0SZ_40B 24 | 123 | #define VTCR_EL2_T0SZ_40B 24 |
| 124 | 124 | ||
| 125 | /* | ||
| 126 | * We configure the Stage-2 page tables to always restrict the IPA space to be | ||
| 127 | * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are | ||
| 128 | * not known to exist and will break with this configuration. | ||
| 129 | * | ||
| 130 | * Note that when using 4K pages, we concatenate two first level page tables | ||
| 131 | * together. | ||
| 132 | * | ||
| 133 | * The magic numbers used for VTTBR_X in this patch can be found in Tables | ||
| 134 | * D4-23 and D4-25 in ARM DDI 0487A.b. | ||
| 135 | */ | ||
| 125 | #ifdef CONFIG_ARM64_64K_PAGES | 136 | #ifdef CONFIG_ARM64_64K_PAGES |
| 126 | /* | 137 | /* |
| 127 | * Stage2 translation configuration: | 138 | * Stage2 translation configuration: |
| @@ -149,7 +160,7 @@ | |||
| 149 | #endif | 160 | #endif |
| 150 | 161 | ||
| 151 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) | 162 | #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) |
| 152 | #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) | 163 | #define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) |
| 153 | #define VTTBR_VMID_SHIFT (48LLU) | 164 | #define VTTBR_VMID_SHIFT (48LLU) |
| 154 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) | 165 | #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) |
| 155 | 166 | ||
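The comment block added above documents the fixed 40-bit IPA configuration, and the mask change lets the VTTBR base-address field cover the whole supported physical address range instead of stopping at bit 39. A rough standalone sketch of the check that update_vttbr() now performs (the BUG_ON added in arch/arm/kvm/arm.c earlier in this diff), using assumed example constants; VTTBR_X = 13 would correspond to 4K pages with T0SZ = 24, and PHYS_MASK_SHIFT = 48 is an assumption:

```c
/* Illustrative only: mirrors the VTTBR_BADDR_MASK definition from the hunk
 * above with example constants; the real values come from asm/kvm_arm.h. */
#include <stdbool.h>
#include <stdint.h>

#define PHYS_MASK_SHIFT    48
#define VTTBR_X            13                 /* example: 4K pages, T0SZ = 24 */
#define VTTBR_BADDR_SHIFT  (VTTBR_X - 1)
#define VTTBR_BADDR_MASK   (((1ULL << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)

static bool vttbr_baddr_ok(uint64_t pgd_phys)
{
	/* Any bit set outside the BADDR field means the stage-2 PGD cannot be
	 * programmed into VTTBR as-is. */
	return (pgd_phys & ~VTTBR_BADDR_MASK) == 0;
}
```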
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index fdc3e21abd8d..5674a55b5518 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
| @@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) | |||
| 174 | 174 | ||
| 175 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) | 175 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) |
| 176 | { | 176 | { |
| 177 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) | ||
| 181 | { | ||
| 177 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; | 182 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; |
| 178 | } | 183 | } |
| 179 | 184 | ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 992d9da88119..2012c4ba8d67 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
| @@ -43,7 +43,7 @@ | |||
| 43 | 43 | ||
| 44 | #define KVM_VCPU_MAX_FEATURES 3 | 44 | #define KVM_VCPU_MAX_FEATURES 3 |
| 45 | 45 | ||
| 46 | int kvm_target_cpu(void); | 46 | int __attribute_const__ kvm_target_cpu(void); |
| 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
| 48 | int kvm_arch_dev_ioctl_check_extension(long ext); | 48 | int kvm_arch_dev_ioctl_check_extension(long ext); |
| 49 | 49 | ||
| @@ -197,7 +197,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | |||
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | 199 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); |
| 200 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); | 200 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
| 201 | 201 | ||
| 202 | u64 kvm_call_hyp(void *hypfn, ...); | 202 | u64 kvm_call_hyp(void *hypfn, ...); |
| 203 | 203 | ||
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 8e138c7c53ac..a030d163840b 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
| @@ -59,10 +59,9 @@ | |||
| 59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | 59 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) |
| 60 | 60 | ||
| 61 | /* | 61 | /* |
| 62 | * Align KVM with the kernel's view of physical memory. Should be | 62 | * We currently only support a 40bit IPA. |
| 63 | * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. | ||
| 64 | */ | 63 | */ |
| 65 | #define KVM_PHYS_SHIFT PHYS_MASK_SHIFT | 64 | #define KVM_PHYS_SHIFT (40) |
| 66 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | 65 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) |
| 67 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | 66 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) |
| 68 | 67 | ||
| @@ -93,19 +92,6 @@ void kvm_clear_hyp_idmap(void); | |||
| 93 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | 92 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) |
| 94 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) | 93 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) |
| 95 | 94 | ||
| 96 | static inline bool kvm_is_write_fault(unsigned long esr) | ||
| 97 | { | ||
| 98 | unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; | ||
| 99 | |||
| 100 | if (esr_ec == ESR_EL2_EC_IABT) | ||
| 101 | return false; | ||
| 102 | |||
| 103 | if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) | ||
| 104 | return false; | ||
| 105 | |||
| 106 | return true; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | 95 | static inline void kvm_clean_pgd(pgd_t *pgd) {} |
| 110 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | 96 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} |
| 111 | static inline void kvm_clean_pte(pte_t *pte) {} | 97 | static inline void kvm_clean_pte(pte_t *pte) {} |
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index e633ff8cdec8..8e38878c87c6 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | #define __KVM_HAVE_GUEST_DEBUG | 38 | #define __KVM_HAVE_GUEST_DEBUG |
| 39 | #define __KVM_HAVE_IRQ_LINE | 39 | #define __KVM_HAVE_IRQ_LINE |
| 40 | #define __KVM_HAVE_READONLY_MEM | ||
| 40 | 41 | ||
| 41 | #define KVM_REG_SIZE(id) \ | 42 | #define KVM_REG_SIZE(id) \ |
| 42 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 43 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
| @@ -159,6 +160,7 @@ struct kvm_arch_memory_slot { | |||
| 159 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 160 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
| 160 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 | 161 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 |
| 161 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) | 162 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) |
| 163 | #define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 | ||
| 162 | 164 | ||
| 163 | /* KVM_IRQ_LINE irq field index values */ | 165 | /* KVM_IRQ_LINE irq field index values */ |
| 164 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 166 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 8d1ec2887a26..76794692c20b 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c | |||
| @@ -174,7 +174,7 @@ static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
| 174 | 174 | ||
| 175 | ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); | 175 | ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); |
| 176 | if (ret != 0) | 176 | if (ret != 0) |
| 177 | return ret; | 177 | return -EFAULT; |
| 178 | 178 | ||
| 179 | return kvm_arm_timer_set_reg(vcpu, reg->id, val); | 179 | return kvm_arm_timer_set_reg(vcpu, reg->id, val); |
| 180 | } | 180 | } |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 5805e7c4a4dd..4cc3b719208e 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
| @@ -1218,7 +1218,7 @@ static bool is_valid_cache(u32 val) | |||
| 1218 | u32 level, ctype; | 1218 | u32 level, ctype; |
| 1219 | 1219 | ||
| 1220 | if (val >= CSSELR_MAX) | 1220 | if (val >= CSSELR_MAX) |
| 1221 | return -ENOENT; | 1221 | return false; |
| 1222 | 1222 | ||
| 1223 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | 1223 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ |
| 1224 | level = (val >> 1); | 1224 | level = (val >> 1); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 35b0c121bb65..2f2aac8448a4 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
| @@ -25,26 +25,25 @@ | |||
| 25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
| 26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 27 | 27 | ||
| 28 | #define VGIC_NR_IRQS 256 | 28 | #define VGIC_NR_IRQS_LEGACY 256 |
| 29 | #define VGIC_NR_SGIS 16 | 29 | #define VGIC_NR_SGIS 16 |
| 30 | #define VGIC_NR_PPIS 16 | 30 | #define VGIC_NR_PPIS 16 |
| 31 | #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) | 31 | #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) |
| 32 | #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) | ||
| 33 | #define VGIC_MAX_CPUS KVM_MAX_VCPUS | ||
| 34 | 32 | ||
| 35 | #define VGIC_V2_MAX_LRS (1 << 6) | 33 | #define VGIC_V2_MAX_LRS (1 << 6) |
| 36 | #define VGIC_V3_MAX_LRS 16 | 34 | #define VGIC_V3_MAX_LRS 16 |
| 35 | #define VGIC_MAX_IRQS 1024 | ||
| 37 | 36 | ||
| 38 | /* Sanity checks... */ | 37 | /* Sanity checks... */ |
| 39 | #if (VGIC_MAX_CPUS > 8) | 38 | #if (KVM_MAX_VCPUS > 8) |
| 40 | #error Invalid number of CPU interfaces | 39 | #error Invalid number of CPU interfaces |
| 41 | #endif | 40 | #endif |
| 42 | 41 | ||
| 43 | #if (VGIC_NR_IRQS & 31) | 42 | #if (VGIC_NR_IRQS_LEGACY & 31) |
| 44 | #error "VGIC_NR_IRQS must be a multiple of 32" | 43 | #error "VGIC_NR_IRQS must be a multiple of 32" |
| 45 | #endif | 44 | #endif |
| 46 | 45 | ||
| 47 | #if (VGIC_NR_IRQS > 1024) | 46 | #if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS) |
| 48 | #error "VGIC_NR_IRQS must be <= 1024" | 47 | #error "VGIC_NR_IRQS must be <= 1024" |
| 49 | #endif | 48 | #endif |
| 50 | 49 | ||
| @@ -54,19 +53,33 @@ | |||
| 54 | * - a bunch of shared interrupts (SPI) | 53 | * - a bunch of shared interrupts (SPI) |
| 55 | */ | 54 | */ |
| 56 | struct vgic_bitmap { | 55 | struct vgic_bitmap { |
| 57 | union { | 56 | /* |
| 58 | u32 reg[VGIC_NR_PRIVATE_IRQS / 32]; | 57 | * - One UL per VCPU for private interrupts (assumes UL is at |
| 59 | DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS); | 58 | * least 32 bits) |
| 60 | } percpu[VGIC_MAX_CPUS]; | 59 | * - As many UL as necessary for shared interrupts. |
| 61 | union { | 60 | * |
| 62 | u32 reg[VGIC_NR_SHARED_IRQS / 32]; | 61 | * The private interrupts are accessed via the "private" |
| 63 | DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS); | 62 | * field, one UL per vcpu (the state for vcpu n is in |
| 64 | } shared; | 63 | * private[n]). The shared interrupts are accessed via the |
| 64 | * "shared" pointer (IRQn state is at bit n-32 in the bitmap). | ||
| 65 | */ | ||
| 66 | unsigned long *private; | ||
| 67 | unsigned long *shared; | ||
| 65 | }; | 68 | }; |
| 66 | 69 | ||
| 67 | struct vgic_bytemap { | 70 | struct vgic_bytemap { |
| 68 | u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4]; | 71 | /* |
| 69 | u32 shared[VGIC_NR_SHARED_IRQS / 4]; | 72 | * - 8 u32 per VCPU for private interrupts |
| 73 | * - As many u32 as necessary for shared interrupts. | ||
| 74 | * | ||
| 75 | * The private interrupts are accessed via the "private" | ||
| 76 | * field, (the state for vcpu n is in private[n*8] to | ||
| 77 | * private[n*8 + 7]). The shared interrupts are accessed via | ||
| 78 | * the "shared" pointer (IRQn state is at byte (n-32)%4 of the | ||
| 79 | * shared[(n-32)/4] word). | ||
| 80 | */ | ||
| 81 | u32 *private; | ||
| 82 | u32 *shared; | ||
| 70 | }; | 83 | }; |
| 71 | 84 | ||
| 72 | struct kvm_vcpu; | 85 | struct kvm_vcpu; |
| @@ -127,6 +140,9 @@ struct vgic_dist { | |||
| 127 | bool in_kernel; | 140 | bool in_kernel; |
| 128 | bool ready; | 141 | bool ready; |
| 129 | 142 | ||
| 143 | int nr_cpus; | ||
| 144 | int nr_irqs; | ||
| 145 | |||
| 130 | /* Virtual control interface mapping */ | 146 | /* Virtual control interface mapping */ |
| 131 | void __iomem *vctrl_base; | 147 | void __iomem *vctrl_base; |
| 132 | 148 | ||
| @@ -140,11 +156,25 @@ struct vgic_dist { | |||
| 140 | /* Interrupt enabled (one bit per IRQ) */ | 156 | /* Interrupt enabled (one bit per IRQ) */ |
| 141 | struct vgic_bitmap irq_enabled; | 157 | struct vgic_bitmap irq_enabled; |
| 142 | 158 | ||
| 143 | /* Interrupt 'pin' level */ | 159 | /* Level-triggered interrupt external input is asserted */ |
| 144 | struct vgic_bitmap irq_state; | 160 | struct vgic_bitmap irq_level; |
| 145 | 161 | ||
| 146 | /* Level-triggered interrupt in progress */ | 162 | /* |
| 147 | struct vgic_bitmap irq_active; | 163 | * Interrupt state is pending on the distributor |
| 164 | */ | ||
| 165 | struct vgic_bitmap irq_pending; | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered | ||
| 169 | * interrupts. Essentially holds the state of the flip-flop in | ||
| 170 | * Figure 4-10 on page 4-101 in ARM IHI 0048B.b. | ||
| 171 | * Once set, it is only cleared for level-triggered interrupts on | ||
| 172 | * guest ACKs (when we queue it) or writes to GICD_ICPENDRn. | ||
| 173 | */ | ||
| 174 | struct vgic_bitmap irq_soft_pend; | ||
| 175 | |||
| 176 | /* Level-triggered interrupt queued on VCPU interface */ | ||
| 177 | struct vgic_bitmap irq_queued; | ||
| 148 | 178 | ||
| 149 | /* Interrupt priority. Not used yet. */ | 179 | /* Interrupt priority. Not used yet. */ |
| 150 | struct vgic_bytemap irq_priority; | 180 | struct vgic_bytemap irq_priority; |
| @@ -152,15 +182,36 @@ struct vgic_dist { | |||
| 152 | /* Level/edge triggered */ | 182 | /* Level/edge triggered */ |
| 153 | struct vgic_bitmap irq_cfg; | 183 | struct vgic_bitmap irq_cfg; |
| 154 | 184 | ||
| 155 | /* Source CPU per SGI and target CPU */ | 185 | /* |
| 156 | u8 irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS]; | 186 | * Source CPU per SGI and target CPU: |
| 157 | 187 | * | |
| 158 | /* Target CPU for each IRQ */ | 188 | * Each byte represent a SGI observable on a VCPU, each bit of |
| 159 | u8 irq_spi_cpu[VGIC_NR_SHARED_IRQS]; | 189 | * this byte indicating if the corresponding VCPU has |
| 160 | struct vgic_bitmap irq_spi_target[VGIC_MAX_CPUS]; | 190 | * generated this interrupt. This is a GICv2 feature only. |
| 191 | * | ||
| 192 | * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are | ||
| 193 | * the SGIs observable on VCPUn. | ||
| 194 | */ | ||
| 195 | u8 *irq_sgi_sources; | ||
| 196 | |||
| 197 | /* | ||
| 198 | * Target CPU for each SPI: | ||
| 199 | * | ||
| 200 | * Array of available SPI, each byte indicating the target | ||
| 201 | * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32]. | ||
| 202 | */ | ||
| 203 | u8 *irq_spi_cpu; | ||
| 204 | |||
| 205 | /* | ||
| 206 | * Reverse lookup of irq_spi_cpu for faster compute pending: | ||
| 207 | * | ||
| 208 | * Array of bitmaps, one per VCPU, describing if IRQn is | ||
| 209 | * routed to a particular VCPU. | ||
| 210 | */ | ||
| 211 | struct vgic_bitmap *irq_spi_target; | ||
| 161 | 212 | ||
| 162 | /* Bitmap indicating which CPU has something pending */ | 213 | /* Bitmap indicating which CPU has something pending */ |
| 163 | unsigned long irq_pending_on_cpu; | 214 | unsigned long *irq_pending_on_cpu; |
| 164 | #endif | 215 | #endif |
| 165 | }; | 216 | }; |
| 166 | 217 | ||
| @@ -190,11 +241,11 @@ struct vgic_v3_cpu_if { | |||
| 190 | struct vgic_cpu { | 241 | struct vgic_cpu { |
| 191 | #ifdef CONFIG_KVM_ARM_VGIC | 242 | #ifdef CONFIG_KVM_ARM_VGIC |
| 192 | /* per IRQ to LR mapping */ | 243 | /* per IRQ to LR mapping */ |
| 193 | u8 vgic_irq_lr_map[VGIC_NR_IRQS]; | 244 | u8 *vgic_irq_lr_map; |
| 194 | 245 | ||
| 195 | /* Pending interrupts on this VCPU */ | 246 | /* Pending interrupts on this VCPU */ |
| 196 | DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); | 247 | DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); |
| 197 | DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); | 248 | unsigned long *pending_shared; |
| 198 | 249 | ||
| 199 | /* Bitmap of used/free list registers */ | 250 | /* Bitmap of used/free list registers */ |
| 200 | DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); | 251 | DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); |
| @@ -225,7 +276,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); | |||
| 225 | int kvm_vgic_hyp_init(void); | 276 | int kvm_vgic_hyp_init(void); |
| 226 | int kvm_vgic_init(struct kvm *kvm); | 277 | int kvm_vgic_init(struct kvm *kvm); |
| 227 | int kvm_vgic_create(struct kvm *kvm); | 278 | int kvm_vgic_create(struct kvm *kvm); |
| 228 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); | 279 | void kvm_vgic_destroy(struct kvm *kvm); |
| 280 | void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); | ||
| 229 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); | 281 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); |
| 230 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); | 282 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); |
| 231 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | 283 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, |
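The comments added to struct vgic_bitmap and struct vgic_bytemap above describe the new dynamically sized layout: one unsigned long per VCPU for the 32 private IRQs, followed by as many longs as the shared IRQs need. A hedged, self-contained sketch of that layout (mirroring vgic_init_bitmap()/vgic_bitmap_get_irq_val() in virt/kvm/arm/vgic.c below, but with plain libc calls instead of the kernel helpers):

```c
/* Hedged sketch of the dynamic vgic_bitmap layout described above:
 * private[n] holds the PPIs/SGIs of VCPU n, the shared words follow. */
#include <stdbool.h>
#include <stdlib.h>

#define BITS_PER_LONG        (8 * sizeof(unsigned long))
#define VGIC_NR_PRIVATE_IRQS 32
#define BITS_TO_LONGS(n)     (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct vgic_bitmap {
	unsigned long *private;   /* private[n] = PPIs/SGIs of VCPU n   */
	unsigned long *shared;    /* bit (irq - 32) of the shared words */
};

static int vgic_bitmap_init(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = calloc(nr_longs, sizeof(unsigned long));
	if (!b->private)
		return -1;
	b->shared = b->private + nr_cpus;   /* shared words follow the per-CPU ones */
	return 0;
}

static bool vgic_bitmap_test(struct vgic_bitmap *b, int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return (b->private[cpuid] >> irq) & 1;
	irq -= VGIC_NR_PRIVATE_IRQS;
	return (b->shared[irq / BITS_PER_LONG] >> (irq % BITS_PER_LONG)) & 1;
}
```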
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d594f9f34429..28be31f49250 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -536,6 +536,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | |||
| 536 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 536 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
| 537 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); | 537 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); |
| 538 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); | 538 | unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); |
| 539 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, | ||
| 540 | bool *writable); | ||
| 539 | void kvm_release_page_clean(struct page *page); | 541 | void kvm_release_page_clean(struct page *page); |
| 540 | void kvm_release_page_dirty(struct page *page); | 542 | void kvm_release_page_dirty(struct page *page); |
| 541 | void kvm_set_page_accessed(struct page *page); | 543 | void kvm_set_page_accessed(struct page *page); |
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 3ee3ce06bbec..862967852d5a 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
| @@ -36,21 +36,22 @@ | |||
| 36 | * How the whole thing works (courtesy of Christoffer Dall): | 36 | * How the whole thing works (courtesy of Christoffer Dall): |
| 37 | * | 37 | * |
| 38 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if | 38 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if |
| 39 | * something is pending | 39 | * something is pending on the CPU interface. |
| 40 | * - VGIC pending interrupts are stored on the vgic.irq_state vgic | 40 | * - Interrupts that are pending on the distributor are stored on the |
| 41 | * bitmap (this bitmap is updated by both user land ioctls and guest | 41 | * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land |
| 42 | * mmio ops, and other in-kernel peripherals such as the | 42 | * ioctls and guest mmio ops, and other in-kernel peripherals such as the |
| 43 | * arch. timers) and indicate the 'wire' state. | 43 | * arch. timers). |
| 44 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is | 44 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is |
| 45 | * recalculated | 45 | * recalculated |
| 46 | * - To calculate the oracle, we need info for each cpu from | 46 | * - To calculate the oracle, we need info for each cpu from |
| 47 | * compute_pending_for_cpu, which considers: | 47 | * compute_pending_for_cpu, which considers: |
| 48 | * - PPI: dist->irq_state & dist->irq_enable | 48 | * - PPI: dist->irq_pending & dist->irq_enable |
| 49 | * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target | 49 | * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target |
| 50 | * - irq_spi_target is a 'formatted' version of the GICD_ICFGR | 50 | * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn |
| 51 | * registers, stored on each vcpu. We only keep one bit of | 51 | * registers, stored on each vcpu. We only keep one bit of |
| 52 | * information per interrupt, making sure that only one vcpu can | 52 | * information per interrupt, making sure that only one vcpu can |
| 53 | * accept the interrupt. | 53 | * accept the interrupt. |
| 54 | * - If any of the above state changes, we must recalculate the oracle. | ||
| 54 | * - The same is true when injecting an interrupt, except that we only | 55 | * - The same is true when injecting an interrupt, except that we only |
| 55 | * consider a single interrupt at a time. The irq_spi_cpu array | 56 | * consider a single interrupt at a time. The irq_spi_cpu array |
| 56 | * contains the target CPU for each SPI. | 57 | * contains the target CPU for each SPI. |
| @@ -60,13 +61,18 @@ | |||
| 60 | * the 'line' again. This is achieved as such: | 61 | * the 'line' again. This is achieved as such: |
| 61 | * | 62 | * |
| 62 | * - When a level interrupt is moved onto a vcpu, the corresponding | 63 | * - When a level interrupt is moved onto a vcpu, the corresponding |
| 63 | * bit in irq_active is set. As long as this bit is set, the line | 64 | * bit in irq_queued is set. As long as this bit is set, the line |
| 64 | * will be ignored for further interrupts. The interrupt is injected | 65 | * will be ignored for further interrupts. The interrupt is injected |
| 65 | * into the vcpu with the GICH_LR_EOI bit set (generate a | 66 | * into the vcpu with the GICH_LR_EOI bit set (generate a |
| 66 | * maintenance interrupt on EOI). | 67 | * maintenance interrupt on EOI). |
| 67 | * - When the interrupt is EOIed, the maintenance interrupt fires, | 68 | * - When the interrupt is EOIed, the maintenance interrupt fires, |
| 68 | * and clears the corresponding bit in irq_active. This allow the | 69 | * and clears the corresponding bit in irq_queued. This allows the |
| 69 | * interrupt line to be sampled again. | 70 | * interrupt line to be sampled again. |
| 71 | * - Note that level-triggered interrupts can also be set to pending from | ||
| 72 | * writes to GICD_ISPENDRn and lowering the external input line does not | ||
| 73 | * cause the interrupt to become inactive in such a situation. | ||
| 74 | * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become | ||
| 75 | * inactive as long as the external input line is held high. | ||
| 70 | */ | 76 | */ |
| 71 | 77 | ||
| 72 | #define VGIC_ADDR_UNDEF (-1) | 78 | #define VGIC_ADDR_UNDEF (-1) |
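The reworked comment above describes the pending "oracle": for each VCPU, its bit in irq_pending_on_cpu is the OR of (pending & enabled) over the private IRQs and (pending & enabled & spi_target) over the shared IRQs. A hedged, self-contained sketch of that recomputation, using plain bitmaps in place of struct vgic_bitmap; names and sizes are illustrative, not the kernel's:

```c
/* Hedged sketch of the per-VCPU "oracle" recomputation described in the
 * comment above; plain unsigned long bitmaps stand in for struct vgic_bitmap,
 * and the sizes assume the legacy 256-IRQ layout. */
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_LONG  (8 * sizeof(unsigned long))
#define NR_PRIVATE     32              /* SGIs + PPIs */
#define NR_SHARED      (256 - 32)      /* SPIs (VGIC_NR_IRQS_LEGACY layout) */
#define WORDS(n)       (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static bool compute_pending_for_cpu(const unsigned long *priv_pending,
				    const unsigned long *priv_enabled,
				    const unsigned long *shared_pending,
				    const unsigned long *shared_enabled,
				    const unsigned long *spi_target)
{
	unsigned long acc = 0;
	size_t i;

	/* PPI/SGI: dist->irq_pending & dist->irq_enabled, per-VCPU words. */
	for (i = 0; i < WORDS(NR_PRIVATE); i++)
		acc |= priv_pending[i] & priv_enabled[i];

	/* SPI: additionally gated by this VCPU's irq_spi_target bitmap. */
	for (i = 0; i < WORDS(NR_SHARED); i++)
		acc |= shared_pending[i] & shared_enabled[i] & spi_target[i];

	/* A nonzero result sets this VCPU's bit in irq_pending_on_cpu. */
	return acc != 0;
}
```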
| @@ -89,6 +95,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); | |||
| 89 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); | 95 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); |
| 90 | static void vgic_update_state(struct kvm *kvm); | 96 | static void vgic_update_state(struct kvm *kvm); |
| 91 | static void vgic_kick_vcpus(struct kvm *kvm); | 97 | static void vgic_kick_vcpus(struct kvm *kvm); |
| 98 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); | ||
| 92 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | 99 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); |
| 93 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); | 100 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); |
| 94 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | 101 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); |
| @@ -99,10 +106,8 @@ static const struct vgic_ops *vgic_ops; | |||
| 99 | static const struct vgic_params *vgic; | 106 | static const struct vgic_params *vgic; |
| 100 | 107 | ||
| 101 | /* | 108 | /* |
| 102 | * struct vgic_bitmap contains unions that provide two views of | 109 | * struct vgic_bitmap contains a bitmap made of unsigned longs, but |
| 103 | * the same data. In one case it is an array of registers of | 110 | * extracts u32s out of them. |
| 104 | * u32's, and in the other case it is a bitmap of unsigned | ||
| 105 | * longs. | ||
| 106 | * | 111 | * |
| 107 | * This does not work on 64-bit BE systems, because the bitmap access | 112 | * This does not work on 64-bit BE systems, because the bitmap access |
| 108 | * will store two consecutive 32-bit words with the higher-addressed | 113 | * will store two consecutive 32-bit words with the higher-addressed |
| @@ -118,23 +123,45 @@ static const struct vgic_params *vgic; | |||
| 118 | #define REG_OFFSET_SWIZZLE 0 | 123 | #define REG_OFFSET_SWIZZLE 0 |
| 119 | #endif | 124 | #endif |
| 120 | 125 | ||
| 126 | static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs) | ||
| 127 | { | ||
| 128 | int nr_longs; | ||
| 129 | |||
| 130 | nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS); | ||
| 131 | |||
| 132 | b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL); | ||
| 133 | if (!b->private) | ||
| 134 | return -ENOMEM; | ||
| 135 | |||
| 136 | b->shared = b->private + nr_cpus; | ||
| 137 | |||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | static void vgic_free_bitmap(struct vgic_bitmap *b) | ||
| 142 | { | ||
| 143 | kfree(b->private); | ||
| 144 | b->private = NULL; | ||
| 145 | b->shared = NULL; | ||
| 146 | } | ||
| 147 | |||
| 121 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, | 148 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, |
| 122 | int cpuid, u32 offset) | 149 | int cpuid, u32 offset) |
| 123 | { | 150 | { |
| 124 | offset >>= 2; | 151 | offset >>= 2; |
| 125 | if (!offset) | 152 | if (!offset) |
| 126 | return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE); | 153 | return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE; |
| 127 | else | 154 | else |
| 128 | return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE); | 155 | return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE); |
| 129 | } | 156 | } |
| 130 | 157 | ||
| 131 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, | 158 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, |
| 132 | int cpuid, int irq) | 159 | int cpuid, int irq) |
| 133 | { | 160 | { |
| 134 | if (irq < VGIC_NR_PRIVATE_IRQS) | 161 | if (irq < VGIC_NR_PRIVATE_IRQS) |
| 135 | return test_bit(irq, x->percpu[cpuid].reg_ul); | 162 | return test_bit(irq, x->private + cpuid); |
| 136 | 163 | ||
| 137 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul); | 164 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); |
| 138 | } | 165 | } |
| 139 | 166 | ||
| 140 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | 167 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, |
| @@ -143,9 +170,9 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | |||
| 143 | unsigned long *reg; | 170 | unsigned long *reg; |
| 144 | 171 | ||
| 145 | if (irq < VGIC_NR_PRIVATE_IRQS) { | 172 | if (irq < VGIC_NR_PRIVATE_IRQS) { |
| 146 | reg = x->percpu[cpuid].reg_ul; | 173 | reg = x->private + cpuid; |
| 147 | } else { | 174 | } else { |
| 148 | reg = x->shared.reg_ul; | 175 | reg = x->shared; |
| 149 | irq -= VGIC_NR_PRIVATE_IRQS; | 176 | irq -= VGIC_NR_PRIVATE_IRQS; |
| 150 | } | 177 | } |
| 151 | 178 | ||
| @@ -157,24 +184,49 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | |||
| 157 | 184 | ||
| 158 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) | 185 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) |
| 159 | { | 186 | { |
| 160 | if (unlikely(cpuid >= VGIC_MAX_CPUS)) | 187 | return x->private + cpuid; |
| 161 | return NULL; | ||
| 162 | return x->percpu[cpuid].reg_ul; | ||
| 163 | } | 188 | } |
| 164 | 189 | ||
| 165 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) | 190 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) |
| 166 | { | 191 | { |
| 167 | return x->shared.reg_ul; | 192 | return x->shared; |
| 193 | } | ||
| 194 | |||
| 195 | static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs) | ||
| 196 | { | ||
| 197 | int size; | ||
| 198 | |||
| 199 | size = nr_cpus * VGIC_NR_PRIVATE_IRQS; | ||
| 200 | size += nr_irqs - VGIC_NR_PRIVATE_IRQS; | ||
| 201 | |||
| 202 | x->private = kzalloc(size, GFP_KERNEL); | ||
| 203 | if (!x->private) | ||
| 204 | return -ENOMEM; | ||
| 205 | |||
| 206 | x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32); | ||
| 207 | return 0; | ||
| 208 | } | ||
| 209 | |||
| 210 | static void vgic_free_bytemap(struct vgic_bytemap *b) | ||
| 211 | { | ||
| 212 | kfree(b->private); | ||
| 213 | b->private = NULL; | ||
| 214 | b->shared = NULL; | ||
| 168 | } | 215 | } |
| 169 | 216 | ||
| 170 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) | 217 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) |
| 171 | { | 218 | { |
| 172 | offset >>= 2; | 219 | u32 *reg; |
| 173 | BUG_ON(offset > (VGIC_NR_IRQS / 4)); | 220 | |
| 174 | if (offset < 8) | 221 | if (offset < VGIC_NR_PRIVATE_IRQS) { |
| 175 | return x->percpu[cpuid] + offset; | 222 | reg = x->private; |
| 176 | else | 223 | offset += cpuid * VGIC_NR_PRIVATE_IRQS; |
| 177 | return x->shared + offset - 8; | 224 | } else { |
| 225 | reg = x->shared; | ||
| 226 | offset -= VGIC_NR_PRIVATE_IRQS; | ||
| 227 | } | ||
| 228 | |||
| 229 | return reg + (offset / sizeof(u32)); | ||
| 178 | } | 230 | } |
| 179 | 231 | ||
| 180 | #define VGIC_CFG_LEVEL 0 | 232 | #define VGIC_CFG_LEVEL 0 |
| @@ -196,46 +248,81 @@ static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) | |||
| 196 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); | 248 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); |
| 197 | } | 249 | } |
| 198 | 250 | ||
| 199 | static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) | 251 | static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) |
| 252 | { | ||
| 253 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 254 | |||
| 255 | return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); | ||
| 256 | } | ||
| 257 | |||
| 258 | static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) | ||
| 259 | { | ||
| 260 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 261 | |||
| 262 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1); | ||
| 263 | } | ||
| 264 | |||
| 265 | static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) | ||
| 266 | { | ||
| 267 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 268 | |||
| 269 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); | ||
| 270 | } | ||
| 271 | |||
| 272 | static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) | ||
| 273 | { | ||
| 274 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 275 | |||
| 276 | return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq); | ||
| 277 | } | ||
| 278 | |||
| 279 | static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq) | ||
| 280 | { | ||
| 281 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 282 | |||
| 283 | vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1); | ||
| 284 | } | ||
| 285 | |||
| 286 | static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq) | ||
| 200 | { | 287 | { |
| 201 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 288 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 202 | 289 | ||
| 203 | return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); | 290 | vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0); |
| 204 | } | 291 | } |
| 205 | 292 | ||
| 206 | static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) | 293 | static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq) |
| 207 | { | 294 | { |
| 208 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 295 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 209 | 296 | ||
| 210 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); | 297 | return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq); |
| 211 | } | 298 | } |
| 212 | 299 | ||
| 213 | static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) | 300 | static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) |
| 214 | { | 301 | { |
| 215 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 302 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 216 | 303 | ||
| 217 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); | 304 | vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); |
| 218 | } | 305 | } |
| 219 | 306 | ||
| 220 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | 307 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) |
| 221 | { | 308 | { |
| 222 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 309 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 223 | 310 | ||
| 224 | return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq); | 311 | return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); |
| 225 | } | 312 | } |
| 226 | 313 | ||
| 227 | static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq) | 314 | static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) |
| 228 | { | 315 | { |
| 229 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 316 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 230 | 317 | ||
| 231 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1); | 318 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); |
| 232 | } | 319 | } |
| 233 | 320 | ||
| 234 | static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq) | 321 | static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) |
| 235 | { | 322 | { |
| 236 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 323 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 237 | 324 | ||
| 238 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0); | 325 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0); |
| 239 | } | 326 | } |
| 240 | 327 | ||
| 241 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) | 328 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) |
| @@ -256,6 +343,11 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) | |||
| 256 | vcpu->arch.vgic_cpu.pending_shared); | 343 | vcpu->arch.vgic_cpu.pending_shared); |
| 257 | } | 344 | } |
| 258 | 345 | ||
| 346 | static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) | ||
| 347 | { | ||
| 348 | return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); | ||
| 349 | } | ||
| 350 | |||
| 259 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) | 351 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) |
| 260 | { | 352 | { |
| 261 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; | 353 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; |
| @@ -347,7 +439,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | |||
| 347 | 439 | ||
| 348 | case 4: /* GICD_TYPER */ | 440 | case 4: /* GICD_TYPER */ |
| 349 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | 441 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; |
| 350 | reg |= (VGIC_NR_IRQS >> 5) - 1; | 442 | reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; |
| 351 | vgic_reg_access(mmio, ®, word_offset, | 443 | vgic_reg_access(mmio, ®, word_offset, |
| 352 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | 444 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); |
| 353 | break; | 445 | break; |
| @@ -409,11 +501,33 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | |||
| 409 | struct kvm_exit_mmio *mmio, | 501 | struct kvm_exit_mmio *mmio, |
| 410 | phys_addr_t offset) | 502 | phys_addr_t offset) |
| 411 | { | 503 | { |
| 412 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | 504 | u32 *reg, orig; |
| 413 | vcpu->vcpu_id, offset); | 505 | u32 level_mask; |
| 506 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 507 | |||
| 508 | reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset); | ||
| 509 | level_mask = (~(*reg)); | ||
| 510 | |||
| 511 | /* Mark both level and edge triggered irqs as pending */ | ||
| 512 | reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset); | ||
| 513 | orig = *reg; | ||
| 414 | vgic_reg_access(mmio, reg, offset, | 514 | vgic_reg_access(mmio, reg, offset, |
| 415 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | 515 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); |
| 516 | |||
| 416 | if (mmio->is_write) { | 517 | if (mmio->is_write) { |
| 518 | /* Set the soft-pending flag only for level-triggered irqs */ | ||
| 519 | reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, | ||
| 520 | vcpu->vcpu_id, offset); | ||
| 521 | vgic_reg_access(mmio, reg, offset, | ||
| 522 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | ||
| 523 | *reg &= level_mask; | ||
| 524 | |||
| 525 | /* Ignore writes to SGIs */ | ||
| 526 | if (offset < 2) { | ||
| 527 | *reg &= ~0xffff; | ||
| 528 | *reg |= orig & 0xffff; | ||
| 529 | } | ||
| 530 | |||
| 417 | vgic_update_state(vcpu->kvm); | 531 | vgic_update_state(vcpu->kvm); |
| 418 | return true; | 532 | return true; |
| 419 | } | 533 | } |
| @@ -425,11 +539,34 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | |||
| 425 | struct kvm_exit_mmio *mmio, | 539 | struct kvm_exit_mmio *mmio, |
| 426 | phys_addr_t offset) | 540 | phys_addr_t offset) |
| 427 | { | 541 | { |
| 428 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | 542 | u32 *level_active; |
| 429 | vcpu->vcpu_id, offset); | 543 | u32 *reg, orig; |
| 544 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 545 | |||
| 546 | reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset); | ||
| 547 | orig = *reg; | ||
| 430 | vgic_reg_access(mmio, reg, offset, | 548 | vgic_reg_access(mmio, reg, offset, |
| 431 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | 549 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); |
| 432 | if (mmio->is_write) { | 550 | if (mmio->is_write) { |
| 551 | /* Re-set level triggered level-active interrupts */ | ||
| 552 | level_active = vgic_bitmap_get_reg(&dist->irq_level, | ||
| 553 | vcpu->vcpu_id, offset); | ||
| 554 | reg = vgic_bitmap_get_reg(&dist->irq_pending, | ||
| 555 | vcpu->vcpu_id, offset); | ||
| 556 | *reg |= *level_active; | ||
| 557 | |||
| 558 | /* Ignore writes to SGIs */ | ||
| 559 | if (offset < 2) { | ||
| 560 | *reg &= ~0xffff; | ||
| 561 | *reg |= orig & 0xffff; | ||
| 562 | } | ||
| 563 | |||
| 564 | /* Clear soft-pending flags */ | ||
| 565 | reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, | ||
| 566 | vcpu->vcpu_id, offset); | ||
| 567 | vgic_reg_access(mmio, reg, offset, | ||
| 568 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | ||
| 569 | |||
| 433 | vgic_update_state(vcpu->kvm); | 570 | vgic_update_state(vcpu->kvm); |
| 434 | return true; | 571 | return true; |
| 435 | } | 572 | } |
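
Taken together, the two hunks above give level-triggered interrupts a separate soft-pending latch: a GICD_ISPENDR write keeps them pending even after the device line drops, a GICD_ICPENDR write cannot hide an interrupt whose line is still high, and the SGI bytes of these registers are left untouched because SGI pending state lives in GICD_SPENDSGIR/CPENDSGIR. A minimal sketch of the assumed resulting rule, not code from the patch:

```c
#include <stdbool.h>

/* Sketch: effective pending state of a level-triggered interrupt once the
 * sampled line level and the ISPENDR soft-pending latch are both tracked. */
static bool level_irq_pending(bool line_level, bool soft_pend)
{
        return line_level || soft_pend;
}
```
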
| @@ -651,9 +788,9 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
| 651 | * is fine, then we are only setting a few bits that were | 788 | * is fine, then we are only setting a few bits that were |
| 652 | * already set. | 789 | * already set. |
| 653 | */ | 790 | */ |
| 654 | vgic_dist_irq_set(vcpu, lr.irq); | 791 | vgic_dist_irq_set_pending(vcpu, lr.irq); |
| 655 | if (lr.irq < VGIC_NR_SGIS) | 792 | if (lr.irq < VGIC_NR_SGIS) |
| 656 | dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source; | 793 | *vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source; |
| 657 | lr.state &= ~LR_STATE_PENDING; | 794 | lr.state &= ~LR_STATE_PENDING; |
| 658 | vgic_set_lr(vcpu, i, lr); | 795 | vgic_set_lr(vcpu, i, lr); |
| 659 | 796 | ||
| @@ -662,8 +799,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
| 662 | * active), then the LR does not hold any useful info and can | 799 | * active), then the LR does not hold any useful info and can |
| 663 | * be marked as free for other use. | 800 | * be marked as free for other use. |
| 664 | */ | 801 | */ |
| 665 | if (!(lr.state & LR_STATE_MASK)) | 802 | if (!(lr.state & LR_STATE_MASK)) { |
| 666 | vgic_retire_lr(i, lr.irq, vcpu); | 803 | vgic_retire_lr(i, lr.irq, vcpu); |
| 804 | vgic_irq_clear_queued(vcpu, lr.irq); | ||
| 805 | } | ||
| 667 | 806 | ||
| 668 | /* Finally update the VGIC state. */ | 807 | /* Finally update the VGIC state. */ |
| 669 | vgic_update_state(vcpu->kvm); | 808 | vgic_update_state(vcpu->kvm); |
| @@ -677,7 +816,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
| 677 | { | 816 | { |
| 678 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 817 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 679 | int sgi; | 818 | int sgi; |
| 680 | int min_sgi = (offset & ~0x3) * 4; | 819 | int min_sgi = (offset & ~0x3); |
| 681 | int max_sgi = min_sgi + 3; | 820 | int max_sgi = min_sgi + 3; |
| 682 | int vcpu_id = vcpu->vcpu_id; | 821 | int vcpu_id = vcpu->vcpu_id; |
| 683 | u32 reg = 0; | 822 | u32 reg = 0; |
| @@ -685,7 +824,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
| 685 | /* Copy source SGIs from distributor side */ | 824 | /* Copy source SGIs from distributor side */ |
| 686 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | 825 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { |
| 687 | int shift = 8 * (sgi - min_sgi); | 826 | int shift = 8 * (sgi - min_sgi); |
| 688 | reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift; | 827 | reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift; |
| 689 | } | 828 | } |
| 690 | 829 | ||
| 691 | mmio_data_write(mmio, ~0, reg); | 830 | mmio_data_write(mmio, ~0, reg); |
| @@ -698,7 +837,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
| 698 | { | 837 | { |
| 699 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 838 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 700 | int sgi; | 839 | int sgi; |
| 701 | int min_sgi = (offset & ~0x3) * 4; | 840 | int min_sgi = (offset & ~0x3); |
| 702 | int max_sgi = min_sgi + 3; | 841 | int max_sgi = min_sgi + 3; |
| 703 | int vcpu_id = vcpu->vcpu_id; | 842 | int vcpu_id = vcpu->vcpu_id; |
| 704 | u32 reg; | 843 | u32 reg; |
| @@ -709,14 +848,15 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
| 709 | /* Clear pending SGIs on the distributor */ | 848 | /* Clear pending SGIs on the distributor */ |
| 710 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | 849 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { |
| 711 | u8 mask = reg >> (8 * (sgi - min_sgi)); | 850 | u8 mask = reg >> (8 * (sgi - min_sgi)); |
| 851 | u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi); | ||
| 712 | if (set) { | 852 | if (set) { |
| 713 | if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask) | 853 | if ((*src & mask) != mask) |
| 714 | updated = true; | 854 | updated = true; |
| 715 | dist->irq_sgi_sources[vcpu_id][sgi] |= mask; | 855 | *src |= mask; |
| 716 | } else { | 856 | } else { |
| 717 | if (dist->irq_sgi_sources[vcpu_id][sgi] & mask) | 857 | if (*src & mask) |
| 718 | updated = true; | 858 | updated = true; |
| 719 | dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask; | 859 | *src &= ~mask; |
| 720 | } | 860 | } |
| 721 | } | 861 | } |
| 722 | 862 | ||
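
The min_sgi change above is the "Fix SGI set_clear_pend offset bug" item from the merge description: GICD_SPENDSGIRn/CPENDSGIRn hold one byte of source-CPU bits per SGI, four SGIs per 32-bit register, so the byte offset into the range already names the first SGI covered. A small illustrative helper, not taken from the patch:

```c
/* Sketch: map a byte offset within GICD_{S,C}PENDSGIRn to the SGIs it covers.
 * Offset 0x4 now yields SGIs 4..7; the old "(offset & ~0x3) * 4" computed
 * min_sgi = 16, one past the last SGI. */
static void sgi_range(unsigned int offset,
                      unsigned int *min_sgi, unsigned int *max_sgi)
{
        *min_sgi = offset & ~0x3;
        *max_sgi = *min_sgi + 3;
}
```
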
| @@ -755,6 +895,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | |||
| 755 | struct mmio_range { | 895 | struct mmio_range { |
| 756 | phys_addr_t base; | 896 | phys_addr_t base; |
| 757 | unsigned long len; | 897 | unsigned long len; |
| 898 | int bits_per_irq; | ||
| 758 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | 899 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, |
| 759 | phys_addr_t offset); | 900 | phys_addr_t offset); |
| 760 | }; | 901 | }; |
| @@ -763,56 +904,67 @@ static const struct mmio_range vgic_dist_ranges[] = { | |||
| 763 | { | 904 | { |
| 764 | .base = GIC_DIST_CTRL, | 905 | .base = GIC_DIST_CTRL, |
| 765 | .len = 12, | 906 | .len = 12, |
| 907 | .bits_per_irq = 0, | ||
| 766 | .handle_mmio = handle_mmio_misc, | 908 | .handle_mmio = handle_mmio_misc, |
| 767 | }, | 909 | }, |
| 768 | { | 910 | { |
| 769 | .base = GIC_DIST_IGROUP, | 911 | .base = GIC_DIST_IGROUP, |
| 770 | .len = VGIC_NR_IRQS / 8, | 912 | .len = VGIC_MAX_IRQS / 8, |
| 913 | .bits_per_irq = 1, | ||
| 771 | .handle_mmio = handle_mmio_raz_wi, | 914 | .handle_mmio = handle_mmio_raz_wi, |
| 772 | }, | 915 | }, |
| 773 | { | 916 | { |
| 774 | .base = GIC_DIST_ENABLE_SET, | 917 | .base = GIC_DIST_ENABLE_SET, |
| 775 | .len = VGIC_NR_IRQS / 8, | 918 | .len = VGIC_MAX_IRQS / 8, |
| 919 | .bits_per_irq = 1, | ||
| 776 | .handle_mmio = handle_mmio_set_enable_reg, | 920 | .handle_mmio = handle_mmio_set_enable_reg, |
| 777 | }, | 921 | }, |
| 778 | { | 922 | { |
| 779 | .base = GIC_DIST_ENABLE_CLEAR, | 923 | .base = GIC_DIST_ENABLE_CLEAR, |
| 780 | .len = VGIC_NR_IRQS / 8, | 924 | .len = VGIC_MAX_IRQS / 8, |
| 925 | .bits_per_irq = 1, | ||
| 781 | .handle_mmio = handle_mmio_clear_enable_reg, | 926 | .handle_mmio = handle_mmio_clear_enable_reg, |
| 782 | }, | 927 | }, |
| 783 | { | 928 | { |
| 784 | .base = GIC_DIST_PENDING_SET, | 929 | .base = GIC_DIST_PENDING_SET, |
| 785 | .len = VGIC_NR_IRQS / 8, | 930 | .len = VGIC_MAX_IRQS / 8, |
| 931 | .bits_per_irq = 1, | ||
| 786 | .handle_mmio = handle_mmio_set_pending_reg, | 932 | .handle_mmio = handle_mmio_set_pending_reg, |
| 787 | }, | 933 | }, |
| 788 | { | 934 | { |
| 789 | .base = GIC_DIST_PENDING_CLEAR, | 935 | .base = GIC_DIST_PENDING_CLEAR, |
| 790 | .len = VGIC_NR_IRQS / 8, | 936 | .len = VGIC_MAX_IRQS / 8, |
| 937 | .bits_per_irq = 1, | ||
| 791 | .handle_mmio = handle_mmio_clear_pending_reg, | 938 | .handle_mmio = handle_mmio_clear_pending_reg, |
| 792 | }, | 939 | }, |
| 793 | { | 940 | { |
| 794 | .base = GIC_DIST_ACTIVE_SET, | 941 | .base = GIC_DIST_ACTIVE_SET, |
| 795 | .len = VGIC_NR_IRQS / 8, | 942 | .len = VGIC_MAX_IRQS / 8, |
| 943 | .bits_per_irq = 1, | ||
| 796 | .handle_mmio = handle_mmio_raz_wi, | 944 | .handle_mmio = handle_mmio_raz_wi, |
| 797 | }, | 945 | }, |
| 798 | { | 946 | { |
| 799 | .base = GIC_DIST_ACTIVE_CLEAR, | 947 | .base = GIC_DIST_ACTIVE_CLEAR, |
| 800 | .len = VGIC_NR_IRQS / 8, | 948 | .len = VGIC_MAX_IRQS / 8, |
| 949 | .bits_per_irq = 1, | ||
| 801 | .handle_mmio = handle_mmio_raz_wi, | 950 | .handle_mmio = handle_mmio_raz_wi, |
| 802 | }, | 951 | }, |
| 803 | { | 952 | { |
| 804 | .base = GIC_DIST_PRI, | 953 | .base = GIC_DIST_PRI, |
| 805 | .len = VGIC_NR_IRQS, | 954 | .len = VGIC_MAX_IRQS, |
| 955 | .bits_per_irq = 8, | ||
| 806 | .handle_mmio = handle_mmio_priority_reg, | 956 | .handle_mmio = handle_mmio_priority_reg, |
| 807 | }, | 957 | }, |
| 808 | { | 958 | { |
| 809 | .base = GIC_DIST_TARGET, | 959 | .base = GIC_DIST_TARGET, |
| 810 | .len = VGIC_NR_IRQS, | 960 | .len = VGIC_MAX_IRQS, |
| 961 | .bits_per_irq = 8, | ||
| 811 | .handle_mmio = handle_mmio_target_reg, | 962 | .handle_mmio = handle_mmio_target_reg, |
| 812 | }, | 963 | }, |
| 813 | { | 964 | { |
| 814 | .base = GIC_DIST_CONFIG, | 965 | .base = GIC_DIST_CONFIG, |
| 815 | .len = VGIC_NR_IRQS / 4, | 966 | .len = VGIC_MAX_IRQS / 4, |
| 967 | .bits_per_irq = 2, | ||
| 816 | .handle_mmio = handle_mmio_cfg_reg, | 968 | .handle_mmio = handle_mmio_cfg_reg, |
| 817 | }, | 969 | }, |
| 818 | { | 970 | { |
| @@ -850,6 +1002,22 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges, | |||
| 850 | return NULL; | 1002 | return NULL; |
| 851 | } | 1003 | } |
| 852 | 1004 | ||
| 1005 | static bool vgic_validate_access(const struct vgic_dist *dist, | ||
| 1006 | const struct mmio_range *range, | ||
| 1007 | unsigned long offset) | ||
| 1008 | { | ||
| 1009 | int irq; | ||
| 1010 | |||
| 1011 | if (!range->bits_per_irq) | ||
| 1012 | return true; /* Not an irq-based access */ | ||
| 1013 | |||
| 1014 | irq = offset * 8 / range->bits_per_irq; | ||
| 1015 | if (irq >= dist->nr_irqs) | ||
| 1016 | return false; | ||
| 1017 | |||
| 1018 | return true; | ||
| 1019 | } | ||
| 1020 | |||
| 853 | /** | 1021 | /** |
| 854 | * vgic_handle_mmio - handle an in-kernel MMIO access | 1022 | * vgic_handle_mmio - handle an in-kernel MMIO access |
| 855 | * @vcpu: pointer to the vcpu performing the access | 1023 | * @vcpu: pointer to the vcpu performing the access |
| @@ -889,7 +1057,13 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
| 889 | 1057 | ||
| 890 | spin_lock(&vcpu->kvm->arch.vgic.lock); | 1058 | spin_lock(&vcpu->kvm->arch.vgic.lock); |
| 891 | offset = mmio->phys_addr - range->base - base; | 1059 | offset = mmio->phys_addr - range->base - base; |
| 892 | updated_state = range->handle_mmio(vcpu, mmio, offset); | 1060 | if (vgic_validate_access(dist, range, offset)) { |
| 1061 | updated_state = range->handle_mmio(vcpu, mmio, offset); | ||
| 1062 | } else { | ||
| 1063 | vgic_reg_access(mmio, NULL, offset, | ||
| 1064 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | ||
| 1065 | updated_state = false; | ||
| 1066 | } | ||
| 893 | spin_unlock(&vcpu->kvm->arch.vgic.lock); | 1067 | spin_unlock(&vcpu->kvm->arch.vgic.lock); |
| 894 | kvm_prepare_mmio(run, mmio); | 1068 | kvm_prepare_mmio(run, mmio); |
| 895 | kvm_handle_mmio_return(vcpu, run); | 1069 | kvm_handle_mmio_return(vcpu, run); |
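
The new bits_per_irq field lets this common MMIO path reject accesses to interrupts beyond the per-VM dist->nr_irqs in one place instead of in every handler; such accesses are now treated as RAZ/WI. A sketch of the assumed offset-to-IRQ arithmetic, mirroring vgic_validate_access with illustrative names:

```c
#include <stdbool.h>

/* Sketch: an offset into an IRQ-banked register range maps to an IRQ as
 * offset * 8 / bits_per_irq. For example:
 *   GIC_DIST_ENABLE_SET (1 bit/IRQ):  offset 0x08 -> IRQ 64
 *   GIC_DIST_PRI        (8 bits/IRQ): offset 0x40 -> IRQ 64 */
static bool irq_access_in_range(unsigned int offset, unsigned int bits_per_irq,
                                unsigned int nr_irqs)
{
        if (!bits_per_irq)              /* not an IRQ-banked register */
                return true;
        return (offset * 8 / bits_per_irq) < nr_irqs;
}
```
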
| @@ -900,6 +1074,11 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
| 900 | return true; | 1074 | return true; |
| 901 | } | 1075 | } |
| 902 | 1076 | ||
| 1077 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi) | ||
| 1078 | { | ||
| 1079 | return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi; | ||
| 1080 | } | ||
| 1081 | |||
| 903 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | 1082 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) |
| 904 | { | 1083 | { |
| 905 | struct kvm *kvm = vcpu->kvm; | 1084 | struct kvm *kvm = vcpu->kvm; |
| @@ -932,8 +1111,8 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |||
| 932 | kvm_for_each_vcpu(c, vcpu, kvm) { | 1111 | kvm_for_each_vcpu(c, vcpu, kvm) { |
| 933 | if (target_cpus & 1) { | 1112 | if (target_cpus & 1) { |
| 934 | /* Flag the SGI as pending */ | 1113 | /* Flag the SGI as pending */ |
| 935 | vgic_dist_irq_set(vcpu, sgi); | 1114 | vgic_dist_irq_set_pending(vcpu, sgi); |
| 936 | dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id; | 1115 | *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id; |
| 937 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | 1116 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); |
| 938 | } | 1117 | } |
| 939 | 1118 | ||
| @@ -941,32 +1120,38 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |||
| 941 | } | 1120 | } |
| 942 | } | 1121 | } |
| 943 | 1122 | ||
| 1123 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) | ||
| 1124 | { | ||
| 1125 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; | ||
| 1126 | } | ||
| 1127 | |||
| 944 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | 1128 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) |
| 945 | { | 1129 | { |
| 946 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1130 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 947 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; | 1131 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; |
| 948 | unsigned long pending_private, pending_shared; | 1132 | unsigned long pending_private, pending_shared; |
| 1133 | int nr_shared = vgic_nr_shared_irqs(dist); | ||
| 949 | int vcpu_id; | 1134 | int vcpu_id; |
| 950 | 1135 | ||
| 951 | vcpu_id = vcpu->vcpu_id; | 1136 | vcpu_id = vcpu->vcpu_id; |
| 952 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; | 1137 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; |
| 953 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; | 1138 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; |
| 954 | 1139 | ||
| 955 | pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id); | 1140 | pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); |
| 956 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); | 1141 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); |
| 957 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); | 1142 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); |
| 958 | 1143 | ||
| 959 | pending = vgic_bitmap_get_shared_map(&dist->irq_state); | 1144 | pending = vgic_bitmap_get_shared_map(&dist->irq_pending); |
| 960 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); | 1145 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); |
| 961 | bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS); | 1146 | bitmap_and(pend_shared, pending, enabled, nr_shared); |
| 962 | bitmap_and(pend_shared, pend_shared, | 1147 | bitmap_and(pend_shared, pend_shared, |
| 963 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), | 1148 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), |
| 964 | VGIC_NR_SHARED_IRQS); | 1149 | nr_shared); |
| 965 | 1150 | ||
| 966 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); | 1151 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); |
| 967 | pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS); | 1152 | pending_shared = find_first_bit(pend_shared, nr_shared); |
| 968 | return (pending_private < VGIC_NR_PRIVATE_IRQS || | 1153 | return (pending_private < VGIC_NR_PRIVATE_IRQS || |
| 969 | pending_shared < VGIC_NR_SHARED_IRQS); | 1154 | pending_shared < vgic_nr_shared_irqs(dist)); |
| 970 | } | 1155 | } |
| 971 | 1156 | ||
| 972 | /* | 1157 | /* |
| @@ -980,14 +1165,14 @@ static void vgic_update_state(struct kvm *kvm) | |||
| 980 | int c; | 1165 | int c; |
| 981 | 1166 | ||
| 982 | if (!dist->enabled) { | 1167 | if (!dist->enabled) { |
| 983 | set_bit(0, &dist->irq_pending_on_cpu); | 1168 | set_bit(0, dist->irq_pending_on_cpu); |
| 984 | return; | 1169 | return; |
| 985 | } | 1170 | } |
| 986 | 1171 | ||
| 987 | kvm_for_each_vcpu(c, vcpu, kvm) { | 1172 | kvm_for_each_vcpu(c, vcpu, kvm) { |
| 988 | if (compute_pending_for_cpu(vcpu)) { | 1173 | if (compute_pending_for_cpu(vcpu)) { |
| 989 | pr_debug("CPU%d has pending interrupts\n", c); | 1174 | pr_debug("CPU%d has pending interrupts\n", c); |
| 990 | set_bit(c, &dist->irq_pending_on_cpu); | 1175 | set_bit(c, dist->irq_pending_on_cpu); |
| 991 | } | 1176 | } |
| 992 | } | 1177 | } |
| 993 | } | 1178 | } |
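
With the interrupt count now per-VM, the shared (SPI) bitmap operations are bounded by vgic_nr_shared_irqs() rather than the compile-time VGIC_NR_SHARED_IRQS, and irq_pending_on_cpu has become a dynamically allocated bitmap, which is why set_bit() now takes the pointer itself rather than the address of a field. A worked example under assumed configurations:

```c
/* Sketch: shared-IRQ counts for two assumed configurations. */
int legacy_spis = 256 - 32;   /* VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS = 224 */
int max_spis    = 1024 - 32;  /* VGIC_MAX_IRQS - VGIC_NR_PRIVATE_IRQS       = 992 */
```
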
| @@ -1079,8 +1264,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
| 1079 | 1264 | ||
| 1080 | if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { | 1265 | if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { |
| 1081 | vgic_retire_lr(lr, vlr.irq, vcpu); | 1266 | vgic_retire_lr(lr, vlr.irq, vcpu); |
| 1082 | if (vgic_irq_is_active(vcpu, vlr.irq)) | 1267 | if (vgic_irq_is_queued(vcpu, vlr.irq)) |
| 1083 | vgic_irq_clear_active(vcpu, vlr.irq); | 1268 | vgic_irq_clear_queued(vcpu, vlr.irq); |
| 1084 | } | 1269 | } |
| 1085 | } | 1270 | } |
| 1086 | } | 1271 | } |
| @@ -1092,13 +1277,14 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
| 1092 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | 1277 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) |
| 1093 | { | 1278 | { |
| 1094 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1279 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
| 1280 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 1095 | struct vgic_lr vlr; | 1281 | struct vgic_lr vlr; |
| 1096 | int lr; | 1282 | int lr; |
| 1097 | 1283 | ||
| 1098 | /* Sanitize the input... */ | 1284 | /* Sanitize the input... */ |
| 1099 | BUG_ON(sgi_source_id & ~7); | 1285 | BUG_ON(sgi_source_id & ~7); |
| 1100 | BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); | 1286 | BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); |
| 1101 | BUG_ON(irq >= VGIC_NR_IRQS); | 1287 | BUG_ON(irq >= dist->nr_irqs); |
| 1102 | 1288 | ||
| 1103 | kvm_debug("Queue IRQ%d\n", irq); | 1289 | kvm_debug("Queue IRQ%d\n", irq); |
| 1104 | 1290 | ||
| @@ -1144,14 +1330,14 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
| 1144 | int vcpu_id = vcpu->vcpu_id; | 1330 | int vcpu_id = vcpu->vcpu_id; |
| 1145 | int c; | 1331 | int c; |
| 1146 | 1332 | ||
| 1147 | sources = dist->irq_sgi_sources[vcpu_id][irq]; | 1333 | sources = *vgic_get_sgi_sources(dist, vcpu_id, irq); |
| 1148 | 1334 | ||
| 1149 | for_each_set_bit(c, &sources, VGIC_MAX_CPUS) { | 1335 | for_each_set_bit(c, &sources, dist->nr_cpus) { |
| 1150 | if (vgic_queue_irq(vcpu, c, irq)) | 1336 | if (vgic_queue_irq(vcpu, c, irq)) |
| 1151 | clear_bit(c, &sources); | 1337 | clear_bit(c, &sources); |
| 1152 | } | 1338 | } |
| 1153 | 1339 | ||
| 1154 | dist->irq_sgi_sources[vcpu_id][irq] = sources; | 1340 | *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources; |
| 1155 | 1341 | ||
| 1156 | /* | 1342 | /* |
| 1157 | * If the sources bitmap has been cleared it means that we | 1343 | * If the sources bitmap has been cleared it means that we |
| @@ -1160,7 +1346,7 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
| 1160 | * our emulated gic and can get rid of them. | 1346 | * our emulated gic and can get rid of them. |
| 1161 | */ | 1347 | */ |
| 1162 | if (!sources) { | 1348 | if (!sources) { |
| 1163 | vgic_dist_irq_clear(vcpu, irq); | 1349 | vgic_dist_irq_clear_pending(vcpu, irq); |
| 1164 | vgic_cpu_irq_clear(vcpu, irq); | 1350 | vgic_cpu_irq_clear(vcpu, irq); |
| 1165 | return true; | 1351 | return true; |
| 1166 | } | 1352 | } |
| @@ -1170,15 +1356,15 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
| 1170 | 1356 | ||
| 1171 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) | 1357 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) |
| 1172 | { | 1358 | { |
| 1173 | if (vgic_irq_is_active(vcpu, irq)) | 1359 | if (!vgic_can_sample_irq(vcpu, irq)) |
| 1174 | return true; /* level interrupt, already queued */ | 1360 | return true; /* level interrupt, already queued */ |
| 1175 | 1361 | ||
| 1176 | if (vgic_queue_irq(vcpu, 0, irq)) { | 1362 | if (vgic_queue_irq(vcpu, 0, irq)) { |
| 1177 | if (vgic_irq_is_edge(vcpu, irq)) { | 1363 | if (vgic_irq_is_edge(vcpu, irq)) { |
| 1178 | vgic_dist_irq_clear(vcpu, irq); | 1364 | vgic_dist_irq_clear_pending(vcpu, irq); |
| 1179 | vgic_cpu_irq_clear(vcpu, irq); | 1365 | vgic_cpu_irq_clear(vcpu, irq); |
| 1180 | } else { | 1366 | } else { |
| 1181 | vgic_irq_set_active(vcpu, irq); | 1367 | vgic_irq_set_queued(vcpu, irq); |
| 1182 | } | 1368 | } |
| 1183 | 1369 | ||
| 1184 | return true; | 1370 | return true; |
| @@ -1223,7 +1409,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 1223 | } | 1409 | } |
| 1224 | 1410 | ||
| 1225 | /* SPIs */ | 1411 | /* SPIs */ |
| 1226 | for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) { | 1412 | for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) { |
| 1227 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) | 1413 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) |
| 1228 | overflow = 1; | 1414 | overflow = 1; |
| 1229 | } | 1415 | } |
| @@ -1239,7 +1425,7 @@ epilog: | |||
| 1239 | * us. Claim we don't have anything pending. We'll | 1425 | * us. Claim we don't have anything pending. We'll |
| 1240 | * adjust that if needed while exiting. | 1426 | * adjust that if needed while exiting. |
| 1241 | */ | 1427 | */ |
| 1242 | clear_bit(vcpu_id, &dist->irq_pending_on_cpu); | 1428 | clear_bit(vcpu_id, dist->irq_pending_on_cpu); |
| 1243 | } | 1429 | } |
| 1244 | } | 1430 | } |
| 1245 | 1431 | ||
| @@ -1261,17 +1447,32 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
| 1261 | 1447 | ||
| 1262 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { | 1448 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { |
| 1263 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); | 1449 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
| 1450 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); | ||
| 1264 | 1451 | ||
| 1265 | vgic_irq_clear_active(vcpu, vlr.irq); | 1452 | vgic_irq_clear_queued(vcpu, vlr.irq); |
| 1266 | WARN_ON(vlr.state & LR_STATE_MASK); | 1453 | WARN_ON(vlr.state & LR_STATE_MASK); |
| 1267 | vlr.state = 0; | 1454 | vlr.state = 0; |
| 1268 | vgic_set_lr(vcpu, lr, vlr); | 1455 | vgic_set_lr(vcpu, lr, vlr); |
| 1269 | 1456 | ||
| 1457 | /* | ||
| 1458 | * If the IRQ was EOIed it was also ACKed and we | ||
| 1459 | * therefore assume we can clear the soft pending | ||
| 1460 | * state (should it have been set) for this interrupt. | ||
| 1461 | * | ||
| 1462 | * Note: if the IRQ soft pending state was set after | ||
| 1463 | * the IRQ was acked, it actually shouldn't be | ||
| 1464 | * cleared, but we have no way of knowing that unless | ||
| 1465 | * we start trapping ACKs when the soft-pending state | ||
| 1466 | * is set. | ||
| 1467 | */ | ||
| 1468 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); | ||
| 1469 | |||
| 1270 | /* Any additional pending interrupt? */ | 1470 | /* Any additional pending interrupt? */ |
| 1271 | if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { | 1471 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { |
| 1272 | vgic_cpu_irq_set(vcpu, vlr.irq); | 1472 | vgic_cpu_irq_set(vcpu, vlr.irq); |
| 1273 | level_pending = true; | 1473 | level_pending = true; |
| 1274 | } else { | 1474 | } else { |
| 1475 | vgic_dist_irq_clear_pending(vcpu, vlr.irq); | ||
| 1275 | vgic_cpu_irq_clear(vcpu, vlr.irq); | 1476 | vgic_cpu_irq_clear(vcpu, vlr.irq); |
| 1276 | } | 1477 | } |
| 1277 | 1478 | ||
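
The maintenance path above now also resolves the soft-pending latch at EOI time. A minimal sketch of the assumed flow for a level-triggered interrupt found in the EISR, illustrative rather than the patch's code:

```c
#include <stdbool.h>

/* Sketch: on EOI, drop the queued and soft-pending latches, then let the
 * sampled line level decide whether the interrupt remains pending. */
static bool eoi_level_irq(bool line_level, bool *queued, bool *soft_pend,
                          bool *pending)
{
        *queued = false;
        *soft_pend = false;
        if (line_level)
                return true;    /* still asserted: stays pending on this CPU */
        *pending = false;
        return false;
}
```
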
| @@ -1315,14 +1516,14 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | |||
| 1315 | 1516 | ||
| 1316 | vlr = vgic_get_lr(vcpu, lr); | 1517 | vlr = vgic_get_lr(vcpu, lr); |
| 1317 | 1518 | ||
| 1318 | BUG_ON(vlr.irq >= VGIC_NR_IRQS); | 1519 | BUG_ON(vlr.irq >= dist->nr_irqs); |
| 1319 | vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; | 1520 | vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; |
| 1320 | } | 1521 | } |
| 1321 | 1522 | ||
| 1322 | /* Check if we still have something up our sleeve... */ | 1523 | /* Check if we still have something up our sleeve... */ |
| 1323 | pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); | 1524 | pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); |
| 1324 | if (level_pending || pending < vgic->nr_lr) | 1525 | if (level_pending || pending < vgic->nr_lr) |
| 1325 | set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | 1526 | set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
| 1326 | } | 1527 | } |
| 1327 | 1528 | ||
| 1328 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | 1529 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) |
| @@ -1356,7 +1557,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
| 1356 | if (!irqchip_in_kernel(vcpu->kvm)) | 1557 | if (!irqchip_in_kernel(vcpu->kvm)) |
| 1357 | return 0; | 1558 | return 0; |
| 1358 | 1559 | ||
| 1359 | return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | 1560 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
| 1360 | } | 1561 | } |
| 1361 | 1562 | ||
| 1362 | static void vgic_kick_vcpus(struct kvm *kvm) | 1563 | static void vgic_kick_vcpus(struct kvm *kvm) |
| @@ -1376,34 +1577,36 @@ static void vgic_kick_vcpus(struct kvm *kvm) | |||
| 1376 | 1577 | ||
| 1377 | static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) | 1578 | static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) |
| 1378 | { | 1579 | { |
| 1379 | int is_edge = vgic_irq_is_edge(vcpu, irq); | 1580 | int edge_triggered = vgic_irq_is_edge(vcpu, irq); |
| 1380 | int state = vgic_dist_irq_is_pending(vcpu, irq); | ||
| 1381 | 1581 | ||
| 1382 | /* | 1582 | /* |
| 1383 | * Only inject an interrupt if: | 1583 | * Only inject an interrupt if: |
| 1384 | * - edge triggered and we have a rising edge | 1584 | * - edge triggered and we have a rising edge |
| 1385 | * - level triggered and we change level | 1585 | * - level triggered and we change level |
| 1386 | */ | 1586 | */ |
| 1387 | if (is_edge) | 1587 | if (edge_triggered) { |
| 1588 | int state = vgic_dist_irq_is_pending(vcpu, irq); | ||
| 1388 | return level > state; | 1589 | return level > state; |
| 1389 | else | 1590 | } else { |
| 1591 | int state = vgic_dist_irq_get_level(vcpu, irq); | ||
| 1390 | return level != state; | 1592 | return level != state; |
| 1593 | } | ||
| 1391 | } | 1594 | } |
| 1392 | 1595 | ||
| 1393 | static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | 1596 | static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid, |
| 1394 | unsigned int irq_num, bool level) | 1597 | unsigned int irq_num, bool level) |
| 1395 | { | 1598 | { |
| 1396 | struct vgic_dist *dist = &kvm->arch.vgic; | 1599 | struct vgic_dist *dist = &kvm->arch.vgic; |
| 1397 | struct kvm_vcpu *vcpu; | 1600 | struct kvm_vcpu *vcpu; |
| 1398 | int is_edge, is_level; | 1601 | int edge_triggered, level_triggered; |
| 1399 | int enabled; | 1602 | int enabled; |
| 1400 | bool ret = true; | 1603 | bool ret = true; |
| 1401 | 1604 | ||
| 1402 | spin_lock(&dist->lock); | 1605 | spin_lock(&dist->lock); |
| 1403 | 1606 | ||
| 1404 | vcpu = kvm_get_vcpu(kvm, cpuid); | 1607 | vcpu = kvm_get_vcpu(kvm, cpuid); |
| 1405 | is_edge = vgic_irq_is_edge(vcpu, irq_num); | 1608 | edge_triggered = vgic_irq_is_edge(vcpu, irq_num); |
| 1406 | is_level = !is_edge; | 1609 | level_triggered = !edge_triggered; |
| 1407 | 1610 | ||
| 1408 | if (!vgic_validate_injection(vcpu, irq_num, level)) { | 1611 | if (!vgic_validate_injection(vcpu, irq_num, level)) { |
| 1409 | ret = false; | 1612 | ret = false; |
| @@ -1417,10 +1620,19 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
| 1417 | 1620 | ||
| 1418 | kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); | 1621 | kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); |
| 1419 | 1622 | ||
| 1420 | if (level) | 1623 | if (level) { |
| 1421 | vgic_dist_irq_set(vcpu, irq_num); | 1624 | if (level_triggered) |
| 1422 | else | 1625 | vgic_dist_irq_set_level(vcpu, irq_num); |
| 1423 | vgic_dist_irq_clear(vcpu, irq_num); | 1626 | vgic_dist_irq_set_pending(vcpu, irq_num); |
| 1627 | } else { | ||
| 1628 | if (level_triggered) { | ||
| 1629 | vgic_dist_irq_clear_level(vcpu, irq_num); | ||
| 1630 | if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) | ||
| 1631 | vgic_dist_irq_clear_pending(vcpu, irq_num); | ||
| 1632 | } else { | ||
| 1633 | vgic_dist_irq_clear_pending(vcpu, irq_num); | ||
| 1634 | } | ||
| 1635 | } | ||
| 1424 | 1636 | ||
| 1425 | enabled = vgic_irq_is_enabled(vcpu, irq_num); | 1637 | enabled = vgic_irq_is_enabled(vcpu, irq_num); |
| 1426 | 1638 | ||
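
Injection now tracks the sampled line level and the pending state separately for level-triggered interrupts. A sketch of the assumed rules, illustrative only:

```c
#include <stdbool.h>

/* Sketch: raising the line sets both level and pending; lowering it clears
 * the level, but pending survives while the guest's ISPENDR latch is set. */
static void inject_level_irq(bool line_high, bool soft_pend,
                             bool *irq_level, bool *pending)
{
        if (line_high) {
                *irq_level = true;
                *pending = true;
        } else {
                *irq_level = false;
                if (!soft_pend)
                        *pending = false;
        }
}
```
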
| @@ -1429,7 +1641,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
| 1429 | goto out; | 1641 | goto out; |
| 1430 | } | 1642 | } |
| 1431 | 1643 | ||
| 1432 | if (is_level && vgic_irq_is_active(vcpu, irq_num)) { | 1644 | if (!vgic_can_sample_irq(vcpu, irq_num)) { |
| 1433 | /* | 1645 | /* |
| 1434 | * Level interrupt in progress, will be picked up | 1646 | * Level interrupt in progress, will be picked up |
| 1435 | * when EOId. | 1647 | * when EOId. |
| @@ -1440,7 +1652,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
| 1440 | 1652 | ||
| 1441 | if (level) { | 1653 | if (level) { |
| 1442 | vgic_cpu_irq_set(vcpu, irq_num); | 1654 | vgic_cpu_irq_set(vcpu, irq_num); |
| 1443 | set_bit(cpuid, &dist->irq_pending_on_cpu); | 1655 | set_bit(cpuid, dist->irq_pending_on_cpu); |
| 1444 | } | 1656 | } |
| 1445 | 1657 | ||
| 1446 | out: | 1658 | out: |
| @@ -1466,7 +1678,8 @@ out: | |||
| 1466 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | 1678 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, |
| 1467 | bool level) | 1679 | bool level) |
| 1468 | { | 1680 | { |
| 1469 | if (vgic_update_irq_state(kvm, cpuid, irq_num, level)) | 1681 | if (likely(vgic_initialized(kvm)) && |
| 1682 | vgic_update_irq_pending(kvm, cpuid, irq_num, level)) | ||
| 1470 | vgic_kick_vcpus(kvm); | 1683 | vgic_kick_vcpus(kvm); |
| 1471 | 1684 | ||
| 1472 | return 0; | 1685 | return 0; |
| @@ -1483,6 +1696,32 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) | |||
| 1483 | return IRQ_HANDLED; | 1696 | return IRQ_HANDLED; |
| 1484 | } | 1697 | } |
| 1485 | 1698 | ||
| 1699 | void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
| 1700 | { | ||
| 1701 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
| 1702 | |||
| 1703 | kfree(vgic_cpu->pending_shared); | ||
| 1704 | kfree(vgic_cpu->vgic_irq_lr_map); | ||
| 1705 | vgic_cpu->pending_shared = NULL; | ||
| 1706 | vgic_cpu->vgic_irq_lr_map = NULL; | ||
| 1707 | } | ||
| 1708 | |||
| 1709 | static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) | ||
| 1710 | { | ||
| 1711 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
| 1712 | |||
| 1713 | int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; | ||
| 1714 | vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); | ||
| 1715 | vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL); | ||
| 1716 | |||
| 1717 | if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) { | ||
| 1718 | kvm_vgic_vcpu_destroy(vcpu); | ||
| 1719 | return -ENOMEM; | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | return 0; | ||
| 1723 | } | ||
| 1724 | |||
| 1486 | /** | 1725 | /** |
| 1487 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state | 1726 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state |
| 1488 | * @vcpu: pointer to the vcpu struct | 1727 | * @vcpu: pointer to the vcpu struct |
| @@ -1490,16 +1729,13 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) | |||
| 1490 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to | 1729 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to |
| 1491 | * this vcpu and enable the VGIC for this VCPU | 1730 | * this vcpu and enable the VGIC for this VCPU |
| 1492 | */ | 1731 | */ |
| 1493 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | 1732 | static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
| 1494 | { | 1733 | { |
| 1495 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1734 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
| 1496 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1735 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 1497 | int i; | 1736 | int i; |
| 1498 | 1737 | ||
| 1499 | if (vcpu->vcpu_id >= VGIC_MAX_CPUS) | 1738 | for (i = 0; i < dist->nr_irqs; i++) { |
| 1500 | return -EBUSY; | ||
| 1501 | |||
| 1502 | for (i = 0; i < VGIC_NR_IRQS; i++) { | ||
| 1503 | if (i < VGIC_NR_PPIS) | 1739 | if (i < VGIC_NR_PPIS) |
| 1504 | vgic_bitmap_set_irq_val(&dist->irq_enabled, | 1740 | vgic_bitmap_set_irq_val(&dist->irq_enabled, |
| 1505 | vcpu->vcpu_id, i, 1); | 1741 | vcpu->vcpu_id, i, 1); |
| @@ -1518,8 +1754,113 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
| 1518 | vgic_cpu->nr_lr = vgic->nr_lr; | 1754 | vgic_cpu->nr_lr = vgic->nr_lr; |
| 1519 | 1755 | ||
| 1520 | vgic_enable(vcpu); | 1756 | vgic_enable(vcpu); |
| 1757 | } | ||
| 1521 | 1758 | ||
| 1522 | return 0; | 1759 | void kvm_vgic_destroy(struct kvm *kvm) |
| 1760 | { | ||
| 1761 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
| 1762 | struct kvm_vcpu *vcpu; | ||
| 1763 | int i; | ||
| 1764 | |||
| 1765 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
| 1766 | kvm_vgic_vcpu_destroy(vcpu); | ||
| 1767 | |||
| 1768 | vgic_free_bitmap(&dist->irq_enabled); | ||
| 1769 | vgic_free_bitmap(&dist->irq_level); | ||
| 1770 | vgic_free_bitmap(&dist->irq_pending); | ||
| 1771 | vgic_free_bitmap(&dist->irq_soft_pend); | ||
| 1772 | vgic_free_bitmap(&dist->irq_queued); | ||
| 1773 | vgic_free_bitmap(&dist->irq_cfg); | ||
| 1774 | vgic_free_bytemap(&dist->irq_priority); | ||
| 1775 | if (dist->irq_spi_target) { | ||
| 1776 | for (i = 0; i < dist->nr_cpus; i++) | ||
| 1777 | vgic_free_bitmap(&dist->irq_spi_target[i]); | ||
| 1778 | } | ||
| 1779 | kfree(dist->irq_sgi_sources); | ||
| 1780 | kfree(dist->irq_spi_cpu); | ||
| 1781 | kfree(dist->irq_spi_target); | ||
| 1782 | kfree(dist->irq_pending_on_cpu); | ||
| 1783 | dist->irq_sgi_sources = NULL; | ||
| 1784 | dist->irq_spi_cpu = NULL; | ||
| 1785 | dist->irq_spi_target = NULL; | ||
| 1786 | dist->irq_pending_on_cpu = NULL; | ||
| 1787 | } | ||
| 1788 | |||
| 1789 | /* | ||
| 1790 | * Allocate and initialize the various data structures. Must be called | ||
| 1791 | * with kvm->lock held! | ||
| 1792 | */ | ||
| 1793 | static int vgic_init_maps(struct kvm *kvm) | ||
| 1794 | { | ||
| 1795 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
| 1796 | struct kvm_vcpu *vcpu; | ||
| 1797 | int nr_cpus, nr_irqs; | ||
| 1798 | int ret, i; | ||
| 1799 | |||
| 1800 | if (dist->nr_cpus) /* Already allocated */ | ||
| 1801 | return 0; | ||
| 1802 | |||
| 1803 | nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus); | ||
| 1804 | if (!nr_cpus) /* No vcpus? Can't be good... */ | ||
| 1805 | return -EINVAL; | ||
| 1806 | |||
| 1807 | /* | ||
| 1808 | * If nobody configured the number of interrupts, use the | ||
| 1809 | * legacy one. | ||
| 1810 | */ | ||
| 1811 | if (!dist->nr_irqs) | ||
| 1812 | dist->nr_irqs = VGIC_NR_IRQS_LEGACY; | ||
| 1813 | |||
| 1814 | nr_irqs = dist->nr_irqs; | ||
| 1815 | |||
| 1816 | ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs); | ||
| 1817 | ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs); | ||
| 1818 | ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); | ||
| 1819 | ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); | ||
| 1820 | ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); | ||
| 1821 | ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); | ||
| 1822 | ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); | ||
| 1823 | |||
| 1824 | if (ret) | ||
| 1825 | goto out; | ||
| 1826 | |||
| 1827 | dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL); | ||
| 1828 | dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL); | ||
| 1829 | dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus, | ||
| 1830 | GFP_KERNEL); | ||
| 1831 | dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), | ||
| 1832 | GFP_KERNEL); | ||
| 1833 | if (!dist->irq_sgi_sources || | ||
| 1834 | !dist->irq_spi_cpu || | ||
| 1835 | !dist->irq_spi_target || | ||
| 1836 | !dist->irq_pending_on_cpu) { | ||
| 1837 | ret = -ENOMEM; | ||
| 1838 | goto out; | ||
| 1839 | } | ||
| 1840 | |||
| 1841 | for (i = 0; i < nr_cpus; i++) | ||
| 1842 | ret |= vgic_init_bitmap(&dist->irq_spi_target[i], | ||
| 1843 | nr_cpus, nr_irqs); | ||
| 1844 | |||
| 1845 | if (ret) | ||
| 1846 | goto out; | ||
| 1847 | |||
| 1848 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
| 1849 | ret = vgic_vcpu_init_maps(vcpu, nr_irqs); | ||
| 1850 | if (ret) { | ||
| 1851 | kvm_err("VGIC: Failed to allocate vcpu memory\n"); | ||
| 1852 | break; | ||
| 1853 | } | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4) | ||
| 1857 | vgic_set_target_reg(kvm, 0, i); | ||
| 1858 | |||
| 1859 | out: | ||
| 1860 | if (ret) | ||
| 1861 | kvm_vgic_destroy(kvm); | ||
| 1862 | |||
| 1863 | return ret; | ||
| 1523 | } | 1864 | } |
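
For a sense of scale of the allocations in vgic_init_maps(), a hypothetical VM with 4 VCPUs and the legacy 256 interrupts would need roughly the following; the numbers are illustrative arithmetic, not measurements:

```c
/* Illustrative sizes (nr_cpus = 4, nr_irqs = 256, 64-bit host):
 *   per-vcpu pending_shared  : (256 - 32) / 8         = 28 bytes
 *   per-vcpu vgic_irq_lr_map : 256 bytes (one byte per IRQ)
 *   irq_sgi_sources          : 4 * VGIC_NR_SGIS (16)  = 64 bytes
 *   irq_spi_cpu              : 256 - 32               = 224 bytes
 *   irq_pending_on_cpu       : BITS_TO_LONGS(4) longs = 8 bytes
 */
```
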
| 1524 | 1865 | ||
| 1525 | /** | 1866 | /** |
| @@ -1533,6 +1874,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
| 1533 | */ | 1874 | */ |
| 1534 | int kvm_vgic_init(struct kvm *kvm) | 1875 | int kvm_vgic_init(struct kvm *kvm) |
| 1535 | { | 1876 | { |
| 1877 | struct kvm_vcpu *vcpu; | ||
| 1536 | int ret = 0, i; | 1878 | int ret = 0, i; |
| 1537 | 1879 | ||
| 1538 | if (!irqchip_in_kernel(kvm)) | 1880 | if (!irqchip_in_kernel(kvm)) |
| @@ -1550,6 +1892,12 @@ int kvm_vgic_init(struct kvm *kvm) | |||
| 1550 | goto out; | 1892 | goto out; |
| 1551 | } | 1893 | } |
| 1552 | 1894 | ||
| 1895 | ret = vgic_init_maps(kvm); | ||
| 1896 | if (ret) { | ||
| 1897 | kvm_err("Unable to allocate maps\n"); | ||
| 1898 | goto out; | ||
| 1899 | } | ||
| 1900 | |||
| 1553 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | 1901 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, |
| 1554 | vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); | 1902 | vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); |
| 1555 | if (ret) { | 1903 | if (ret) { |
| @@ -1557,11 +1905,13 @@ int kvm_vgic_init(struct kvm *kvm) | |||
| 1557 | goto out; | 1905 | goto out; |
| 1558 | } | 1906 | } |
| 1559 | 1907 | ||
| 1560 | for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4) | 1908 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 1561 | vgic_set_target_reg(kvm, 0, i); | 1909 | kvm_vgic_vcpu_init(vcpu); |
| 1562 | 1910 | ||
| 1563 | kvm->arch.vgic.ready = true; | 1911 | kvm->arch.vgic.ready = true; |
| 1564 | out: | 1912 | out: |
| 1913 | if (ret) | ||
| 1914 | kvm_vgic_destroy(kvm); | ||
| 1565 | mutex_unlock(&kvm->lock); | 1915 | mutex_unlock(&kvm->lock); |
| 1566 | return ret; | 1916 | return ret; |
| 1567 | } | 1917 | } |
| @@ -1613,7 +1963,7 @@ out: | |||
| 1613 | return ret; | 1963 | return ret; |
| 1614 | } | 1964 | } |
| 1615 | 1965 | ||
| 1616 | static bool vgic_ioaddr_overlap(struct kvm *kvm) | 1966 | static int vgic_ioaddr_overlap(struct kvm *kvm) |
| 1617 | { | 1967 | { |
| 1618 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; | 1968 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; |
| 1619 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; | 1969 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; |
| @@ -1802,6 +2152,10 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
| 1802 | 2152 | ||
| 1803 | mutex_lock(&dev->kvm->lock); | 2153 | mutex_lock(&dev->kvm->lock); |
| 1804 | 2154 | ||
| 2155 | ret = vgic_init_maps(dev->kvm); | ||
| 2156 | if (ret) | ||
| 2157 | goto out; | ||
| 2158 | |||
| 1805 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | 2159 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { |
| 1806 | ret = -EINVAL; | 2160 | ret = -EINVAL; |
| 1807 | goto out; | 2161 | goto out; |
| @@ -1899,6 +2253,36 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
| 1899 | 2253 | ||
| 1900 | return vgic_attr_regs_access(dev, attr, ®, true); | 2254 | return vgic_attr_regs_access(dev, attr, ®, true); |
| 1901 | } | 2255 | } |
| 2256 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { | ||
| 2257 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
| 2258 | u32 val; | ||
| 2259 | int ret = 0; | ||
| 2260 | |||
| 2261 | if (get_user(val, uaddr)) | ||
| 2262 | return -EFAULT; | ||
| 2263 | |||
| 2264 | /* | ||
| 2265 | * We require: | ||
| 2266 | * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs | ||
| 2267 | * - at most 1024 interrupts | ||
| 2268 | * - a multiple of 32 interrupts | ||
| 2269 | */ | ||
| 2270 | if (val < (VGIC_NR_PRIVATE_IRQS + 32) || | ||
| 2271 | val > VGIC_MAX_IRQS || | ||
| 2272 | (val & 31)) | ||
| 2273 | return -EINVAL; | ||
| 2274 | |||
| 2275 | mutex_lock(&dev->kvm->lock); | ||
| 2276 | |||
| 2277 | if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs) | ||
| 2278 | ret = -EBUSY; | ||
| 2279 | else | ||
| 2280 | dev->kvm->arch.vgic.nr_irqs = val; | ||
| 2281 | |||
| 2282 | mutex_unlock(&dev->kvm->lock); | ||
| 2283 | |||
| 2284 | return ret; | ||
| 2285 | } | ||
| 1902 | 2286 | ||
| 1903 | } | 2287 | } |
| 1904 | 2288 | ||
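
The new KVM_DEV_ARM_VGIC_GRP_NR_IRQS group is driven through the usual device-attribute ioctls. A hedged userspace sketch, assuming vgic_fd is the file descriptor returned by KVM_CREATE_DEVICE for the VGIC device:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: configure 128 interrupts; must happen before the VGIC is
 * initialized, otherwise the kernel fails the ioctl with EBUSY. */
static int set_vgic_nr_irqs(int vgic_fd, uint32_t nr_irqs)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                .attr  = 0,
                .addr  = (uint64_t)(unsigned long)&nr_irqs,
        };

        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}
```
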
| @@ -1935,6 +2319,11 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
| 1935 | r = put_user(reg, uaddr); | 2319 | r = put_user(reg, uaddr); |
| 1936 | break; | 2320 | break; |
| 1937 | } | 2321 | } |
| 2322 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { | ||
| 2323 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
| 2324 | r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr); | ||
| 2325 | break; | ||
| 2326 | } | ||
| 1938 | 2327 | ||
| 1939 | } | 2328 | } |
| 1940 | 2329 | ||
| @@ -1971,6 +2360,8 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
| 1971 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | 2360 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: |
| 1972 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | 2361 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; |
| 1973 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | 2362 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); |
| 2363 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: | ||
| 2364 | return 0; | ||
| 1974 | } | 2365 | } |
| 1975 | return -ENXIO; | 2366 | return -ENXIO; |
| 1976 | } | 2367 | } |
| @@ -2029,8 +2420,8 @@ static const struct of_device_id vgic_ids[] = { | |||
| 2029 | int kvm_vgic_hyp_init(void) | 2420 | int kvm_vgic_hyp_init(void) |
| 2030 | { | 2421 | { |
| 2031 | const struct of_device_id *matched_id; | 2422 | const struct of_device_id *matched_id; |
| 2032 | int (*vgic_probe)(struct device_node *,const struct vgic_ops **, | 2423 | const int (*vgic_probe)(struct device_node *,const struct vgic_ops **, |
| 2033 | const struct vgic_params **); | 2424 | const struct vgic_params **); |
| 2034 | struct device_node *vgic_node; | 2425 | struct device_node *vgic_node; |
| 2035 | int ret; | 2426 | int ret; |
| 2036 | 2427 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1cf53ee0d28..39a02fbdb572 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -1095,9 +1095,9 @@ EXPORT_SYMBOL_GPL(gfn_to_hva); | |||
| 1095 | * If writable is set to false, the hva returned by this function is only | 1095 | * If writable is set to false, the hva returned by this function is only |
| 1096 | * allowed to be read. | 1096 | * allowed to be read. |
| 1097 | */ | 1097 | */ |
| 1098 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | 1098 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
| 1099 | gfn_t gfn, bool *writable) | ||
| 1099 | { | 1100 | { |
| 1100 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | ||
| 1101 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); | 1101 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
| 1102 | 1102 | ||
| 1103 | if (!kvm_is_error_hva(hva) && writable) | 1103 | if (!kvm_is_error_hva(hva) && writable) |
| @@ -1106,6 +1106,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | |||
| 1106 | return hva; | 1106 | return hva; |
| 1107 | } | 1107 | } |
| 1108 | 1108 | ||
| 1109 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | ||
| 1110 | { | ||
| 1111 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | ||
| 1112 | |||
| 1113 | return gfn_to_hva_memslot_prot(slot, gfn, writable); | ||
| 1114 | } | ||
| 1115 | |||
| 1109 | static int kvm_read_hva(void *data, void __user *hva, int len) | 1116 | static int kvm_read_hva(void *data, void __user *hva, int len) |
| 1110 | { | 1117 | { |
| 1111 | return __copy_from_user(data, hva, len); | 1118 | return __copy_from_user(data, hva, len); |
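
gfn_to_hva_prot() is now a thin wrapper around the new memslot-based variant, which lets callers that already hold a memslot (such as the arm/arm64 Stage-2 fault path that gained read-only memslot support in this merge) resolve the hva and writability in one step. A minimal usage sketch under an assumed caller context, not taken from the patch:

```c
bool writable;
unsigned long hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

if (kvm_is_error_hva(hva))
        return -EFAULT;         /* gfn not backed by this slot */
/* 'writable' is false for read-only memslots, so a faulting write here
 * would typically be forwarded to userspace instead of handled in-kernel. */
```
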
