Diffstat (limited to 'arch/arm/kvm/vgic.c')
-rw-r--r--  arch/arm/kvm/vgic.c | 37 +++++++++++++++----------------------
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index c9a17316e9fe..17c5ac7d10ed 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 			  lr, irq, vgic_cpu->vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
-		goto out;
+		return true;
 	}
 
 	/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
-out:
 	if (!vgic_irq_is_edge(vcpu, irq))
 		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
 
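The two hunks above retire the out: label in vgic_queue_irq(): when an interrupt can piggyback on a list register already used for the same source, the function now returns right away instead of falling through to the EOI setup. Pieced together only from the context and added lines visible in this diff (the piggyback test and the free-LR search are elided, so this is a sketch rather than the verbatim function), the tail of vgic_queue_irq() reads roughly as follows after the patch:

	if (/* ... this irq/source already occupies LR 'lr' ... */) {
		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
		return true;		/* was: goto out */
	}

	/* Try to use another LR for this interrupt */
	/* ... pick a free LR and program it ... */
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	/*
	 * With the label gone, GICH_LR_EOI is only set when a fresh LR is
	 * programmed; a piggybacked level interrupt keeps whatever bits its
	 * LR already carries.
	 */
	if (!vgic_irq_is_edge(vcpu, irq))
		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
	/* ... */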
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 
 	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
 
-	/*
-	 * We do not need to take the distributor lock here, since the only
-	 * action we perform is clearing the irq_active_bit for an EOIed
-	 * level interrupt. There is a potential race with
-	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
-	 * check if the interrupt is already active. Two possibilities:
-	 *
-	 * - The queuing is occurring on the same vcpu: cannot happen,
-	 *   as we're already in the context of this vcpu, and
-	 *   executing the handler
-	 * - The interrupt has been migrated to another vcpu, and we
-	 *   ignore this interrupt for this run. Big deal. It is still
-	 *   pending though, and will get considered when this vcpu
-	 *   exits.
-	 */
 	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			} else {
 				vgic_cpu_irq_clear(vcpu, irq);
 			}
+
+			/*
+			 * Despite being EOIed, the LR may not have
+			 * been marked as empty.
+			 */
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}
 
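The two added statements keep the shadow list-register state coherent after an EOIed level interrupt has been handled: the LR is flagged as free in the saved empty-LR status register (ELRSR) bitmap and its active bit is cleared, so the slot can be recycled on the next flush. The kernel casts its u32 ELRSR words to unsigned long * so the generic set_bit() helper can treat them as one bitmap; the self-contained sketch below mimics that bookkeeping with a plain helper instead, and its bit positions and array sizes are illustrative rather than copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative GICH_LR active bit (GICv2 keeps the LR state in bits 29:28). */
#define LR_ACTIVE_BIT	(1U << 29)

/* Minimal stand-in for the kernel's set_bit() on a u32-backed bitmap. */
static void set_bit32(int nr, uint32_t *bitmap)
{
	bitmap[nr / 32] |= 1U << (nr % 32);
}

int main(void)
{
	uint32_t vgic_lr[64] = { 0 };	/* shadow list registers */
	uint32_t vgic_elrsr[2] = { 0 };	/* empty-LR status: one bit per LR */
	int lr = 3;

	vgic_lr[lr] = LR_ACTIVE_BIT;	/* level interrupt the guest just EOIed */

	/* What the patch adds after processing the EOI: mark the LR empty
	 * and drop its active state so the slot can be reused. */
	set_bit32(lr, vgic_elrsr);
	vgic_lr[lr] &= ~LR_ACTIVE_BIT;

	printf("elrsr[0] = %#x, lr[%d] = %#x\n",
	       (unsigned int)vgic_elrsr[0], lr, (unsigned int)vgic_lr[lr]);
	return 0;
}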
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
  */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
+	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
+	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
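Every line of the kvm_vgic_sync_hwstate() wrapper happens to be visible in the hunk above, so after the patch it reads as below. The distributor lock is now held across the whole sync path, which is what the reworded comment on __kvm_vgic_sync_hwstate() refers to and, presumably, why the old "no lock needed" comment in vgic_process_maintenance() could be dropped:

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}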
@@ -1484,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
 	if (addr & ~KVM_PHYS_MASK)
 		return -E2BIG;
 
-	if (addr & ~PAGE_MASK)
+	if (addr & (SZ_4K - 1))
 		return -EINVAL;
 
 	mutex_lock(&kvm->lock);
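The final hunk changes what the alignment check depends on: addr & ~PAGE_MASK rejects any base address that is not PAGE_SIZE-aligned, whereas addr & (SZ_4K - 1) enforces a fixed 4 KiB alignment no matter how the kernel's page size is configured. A small self-contained comparison (the 64 KiB PAGE_SIZE is assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K		0x1000UL
#define PAGE_SIZE	0x10000UL	/* assume 64 KiB pages, for illustration */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x2001000;	/* 4 KiB aligned, but not 64 KiB aligned */

	/* Old check: the alignment requirement follows the host page size. */
	printf("addr & ~PAGE_MASK  : %s\n",
	       (addr & ~PAGE_MASK) ? "rejected (-EINVAL)" : "accepted");

	/* New check: fixed 4 KiB alignment, independent of PAGE_SIZE. */
	printf("addr & (SZ_4K - 1) : %s\n",
	       (addr & (SZ_4K - 1)) ? "rejected (-EINVAL)" : "accepted");

	return 0;
}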
