| author | Marc Zyngier <marc.zyngier@arm.com> | 2013-05-30 05:20:36 -0400 |
|---|---|---|
| committer | Christoffer Dall <christoffer.dall@linaro.org> | 2014-07-11 07:57:31 -0400 |
| commit | eede821dbfd58df89edb072da64e006321eaef58 (patch) | |
| tree | 704cb415f76f70f2c55f45800dbfe48a8fb75695 /virt | |
| parent | 63f8344cb4917e5219d07cfd6fcd50860bcf5360 (diff) | |
KVM: arm/arm64: vgic: move GICv2 registers to their own structure
In order to make way for the GICv3 registers, move the v2-specific
registers to their own structure.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/arm/vgic.c | 56
1 file changed, 28 insertions, 28 deletions
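For context, here is a minimal sketch of the layout the hunks below imply: the GICv2 CPU-interface registers accessed in this file (vgic_hcr, vgic_vmcr, vgic_misr, vgic_eisr, vgic_elrsr, vgic_lr) move under a nested vgic_v2 member of struct vgic_cpu, while the revision-agnostic bookkeeping (lr_used, vgic_irq_lr_map, nr_lr) stays at the top level. The real definitions live in the vgic header, which is outside this 'virt'-limited diffstat, so the field types, array bounds (VGIC_MAX_LRS_SKETCH, VGIC_NR_IRQS_SKETCH) and the exact set of members shown here are assumptions inferred from the accesses in the patch, not the authoritative header change.

```c
#include <linux/types.h>

/*
 * Illustrative sketch only: inferred from the vgic_cpu->vgic_v2.* accesses
 * in the patch below.  Bounds and types are placeholders, not the real
 * macros from the vgic header.
 */
#define VGIC_MAX_LRS_SKETCH	64	/* hypothetical bound for the list registers */
#define VGIC_NR_IRQS_SKETCH	256	/* hypothetical stand-in for VGIC_NR_IRQS */

struct vgic_v2_cpu_if {
	u32	vgic_hcr;			/* hypervisor control (GICH_HCR_*) */
	u32	vgic_vmcr;			/* VM control register */
	u32	vgic_misr;			/* maintenance interrupt status */
	u32	vgic_eisr[2];			/* EOI status bitmap */
	u32	vgic_elrsr[2];			/* empty-LR status bitmap */
	u32	vgic_lr[VGIC_MAX_LRS_SKETCH];	/* list registers */
};

struct vgic_cpu {
	/* Bookkeeping that is not GIC-revision specific stays here */
	u8		vgic_irq_lr_map[VGIC_NR_IRQS_SKETCH];
	DECLARE_BITMAP(lr_used, VGIC_MAX_LRS_SKETCH);
	int		nr_lr;

	/* GICv2 CPU interface registers, now grouped under their own member */
	struct vgic_v2_cpu_if	vgic_v2;
};
```

With a layout along these lines, every access in the hunks below simply gains a .vgic_v2 step, e.g. vgic_cpu->vgic_lr[lr] becomes vgic_cpu->vgic_v2.vgic_lr[lr], and the rest of the per-vcpu state is untouched.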
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9bebb577..0ba1ab0721fd 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -601,7 +601,7 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
 {
 	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
+	vgic_cpu->vgic_v2.vgic_lr[lr_nr] &= ~GICH_LR_STATE;
 	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 }
 
@@ -626,7 +626,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 	u32 *lr;
 
 	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		lr = &vgic_cpu->vgic_lr[i];
+		lr = &vgic_cpu->vgic_v2.vgic_lr[i];
 		irq = LR_IRQID(*lr);
 		source_cpu = LR_CPUID(*lr);
 
@@ -1007,7 +1007,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 	int lr;
 
 	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		int irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 		if (!vgic_irq_is_enabled(vcpu, irq)) {
 			vgic_retire_lr(lr, irq, vgic_cpu);
@@ -1037,11 +1037,11 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 
 	/* Do we have an active interrupt for the same CPUID? */
 	if (lr != LR_EMPTY &&
-	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
+	    (LR_CPUID(vgic_cpu->vgic_v2.vgic_lr[lr]) == sgi_source_id)) {
 		kvm_debug("LR%d piggyback for IRQ%d %x\n",
-			  lr, irq, vgic_cpu->vgic_lr[lr]);
+			  lr, irq, vgic_cpu->vgic_v2.vgic_lr[lr]);
 		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_PENDING_BIT;
 		return true;
 	}
 
@@ -1052,12 +1052,12 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 		return false;
 
 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
+	vgic_cpu->vgic_v2.vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
 	if (!vgic_irq_is_edge(vcpu, irq))
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+		vgic_cpu->vgic_v2.vgic_lr[lr] |= GICH_LR_EOI;
 
 	return true;
 }
@@ -1155,9 +1155,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 epilog:
 	if (overflow) {
-		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr |= GICH_HCR_UIE;
 	} else {
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 		/*
 		 * We're about to run this VCPU, and we've consumed
 		 * everything the distributor had in store for
@@ -1173,21 +1173,21 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	bool level_pending = false;
 
-	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_v2.vgic_misr);
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
 		 * active bit.
 		 */
 		int lr, irq;
 
-		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
+		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_eisr,
 				 vgic_cpu->nr_lr) {
-			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+			irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 			vgic_irq_clear_active(vcpu, irq);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_EOI;
 
 			/* Any additional pending interrupt? */
 			if (vgic_dist_irq_is_pending(vcpu, irq)) {
@@ -1201,13 +1201,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
 			 */
-			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
+			set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr);
+			vgic_cpu->vgic_v2.vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
 		}
 	}
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_U)
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+	if (vgic_cpu->vgic_v2.vgic_misr & GICH_MISR_U)
+		vgic_cpu->vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 
 	return level_pending;
 }
@@ -1226,21 +1226,21 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	level_pending = vgic_process_maintenance(vcpu);
 
 	/* Clear mappings for empty LRs */
-	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
+	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 			 vgic_cpu->nr_lr) {
 		int irq;
 
 		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
 			continue;
 
-		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		irq = vgic_cpu->vgic_v2.vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 		BUG_ON(irq >= VGIC_NR_IRQS);
 		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
 	}
 
 	/* Check if we still have something up our sleeve... */
-	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
+	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_v2.vgic_elrsr,
 				      vgic_cpu->nr_lr);
 	if (level_pending || pending < vgic_cpu->nr_lr)
 		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
@@ -1436,10 +1436,10 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 * points to their reset values. Anything else resets to zero
 	 * anyway.
 	 */
-	vgic_cpu->vgic_vmcr = 0;
+	vgic_cpu->vgic_v2.vgic_vmcr = 0;
 
 	vgic_cpu->nr_lr = vgic_nr_lr;
-	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+	vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
 
 	return 0;
 }
@@ -1746,15 +1746,15 @@ static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
 	}
 
 	if (!mmio->is_write) {
-		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+		reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
 		mmio_data_write(mmio, ~0, reg);
 	} else {
 		reg = mmio_data_read(mmio, ~0);
 		reg = (reg << shift) & mask;
-		if (reg != (vgic_cpu->vgic_vmcr & mask))
+		if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
 			updated = true;
-		vgic_cpu->vgic_vmcr &= ~mask;
-		vgic_cpu->vgic_vmcr |= reg;
+		vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
+		vgic_cpu->vgic_v2.vgic_vmcr |= reg;
 	}
 	return updated;
 }