Diffstat (limited to 'virt/kvm/arm/vgic-v3.c')
-rw-r--r--  virt/kvm/arm/vgic-v3.c | 82
1 file changed, 57 insertions(+), 25 deletions(-)
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 1c2c8eef0599..3a62d8a9a2c6 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -34,6 +34,7 @@
 #define GICH_LR_VIRTUALID (0x3ffUL << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT (10)
 #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
 
 /*
  * LRs are stored in reverse order in memory. make sure we index them
@@ -48,12 +49,17 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
         struct vgic_lr lr_desc;
         u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
 
-        lr_desc.irq = val & GICH_LR_VIRTUALID;
-        if (lr_desc.irq <= 15)
-                lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
         else
-                lr_desc.source = 0;
-        lr_desc.state = 0;
+                lr_desc.irq = val & GICH_LR_VIRTUALID;
+
+        lr_desc.source = 0;
+        if (lr_desc.irq <= 15 &&
+            vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+                lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+
+        lr_desc.state = 0;
 
         if (val & ICH_LR_PENDING_BIT)
                 lr_desc.state |= LR_STATE_PENDING;
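The key difference in these first two hunks is the width of the virtual INTID field: the GICv2-compatible GICH_LR_VIRTUALID keeps only 10 bits, while the new ICH_LR_VIRTUALID_MASK keeps the full 32-bit vINTID of a GICv3 list register. A minimal standalone sketch (not part of the patch; plain userspace C, with the two masks copied in, BIT_ULL expanded by hand, and a made-up LR value) of how the same raw word decodes under each model:

    #include <stdint.h>
    #include <stdio.h>

    #define GICH_LR_VIRTUALID     (0x3ffUL << 0)      /* GICv2 view: 10-bit vINTID */
    #define ICH_LR_VIRTUALID_MASK ((1ULL << 32) - 1)  /* GICv3 view: 32-bit vINTID */

    int main(void)
    {
        /* Hypothetical LR word: INTID 8192 (an LPI) with a state bit set above bit 32. */
        uint64_t val = 8192ULL | (1ULL << 62);

        /* The GICv2 decode truncates the INTID to 10 bits... */
        printf("GICv2 decode: %llu\n", (unsigned long long)(val & GICH_LR_VIRTUALID));
        /* ...while the GICv3 decode preserves the whole field. */
        printf("GICv3 decode: %llu\n", (unsigned long long)(val & ICH_LR_VIRTUALID_MASK));
        return 0;
    }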
@@ -68,8 +74,20 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
 static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
                            struct vgic_lr lr_desc)
 {
-        u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
-                      lr_desc.irq);
+        u64 lr_val;
+
+        lr_val = lr_desc.irq;
+
+        /*
+         * Currently all guest IRQs are Group1, as Group0 would result
+         * in a FIQ in the guest, which it wouldn't expect.
+         * Eventually we want to make this configurable, so we may revisit
+         * this in the future.
+         */
+        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                lr_val |= ICH_LR_GROUP;
+        else
+                lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
 
         if (lr_desc.state & LR_STATE_PENDING)
                 lr_val |= ICH_LR_PENDING_BIT;
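The Group1 comment is the interesting part of this hunk: ICH_LR&lt;n&gt;_EL2 carries a Group bit (bit 60 in the GICv3 architecture), and leaving it clear would deliver the interrupt to the guest as a FIQ. A rough userspace sketch (bit positions taken from the GICv3 spec, the interrupt number entirely made up) of how such an LR word is composed:

    #include <stdint.h>
    #include <stdio.h>

    #define ICH_LR_GROUP       (1ULL << 60)  /* 0 = Group0 (FIQ), 1 = Group1 (IRQ) */
    #define ICH_LR_PENDING_BIT (1ULL << 62)  /* State field: pending */

    int main(void)
    {
        uint32_t virq = 27;        /* e.g. the virtual timer PPI */
        uint64_t lr_val = virq;

        /* Mirror the patch's choice: present every guest interrupt as Group1,
         * so the guest sees an IRQ rather than an unexpected FIQ. */
        lr_val |= ICH_LR_GROUP | ICH_LR_PENDING_BIT;

        printf("LR = 0x%016llx\n", (unsigned long long)lr_val);
        return 0;
    }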
@@ -145,15 +163,27 @@ static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
 static void vgic_v3_enable(struct kvm_vcpu *vcpu)
 {
+        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
         /*
          * By forcing VMCR to zero, the GIC will restore the binary
          * points to their reset values. Anything else resets to zero
          * anyway.
          */
-        vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+        vgic_v3->vgic_vmcr = 0;
+
+        /*
+         * If we are emulating a GICv3, we do it in an non-GICv2-compatible
+         * way, so we force SRE to 1 to demonstrate this to the guest.
+         * This goes with the spec allowing the value to be RAO/WI.
+         */
+        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+        else
+                vgic_v3->vgic_sre = 0;
 
         /* Get the show on the road... */
-        vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+        vgic_v3->vgic_hcr = ICH_HCR_EN;
 }
 
 static const struct vgic_ops vgic_v3_ops = {
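The SRE handling in vgic_v3_enable() leans on the architecture allowing ICC_SRE_EL1.SRE to be RAO/WI (read-as-one, writes ignored) when only the system-register interface is usable. A hypothetical helper pair (names invented for illustration, not from the patch) sketching what RAO/WI means for the emulated register:

    #include <stdint.h>
    #include <stdio.h>

    #define ICC_SRE_EL1_SRE (1ULL << 0)  /* System Register Enable */

    /* Reads see SRE forced to 1 (Read-As-One)... */
    static uint64_t sre_read(uint64_t vgic_sre)
    {
        return vgic_sre | ICC_SRE_EL1_SRE;
    }

    /* ...and writes attempting to clear it are ignored (Write-Ignored). */
    static uint64_t sre_write(uint64_t vgic_sre, uint64_t newval)
    {
        (void)newval;
        return vgic_sre;
    }

    int main(void)
    {
        uint64_t sre = ICC_SRE_EL1_SRE;   /* as set for a GICv3 guest */

        sre = sre_write(sre, 0);          /* guest tries to clear SRE */
        printf("SRE reads back as %llu\n",
               (unsigned long long)(sre_read(sre) & ICC_SRE_EL1_SRE));
        return 0;
    }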
@@ -205,35 +235,37 @@ int vgic_v3_probe(struct device_node *vgic_node,
          * maximum of 16 list registers. Just ignore bit 4...
          */
         vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+        vgic->can_emulate_gicv2 = false;
 
         if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
                 gicv_idx = 1;
 
         gicv_idx += 3; /* Also skip GICD, GICC, GICH */
         if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
-                kvm_err("Cannot obtain GICV region\n");
-                ret = -ENXIO;
-                goto out;
-        }
-
-        if (!PAGE_ALIGNED(vcpu_res.start)) {
-                kvm_err("GICV physical address 0x%llx not page aligned\n",
+                kvm_info("GICv3: no GICV resource entry\n");
+                vgic->vcpu_base = 0;
+        } else if (!PAGE_ALIGNED(vcpu_res.start)) {
+                pr_warn("GICV physical address 0x%llx not page aligned\n",
                         (unsigned long long)vcpu_res.start);
-                ret = -ENXIO;
-                goto out;
-        }
-
-        if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
-                kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                vgic->vcpu_base = 0;
+        } else if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+                pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
                         (unsigned long long)resource_size(&vcpu_res),
                         PAGE_SIZE);
-                ret = -ENXIO;
-                goto out;
+                vgic->vcpu_base = 0;
+        } else {
+                vgic->vcpu_base = vcpu_res.start;
+                vgic->can_emulate_gicv2 = true;
+                kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+                                        KVM_DEV_TYPE_ARM_VGIC_V2);
         }
+        if (vgic->vcpu_base == 0)
+                kvm_info("disabling GICv2 emulation\n");
+        kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
 
-        vgic->vcpu_base = vcpu_res.start;
         vgic->vctrl_base = NULL;
         vgic->type = VGIC_V3;
+        vgic->max_gic_vcpus = KVM_MAX_VCPUS;
 
         kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
                  vcpu_res.start, vgic->maint_irq);
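The net effect of the probe changes is visible from userspace: on GICv3 hosts the GICv3 device type is now always registered, while the GICv2-compatible device is only registered when a usable GICV region was found (otherwise the "disabling GICv2 emulation" message is printed). A hedged sketch of how a VMM could create the in-kernel vGIC with the standard KVM_CREATE_DEVICE ioctl (vm_fd is assumed to come from KVM_CREATE_VM; the helper name is invented):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int create_vgic(int vm_fd, int want_gicv2_guest)
    {
        struct kvm_create_device gic = {
            .type = want_gicv2_guest ? KVM_DEV_TYPE_ARM_VGIC_V2
                                     : KVM_DEV_TYPE_ARM_VGIC_V3,
        };

        /*
         * Asking for KVM_DEV_TYPE_ARM_VGIC_V2 fails with ENODEV when the
         * probe above could not use the GICV region and therefore never
         * registered kvm_arm_vgic_v2_ops.
         */
        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &gic) < 0) {
            perror("KVM_CREATE_DEVICE");
            return -1;
        }
        return gic.fd;   /* file descriptor for the new vGIC device */
    }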