author     Andre Przywara <andre.przywara@arm.com>          2014-06-06 18:53:08 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>   2015-01-20 12:25:30 -0500
commit     1d916229e348c628ddc9cf97528e76d13f52c122 (patch)
tree       ca7881a221f0a5ffb40367fd3445d38e0b239bb1 /virt/kvm
parent     832158125d2ef30b364f21e1616495c40c286a4a (diff)
arm/arm64: KVM: split GICv2 specific emulation code from vgic.c
vgic.c is currently a mixture of generic vGIC emulation code and
functions specific to emulating a GICv2. To ease the addition of
GICv3, split off the strictly v2-specific parts into a new file,
vgic-v2-emul.c.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
-------
As the diff isn't always obvious here (and to aid eventual rebases),
here is a list of the high-level changes made to the code:
* added new file to respective arm/arm64 Makefiles
* moved GICv2 specific functions to vgic-v2-emul.c:
- handle_mmio_misc()
- handle_mmio_set_enable_reg()
- handle_mmio_clear_enable_reg()
- handle_mmio_set_pending_reg()
- handle_mmio_clear_pending_reg()
- handle_mmio_priority_reg()
- vgic_get_target_reg()
- vgic_set_target_reg()
- handle_mmio_target_reg()
- handle_mmio_cfg_reg()
- handle_mmio_sgi_reg()
- vgic_v2_unqueue_sgi()
- read_set_clear_sgi_pend_reg()
- write_set_clear_sgi_pend_reg()
- handle_mmio_sgi_set()
- handle_mmio_sgi_clear()
- vgic_v2_handle_mmio()
- vgic_get_sgi_sources()
- vgic_dispatch_sgi()
- vgic_v2_queue_sgi()
- vgic_v2_map_resources()
- vgic_v2_init()
- vgic_v2_add_sgi_source()
- vgic_v2_init_model()
- vgic_v2_init_emulation()
- handle_cpu_mmio_misc()
- handle_mmio_abpr()
- handle_cpu_mmio_ident()
- vgic_attr_regs_access()
- vgic_create() (renamed to vgic_v2_create())
- vgic_destroy() (renamed to vgic_v2_destroy())
- vgic_has_attr() (renamed to vgic_v2_has_attr())
- vgic_set_attr() (renamed to vgic_v2_set_attr())
- vgic_get_attr() (renamed to vgic_v2_get_attr())
- struct kvm_mmio_range vgic_dist_ranges[]
- struct kvm_mmio_range vgic_cpu_ranges[]
- struct kvm_device_ops kvm_arm_vgic_v2_ops {}
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
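
The split relies on the per-VM ops table that the generic code dispatches
through: vgic_v2_init_emulation() in the new file fills in dist->vm_ops, and
generic vgic.c then calls, for example,
vcpu->kvm->arch.vgic.vm_ops.handle_mmio() without knowing which GIC model is
being emulated. As a minimal sketch, inferred only from the assignments and
function signatures visible in this diff (the real declaration lives in a
vgic header this patch does not touch), the interface looks roughly like:

	struct vgic_vm_ops {
		bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
				       struct kvm_exit_mmio *);
		bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
		void	(*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
		int	(*init_model)(struct kvm *);
		int	(*map_resources)(struct kvm *, const struct vgic_params *);
	};

This indirection is what lets a later GICv3 implementation install its own
handlers without touching the generic code.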
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic-v2-emul.c | 847
-rw-r--r--  virt/kvm/arm/vgic.c         | 806
2 files changed, 848 insertions(+), 805 deletions(-)
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
new file mode 100644
index 000000000000..19c6210f02cf
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -0,0 +1,847 @@
1 | /* | ||
2 | * Contains GICv2 specific emulation code, was in vgic.c before. | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/kvm.h> | ||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <linux/irqchip/arm-gic.h> | ||
28 | |||
29 | #include <asm/kvm_emulate.h> | ||
30 | #include <asm/kvm_arm.h> | ||
31 | #include <asm/kvm_mmu.h> | ||
32 | |||
33 | #include "vgic.h" | ||
34 | |||
35 | #define GICC_ARCH_VERSION_V2 0x2 | ||
36 | |||
37 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | ||
38 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi) | ||
39 | { | ||
40 | return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi; | ||
41 | } | ||
42 | |||
43 | static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | ||
44 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
45 | { | ||
46 | u32 reg; | ||
47 | u32 word_offset = offset & 3; | ||
48 | |||
49 | switch (offset & ~3) { | ||
50 | case 0: /* GICD_CTLR */ | ||
51 | reg = vcpu->kvm->arch.vgic.enabled; | ||
52 | vgic_reg_access(mmio, &reg, word_offset, | ||
53 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
54 | if (mmio->is_write) { | ||
55 | vcpu->kvm->arch.vgic.enabled = reg & 1; | ||
56 | vgic_update_state(vcpu->kvm); | ||
57 | return true; | ||
58 | } | ||
59 | break; | ||
60 | |||
61 | case 4: /* GICD_TYPER */ | ||
62 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | ||
63 | reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; | ||
64 | vgic_reg_access(mmio, &reg, word_offset, | ||
65 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
66 | break; | ||
67 | |||
68 | case 8: /* GICD_IIDR */ | ||
69 | reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); | ||
70 | vgic_reg_access(mmio, &reg, word_offset, | ||
71 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
72 | break; | ||
73 | } | ||
74 | |||
75 | return false; | ||
76 | } | ||
77 | |||
78 | static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, | ||
79 | struct kvm_exit_mmio *mmio, | ||
80 | phys_addr_t offset) | ||
81 | { | ||
82 | return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, | ||
83 | vcpu->vcpu_id, ACCESS_WRITE_SETBIT); | ||
84 | } | ||
85 | |||
86 | static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, | ||
87 | struct kvm_exit_mmio *mmio, | ||
88 | phys_addr_t offset) | ||
89 | { | ||
90 | return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, | ||
91 | vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); | ||
92 | } | ||
93 | |||
94 | static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | ||
95 | struct kvm_exit_mmio *mmio, | ||
96 | phys_addr_t offset) | ||
97 | { | ||
98 | return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, | ||
99 | vcpu->vcpu_id); | ||
100 | } | ||
101 | |||
102 | static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | ||
103 | struct kvm_exit_mmio *mmio, | ||
104 | phys_addr_t offset) | ||
105 | { | ||
106 | return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, | ||
107 | vcpu->vcpu_id); | ||
108 | } | ||
109 | |||
110 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, | ||
111 | struct kvm_exit_mmio *mmio, | ||
112 | phys_addr_t offset) | ||
113 | { | ||
114 | u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, | ||
115 | vcpu->vcpu_id, offset); | ||
116 | vgic_reg_access(mmio, reg, offset, | ||
117 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
118 | return false; | ||
119 | } | ||
120 | |||
121 | #define GICD_ITARGETSR_SIZE 32 | ||
122 | #define GICD_CPUTARGETS_BITS 8 | ||
123 | #define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS) | ||
124 | static u32 vgic_get_target_reg(struct kvm *kvm, int irq) | ||
125 | { | ||
126 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
127 | int i; | ||
128 | u32 val = 0; | ||
129 | |||
130 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
131 | |||
132 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) | ||
133 | val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8); | ||
134 | |||
135 | return val; | ||
136 | } | ||
137 | |||
138 | static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq) | ||
139 | { | ||
140 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
141 | struct kvm_vcpu *vcpu; | ||
142 | int i, c; | ||
143 | unsigned long *bmap; | ||
144 | u32 target; | ||
145 | |||
146 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
147 | |||
148 | /* | ||
149 | * Pick the LSB in each byte. This ensures we target exactly | ||
150 | * one vcpu per IRQ. If the byte is null, assume we target | ||
151 | * CPU0. | ||
152 | */ | ||
153 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) { | ||
154 | int shift = i * GICD_CPUTARGETS_BITS; | ||
155 | |||
156 | target = ffs((val >> shift) & 0xffU); | ||
157 | target = target ? (target - 1) : 0; | ||
158 | dist->irq_spi_cpu[irq + i] = target; | ||
159 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
160 | bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); | ||
161 | if (c == target) | ||
162 | set_bit(irq + i, bmap); | ||
163 | else | ||
164 | clear_bit(irq + i, bmap); | ||
165 | } | ||
166 | } | ||
167 | } | ||
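/*
 * Editor's annotation (not part of the patch): a worked example of the
 * LSB-per-byte rule above. A 32-bit write of 0x00020106 to a
 * GICD_ITARGETSR word covers four SPIs: byte 0 is 0x06, so ffs() picks
 * CPU1; byte 1 is 0x01, picking CPU0; byte 2 is 0x02, picking CPU1;
 * byte 3 is 0x00 and defaults to CPU0.
 */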
168 | |||
169 | static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, | ||
170 | struct kvm_exit_mmio *mmio, | ||
171 | phys_addr_t offset) | ||
172 | { | ||
173 | u32 reg; | ||
174 | |||
175 | /* We treat the banked interrupt targets as read-only */ | ||
176 | if (offset < 32) { | ||
177 | u32 roreg; | ||
178 | |||
179 | roreg = 1 << vcpu->vcpu_id; | ||
180 | roreg |= roreg << 8; | ||
181 | roreg |= roreg << 16; | ||
182 | |||
183 | vgic_reg_access(mmio, &roreg, offset, | ||
184 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
185 | return false; | ||
186 | } | ||
187 | |||
188 | reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); | ||
189 | vgic_reg_access(mmio, &reg, offset, | ||
190 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
191 | if (mmio->is_write) { | ||
192 | vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); | ||
193 | vgic_update_state(vcpu->kvm); | ||
194 | return true; | ||
195 | } | ||
196 | |||
197 | return false; | ||
198 | } | ||
199 | |||
200 | static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | ||
201 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
202 | { | ||
203 | u32 *reg; | ||
204 | |||
205 | reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, | ||
206 | vcpu->vcpu_id, offset >> 1); | ||
207 | |||
208 | return vgic_handle_cfg_reg(reg, mmio, offset); | ||
209 | } | ||
210 | |||
211 | static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, | ||
212 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
213 | { | ||
214 | u32 reg; | ||
215 | |||
216 | vgic_reg_access(mmio, &reg, offset, | ||
217 | ACCESS_READ_RAZ | ACCESS_WRITE_VALUE); | ||
218 | if (mmio->is_write) { | ||
219 | vgic_dispatch_sgi(vcpu, reg); | ||
220 | vgic_update_state(vcpu->kvm); | ||
221 | return true; | ||
222 | } | ||
223 | |||
224 | return false; | ||
225 | } | ||
226 | |||
227 | /* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */ | ||
228 | static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
229 | struct kvm_exit_mmio *mmio, | ||
230 | phys_addr_t offset) | ||
231 | { | ||
232 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
233 | int sgi; | ||
234 | int min_sgi = (offset & ~0x3); | ||
235 | int max_sgi = min_sgi + 3; | ||
236 | int vcpu_id = vcpu->vcpu_id; | ||
237 | u32 reg = 0; | ||
238 | |||
239 | /* Copy source SGIs from distributor side */ | ||
240 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
241 | u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi); | ||
242 | |||
243 | reg |= ((u32)sources) << (8 * (sgi - min_sgi)); | ||
244 | } | ||
245 | |||
246 | mmio_data_write(mmio, ~0, reg); | ||
247 | return false; | ||
248 | } | ||
249 | |||
250 | static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
251 | struct kvm_exit_mmio *mmio, | ||
252 | phys_addr_t offset, bool set) | ||
253 | { | ||
254 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
255 | int sgi; | ||
256 | int min_sgi = (offset & ~0x3); | ||
257 | int max_sgi = min_sgi + 3; | ||
258 | int vcpu_id = vcpu->vcpu_id; | ||
259 | u32 reg; | ||
260 | bool updated = false; | ||
261 | |||
262 | reg = mmio_data_read(mmio, ~0); | ||
263 | |||
264 | /* Set or clear pending SGIs on the distributor */ | ||
265 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
266 | u8 mask = reg >> (8 * (sgi - min_sgi)); | ||
267 | u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi); | ||
268 | |||
269 | if (set) { | ||
270 | if ((*src & mask) != mask) | ||
271 | updated = true; | ||
272 | *src |= mask; | ||
273 | } else { | ||
274 | if (*src & mask) | ||
275 | updated = true; | ||
276 | *src &= ~mask; | ||
277 | } | ||
278 | } | ||
279 | |||
280 | if (updated) | ||
281 | vgic_update_state(vcpu->kvm); | ||
282 | |||
283 | return updated; | ||
284 | } | ||
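/*
 * Editor's annotation (not part of the patch): these registers pack
 * four SGIs per 32-bit word, one byte per SGI, and each set bit in a
 * byte names a source CPU. An access at offset 4 thus covers SGIs 4-7,
 * and a byte value of 0x05 means CPU0 and CPU2 both have that SGI
 * pending for this vcpu.
 */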
285 | |||
286 | static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, | ||
287 | struct kvm_exit_mmio *mmio, | ||
288 | phys_addr_t offset) | ||
289 | { | ||
290 | if (!mmio->is_write) | ||
291 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
292 | else | ||
293 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); | ||
294 | } | ||
295 | |||
296 | static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | ||
297 | struct kvm_exit_mmio *mmio, | ||
298 | phys_addr_t offset) | ||
299 | { | ||
300 | if (!mmio->is_write) | ||
301 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
302 | else | ||
303 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | ||
304 | } | ||
305 | |||
306 | static const struct kvm_mmio_range vgic_dist_ranges[] = { | ||
307 | { | ||
308 | .base = GIC_DIST_CTRL, | ||
309 | .len = 12, | ||
310 | .bits_per_irq = 0, | ||
311 | .handle_mmio = handle_mmio_misc, | ||
312 | }, | ||
313 | { | ||
314 | .base = GIC_DIST_IGROUP, | ||
315 | .len = VGIC_MAX_IRQS / 8, | ||
316 | .bits_per_irq = 1, | ||
317 | .handle_mmio = handle_mmio_raz_wi, | ||
318 | }, | ||
319 | { | ||
320 | .base = GIC_DIST_ENABLE_SET, | ||
321 | .len = VGIC_MAX_IRQS / 8, | ||
322 | .bits_per_irq = 1, | ||
323 | .handle_mmio = handle_mmio_set_enable_reg, | ||
324 | }, | ||
325 | { | ||
326 | .base = GIC_DIST_ENABLE_CLEAR, | ||
327 | .len = VGIC_MAX_IRQS / 8, | ||
328 | .bits_per_irq = 1, | ||
329 | .handle_mmio = handle_mmio_clear_enable_reg, | ||
330 | }, | ||
331 | { | ||
332 | .base = GIC_DIST_PENDING_SET, | ||
333 | .len = VGIC_MAX_IRQS / 8, | ||
334 | .bits_per_irq = 1, | ||
335 | .handle_mmio = handle_mmio_set_pending_reg, | ||
336 | }, | ||
337 | { | ||
338 | .base = GIC_DIST_PENDING_CLEAR, | ||
339 | .len = VGIC_MAX_IRQS / 8, | ||
340 | .bits_per_irq = 1, | ||
341 | .handle_mmio = handle_mmio_clear_pending_reg, | ||
342 | }, | ||
343 | { | ||
344 | .base = GIC_DIST_ACTIVE_SET, | ||
345 | .len = VGIC_MAX_IRQS / 8, | ||
346 | .bits_per_irq = 1, | ||
347 | .handle_mmio = handle_mmio_raz_wi, | ||
348 | }, | ||
349 | { | ||
350 | .base = GIC_DIST_ACTIVE_CLEAR, | ||
351 | .len = VGIC_MAX_IRQS / 8, | ||
352 | .bits_per_irq = 1, | ||
353 | .handle_mmio = handle_mmio_raz_wi, | ||
354 | }, | ||
355 | { | ||
356 | .base = GIC_DIST_PRI, | ||
357 | .len = VGIC_MAX_IRQS, | ||
358 | .bits_per_irq = 8, | ||
359 | .handle_mmio = handle_mmio_priority_reg, | ||
360 | }, | ||
361 | { | ||
362 | .base = GIC_DIST_TARGET, | ||
363 | .len = VGIC_MAX_IRQS, | ||
364 | .bits_per_irq = 8, | ||
365 | .handle_mmio = handle_mmio_target_reg, | ||
366 | }, | ||
367 | { | ||
368 | .base = GIC_DIST_CONFIG, | ||
369 | .len = VGIC_MAX_IRQS / 4, | ||
370 | .bits_per_irq = 2, | ||
371 | .handle_mmio = handle_mmio_cfg_reg, | ||
372 | }, | ||
373 | { | ||
374 | .base = GIC_DIST_SOFTINT, | ||
375 | .len = 4, | ||
376 | .handle_mmio = handle_mmio_sgi_reg, | ||
377 | }, | ||
378 | { | ||
379 | .base = GIC_DIST_SGI_PENDING_CLEAR, | ||
380 | .len = VGIC_NR_SGIS, | ||
381 | .handle_mmio = handle_mmio_sgi_clear, | ||
382 | }, | ||
383 | { | ||
384 | .base = GIC_DIST_SGI_PENDING_SET, | ||
385 | .len = VGIC_NR_SGIS, | ||
386 | .handle_mmio = handle_mmio_sgi_set, | ||
387 | }, | ||
388 | {} | ||
389 | }; | ||
390 | |||
391 | static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
392 | struct kvm_exit_mmio *mmio) | ||
393 | { | ||
394 | unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base; | ||
395 | |||
396 | if (!is_in_range(mmio->phys_addr, mmio->len, base, | ||
397 | KVM_VGIC_V2_DIST_SIZE)) | ||
398 | return false; | ||
399 | |||
400 | /* GICv2 does not support accesses wider than 32 bits */ | ||
401 | if (mmio->len > 4) { | ||
402 | kvm_inject_dabt(vcpu, mmio->phys_addr); | ||
403 | return true; | ||
404 | } | ||
405 | |||
406 | return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base); | ||
407 | } | ||
408 | |||
409 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | ||
410 | { | ||
411 | struct kvm *kvm = vcpu->kvm; | ||
412 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
413 | int nrcpus = atomic_read(&kvm->online_vcpus); | ||
414 | u8 target_cpus; | ||
415 | int sgi, mode, c, vcpu_id; | ||
416 | |||
417 | vcpu_id = vcpu->vcpu_id; | ||
418 | |||
419 | sgi = reg & 0xf; | ||
420 | target_cpus = (reg >> 16) & 0xff; | ||
421 | mode = (reg >> 24) & 3; | ||
422 | |||
423 | switch (mode) { | ||
424 | case 0: | ||
425 | if (!target_cpus) | ||
426 | return; | ||
427 | break; | ||
428 | |||
429 | case 1: | ||
430 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; | ||
431 | break; | ||
432 | |||
433 | case 2: | ||
434 | target_cpus = 1 << vcpu_id; | ||
435 | break; | ||
436 | } | ||
437 | |||
438 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
439 | if (target_cpus & 1) { | ||
440 | /* Flag the SGI as pending */ | ||
441 | vgic_dist_irq_set_pending(vcpu, sgi); | ||
442 | *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id; | ||
443 | kvm_debug("SGI%d from CPU%d to CPU%d\n", | ||
444 | sgi, vcpu_id, c); | ||
445 | } | ||
446 | |||
447 | target_cpus >>= 1; | ||
448 | } | ||
449 | } | ||
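/*
 * Editor's annotation (not part of the patch): a worked example of the
 * GICD_SGIR decoding above. A guest write of 0x00020004 gives mode 0
 * (use the target list), target_cpus 0x02 (CPU1 only) and SGI number 4,
 * so SGI4 is marked pending on VCPU1 with the writing VCPU recorded as
 * the source.
 */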
450 | |||
451 | static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq) | ||
452 | { | ||
453 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
454 | unsigned long sources; | ||
455 | int vcpu_id = vcpu->vcpu_id; | ||
456 | int c; | ||
457 | |||
458 | sources = *vgic_get_sgi_sources(dist, vcpu_id, irq); | ||
459 | |||
460 | for_each_set_bit(c, &sources, dist->nr_cpus) { | ||
461 | if (vgic_queue_irq(vcpu, c, irq)) | ||
462 | clear_bit(c, &sources); | ||
463 | } | ||
464 | |||
465 | *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources; | ||
466 | |||
467 | /* | ||
468 | * If the sources bitmap has been cleared it means that we | ||
469 | * could queue all the SGIs onto link registers (see the | ||
470 | * clear_bit above), and therefore we are done with them in | ||
471 | * our emulated gic and can get rid of them. | ||
472 | */ | ||
473 | if (!sources) { | ||
474 | vgic_dist_irq_clear_pending(vcpu, irq); | ||
475 | vgic_cpu_irq_clear(vcpu, irq); | ||
476 | return true; | ||
477 | } | ||
478 | |||
479 | return false; | ||
480 | } | ||
481 | |||
482 | /** | ||
483 | * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs | ||
484 | * @kvm: pointer to the kvm struct | ||
485 | * | ||
486 | * Map the virtual CPU interface into the VM before running any VCPUs. We | ||
487 | * can't do this at creation time, because user space must first set the | ||
488 | * virtual CPU interface address in the guest physical address space. | ||
489 | */ | ||
490 | static int vgic_v2_map_resources(struct kvm *kvm, | ||
491 | const struct vgic_params *params) | ||
492 | { | ||
493 | int ret = 0; | ||
494 | |||
495 | if (!irqchip_in_kernel(kvm)) | ||
496 | return 0; | ||
497 | |||
498 | mutex_lock(&kvm->lock); | ||
499 | |||
500 | if (vgic_ready(kvm)) | ||
501 | goto out; | ||
502 | |||
503 | if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || | ||
504 | IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) { | ||
505 | kvm_err("Need to set vgic cpu and dist addresses first\n"); | ||
506 | ret = -ENXIO; | ||
507 | goto out; | ||
508 | } | ||
509 | |||
510 | /* | ||
511 | * Initialize the vgic if this hasn't already been done on demand by | ||
512 | * accessing the vgic state from userspace. | ||
513 | */ | ||
514 | ret = vgic_init(kvm); | ||
515 | if (ret) { | ||
516 | kvm_err("Unable to allocate maps\n"); | ||
517 | goto out; | ||
518 | } | ||
519 | |||
520 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | ||
521 | params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, | ||
522 | true); | ||
523 | if (ret) { | ||
524 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); | ||
525 | goto out; | ||
526 | } | ||
527 | |||
528 | kvm->arch.vgic.ready = true; | ||
529 | out: | ||
530 | if (ret) | ||
531 | kvm_vgic_destroy(kvm); | ||
532 | mutex_unlock(&kvm->lock); | ||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) | ||
537 | { | ||
538 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
539 | |||
540 | *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source; | ||
541 | } | ||
542 | |||
543 | static int vgic_v2_init_model(struct kvm *kvm) | ||
544 | { | ||
545 | int i; | ||
546 | |||
547 | for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4) | ||
548 | vgic_set_target_reg(kvm, 0, i); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | void vgic_v2_init_emulation(struct kvm *kvm) | ||
554 | { | ||
555 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
556 | |||
557 | dist->vm_ops.handle_mmio = vgic_v2_handle_mmio; | ||
558 | dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; | ||
559 | dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; | ||
560 | dist->vm_ops.init_model = vgic_v2_init_model; | ||
561 | dist->vm_ops.map_resources = vgic_v2_map_resources; | ||
562 | |||
563 | kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS; | ||
564 | } | ||
565 | |||
566 | static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, | ||
567 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
568 | { | ||
569 | bool updated = false; | ||
570 | struct vgic_vmcr vmcr; | ||
571 | u32 *vmcr_field; | ||
572 | u32 reg; | ||
573 | |||
574 | vgic_get_vmcr(vcpu, &vmcr); | ||
575 | |||
576 | switch (offset & ~0x3) { | ||
577 | case GIC_CPU_CTRL: | ||
578 | vmcr_field = &vmcr.ctlr; | ||
579 | break; | ||
580 | case GIC_CPU_PRIMASK: | ||
581 | vmcr_field = &vmcr.pmr; | ||
582 | break; | ||
583 | case GIC_CPU_BINPOINT: | ||
584 | vmcr_field = &vmcr.bpr; | ||
585 | break; | ||
586 | case GIC_CPU_ALIAS_BINPOINT: | ||
587 | vmcr_field = &vmcr.abpr; | ||
588 | break; | ||
589 | default: | ||
590 | BUG(); | ||
591 | } | ||
592 | |||
593 | if (!mmio->is_write) { | ||
594 | reg = *vmcr_field; | ||
595 | mmio_data_write(mmio, ~0, reg); | ||
596 | } else { | ||
597 | reg = mmio_data_read(mmio, ~0); | ||
598 | if (reg != *vmcr_field) { | ||
599 | *vmcr_field = reg; | ||
600 | vgic_set_vmcr(vcpu, &vmcr); | ||
601 | updated = true; | ||
602 | } | ||
603 | } | ||
604 | return updated; | ||
605 | } | ||
606 | |||
607 | static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, | ||
608 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
609 | { | ||
610 | return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); | ||
611 | } | ||
612 | |||
613 | static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, | ||
614 | struct kvm_exit_mmio *mmio, | ||
615 | phys_addr_t offset) | ||
616 | { | ||
617 | u32 reg; | ||
618 | |||
619 | if (mmio->is_write) | ||
620 | return false; | ||
621 | |||
622 | /* GICC_IIDR */ | ||
623 | reg = (PRODUCT_ID_KVM << 20) | | ||
624 | (GICC_ARCH_VERSION_V2 << 16) | | ||
625 | (IMPLEMENTER_ARM << 0); | ||
626 | mmio_data_write(mmio, ~0, reg); | ||
627 | return false; | ||
628 | } | ||
629 | |||
630 | /* | ||
631 | * CPU Interface Register accesses - these are not accessed by the VM, but by | ||
632 | * user space for saving and restoring VGIC state. | ||
633 | */ | ||
634 | static const struct kvm_mmio_range vgic_cpu_ranges[] = { | ||
635 | { | ||
636 | .base = GIC_CPU_CTRL, | ||
637 | .len = 12, | ||
638 | .handle_mmio = handle_cpu_mmio_misc, | ||
639 | }, | ||
640 | { | ||
641 | .base = GIC_CPU_ALIAS_BINPOINT, | ||
642 | .len = 4, | ||
643 | .handle_mmio = handle_mmio_abpr, | ||
644 | }, | ||
645 | { | ||
646 | .base = GIC_CPU_ACTIVEPRIO, | ||
647 | .len = 16, | ||
648 | .handle_mmio = handle_mmio_raz_wi, | ||
649 | }, | ||
650 | { | ||
651 | .base = GIC_CPU_IDENT, | ||
652 | .len = 4, | ||
653 | .handle_mmio = handle_cpu_mmio_ident, | ||
654 | }, | ||
655 | }; | ||
656 | |||
657 | static int vgic_attr_regs_access(struct kvm_device *dev, | ||
658 | struct kvm_device_attr *attr, | ||
659 | u32 *reg, bool is_write) | ||
660 | { | ||
661 | const struct kvm_mmio_range *r = NULL, *ranges; | ||
662 | phys_addr_t offset; | ||
663 | int ret, cpuid, c; | ||
664 | struct kvm_vcpu *vcpu, *tmp_vcpu; | ||
665 | struct vgic_dist *vgic; | ||
666 | struct kvm_exit_mmio mmio; | ||
667 | |||
668 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
669 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | ||
670 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | ||
671 | |||
672 | mutex_lock(&dev->kvm->lock); | ||
673 | |||
674 | ret = vgic_init(dev->kvm); | ||
675 | if (ret) | ||
676 | goto out; | ||
677 | |||
678 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | ||
679 | ret = -EINVAL; | ||
680 | goto out; | ||
681 | } | ||
682 | |||
683 | vcpu = kvm_get_vcpu(dev->kvm, cpuid); | ||
684 | vgic = &dev->kvm->arch.vgic; | ||
685 | |||
686 | mmio.len = 4; | ||
687 | mmio.is_write = is_write; | ||
688 | if (is_write) | ||
689 | mmio_data_write(&mmio, ~0, *reg); | ||
690 | switch (attr->group) { | ||
691 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
692 | mmio.phys_addr = vgic->vgic_dist_base + offset; | ||
693 | ranges = vgic_dist_ranges; | ||
694 | break; | ||
695 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
696 | mmio.phys_addr = vgic->vgic_cpu_base + offset; | ||
697 | ranges = vgic_cpu_ranges; | ||
698 | break; | ||
699 | default: | ||
700 | BUG(); | ||
701 | } | ||
702 | r = vgic_find_range(ranges, &mmio, offset); | ||
703 | |||
704 | if (unlikely(!r || !r->handle_mmio)) { | ||
705 | ret = -ENXIO; | ||
706 | goto out; | ||
707 | } | ||
708 | |||
709 | |||
710 | spin_lock(&vgic->lock); | ||
711 | |||
712 | /* | ||
713 | * Ensure that no other VCPU is running by checking the vcpu->cpu | ||
714 | * field. If no other VCPUs are running we can safely access the VGIC | ||
715 | * state, because even if another VCPU is run after this point, that | ||
716 | * VCPU will not touch the vgic state, because it will block on | ||
717 | * getting the vgic->lock in kvm_vgic_sync_hwstate(). | ||
718 | */ | ||
719 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { | ||
720 | if (unlikely(tmp_vcpu->cpu != -1)) { | ||
721 | ret = -EBUSY; | ||
722 | goto out_vgic_unlock; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * Move all pending IRQs from the LRs on all VCPUs so the pending | ||
728 | * state can be properly represented in the register state accessible | ||
729 | * through this API. | ||
730 | */ | ||
731 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) | ||
732 | vgic_unqueue_irqs(tmp_vcpu); | ||
733 | |||
734 | offset -= r->base; | ||
735 | r->handle_mmio(vcpu, &mmio, offset); | ||
736 | |||
737 | if (!is_write) | ||
738 | *reg = mmio_data_read(&mmio, ~0); | ||
739 | |||
740 | ret = 0; | ||
741 | out_vgic_unlock: | ||
742 | spin_unlock(&vgic->lock); | ||
743 | out: | ||
744 | mutex_unlock(&dev->kvm->lock); | ||
745 | return ret; | ||
746 | } | ||
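/*
 * Editor's annotation (not part of the patch): the attr->attr word from
 * userspace encodes both the target VCPU (the CPUID mask/shift bits
 * above) and the register offset (KVM_DEV_ARM_VGIC_OFFSET_MASK), so one
 * KVM_GET/SET_DEVICE_ATTR call addresses a single 32-bit register of a
 * single VCPU's view of the distributor or CPU interface.
 */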
747 | |||
748 | static int vgic_v2_create(struct kvm_device *dev, u32 type) | ||
749 | { | ||
750 | return kvm_vgic_create(dev->kvm, type); | ||
751 | } | ||
752 | |||
753 | static void vgic_v2_destroy(struct kvm_device *dev) | ||
754 | { | ||
755 | kfree(dev); | ||
756 | } | ||
757 | |||
758 | static int vgic_v2_set_attr(struct kvm_device *dev, | ||
759 | struct kvm_device_attr *attr) | ||
760 | { | ||
761 | int ret; | ||
762 | |||
763 | ret = vgic_set_common_attr(dev, attr); | ||
764 | if (ret != -ENXIO) | ||
765 | return ret; | ||
766 | |||
767 | switch (attr->group) { | ||
768 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
769 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
770 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
771 | u32 reg; | ||
772 | |||
773 | if (get_user(reg, uaddr)) | ||
774 | return -EFAULT; | ||
775 | |||
776 | return vgic_attr_regs_access(dev, attr, &reg, true); | ||
777 | } | ||
778 | |||
779 | } | ||
780 | |||
781 | return -ENXIO; | ||
782 | } | ||
783 | |||
784 | static int vgic_v2_get_attr(struct kvm_device *dev, | ||
785 | struct kvm_device_attr *attr) | ||
786 | { | ||
787 | int ret; | ||
788 | |||
789 | ret = vgic_get_common_attr(dev, attr); | ||
790 | if (ret != -ENXIO) | ||
791 | return ret; | ||
792 | |||
793 | switch (attr->group) { | ||
794 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
795 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
796 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
797 | u32 reg = 0; | ||
798 | |||
799 | ret = vgic_attr_regs_access(dev, attr, &reg, false); | ||
800 | if (ret) | ||
801 | return ret; | ||
802 | return put_user(reg, uaddr); | ||
803 | } | ||
804 | |||
805 | } | ||
806 | |||
807 | return -ENXIO; | ||
808 | } | ||
809 | |||
810 | static int vgic_v2_has_attr(struct kvm_device *dev, | ||
811 | struct kvm_device_attr *attr) | ||
812 | { | ||
813 | phys_addr_t offset; | ||
814 | |||
815 | switch (attr->group) { | ||
816 | case KVM_DEV_ARM_VGIC_GRP_ADDR: | ||
817 | switch (attr->attr) { | ||
818 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | ||
819 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | ||
820 | return 0; | ||
821 | } | ||
822 | break; | ||
823 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
824 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
825 | return vgic_has_attr_regs(vgic_dist_ranges, offset); | ||
826 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
827 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
828 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | ||
829 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: | ||
830 | return 0; | ||
831 | case KVM_DEV_ARM_VGIC_GRP_CTRL: | ||
832 | switch (attr->attr) { | ||
833 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | ||
834 | return 0; | ||
835 | } | ||
836 | } | ||
837 | return -ENXIO; | ||
838 | } | ||
839 | |||
840 | struct kvm_device_ops kvm_arm_vgic_v2_ops = { | ||
841 | .name = "kvm-arm-vgic-v2", | ||
842 | .create = vgic_v2_create, | ||
843 | .destroy = vgic_v2_destroy, | ||
844 | .set_attr = vgic_v2_set_attr, | ||
845 | .get_attr = vgic_v2_get_attr, | ||
846 | .has_attr = vgic_v2_has_attr, | ||
847 | }; | ||
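For context (not shown in this diff): a struct kvm_device_ops table such as
kvm_arm_vgic_v2_ops above only takes effect once it is registered with KVM's
device framework. Assuming the registration helper of this kernel era, the
hookup elsewhere in the vgic code would look roughly like:

	kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);

Userspace can then instantiate the device with KVM_CREATE_DEVICE and reach
vgic_v2_set_attr()/vgic_v2_get_attr() through the KVM_SET_DEVICE_ATTR and
KVM_GET_DEVICE_ATTR ioctls.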
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index ce6c998a3f8d..0e84292c2197 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -77,12 +77,8 @@
77 | 77 | ||
78 | #include "vgic.h" | 78 | #include "vgic.h" |
79 | 79 | ||
80 | #define GICC_ARCH_VERSION_V2 0x2 | ||
81 | |||
82 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); | 80 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); |
83 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); | 81 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); |
84 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); | ||
85 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | ||
86 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); | 82 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); |
87 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | 83 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); |
88 | 84 | ||
@@ -421,41 +417,6 @@ void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | |||
421 | } | 417 | } |
422 | } | 418 | } |
423 | 419 | ||
424 | static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | ||
425 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
426 | { | ||
427 | u32 reg; | ||
428 | u32 word_offset = offset & 3; | ||
429 | |||
430 | switch (offset & ~3) { | ||
431 | case 0: /* GICD_CTLR */ | ||
432 | reg = vcpu->kvm->arch.vgic.enabled; | ||
433 | vgic_reg_access(mmio, &reg, word_offset, | ||
434 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
435 | if (mmio->is_write) { | ||
436 | vcpu->kvm->arch.vgic.enabled = reg & 1; | ||
437 | vgic_update_state(vcpu->kvm); | ||
438 | return true; | ||
439 | } | ||
440 | break; | ||
441 | |||
442 | case 4: /* GICD_TYPER */ | ||
443 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | ||
444 | reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; | ||
445 | vgic_reg_access(mmio, &reg, word_offset, | ||
446 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
447 | break; | ||
448 | |||
449 | case 8: /* GICD_IIDR */ | ||
450 | reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); | ||
451 | vgic_reg_access(mmio, &reg, word_offset, | ||
452 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
453 | break; | ||
454 | } | ||
455 | |||
456 | return false; | ||
457 | } | ||
458 | |||
459 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | 420 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, |
460 | phys_addr_t offset) | 421 | phys_addr_t offset) |
461 | { | 422 | { |
@@ -486,22 +447,6 @@ bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | |||
486 | return false; | 447 | return false; |
487 | } | 448 | } |
488 | 449 | ||
489 | static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, | ||
490 | struct kvm_exit_mmio *mmio, | ||
491 | phys_addr_t offset) | ||
492 | { | ||
493 | return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, | ||
494 | vcpu->vcpu_id, ACCESS_WRITE_SETBIT); | ||
495 | } | ||
496 | |||
497 | static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, | ||
498 | struct kvm_exit_mmio *mmio, | ||
499 | phys_addr_t offset) | ||
500 | { | ||
501 | return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, | ||
502 | vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); | ||
503 | } | ||
504 | |||
505 | bool vgic_handle_set_pending_reg(struct kvm *kvm, | 450 | bool vgic_handle_set_pending_reg(struct kvm *kvm, |
506 | struct kvm_exit_mmio *mmio, | 451 | struct kvm_exit_mmio *mmio, |
507 | phys_addr_t offset, int vcpu_id) | 452 | phys_addr_t offset, int vcpu_id) |
@@ -575,109 +520,6 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm, | |||
575 | return false; | 520 | return false; |
576 | } | 521 | } |
577 | 522 | ||
578 | static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | ||
579 | struct kvm_exit_mmio *mmio, | ||
580 | phys_addr_t offset) | ||
581 | { | ||
582 | return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset, | ||
583 | vcpu->vcpu_id); | ||
584 | } | ||
585 | |||
586 | static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | ||
587 | struct kvm_exit_mmio *mmio, | ||
588 | phys_addr_t offset) | ||
589 | { | ||
590 | return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset, | ||
591 | vcpu->vcpu_id); | ||
592 | } | ||
593 | |||
594 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, | ||
595 | struct kvm_exit_mmio *mmio, | ||
596 | phys_addr_t offset) | ||
597 | { | ||
598 | u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, | ||
599 | vcpu->vcpu_id, offset); | ||
600 | vgic_reg_access(mmio, reg, offset, | ||
601 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
602 | return false; | ||
603 | } | ||
604 | |||
605 | #define GICD_ITARGETSR_SIZE 32 | ||
606 | #define GICD_CPUTARGETS_BITS 8 | ||
607 | #define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS) | ||
608 | static u32 vgic_get_target_reg(struct kvm *kvm, int irq) | ||
609 | { | ||
610 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
611 | int i; | ||
612 | u32 val = 0; | ||
613 | |||
614 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
615 | |||
616 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) | ||
617 | val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8); | ||
618 | |||
619 | return val; | ||
620 | } | ||
621 | |||
622 | static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq) | ||
623 | { | ||
624 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
625 | struct kvm_vcpu *vcpu; | ||
626 | int i, c; | ||
627 | unsigned long *bmap; | ||
628 | u32 target; | ||
629 | |||
630 | irq -= VGIC_NR_PRIVATE_IRQS; | ||
631 | |||
632 | /* | ||
633 | * Pick the LSB in each byte. This ensures we target exactly | ||
634 | * one vcpu per IRQ. If the byte is null, assume we target | ||
635 | * CPU0. | ||
636 | */ | ||
637 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) { | ||
638 | int shift = i * GICD_CPUTARGETS_BITS; | ||
639 | target = ffs((val >> shift) & 0xffU); | ||
640 | target = target ? (target - 1) : 0; | ||
641 | dist->irq_spi_cpu[irq + i] = target; | ||
642 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
643 | bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); | ||
644 | if (c == target) | ||
645 | set_bit(irq + i, bmap); | ||
646 | else | ||
647 | clear_bit(irq + i, bmap); | ||
648 | } | ||
649 | } | ||
650 | } | ||
651 | |||
652 | static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, | ||
653 | struct kvm_exit_mmio *mmio, | ||
654 | phys_addr_t offset) | ||
655 | { | ||
656 | u32 reg; | ||
657 | |||
658 | /* We treat the banked interrupt targets as read-only */ | ||
659 | if (offset < 32) { | ||
660 | u32 roreg = 1 << vcpu->vcpu_id; | ||
661 | roreg |= roreg << 8; | ||
662 | roreg |= roreg << 16; | ||
663 | |||
664 | vgic_reg_access(mmio, &roreg, offset, | ||
665 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | ||
666 | return false; | ||
667 | } | ||
668 | |||
669 | reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); | ||
670 | vgic_reg_access(mmio, &reg, offset, | ||
671 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | ||
672 | if (mmio->is_write) { | ||
673 | vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); | ||
674 | vgic_update_state(vcpu->kvm); | ||
675 | return true; | ||
676 | } | ||
677 | |||
678 | return false; | ||
679 | } | ||
680 | |||
681 | static u32 vgic_cfg_expand(u16 val) | 523 | static u32 vgic_cfg_expand(u16 val) |
682 | { | 524 | { |
683 | u32 res = 0; | 525 | u32 res = 0; |
@@ -745,39 +587,6 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, | |||
745 | return false; | 587 | return false; |
746 | } | 588 | } |
747 | 589 | ||
748 | static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | ||
749 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
750 | { | ||
751 | u32 *reg; | ||
752 | |||
753 | reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, | ||
754 | vcpu->vcpu_id, offset >> 1); | ||
755 | |||
756 | return vgic_handle_cfg_reg(reg, mmio, offset); | ||
757 | } | ||
758 | |||
759 | static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, | ||
760 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
761 | { | ||
762 | u32 reg; | ||
763 | vgic_reg_access(mmio, &reg, offset, | ||
764 | ACCESS_READ_RAZ | ACCESS_WRITE_VALUE); | ||
765 | if (mmio->is_write) { | ||
766 | vgic_dispatch_sgi(vcpu, reg); | ||
767 | vgic_update_state(vcpu->kvm); | ||
768 | return true; | ||
769 | } | ||
770 | |||
771 | return false; | ||
772 | } | ||
773 | |||
774 | static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) | ||
775 | { | ||
776 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
777 | |||
778 | *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source; | ||
779 | } | ||
780 | |||
781 | /** | 590 | /** |
782 | * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor | 591 | * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor |
783 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs | 592 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs |
@@ -838,168 +647,6 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
838 | } | 647 | } |
839 | } | 648 | } |
840 | 649 | ||
841 | /* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */ | ||
842 | static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
843 | struct kvm_exit_mmio *mmio, | ||
844 | phys_addr_t offset) | ||
845 | { | ||
846 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
847 | int sgi; | ||
848 | int min_sgi = (offset & ~0x3); | ||
849 | int max_sgi = min_sgi + 3; | ||
850 | int vcpu_id = vcpu->vcpu_id; | ||
851 | u32 reg = 0; | ||
852 | |||
853 | /* Copy source SGIs from distributor side */ | ||
854 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
855 | int shift = 8 * (sgi - min_sgi); | ||
856 | reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift; | ||
857 | } | ||
858 | |||
859 | mmio_data_write(mmio, ~0, reg); | ||
860 | return false; | ||
861 | } | ||
862 | |||
863 | static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
864 | struct kvm_exit_mmio *mmio, | ||
865 | phys_addr_t offset, bool set) | ||
866 | { | ||
867 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
868 | int sgi; | ||
869 | int min_sgi = (offset & ~0x3); | ||
870 | int max_sgi = min_sgi + 3; | ||
871 | int vcpu_id = vcpu->vcpu_id; | ||
872 | u32 reg; | ||
873 | bool updated = false; | ||
874 | |||
875 | reg = mmio_data_read(mmio, ~0); | ||
876 | |||
877 | /* Set or clear pending SGIs on the distributor */ | ||
878 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
879 | u8 mask = reg >> (8 * (sgi - min_sgi)); | ||
880 | u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi); | ||
881 | if (set) { | ||
882 | if ((*src & mask) != mask) | ||
883 | updated = true; | ||
884 | *src |= mask; | ||
885 | } else { | ||
886 | if (*src & mask) | ||
887 | updated = true; | ||
888 | *src &= ~mask; | ||
889 | } | ||
890 | } | ||
891 | |||
892 | if (updated) | ||
893 | vgic_update_state(vcpu->kvm); | ||
894 | |||
895 | return updated; | ||
896 | } | ||
897 | |||
898 | static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, | ||
899 | struct kvm_exit_mmio *mmio, | ||
900 | phys_addr_t offset) | ||
901 | { | ||
902 | if (!mmio->is_write) | ||
903 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
904 | else | ||
905 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); | ||
906 | } | ||
907 | |||
908 | static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | ||
909 | struct kvm_exit_mmio *mmio, | ||
910 | phys_addr_t offset) | ||
911 | { | ||
912 | if (!mmio->is_write) | ||
913 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
914 | else | ||
915 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | ||
916 | } | ||
917 | |||
918 | static const struct kvm_mmio_range vgic_dist_ranges[] = { | ||
919 | { | ||
920 | .base = GIC_DIST_CTRL, | ||
921 | .len = 12, | ||
922 | .bits_per_irq = 0, | ||
923 | .handle_mmio = handle_mmio_misc, | ||
924 | }, | ||
925 | { | ||
926 | .base = GIC_DIST_IGROUP, | ||
927 | .len = VGIC_MAX_IRQS / 8, | ||
928 | .bits_per_irq = 1, | ||
929 | .handle_mmio = handle_mmio_raz_wi, | ||
930 | }, | ||
931 | { | ||
932 | .base = GIC_DIST_ENABLE_SET, | ||
933 | .len = VGIC_MAX_IRQS / 8, | ||
934 | .bits_per_irq = 1, | ||
935 | .handle_mmio = handle_mmio_set_enable_reg, | ||
936 | }, | ||
937 | { | ||
938 | .base = GIC_DIST_ENABLE_CLEAR, | ||
939 | .len = VGIC_MAX_IRQS / 8, | ||
940 | .bits_per_irq = 1, | ||
941 | .handle_mmio = handle_mmio_clear_enable_reg, | ||
942 | }, | ||
943 | { | ||
944 | .base = GIC_DIST_PENDING_SET, | ||
945 | .len = VGIC_MAX_IRQS / 8, | ||
946 | .bits_per_irq = 1, | ||
947 | .handle_mmio = handle_mmio_set_pending_reg, | ||
948 | }, | ||
949 | { | ||
950 | .base = GIC_DIST_PENDING_CLEAR, | ||
951 | .len = VGIC_MAX_IRQS / 8, | ||
952 | .bits_per_irq = 1, | ||
953 | .handle_mmio = handle_mmio_clear_pending_reg, | ||
954 | }, | ||
955 | { | ||
956 | .base = GIC_DIST_ACTIVE_SET, | ||
957 | .len = VGIC_MAX_IRQS / 8, | ||
958 | .bits_per_irq = 1, | ||
959 | .handle_mmio = handle_mmio_raz_wi, | ||
960 | }, | ||
961 | { | ||
962 | .base = GIC_DIST_ACTIVE_CLEAR, | ||
963 | .len = VGIC_MAX_IRQS / 8, | ||
964 | .bits_per_irq = 1, | ||
965 | .handle_mmio = handle_mmio_raz_wi, | ||
966 | }, | ||
967 | { | ||
968 | .base = GIC_DIST_PRI, | ||
969 | .len = VGIC_MAX_IRQS, | ||
970 | .bits_per_irq = 8, | ||
971 | .handle_mmio = handle_mmio_priority_reg, | ||
972 | }, | ||
973 | { | ||
974 | .base = GIC_DIST_TARGET, | ||
975 | .len = VGIC_MAX_IRQS, | ||
976 | .bits_per_irq = 8, | ||
977 | .handle_mmio = handle_mmio_target_reg, | ||
978 | }, | ||
979 | { | ||
980 | .base = GIC_DIST_CONFIG, | ||
981 | .len = VGIC_MAX_IRQS / 4, | ||
982 | .bits_per_irq = 2, | ||
983 | .handle_mmio = handle_mmio_cfg_reg, | ||
984 | }, | ||
985 | { | ||
986 | .base = GIC_DIST_SOFTINT, | ||
987 | .len = 4, | ||
988 | .handle_mmio = handle_mmio_sgi_reg, | ||
989 | }, | ||
990 | { | ||
991 | .base = GIC_DIST_SGI_PENDING_CLEAR, | ||
992 | .len = VGIC_NR_SGIS, | ||
993 | .handle_mmio = handle_mmio_sgi_clear, | ||
994 | }, | ||
995 | { | ||
996 | .base = GIC_DIST_SGI_PENDING_SET, | ||
997 | .len = VGIC_NR_SGIS, | ||
998 | .handle_mmio = handle_mmio_sgi_set, | ||
999 | }, | ||
1000 | {} | ||
1001 | }; | ||
1002 | |||
1003 | const | 650 | const |
1004 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, | 651 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, |
1005 | struct kvm_exit_mmio *mmio, | 652 | struct kvm_exit_mmio *mmio, |
@@ -1127,24 +774,6 @@ bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
1127 | return true; | 774 | return true; |
1128 | } | 775 | } |
1129 | 776 | ||
1130 | static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
1131 | struct kvm_exit_mmio *mmio) | ||
1132 | { | ||
1133 | unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base; | ||
1134 | |||
1135 | if (!is_in_range(mmio->phys_addr, mmio->len, base, | ||
1136 | KVM_VGIC_V2_DIST_SIZE)) | ||
1137 | return false; | ||
1138 | |||
1139 | /* GICv2 does not support accesses wider than 32 bits */ | ||
1140 | if (mmio->len > 4) { | ||
1141 | kvm_inject_dabt(vcpu, mmio->phys_addr); | ||
1142 | return true; | ||
1143 | } | ||
1144 | |||
1145 | return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base); | ||
1146 | } | ||
1147 | |||
1148 | /** | 777 | /** |
1149 | * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation | 778 | * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation |
1150 | * @vcpu: pointer to the vcpu performing the access | 779 | * @vcpu: pointer to the vcpu performing the access |
@@ -1169,52 +798,6 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
1169 | return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio); | 798 | return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio); |
1170 | } | 799 | } |
1171 | 800 | ||
1172 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi) | ||
1173 | { | ||
1174 | return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi; | ||
1175 | } | ||
1176 | |||
1177 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | ||
1178 | { | ||
1179 | struct kvm *kvm = vcpu->kvm; | ||
1180 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
1181 | int nrcpus = atomic_read(&kvm->online_vcpus); | ||
1182 | u8 target_cpus; | ||
1183 | int sgi, mode, c, vcpu_id; | ||
1184 | |||
1185 | vcpu_id = vcpu->vcpu_id; | ||
1186 | |||
1187 | sgi = reg & 0xf; | ||
1188 | target_cpus = (reg >> 16) & 0xff; | ||
1189 | mode = (reg >> 24) & 3; | ||
1190 | |||
1191 | switch (mode) { | ||
1192 | case 0: | ||
1193 | if (!target_cpus) | ||
1194 | return; | ||
1195 | break; | ||
1196 | |||
1197 | case 1: | ||
1198 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; | ||
1199 | break; | ||
1200 | |||
1201 | case 2: | ||
1202 | target_cpus = 1 << vcpu_id; | ||
1203 | break; | ||
1204 | } | ||
1205 | |||
1206 | kvm_for_each_vcpu(c, vcpu, kvm) { | ||
1207 | if (target_cpus & 1) { | ||
1208 | /* Flag the SGI as pending */ | ||
1209 | vgic_dist_irq_set_pending(vcpu, sgi); | ||
1210 | *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id; | ||
1211 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | ||
1212 | } | ||
1213 | |||
1214 | target_cpus >>= 1; | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1218 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) | 801 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) |
1219 | { | 802 | { |
1220 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; | 803 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; |
@@ -1368,6 +951,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
1368 | /* | 951 | /* |
1369 | * Queue an interrupt to a CPU virtual interface. Return true on success, | 952 | * Queue an interrupt to a CPU virtual interface. Return true on success, |
1370 | * or false if it wasn't possible to queue it. | 953 | * or false if it wasn't possible to queue it. |
954 | * sgi_source must be zero for any non-SGI interrupts. | ||
1371 | */ | 955 | */ |
1372 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | 956 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) |
1373 | { | 957 | { |
@@ -1418,37 +1002,6 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
1418 | return true; | 1002 | return true; |
1419 | } | 1003 | } |
1420 | 1004 | ||
1421 | static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq) | ||
1422 | { | ||
1423 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1424 | unsigned long sources; | ||
1425 | int vcpu_id = vcpu->vcpu_id; | ||
1426 | int c; | ||
1427 | |||
1428 | sources = *vgic_get_sgi_sources(dist, vcpu_id, irq); | ||
1429 | |||
1430 | for_each_set_bit(c, &sources, dist->nr_cpus) { | ||
1431 | if (vgic_queue_irq(vcpu, c, irq)) | ||
1432 | clear_bit(c, &sources); | ||
1433 | } | ||
1434 | |||
1435 | *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources; | ||
1436 | |||
1437 | /* | ||
1438 | * If the sources bitmap has been cleared it means that we | ||
1439 | * could queue all the SGIs onto link registers (see the | ||
1440 | * clear_bit above), and therefore we are done with them in | ||
1441 | * our emulated gic and can get rid of them. | ||
1442 | */ | ||
1443 | if (!sources) { | ||
1444 | vgic_dist_irq_clear_pending(vcpu, irq); | ||
1445 | vgic_cpu_irq_clear(vcpu, irq); | ||
1446 | return true; | ||
1447 | } | ||
1448 | |||
1449 | return false; | ||
1450 | } | ||
1451 | |||
1452 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) | 1005 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) |
1453 | { | 1006 | { |
1454 | if (!vgic_can_sample_irq(vcpu, irq)) | 1007 | if (!vgic_can_sample_irq(vcpu, irq)) |
@@ -1894,16 +1447,6 @@ void kvm_vgic_destroy(struct kvm *kvm) | |||
1894 | dist->nr_cpus = 0; | 1447 | dist->nr_cpus = 0; |
1895 | } | 1448 | } |
1896 | 1449 | ||
1897 | static int vgic_v2_init_model(struct kvm *kvm) | ||
1898 | { | ||
1899 | int i; | ||
1900 | |||
1901 | for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4) | ||
1902 | vgic_set_target_reg(kvm, 0, i); | ||
1903 | |||
1904 | return 0; | ||
1905 | } | ||
1906 | |||
1907 | /* | 1450 | /* |
1908 | * Allocate and initialize the various data structures. Must be called | 1451 | * Allocate and initialize the various data structures. Must be called |
1909 | * with kvm->lock held! | 1452 | * with kvm->lock held! |
@@ -1994,73 +1537,6 @@ out: | |||
1994 | return ret; | 1537 | return ret; |
1995 | } | 1538 | } |
1996 | 1539 | ||
1997 | /** | ||
1998 | * vgic_v2_map_resources - Configure global VGIC state before running any VCPUs | ||
1999 | * @kvm: pointer to the kvm struct | ||
2000 | * | ||
2001 | * Map the virtual CPU interface into the VM before running any VCPUs. We | ||
2002 | * can't do this at creation time, because user space must first set the | ||
2003 | * virtual CPU interface address in the guest physical address space. | ||
2004 | */ | ||
2005 | static int vgic_v2_map_resources(struct kvm *kvm, | ||
2006 | const struct vgic_params *params) | ||
2007 | { | ||
2008 | int ret = 0; | ||
2009 | |||
2010 | if (!irqchip_in_kernel(kvm)) | ||
2011 | return 0; | ||
2012 | |||
2013 | mutex_lock(&kvm->lock); | ||
2014 | |||
2015 | if (vgic_ready(kvm)) | ||
2016 | goto out; | ||
2017 | |||
2018 | if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || | ||
2019 | IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) { | ||
2020 | kvm_err("Need to set vgic cpu and dist addresses first\n"); | ||
2021 | ret = -ENXIO; | ||
2022 | goto out; | ||
2023 | } | ||
2024 | |||
2025 | /* | ||
2026 | * Initialize the vgic if this hasn't already been done on demand by | ||
2027 | * accessing the vgic state from userspace. | ||
2028 | */ | ||
2029 | ret = vgic_init(kvm); | ||
2030 | if (ret) { | ||
2031 | kvm_err("Unable to allocate maps\n"); | ||
2032 | goto out; | ||
2033 | } | ||
2034 | |||
2035 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | ||
2036 | params->vcpu_base, KVM_VGIC_V2_CPU_SIZE, | ||
2037 | true); | ||
2038 | if (ret) { | ||
2039 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); | ||
2040 | goto out; | ||
2041 | } | ||
2042 | |||
2043 | kvm->arch.vgic.ready = true; | ||
2044 | out: | ||
2045 | if (ret) | ||
2046 | kvm_vgic_destroy(kvm); | ||
2047 | mutex_unlock(&kvm->lock); | ||
2048 | return ret; | ||
2049 | } | ||
2050 | |||
2051 | void vgic_v2_init_emulation(struct kvm *kvm) | ||
2052 | { | ||
2053 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
2054 | |||
2055 | dist->vm_ops.handle_mmio = vgic_v2_handle_mmio; | ||
2056 | dist->vm_ops.queue_sgi = vgic_v2_queue_sgi; | ||
2057 | dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source; | ||
2058 | dist->vm_ops.init_model = vgic_v2_init_model; | ||
2059 | dist->vm_ops.map_resources = vgic_v2_map_resources; | ||
2060 | |||
2061 | kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS; | ||
2062 | } | ||
2063 | |||
2064 | static int init_vgic_model(struct kvm *kvm, int type) | 1540 | static int init_vgic_model(struct kvm *kvm, int type) |
2065 | { | 1541 | { |
2066 | switch (type) { | 1542 | switch (type) { |
@@ -2210,188 +1686,6 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | |||
2210 | return r; | 1686 | return r; |
2211 | } | 1687 | } |
2212 | 1688 | ||
2213 | static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, | ||
2214 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
2215 | { | ||
2216 | bool updated = false; | ||
2217 | struct vgic_vmcr vmcr; | ||
2218 | u32 *vmcr_field; | ||
2219 | u32 reg; | ||
2220 | |||
2221 | vgic_get_vmcr(vcpu, &vmcr); | ||
2222 | |||
2223 | switch (offset & ~0x3) { | ||
2224 | case GIC_CPU_CTRL: | ||
2225 | vmcr_field = &vmcr.ctlr; | ||
2226 | break; | ||
2227 | case GIC_CPU_PRIMASK: | ||
2228 | vmcr_field = &vmcr.pmr; | ||
2229 | break; | ||
2230 | case GIC_CPU_BINPOINT: | ||
2231 | vmcr_field = &vmcr.bpr; | ||
2232 | break; | ||
2233 | case GIC_CPU_ALIAS_BINPOINT: | ||
2234 | vmcr_field = &vmcr.abpr; | ||
2235 | break; | ||
2236 | default: | ||
2237 | BUG(); | ||
2238 | } | ||
2239 | |||
2240 | if (!mmio->is_write) { | ||
2241 | reg = *vmcr_field; | ||
2242 | mmio_data_write(mmio, ~0, reg); | ||
2243 | } else { | ||
2244 | reg = mmio_data_read(mmio, ~0); | ||
2245 | if (reg != *vmcr_field) { | ||
2246 | *vmcr_field = reg; | ||
2247 | vgic_set_vmcr(vcpu, &vmcr); | ||
2248 | updated = true; | ||
2249 | } | ||
2250 | } | ||
2251 | return updated; | ||
2252 | } | ||
2253 | |||
2254 | static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, | ||
2255 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
2256 | { | ||
2257 | return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); | ||
2258 | } | ||
2259 | |||
2260 | static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, | ||
2261 | struct kvm_exit_mmio *mmio, | ||
2262 | phys_addr_t offset) | ||
2263 | { | ||
2264 | u32 reg; | ||
2265 | |||
2266 | if (mmio->is_write) | ||
2267 | return false; | ||
2268 | |||
2269 | /* GICC_IIDR */ | ||
2270 | reg = (PRODUCT_ID_KVM << 20) | | ||
2271 | (GICC_ARCH_VERSION_V2 << 16) | | ||
2272 | (IMPLEMENTER_ARM << 0); | ||
2273 | mmio_data_write(mmio, ~0, reg); | ||
2274 | return false; | ||
2275 | } | ||
2276 | |||
2277 | /* | ||
2278 | * CPU Interface Register accesses - these are not accessed by the VM, but by | ||
2279 | * user space for saving and restoring VGIC state. | ||
2280 | */ | ||
2281 | static const struct kvm_mmio_range vgic_cpu_ranges[] = { | ||
2282 | { | ||
2283 | .base = GIC_CPU_CTRL, | ||
2284 | .len = 12, | ||
2285 | .handle_mmio = handle_cpu_mmio_misc, | ||
2286 | }, | ||
2287 | { | ||
2288 | .base = GIC_CPU_ALIAS_BINPOINT, | ||
2289 | .len = 4, | ||
2290 | .handle_mmio = handle_mmio_abpr, | ||
2291 | }, | ||
2292 | { | ||
2293 | .base = GIC_CPU_ACTIVEPRIO, | ||
2294 | .len = 16, | ||
2295 | .handle_mmio = handle_mmio_raz_wi, | ||
2296 | }, | ||
2297 | { | ||
2298 | .base = GIC_CPU_IDENT, | ||
2299 | .len = 4, | ||
2300 | .handle_mmio = handle_cpu_mmio_ident, | ||
2301 | }, | ||
2302 | }; | ||
2303 | |||
2304 | static int vgic_attr_regs_access(struct kvm_device *dev, | ||
2305 | struct kvm_device_attr *attr, | ||
2306 | u32 *reg, bool is_write) | ||
2307 | { | ||
2308 | const struct kvm_mmio_range *r = NULL, *ranges; | ||
2309 | phys_addr_t offset; | ||
2310 | int ret, cpuid, c; | ||
2311 | struct kvm_vcpu *vcpu, *tmp_vcpu; | ||
2312 | struct vgic_dist *vgic; | ||
2313 | struct kvm_exit_mmio mmio; | ||
2314 | |||
2315 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
2316 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | ||
2317 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | ||
2318 | |||
2319 | mutex_lock(&dev->kvm->lock); | ||
2320 | |||
2321 | ret = vgic_init(dev->kvm); | ||
2322 | if (ret) | ||
2323 | goto out; | ||
2324 | |||
2325 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | ||
2326 | ret = -EINVAL; | ||
2327 | goto out; | ||
2328 | } | ||
2329 | |||
2330 | vcpu = kvm_get_vcpu(dev->kvm, cpuid); | ||
2331 | vgic = &dev->kvm->arch.vgic; | ||
2332 | |||
2333 | mmio.len = 4; | ||
2334 | mmio.is_write = is_write; | ||
2335 | if (is_write) | ||
2336 | mmio_data_write(&mmio, ~0, *reg); | ||
2337 | switch (attr->group) { | ||
2338 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
2339 | mmio.phys_addr = vgic->vgic_dist_base + offset; | ||
2340 | ranges = vgic_dist_ranges; | ||
2341 | break; | ||
2342 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
2343 | mmio.phys_addr = vgic->vgic_cpu_base + offset; | ||
2344 | ranges = vgic_cpu_ranges; | ||
2345 | break; | ||
2346 | default: | ||
2347 | BUG(); | ||
2348 | } | ||
2349 | r = vgic_find_range(ranges, &mmio, offset); | ||
2350 | |||
2351 | if (unlikely(!r || !r->handle_mmio)) { | ||
2352 | ret = -ENXIO; | ||
2353 | goto out; | ||
2354 | } | ||
2355 | |||
2356 | |||
2357 | spin_lock(&vgic->lock); | ||
2358 | |||
2359 | /* | ||
2360 | * Ensure that no other VCPU is running by checking the vcpu->cpu | ||
2361 | * field. If no other VCPUs are running we can safely access the VGIC | ||
2362 | * state, because even if another VCPU is run after this point, that | ||
2363 | * VCPU will not touch the vgic state, because it will block on | ||
2364 | * getting the vgic->lock in kvm_vgic_sync_hwstate(). | ||
2365 | */ | ||
2366 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { | ||
2367 | if (unlikely(tmp_vcpu->cpu != -1)) { | ||
2368 | ret = -EBUSY; | ||
2369 | goto out_vgic_unlock; | ||
2370 | } | ||
2371 | } | ||
2372 | |||
2373 | /* | ||
2374 | * Move all pending IRQs from the LRs on all VCPUs so the pending | ||
2375 | * state can be properly represented in the register state accessible | ||
2376 | * through this API. | ||
2377 | */ | ||
2378 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) | ||
2379 | vgic_unqueue_irqs(tmp_vcpu); | ||
2380 | |||
2381 | offset -= r->base; | ||
2382 | r->handle_mmio(vcpu, &mmio, offset); | ||
2383 | |||
2384 | if (!is_write) | ||
2385 | *reg = mmio_data_read(&mmio, ~0); | ||
2386 | |||
2387 | ret = 0; | ||
2388 | out_vgic_unlock: | ||
2389 | spin_unlock(&vgic->lock); | ||
2390 | out: | ||
2391 | mutex_unlock(&dev->kvm->lock); | ||
2392 | return ret; | ||
2393 | } | ||
2394 | |||
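The attr word decoded at the top of this function is simply a (cpuid, offset)
pair. A hypothetical userspace encoder mirroring that decode could look like
the sketch below; note that because of the vcpu->cpu check above, an access
fails with -EBUSY while any VCPU is in KVM_RUN, so callers must stop the VM
first rather than retry blindly:

    /* Pack a VCPU index and a register byte offset into the attr word,
     * mirroring the masks and shift used by vgic_attr_regs_access().
     * Requires <linux/kvm.h> on an arm/arm64 build. */
    static inline __u64 vgic_attr_encode(unsigned int cpuid, __u32 offset)
    {
            return ((__u64)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
                   (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK);
    }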
2395 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | 1689 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2396 | { | 1690 | { |
2397 | int r; | 1691 | int r; |
@@ -2451,31 +1745,6 @@ int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
2451 | return -ENXIO; | 1745 | return -ENXIO; |
2452 | } | 1746 | } |
2453 | 1747 | ||
2454 | static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
2455 | { | ||
2456 | int ret; | ||
2457 | |||
2458 | ret = vgic_set_common_attr(dev, attr); | ||
2459 | if (ret != -ENXIO) | ||
2460 | return ret; | ||
2461 | |||
2462 | switch (attr->group) { | ||
2463 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
2464 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
2465 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
2466 | u32 reg; | ||
2467 | |||
2468 | if (get_user(reg, uaddr)) | ||
2469 | return -EFAULT; | ||
2470 | |||
2471 | return vgic_attr_regs_access(dev, attr, ®, true); | ||
2472 | } | ||
2473 | |||
2474 | } | ||
2475 | |||
2476 | return -ENXIO; | ||
2477 | } | ||
2478 | |||
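A usage sketch for this write path, assuming GICD_CTLR sits at distributor
offset 0 (as it does on a GICv2) and reusing the includes and vgic_fd from the
GICC_PMR sketch above; error handling omitted:

    __u32 enable = 1;                   /* GICD_CTLR.Enable */
    struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
            .attr  = 0,                 /* VCPU 0, offset 0x0 = GICD_CTLR */
            .addr  = (__u64)(unsigned long)&enable,
    };

    ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);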
2479 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | 1748 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2480 | { | 1749 | { |
2481 | int r = -ENXIO; | 1750 | int r = -ENXIO; |
@@ -2506,31 +1775,6 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
2506 | return r; | 1775 | return r; |
2507 | } | 1776 | } |
2508 | 1777 | ||
2509 | static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
2510 | { | ||
2511 | int ret; | ||
2512 | |||
2513 | ret = vgic_get_common_attr(dev, attr); | ||
2514 | if (ret != -ENXIO) | ||
2515 | return ret; | ||
2516 | |||
2517 | switch (attr->group) { | ||
2518 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
2519 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
2520 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
2521 | u32 reg = 0; | ||
2522 | |||
2523 | ret = vgic_attr_regs_access(dev, attr, ®, false); | ||
2524 | if (ret) | ||
2525 | return ret; | ||
2526 | return put_user(reg, uaddr); | ||
2527 | } | ||
2528 | |||
2529 | } | ||
2530 | |||
2531 | return -ENXIO; | ||
2532 | } | ||
2533 | |||
2534 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) | 1778 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) |
2535 | { | 1779 | { |
2536 | struct kvm_exit_mmio dev_attr_mmio; | 1780 | struct kvm_exit_mmio dev_attr_mmio; |
@@ -2542,54 +1786,6 @@ int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) | |||
2542 | return -ENXIO; | 1786 | return -ENXIO; |
2543 | } | 1787 | } |
2544 | 1788 | ||
2545 | static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
2546 | { | ||
2547 | phys_addr_t offset; | ||
2548 | |||
2549 | switch (attr->group) { | ||
2550 | case KVM_DEV_ARM_VGIC_GRP_ADDR: | ||
2551 | switch (attr->attr) { | ||
2552 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | ||
2553 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | ||
2554 | return 0; | ||
2555 | } | ||
2556 | break; | ||
2557 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
2558 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
2559 | return vgic_has_attr_regs(vgic_dist_ranges, offset); | ||
2560 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
2561 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
2562 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | ||
2563 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: | ||
2564 | return 0; | ||
2565 | case KVM_DEV_ARM_VGIC_GRP_CTRL: | ||
2566 | switch (attr->attr) { | ||
2567 | case KVM_DEV_ARM_VGIC_CTRL_INIT: | ||
2568 | return 0; | ||
2569 | } | ||
2570 | } | ||
2571 | return -ENXIO; | ||
2572 | } | ||
2573 | |||
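Since has_attr backs the KVM_HAS_DEVICE_ATTR ioctl, userspace can probe
whether a given register is accessible before touching it. A sketch under the
same assumptions as the examples above:

    /* Returns 0 if the kernel would accept get/set on this (group, attr). */
    static int vgic_attr_supported(int vgic_fd, __u32 group, __u64 attr_id)
    {
            struct kvm_device_attr attr = { .group = group, .attr = attr_id };

            return ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr);
    }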
2574 | void vgic_destroy(struct kvm_device *dev) | ||
2575 | { | ||
2576 | kfree(dev); | ||
2577 | } | ||
2578 | |||
2579 | int vgic_create(struct kvm_device *dev, u32 type) | ||
2580 | { | ||
2581 | return kvm_vgic_create(dev->kvm, type); | ||
2582 | } | ||
2583 | |||
2584 | struct kvm_device_ops kvm_arm_vgic_v2_ops = { | ||
2585 | .name = "kvm-arm-vgic", | ||
2586 | .create = vgic_create, | ||
2587 | .destroy = vgic_destroy, | ||
2588 | .set_attr = vgic_set_attr, | ||
2589 | .get_attr = vgic_get_attr, | ||
2590 | .has_attr = vgic_has_attr, | ||
2591 | }; | ||
2592 | |||
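Userspace instantiates this ops table ("kvm-arm-vgic") with KVM_CREATE_DEVICE
on the VM fd, which is also where the vgic_fd used in the sketches above would
come from:

    struct kvm_create_device cd = {
            .type = KVM_DEV_TYPE_ARM_VGIC_V2,
    };

    if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
            vgic_fd = cd.fd;    /* fd for the KVM_*_DEVICE_ATTR ioctls */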
2593 | static void vgic_init_maintenance_interrupt(void *info) | 1789 | static void vgic_init_maintenance_interrupt(void *info) |
2594 | { | 1790 | { |
2595 | enable_percpu_irq(vgic->maint_irq, 0); | 1791 | enable_percpu_irq(vgic->maint_irq, 0); |