path: root/arch/arm/kvm/vgic.c
author	Marc Zyngier <marc.zyngier@arm.com>	2013-01-21 19:36:14 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2013-02-11 13:59:15 -0500
commit	b47ef92af8efc30f4fbdeac041397df01b7134af (patch)
tree	41a1298f2b5d2a0d34f50334afae800c72427e39	/arch/arm/kvm/vgic.c
parent	330690cdceba06b60afcfe50a65f72fab7f4f970 (diff)
ARM: KVM: VGIC distributor handling
Add the GIC distributor emulation code. A number of the GIC features are
simply ignored as they are not required to boot a Linux guest.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch/arm/kvm/vgic.c')
-rw-r--r-- | arch/arm/kvm/vgic.c | 596
1 file changed, 595 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index b333b58de4cb..815069f22e8b 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -22,6 +22,43 @@
 #include <linux/io.h>
 #include <asm/kvm_emulate.h>
 
+/*
+ * How the whole thing works (courtesy of Christoffer Dall):
+ *
+ * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
+ *   something is pending
+ * - VGIC pending interrupts are stored on the vgic.irq_state vgic
+ *   bitmap (this bitmap is updated by both user land ioctls and guest
+ *   mmio ops, and other in-kernel peripherals such as the
+ *   arch. timers) and indicate the 'wire' state.
+ * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
+ *   recalculated
+ * - To calculate the oracle, we need info for each cpu from
+ *   compute_pending_for_cpu, which considers:
+ *   - PPI: dist->irq_state & dist->irq_enable
+ *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
+ *   - irq_spi_target is a 'formatted' version of the GICD_ICFGR
+ *     registers, stored on each vcpu. We only keep one bit of
+ *     information per interrupt, making sure that only one vcpu can
+ *     accept the interrupt.
+ * - The same is true when injecting an interrupt, except that we only
+ *   consider a single interrupt at a time. The irq_spi_cpu array
+ *   contains the target CPU for each SPI.
+ *
+ * The handling of level interrupts adds some extra complexity. We
+ * need to track when the interrupt has been EOIed, so we can sample
+ * the 'line' again. This is achieved as such:
+ *
+ * - When a level interrupt is moved onto a vcpu, the corresponding
+ *   bit in irq_active is set. As long as this bit is set, the line
+ *   will be ignored for further interrupts. The interrupt is injected
+ *   into the vcpu with the GICH_LR_EOI bit set (generate a
+ *   maintenance interrupt on EOI).
+ * - When the interrupt is EOIed, the maintenance interrupt fires,
+ *   and clears the corresponding bit in irq_active. This allow the
+ *   interrupt line to be sampled again.
+ */
+
 #define VGIC_ADDR_UNDEF		(-1)
 #define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)
 
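The comment block above defines the pending "oracle": a private interrupt (SGI/PPI) is pending when its bits in irq_state and irq_enabled are both set, and an SPI additionally needs its bit set in the target vcpu's irq_spi_target bitmap. In this patch compute_pending_for_cpu() (further down) is still a stub that returns 0; a minimal sketch of what that per-vcpu computation could look like, reusing the bitmap accessors added below and assuming a VGIC_NR_SHARED_IRQS constant for the number of shared interrupts, is:

/*
 * Illustrative sketch only -- not part of this patch. Assumes
 * VGIC_NR_SHARED_IRQS == VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS and
 * <linux/bitmap.h> for bitmap_and()/bitmap_empty().
 */
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	int vcpu_id = vcpu->vcpu_id;

	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	/* SGI/PPI: pending when the wire state and the enable bit agree */
	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	/* SPI: additionally gated by this vcpu's irq_spi_target bitmap */
	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	/* Anything left set means this vcpu has a pending interrupt */
	return !bitmap_empty(pend_percpu, VGIC_NR_PRIVATE_IRQS) ||
	       !bitmap_empty(pend_shared, VGIC_NR_SHARED_IRQS);
}

vgic_update_state(), added in the last hunk of this patch, would then set the vcpu's bit in dist->irq_pending_on_cpu whenever such a computation returns non-zero.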
@@ -34,6 +71,119 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static void vgic_update_state(struct kvm *kvm);
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
+
+static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
+				int cpuid, u32 offset)
+{
+	offset >>= 2;
+	if (!offset)
+		return x->percpu[cpuid].reg;
+	else
+		return x->shared.reg + offset - 1;
+}
+
+static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
+				   int cpuid, int irq)
+{
+	if (irq < VGIC_NR_PRIVATE_IRQS)
+		return test_bit(irq, x->percpu[cpuid].reg_ul);
+
+	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
+}
+
+static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
+				    int irq, int val)
+{
+	unsigned long *reg;
+
+	if (irq < VGIC_NR_PRIVATE_IRQS) {
+		reg = x->percpu[cpuid].reg_ul;
+	} else {
+		reg = x->shared.reg_ul;
+		irq -= VGIC_NR_PRIVATE_IRQS;
+	}
+
+	if (val)
+		set_bit(irq, reg);
+	else
+		clear_bit(irq, reg);
+}
+
+static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
+{
+	if (unlikely(cpuid >= VGIC_MAX_CPUS))
+		return NULL;
+	return x->percpu[cpuid].reg_ul;
+}
+
+static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
+{
+	return x->shared.reg_ul;
+}
+
+static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
+{
+	offset >>= 2;
+	BUG_ON(offset > (VGIC_NR_IRQS / 4));
+	if (offset < 4)
+		return x->percpu[cpuid] + offset;
+	else
+		return x->shared + offset - 8;
+}
+
+#define VGIC_CFG_LEVEL	0
+#define VGIC_CFG_EDGE	1
+
+static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int irq_val;
+
+	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
+	return irq_val == VGIC_CFG_EDGE;
+}
+
+static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
+}
+
+static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
+}
+
+static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
+{
+	if (irq < VGIC_NR_PRIVATE_IRQS)
+		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
+	else
+		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
+			vcpu->arch.vgic_cpu.pending_shared);
+}
+
+static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
+{
+	if (irq < VGIC_NR_PRIVATE_IRQS)
+		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
+	else
+		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
+			vcpu->arch.vgic_cpu.pending_shared);
+}
+
 static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
 {
 	return *((u32 *)mmio->data) & mask;
@@ -105,6 +255,291 @@ static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
 	}
 }
 
+static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
+			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 reg;
+	u32 word_offset = offset & 3;
+
+	switch (offset & ~3) {
+	case 0:			/* CTLR */
+		reg = vcpu->kvm->arch.vgic.enabled;
+		vgic_reg_access(mmio, &reg, word_offset,
+				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+		if (mmio->is_write) {
+			vcpu->kvm->arch.vgic.enabled = reg & 1;
+			vgic_update_state(vcpu->kvm);
+			return true;
+		}
+		break;
+
+	case 4:			/* TYPER */
+		reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+		reg |= (VGIC_NR_IRQS >> 5) - 1;
+		vgic_reg_access(mmio, &reg, word_offset,
+				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		break;
+
+	case 8:			/* IIDR */
+		reg = 0x4B00043B;
+		vgic_reg_access(mmio, &reg, word_offset,
+				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		break;
+	}
+
+	return false;
+}
+
+static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
+			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+				       vcpu->vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	if (mmio->is_write) {
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
+static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_exit_mmio *mmio,
+					 phys_addr_t offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
+				       vcpu->vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	if (mmio->is_write) {
+		if (offset < 4) /* Force SGI enabled */
+			*reg |= 0xffff;
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
+				       vcpu->vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	if (mmio->is_write) {
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+					  struct kvm_exit_mmio *mmio,
+					  phys_addr_t offset)
+{
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
+				       vcpu->vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	if (mmio->is_write) {
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
+static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
+				     struct kvm_exit_mmio *mmio,
+				     phys_addr_t offset)
+{
+	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
+					vcpu->vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+	return false;
+}
+
+#define GICD_ITARGETSR_SIZE	32
+#define GICD_CPUTARGETS_BITS	8
+#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
+static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i, c;
+	unsigned long *bmap;
+	u32 val = 0;
+
+	irq -= VGIC_NR_PRIVATE_IRQS;
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+		for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+			if (test_bit(irq + i, bmap))
+				val |= 1 << (c + i * 8);
+	}
+
+	return val;
+}
+
+static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i, c;
+	unsigned long *bmap;
+	u32 target;
+
+	irq -= VGIC_NR_PRIVATE_IRQS;
+
+	/*
+	 * Pick the LSB in each byte. This ensures we target exactly
+	 * one vcpu per IRQ. If the byte is null, assume we target
+	 * CPU0.
+	 */
+	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
+		int shift = i * GICD_CPUTARGETS_BITS;
+		target = ffs((val >> shift) & 0xffU);
+		target = target ? (target - 1) : 0;
+		dist->irq_spi_cpu[irq + i] = target;
+		kvm_for_each_vcpu(c, vcpu, kvm) {
+			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
+			if (c == target)
+				set_bit(irq + i, bmap);
+			else
+				clear_bit(irq + i, bmap);
+		}
+	}
+}
+
+static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
+				   struct kvm_exit_mmio *mmio,
+				   phys_addr_t offset)
+{
+	u32 reg;
+
+	/* We treat the banked interrupts targets as read-only */
+	if (offset < 32) {
+		u32 roreg = 1 << vcpu->vcpu_id;
+		roreg |= roreg << 8;
+		roreg |= roreg << 16;
+
+		vgic_reg_access(mmio, &roreg, offset,
+				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		return false;
+	}
+
+	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
+	vgic_reg_access(mmio, &reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+	if (mmio->is_write) {
+		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
+static u32 vgic_cfg_expand(u16 val)
+{
+	u32 res = 0;
+	int i;
+
+	/*
+	 * Turn a 16bit value like abcd...mnop into a 32bit word
+	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
+	 */
+	for (i = 0; i < 16; i++)
+		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
+
+	return res;
+}
+
+static u16 vgic_cfg_compress(u32 val)
+{
+	u16 res = 0;
+	int i;
+
+	/*
+	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
+	 * abcd...mnop which is what we really care about.
+	 */
+	for (i = 0; i < 16; i++)
+		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
+
+	return res;
+}
+
+/*
+ * The distributor uses 2 bits per IRQ for the CFG register, but the
+ * LSB is always 0. As such, we only keep the upper bit, and use the
+ * two above functions to compress/expand the bits
+ */
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 val;
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+				       vcpu->vcpu_id, offset >> 1);
+	if (offset & 2)
+		val = *reg >> 16;
+	else
+		val = *reg & 0xffff;
+
+	val = vgic_cfg_expand(val);
+	vgic_reg_access(mmio, &val, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
+	if (mmio->is_write) {
+		if (offset < 4) {
+			*reg = ~0U; /* Force PPIs/SGIs to 1 */
+			return false;
+		}
+
+		val = vgic_cfg_compress(val);
+		if (offset & 2) {
+			*reg &= 0xffff;
+			*reg |= val << 16;
+		} else {
+			*reg &= 0xffff << 16;
+			*reg |= val;
+		}
+	}
+
+	return false;
+}
+
+static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 reg;
+	vgic_reg_access(mmio, &reg, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
+	if (mmio->is_write) {
+		vgic_dispatch_sgi(vcpu, reg);
+		vgic_update_state(vcpu->kvm);
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * I would have liked to use the kvm_bus_io_*() API instead, but it
  * cannot cope with banked registers (only the VM pointer is passed
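The CFG encoding above keeps one bit per interrupt and expands it to the hardware's two-bits-per-interrupt layout with the LSB clear. A small round-trip check may make the packing concrete; this is an illustrative sketch only, using standalone userspace copies of the two helpers (the kernel versions above are static), stdint types, and an explicit cast to keep the widest shift unsigned:

#include <assert.h>
#include <stdint.h>

#define VGIC_CFG_EDGE 1

/* Userspace copy of vgic_cfg_expand(): bit i of val -> bit 2*i+1 of res */
static uint32_t cfg_expand(uint16_t val)
{
	uint32_t res = 0;
	int i;

	for (i = 0; i < 16; i++)
		res |= (uint32_t)((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
	return res;
}

/* Userspace copy of vgic_cfg_compress(): bit 2*i+1 of val -> bit i of res */
static uint16_t cfg_compress(uint32_t val)
{
	uint16_t res = 0;
	int i;

	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
	return res;
}

int main(void)
{
	uint16_t cfg = 0xA5A5;			/* one config bit per IRQ, 1 = edge */
	uint32_t hw = cfg_expand(cfg);		/* two bits per IRQ, LSB always 0   */

	assert((hw & 0x55555555u) == 0);	/* the even (LSB) positions stay clear */
	assert(cfg_compress(hw) == cfg);	/* compress() undoes expand()          */
	return 0;
}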
@@ -119,6 +554,66 @@ struct mmio_range {
 };
 
 static const struct mmio_range vgic_ranges[] = {
+	{
+		.base		= GIC_DIST_CTRL,
+		.len		= 12,
+		.handle_mmio	= handle_mmio_misc,
+	},
+	{
+		.base		= GIC_DIST_IGROUP,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_raz_wi,
+	},
+	{
+		.base		= GIC_DIST_ENABLE_SET,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_set_enable_reg,
+	},
+	{
+		.base		= GIC_DIST_ENABLE_CLEAR,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_clear_enable_reg,
+	},
+	{
+		.base		= GIC_DIST_PENDING_SET,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_set_pending_reg,
+	},
+	{
+		.base		= GIC_DIST_PENDING_CLEAR,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_clear_pending_reg,
+	},
+	{
+		.base		= GIC_DIST_ACTIVE_SET,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_raz_wi,
+	},
+	{
+		.base		= GIC_DIST_ACTIVE_CLEAR,
+		.len		= VGIC_NR_IRQS / 8,
+		.handle_mmio	= handle_mmio_raz_wi,
+	},
+	{
+		.base		= GIC_DIST_PRI,
+		.len		= VGIC_NR_IRQS,
+		.handle_mmio	= handle_mmio_priority_reg,
+	},
+	{
+		.base		= GIC_DIST_TARGET,
+		.len		= VGIC_NR_IRQS,
+		.handle_mmio	= handle_mmio_target_reg,
+	},
+	{
+		.base		= GIC_DIST_CONFIG,
+		.len		= VGIC_NR_IRQS / 4,
+		.handle_mmio	= handle_mmio_cfg_reg,
+	},
+	{
+		.base		= GIC_DIST_SOFTINT,
+		.len		= 4,
+		.handle_mmio	= handle_mmio_sgi_reg,
+	},
 	{}
 };
 
@@ -152,7 +647,106 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges,
 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		      struct kvm_exit_mmio *mmio)
 {
-	return KVM_EXIT_MMIO;
+	const struct mmio_range *range;
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long base = dist->vgic_dist_base;
+	bool updated_state;
+	unsigned long offset;
+
+	if (!irqchip_in_kernel(vcpu->kvm) ||
+	    mmio->phys_addr < base ||
+	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
+		return false;
+
+	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
+	if (mmio->len > 4) {
+		kvm_inject_dabt(vcpu, mmio->phys_addr);
+		return true;
+	}
+
+	range = find_matching_range(vgic_ranges, mmio, base);
+	if (unlikely(!range || !range->handle_mmio)) {
+		pr_warn("Unhandled access %d %08llx %d\n",
+			mmio->is_write, mmio->phys_addr, mmio->len);
+		return false;
+	}
+
+	spin_lock(&vcpu->kvm->arch.vgic.lock);
+	offset = mmio->phys_addr - range->base - base;
+	updated_state = range->handle_mmio(vcpu, mmio, offset);
+	spin_unlock(&vcpu->kvm->arch.vgic.lock);
+	kvm_prepare_mmio(run, mmio);
+	kvm_handle_mmio_return(vcpu, run);
+
+	return true;
+}
+
+static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	int nrcpus = atomic_read(&kvm->online_vcpus);
+	u8 target_cpus;
+	int sgi, mode, c, vcpu_id;
+
+	vcpu_id = vcpu->vcpu_id;
+
+	sgi = reg & 0xf;
+	target_cpus = (reg >> 16) & 0xff;
+	mode = (reg >> 24) & 3;
+
+	switch (mode) {
+	case 0:
+		if (!target_cpus)
+			return;
+
+	case 1:
+		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
+		break;
+
+	case 2:
+		target_cpus = 1 << vcpu_id;
+		break;
+	}
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		if (target_cpus & 1) {
+			/* Flag the SGI as pending */
+			vgic_dist_irq_set(vcpu, sgi);
+			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
+			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
+		}
+
+		target_cpus >>= 1;
+	}
+}
+
+static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+/*
+ * Update the interrupt state and determine which CPUs have pending
+ * interrupts. Must be called with distributor lock held.
+ */
+static void vgic_update_state(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int c;
+
+	if (!dist->enabled) {
+		set_bit(0, &dist->irq_pending_on_cpu);
+		return;
+	}
+
+	kvm_for_each_vcpu(c, vcpu, kvm) {
+		if (compute_pending_for_cpu(vcpu)) {
+			pr_debug("CPU%d has pending interrupts\n", c);
+			set_bit(c, &dist->irq_pending_on_cpu);
+		}
+	}
 }
 
 static bool vgic_ioaddr_overlap(struct kvm *kvm)