author     Paolo Bonzini <pbonzini@redhat.com>   2015-04-07 12:09:20 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-04-07 12:09:20 -0400
commit     bf0fb67cf957fc8ecfaaa2819b7d6a0f795e2ef2 (patch)
tree       22697f7deae781dbbacd2e19a5030df2e8551e6a /virt
parent     8999602d08a804ae9cb271fdd5378f910058112d (diff)
parent     d44758c0dfc5993a4b9952935a7eae4c91ebb6b4 (diff)
Merge tag 'kvm-arm-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into 'kvm-next'
KVM/ARM changes for v4.1:

- fixes for live migration
- irqfd support
- kvm-io-bus & vgic rework to enable ioeventfd
- page ageing for stage-2 translation
- various cleanups
Diffstat (limited to 'virt')

 -rw-r--r--  virt/kvm/arm/arch_timer.c   |  45
 -rw-r--r--  virt/kvm/arm/vgic-v2-emul.c |  71
 -rw-r--r--  virt/kvm/arm/vgic-v3-emul.c | 246
 -rw-r--r--  virt/kvm/arm/vgic.c         | 479
 -rw-r--r--  virt/kvm/arm/vgic.h         |  37
 -rw-r--r--  virt/kvm/coalesced_mmio.c   |   7
 -rw-r--r--  virt/kvm/eventfd.c          |   9
 -rw-r--r--  virt/kvm/iodev.h            |  70
 -rw-r--r--  virt/kvm/kvm_main.c         |  34

 9 files changed, 609 insertions, 389 deletions
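The heart of the kvm-io-bus & vgic rework visible in the vgic.c hunks below is that each VGIC register frame becomes a kvm_io_device registered on KVM_MMIO_BUS, instead of being dispatched through the per-VM vm_ops.handle_mmio hook that this merge removes. The following is only a minimal sketch of that registration pattern; the demo_* names and the abbreviated device struct are illustrative stand-ins, not the actual vgic_io_device layout added by the patch (which also carries the register ranges and the redistributor VCPU).

```c
#include <linux/kvm_host.h>
#include <kvm/iodev.h>

/* Sketch only: abbreviated stand-in for the patch's vgic_io_device. */
struct demo_io_device {
	gpa_t addr;
	int len;
	struct kvm_io_device dev;	/* embedded KVM I/O bus device */
};

static int demo_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		     gpa_t addr, int len, void *val)
{
	/* container_of() recovers the wrapper, just as the patch's
	 * vgic_handle_mmio_access() does below. */
	struct demo_io_device *iodev =
		container_of(this, struct demo_io_device, dev);

	(void)iodev;
	memset(val, 0, len);		/* RAZ for this sketch */
	return 0;
}

static int demo_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
		      gpa_t addr, int len, const void *val)
{
	return 0;			/* WI for this sketch */
}

static const struct kvm_io_device_ops demo_ops = {
	.read	= demo_read,
	.write	= demo_write,
};

/* Registration mirrors the patch's vgic_register_kvm_io_dev(). */
static int demo_register(struct kvm *kvm, struct demo_io_device *iodev,
			 gpa_t base, int len)
{
	int ret;

	iodev->addr = base;
	iodev->len = len;
	kvm_iodevice_init(&iodev->dev, &demo_ops);

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
				      &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
```

Once registered this way, guest MMIO faults that hit the frame are routed by the generic KVM I/O bus code, which is what makes ioeventfd usable for the vgic.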
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 6e54f3542126..98c95f2fcba4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+/*
+ * Work function for handling the backup timer that we schedule when a vcpu is
+ * no longer running, but had a timer programmed to fire in the future.
+ */
 static void kvm_timer_inject_irq_work(struct work_struct *work)
 {
 	struct kvm_vcpu *vcpu;
 
 	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
 	vcpu->arch.timer_cpu.armed = false;
-	kvm_timer_inject_irq(vcpu);
+
+	/*
+	 * If the vcpu is blocked we want to wake it up so that it will see
+	 * the timer has expired when entering the guest.
+	 */
+	kvm_vcpu_kick(vcpu);
 }
 
 static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
 	return HRTIMER_NORESTART;
 }
 
+bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	cycle_t cval, now;
+
+	if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
+	    !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+		return false;
+
+	cval = timer->cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	return cval <= now;
+}
+
 /**
  * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
  * @vcpu: The vcpu pointer
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * populate the CPU timer again.
 	 */
 	timer_disarm(timer);
+
+	/*
+	 * If the timer expired while we were not scheduled, now is the time
+	 * to inject it.
+	 */
+	if (kvm_timer_should_fire(vcpu))
+		kvm_timer_inject_irq(vcpu);
 }
 
 /**
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 	cycle_t cval, now;
 	u64 ns;
 
-	if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
-	    !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
-		return;
-
-	cval = timer->cntv_cval;
-	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
-
 	BUG_ON(timer_is_armed(timer));
 
-	if (cval <= now) {
+	if (kvm_timer_should_fire(vcpu)) {
 		/*
 		 * Timer has already expired while we were not
 		 * looking. Inject the interrupt and carry on.
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	cval = timer->cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
 	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
 				 &timecounter->frac);
 	timer_arm(timer, ns);
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 19c6210f02cf..13907970d11c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
 				       vcpu->vcpu_id);
 }
 
+static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+					  vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_exit_mmio *mmio,
+					 phys_addr_t offset)
+{
+	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+					    vcpu->vcpu_id);
+}
+
 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
 				     struct kvm_exit_mmio *mmio,
 				     phys_addr_t offset)
@@ -303,7 +319,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
 	return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
 }
 
-static const struct kvm_mmio_range vgic_dist_ranges[] = {
+static const struct vgic_io_range vgic_dist_ranges[] = {
 	{
 		.base = GIC_DIST_CTRL,
 		.len = 12,
@@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
 		.base = GIC_DIST_ACTIVE_SET,
 		.len = VGIC_MAX_IRQS / 8,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg,
 	},
 	{
 		.base = GIC_DIST_ACTIVE_CLEAR,
 		.len = VGIC_MAX_IRQS / 8,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg,
 	},
 	{
 		.base = GIC_DIST_PRI,
@@ -388,24 +404,6 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
 	{}
 };
 
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, base,
-			 KVM_VGIC_V2_DIST_SIZE))
-		return false;
-
-	/* GICv2 does not support accesses wider than 32 bits */
-	if (mmio->len > 4) {
-		kvm_inject_dabt(vcpu, mmio->phys_addr);
-		return true;
-	}
-
-	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -490,6 +488,7 @@ static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 static int vgic_v2_map_resources(struct kvm *kvm,
 				 const struct vgic_params *params)
 {
+	struct vgic_dist *dist = &kvm->arch.vgic;
 	int ret = 0;
 
 	if (!irqchip_in_kernel(kvm))
@@ -500,13 +499,17 @@ static int vgic_v2_map_resources(struct kvm *kvm,
 	if (vgic_ready(kvm))
 		goto out;
 
-	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
-	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
+	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
 		kvm_err("Need to set vgic cpu and dist addresses first\n");
 		ret = -ENXIO;
 		goto out;
 	}
 
+	vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
+				 KVM_VGIC_V2_DIST_SIZE,
+				 vgic_dist_ranges, -1, &dist->dist_iodev);
+
 	/*
 	 * Initialize the vgic if this hasn't already been done on demand by
 	 * accessing the vgic state from userspace.
@@ -514,18 +517,23 @@ static int vgic_v2_map_resources(struct kvm *kvm,
 	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to allocate maps\n");
-		goto out;
+		goto out_unregister;
 	}
 
-	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
+	ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
 				    true);
 	if (ret) {
 		kvm_err("Unable to remap VGIC CPU to VCPU\n");
-		goto out;
+		goto out_unregister;
 	}
 
-	kvm->arch.vgic.ready = true;
+	dist->ready = true;
+	goto out;
+
+out_unregister:
+	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
+
 out:
 	if (ret)
 		kvm_vgic_destroy(kvm);
@@ -554,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v2_init_model;
@@ -631,7 +638,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
  * CPU Interface Register accesses - these are not accessed by the VM, but by
  * user space for saving and restoring VGIC state.
  */
-static const struct kvm_mmio_range vgic_cpu_ranges[] = {
+static const struct vgic_io_range vgic_cpu_ranges[] = {
 	{
 		.base = GIC_CPU_CTRL,
 		.len = 12,
@@ -658,12 +665,13 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 				 struct kvm_device_attr *attr,
 				 u32 *reg, bool is_write)
 {
-	const struct kvm_mmio_range *r = NULL, *ranges;
+	const struct vgic_io_range *r = NULL, *ranges;
 	phys_addr_t offset;
 	int ret, cpuid, c;
 	struct kvm_vcpu *vcpu, *tmp_vcpu;
 	struct vgic_dist *vgic;
 	struct kvm_exit_mmio mmio;
+	u32 data;
 
 	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
 	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
@@ -685,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mmio.len = 4;
 	mmio.is_write = is_write;
+	mmio.data = &data;
 	if (is_write)
 		mmio_data_write(&mmio, ~0, *reg);
 	switch (attr->group) {
@@ -699,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	default:
 		BUG();
 	}
-	r = vgic_find_range(ranges, &mmio, offset);
+	r = vgic_find_range(ranges, 4, offset);
 
 	if (unlikely(!r || !r->handle_mmio)) {
 		ret = -ENXIO;
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index b3f154631515..e9c3a7a83833 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -340,7 +340,7 @@ static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
 	return false;
 }
 
-static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
+static const struct vgic_io_range vgic_v3_dist_ranges[] = {
 	{
 		.base = GICD_CTLR,
 		.len = 0x04,
@@ -502,6 +502,43 @@ static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
 	{},
 };
 
+static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
+				    struct kvm_exit_mmio *mmio,
+				    phys_addr_t offset)
+{
+	/* since we don't support LPIs, this register is zero for now */
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
+static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
+				     struct kvm_exit_mmio *mmio,
+				     phys_addr_t offset)
+{
+	u32 reg;
+	u64 mpidr;
+	struct kvm_vcpu *redist_vcpu = mmio->private;
+	int target_vcpu_id = redist_vcpu->vcpu_id;
+
+	/* the upper 32 bits contain the affinity value */
+	if ((offset & ~3) == 4) {
+		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
+		reg = compress_mpidr(mpidr);
+
+		vgic_reg_access(mmio, &reg, offset,
+				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+		return false;
+	}
+
+	reg = redist_vcpu->vcpu_id << 8;
+	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+		reg |= GICR_TYPER_LAST;
+	vgic_reg_access(mmio, &reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
 static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
 					      struct kvm_exit_mmio *mmio,
 					      phys_addr_t offset)
@@ -570,186 +607,107 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
 	return vgic_handle_cfg_reg(reg, mmio, offset);
 }
 
-static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
+#define SGI_base(x) ((x) + SZ_64K)
+
+static const struct vgic_io_range vgic_redist_ranges[] = {
+	{
+		.base = GICR_CTLR,
+		.len = 0x04,
+		.bits_per_irq = 0,
+		.handle_mmio = handle_mmio_ctlr_redist,
+	},
+	{
+		.base = GICR_TYPER,
+		.len = 0x08,
+		.bits_per_irq = 0,
+		.handle_mmio = handle_mmio_typer_redist,
+	},
+	{
+		.base = GICR_IIDR,
+		.len = 0x04,
+		.bits_per_irq = 0,
+		.handle_mmio = handle_mmio_iidr,
+	},
+	{
+		.base = GICR_WAKER,
+		.len = 0x04,
+		.bits_per_irq = 0,
+		.handle_mmio = handle_mmio_raz_wi,
+	},
 	{
-		.base = GICR_IGROUPR0,
+		.base = GICR_IDREGS,
+		.len = 0x30,
+		.bits_per_irq = 0,
+		.handle_mmio = handle_mmio_idregs,
+	},
+	{
+		.base = SGI_base(GICR_IGROUPR0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_rao_wi,
 	},
 	{
-		.base = GICR_ISENABLER0,
+		.base = SGI_base(GICR_ISENABLER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_set_enable_reg_redist,
 	},
 	{
-		.base = GICR_ICENABLER0,
+		.base = SGI_base(GICR_ICENABLER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_clear_enable_reg_redist,
 	},
 	{
-		.base = GICR_ISPENDR0,
+		.base = SGI_base(GICR_ISPENDR0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_set_pending_reg_redist,
 	},
 	{
-		.base = GICR_ICPENDR0,
+		.base = SGI_base(GICR_ICPENDR0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_clear_pending_reg_redist,
 	},
 	{
-		.base = GICR_ISACTIVER0,
+		.base = SGI_base(GICR_ISACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_raz_wi,
 	},
 	{
-		.base = GICR_ICACTIVER0,
+		.base = SGI_base(GICR_ICACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_raz_wi,
 	},
 	{
-		.base = GICR_IPRIORITYR0,
+		.base = SGI_base(GICR_IPRIORITYR0),
 		.len = 0x20,
 		.bits_per_irq = 8,
 		.handle_mmio = handle_mmio_priority_reg_redist,
 	},
 	{
-		.base = GICR_ICFGR0,
+		.base = SGI_base(GICR_ICFGR0),
 		.len = 0x08,
 		.bits_per_irq = 2,
 		.handle_mmio = handle_mmio_cfg_reg_redist,
 	},
 	{
-		.base = GICR_IGRPMODR0,
+		.base = SGI_base(GICR_IGRPMODR0),
 		.len = 0x04,
 		.bits_per_irq = 1,
 		.handle_mmio = handle_mmio_raz_wi,
 	},
 	{
-		.base = GICR_NSACR,
+		.base = SGI_base(GICR_NSACR),
 		.len = 0x04,
 		.handle_mmio = handle_mmio_raz_wi,
 	},
 	{},
 };
 
-static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
-				    struct kvm_exit_mmio *mmio,
-				    phys_addr_t offset)
-{
-	/* since we don't support LPIs, this register is zero for now */
-	vgic_reg_access(mmio, NULL, offset,
-			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
-	return false;
-}
-
-static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
-				     struct kvm_exit_mmio *mmio,
-				     phys_addr_t offset)
-{
-	u32 reg;
-	u64 mpidr;
-	struct kvm_vcpu *redist_vcpu = mmio->private;
-	int target_vcpu_id = redist_vcpu->vcpu_id;
-
-	/* the upper 32 bits contain the affinity value */
-	if ((offset & ~3) == 4) {
-		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
-		reg = compress_mpidr(mpidr);
-
-		vgic_reg_access(mmio, &reg, offset,
-				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-		return false;
-	}
-
-	reg = redist_vcpu->vcpu_id << 8;
-	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
-		reg |= GICR_TYPER_LAST;
-	vgic_reg_access(mmio, &reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-	return false;
-}
-
-static const struct kvm_mmio_range vgic_redist_ranges[] = {
-	{
-		.base = GICR_CTLR,
-		.len = 0x04,
-		.bits_per_irq = 0,
-		.handle_mmio = handle_mmio_ctlr_redist,
-	},
-	{
-		.base = GICR_TYPER,
-		.len = 0x08,
-		.bits_per_irq = 0,
-		.handle_mmio = handle_mmio_typer_redist,
-	},
-	{
-		.base = GICR_IIDR,
-		.len = 0x04,
-		.bits_per_irq = 0,
-		.handle_mmio = handle_mmio_iidr,
-	},
-	{
-		.base = GICR_WAKER,
-		.len = 0x04,
-		.bits_per_irq = 0,
-		.handle_mmio = handle_mmio_raz_wi,
-	},
-	{
-		.base = GICR_IDREGS,
-		.len = 0x30,
-		.bits_per_irq = 0,
-		.handle_mmio = handle_mmio_idregs,
-	},
-	{},
-};
-
-/*
- * This function splits accesses between the distributor and the two
- * redistributor parts (private/SPI). As each redistributor is accessible
- * from any CPU, we have to determine the affected VCPU by taking the faulting
- * address into account. We then pass this VCPU to the handler function via
- * the private parameter.
- */
-#define SGI_BASE_OFFSET SZ_64K
-static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	unsigned long dbase = dist->vgic_dist_base;
-	unsigned long rdbase = dist->vgic_redist_base;
-	int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
-	int vcpu_id;
-	const struct kvm_mmio_range *mmio_range;
-
-	if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
-		return vgic_handle_mmio_range(vcpu, run, mmio,
-					      vgic_v3_dist_ranges, dbase);
-	}
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
-	    GIC_V3_REDIST_SIZE * nrcpus))
-		return false;
-
-	vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
-	rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
-	mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
-
-	if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
-		rdbase += SGI_BASE_OFFSET;
-		mmio_range = vgic_redist_sgi_ranges;
-	} else {
-		mmio_range = vgic_redist_ranges;
-	}
-	return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
-}
-
 static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 {
 	if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -766,6 +724,9 @@ static int vgic_v3_map_resources(struct kvm *kvm,
 {
 	int ret = 0;
 	struct vgic_dist *dist = &kvm->arch.vgic;
+	gpa_t rdbase = dist->vgic_redist_base;
+	struct vgic_io_device *iodevs = NULL;
+	int i;
 
 	if (!irqchip_in_kernel(kvm))
 		return 0;
@@ -791,7 +752,41 @@ static int vgic_v3_map_resources(struct kvm *kvm,
 		goto out;
 	}
 
-	kvm->arch.vgic.ready = true;
+	ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
+				       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
+				       -1, &dist->dist_iodev);
+	if (ret)
+		goto out;
+
+	iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
+	if (!iodevs) {
+		ret = -ENOMEM;
+		goto out_unregister;
+	}
+
+	for (i = 0; i < dist->nr_cpus; i++) {
+		ret = vgic_register_kvm_io_dev(kvm, rdbase,
+					       SZ_128K, vgic_redist_ranges,
+					       i, &iodevs[i]);
+		if (ret)
+			goto out_unregister;
+		rdbase += GIC_V3_REDIST_SIZE;
+	}
+
+	dist->redist_iodevs = iodevs;
+	dist->ready = true;
+	goto out;
+
+out_unregister:
+	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
+	if (iodevs) {
+		for (i = 0; i < dist->nr_cpus; i++) {
+			if (iodevs[i].dev.ops)
+				kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+							  &iodevs[i].dev);
+		}
+	}
+
 out:
 	if (ret)
 		kvm_vgic_destroy(kvm);
@@ -832,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v3_init_model;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c9f60f524588..8d550ff14700 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -31,6 +31,9 @@
31#include <asm/kvm_emulate.h> 31#include <asm/kvm_emulate.h>
32#include <asm/kvm_arm.h> 32#include <asm/kvm_arm.h>
33#include <asm/kvm_mmu.h> 33#include <asm/kvm_mmu.h>
34#include <trace/events/kvm.h>
35#include <asm/kvm.h>
36#include <kvm/iodev.h>
34 37
35/* 38/*
36 * How the whole thing works (courtesy of Christoffer Dall): 39 * How the whole thing works (courtesy of Christoffer Dall):
@@ -263,6 +266,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
263 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); 266 return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
264} 267}
265 268
269static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
270{
271 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
272
273 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
274}
275
266static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) 276static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
267{ 277{
268 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 278 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -277,6 +287,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
277 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); 287 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
278} 288}
279 289
290static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
291{
292 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
293
294 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
295}
296
297static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
298{
299 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
300
301 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
302}
303
280static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) 304static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
281{ 305{
282 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 306 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -520,6 +544,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
520 return false; 544 return false;
521} 545}
522 546
547bool vgic_handle_set_active_reg(struct kvm *kvm,
548 struct kvm_exit_mmio *mmio,
549 phys_addr_t offset, int vcpu_id)
550{
551 u32 *reg;
552 struct vgic_dist *dist = &kvm->arch.vgic;
553
554 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
555 vgic_reg_access(mmio, reg, offset,
556 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
557
558 if (mmio->is_write) {
559 vgic_update_state(kvm);
560 return true;
561 }
562
563 return false;
564}
565
566bool vgic_handle_clear_active_reg(struct kvm *kvm,
567 struct kvm_exit_mmio *mmio,
568 phys_addr_t offset, int vcpu_id)
569{
570 u32 *reg;
571 struct vgic_dist *dist = &kvm->arch.vgic;
572
573 reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
574 vgic_reg_access(mmio, reg, offset,
575 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
576
577 if (mmio->is_write) {
578 vgic_update_state(kvm);
579 return true;
580 }
581
582 return false;
583}
584
523static u32 vgic_cfg_expand(u16 val) 585static u32 vgic_cfg_expand(u16 val)
524{ 586{
525 u32 res = 0; 587 u32 res = 0;
@@ -588,16 +650,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
588} 650}
589 651
590/** 652/**
591 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor 653 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
592 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs 654 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
593 * 655 *
594 * Move any pending IRQs that have already been assigned to LRs back to the 656 * Move any IRQs that have already been assigned to LRs back to the
595 * emulated distributor state so that the complete emulated state can be read 657 * emulated distributor state so that the complete emulated state can be read
596 * from the main emulation structures without investigating the LRs. 658 * from the main emulation structures without investigating the LRs.
597 *
598 * Note that IRQs in the active state in the LRs get their pending state moved
599 * to the distributor but the active state stays in the LRs, because we don't
600 * track the active state on the distributor side.
601 */ 659 */
602void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) 660void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
603{ 661{
@@ -613,12 +671,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
613 * 01: pending 671 * 01: pending
614 * 10: active 672 * 10: active
615 * 11: pending and active 673 * 11: pending and active
616 *
617 * If the LR holds only an active interrupt (not pending) then
618 * just leave it alone.
619 */ 674 */
620 if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) 675 BUG_ON(!(lr.state & LR_STATE_MASK));
621 continue; 676
677 /* Reestablish SGI source for pending and active IRQs */
678 if (lr.irq < VGIC_NR_SGIS)
679 add_sgi_source(vcpu, lr.irq, lr.source);
680
681 /*
682 * If the LR holds an active (10) or a pending and active (11)
683 * interrupt then move the active state to the
684 * distributor tracking bit.
685 */
686 if (lr.state & LR_STATE_ACTIVE) {
687 vgic_irq_set_active(vcpu, lr.irq);
688 lr.state &= ~LR_STATE_ACTIVE;
689 }
622 690
623 /* 691 /*
624 * Reestablish the pending state on the distributor and the 692 * Reestablish the pending state on the distributor and the
@@ -626,21 +694,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
626 * is fine, then we are only setting a few bits that were 694 * is fine, then we are only setting a few bits that were
627 * already set. 695 * already set.
628 */ 696 */
629 vgic_dist_irq_set_pending(vcpu, lr.irq); 697 if (lr.state & LR_STATE_PENDING) {
630 if (lr.irq < VGIC_NR_SGIS) 698 vgic_dist_irq_set_pending(vcpu, lr.irq);
631 add_sgi_source(vcpu, lr.irq, lr.source); 699 lr.state &= ~LR_STATE_PENDING;
632 lr.state &= ~LR_STATE_PENDING; 700 }
701
633 vgic_set_lr(vcpu, i, lr); 702 vgic_set_lr(vcpu, i, lr);
634 703
635 /* 704 /*
636 * If there's no state left on the LR (it could still be 705 * Mark the LR as free for other use.
637 * active), then the LR does not hold any useful info and can
638 * be marked as free for other use.
639 */ 706 */
640 if (!(lr.state & LR_STATE_MASK)) { 707 BUG_ON(lr.state & LR_STATE_MASK);
641 vgic_retire_lr(i, lr.irq, vcpu); 708 vgic_retire_lr(i, lr.irq, vcpu);
642 vgic_irq_clear_queued(vcpu, lr.irq); 709 vgic_irq_clear_queued(vcpu, lr.irq);
643 }
644 710
645 /* Finally update the VGIC state. */ 711 /* Finally update the VGIC state. */
646 vgic_update_state(vcpu->kvm); 712 vgic_update_state(vcpu->kvm);
@@ -648,24 +714,21 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
648} 714}
649 715
650const 716const
651struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, 717struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
652 struct kvm_exit_mmio *mmio, 718 int len, gpa_t offset)
653 phys_addr_t offset) 719{
654{ 720 while (ranges->len) {
655 const struct kvm_mmio_range *r = ranges; 721 if (offset >= ranges->base &&
656 722 (offset + len) <= (ranges->base + ranges->len))
657 while (r->len) { 723 return ranges;
658 if (offset >= r->base && 724 ranges++;
659 (offset + mmio->len) <= (r->base + r->len))
660 return r;
661 r++;
662 } 725 }
663 726
664 return NULL; 727 return NULL;
665} 728}
666 729
667static bool vgic_validate_access(const struct vgic_dist *dist, 730static bool vgic_validate_access(const struct vgic_dist *dist,
668 const struct kvm_mmio_range *range, 731 const struct vgic_io_range *range,
669 unsigned long offset) 732 unsigned long offset)
670{ 733{
671 int irq; 734 int irq;
@@ -693,9 +756,8 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
693static bool call_range_handler(struct kvm_vcpu *vcpu, 756static bool call_range_handler(struct kvm_vcpu *vcpu,
694 struct kvm_exit_mmio *mmio, 757 struct kvm_exit_mmio *mmio,
695 unsigned long offset, 758 unsigned long offset,
696 const struct kvm_mmio_range *range) 759 const struct vgic_io_range *range)
697{ 760{
698 u32 *data32 = (void *)mmio->data;
699 struct kvm_exit_mmio mmio32; 761 struct kvm_exit_mmio mmio32;
700 bool ret; 762 bool ret;
701 763
@@ -712,91 +774,142 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
712 mmio32.private = mmio->private; 774 mmio32.private = mmio->private;
713 775
714 mmio32.phys_addr = mmio->phys_addr + 4; 776 mmio32.phys_addr = mmio->phys_addr + 4;
715 if (mmio->is_write) 777 mmio32.data = &((u32 *)mmio->data)[1];
716 *(u32 *)mmio32.data = data32[1];
717 ret = range->handle_mmio(vcpu, &mmio32, offset + 4); 778 ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
718 if (!mmio->is_write)
719 data32[1] = *(u32 *)mmio32.data;
720 779
721 mmio32.phys_addr = mmio->phys_addr; 780 mmio32.phys_addr = mmio->phys_addr;
722 if (mmio->is_write) 781 mmio32.data = &((u32 *)mmio->data)[0];
723 *(u32 *)mmio32.data = data32[0];
724 ret |= range->handle_mmio(vcpu, &mmio32, offset); 782 ret |= range->handle_mmio(vcpu, &mmio32, offset);
725 if (!mmio->is_write)
726 data32[0] = *(u32 *)mmio32.data;
727 783
728 return ret; 784 return ret;
729} 785}
730 786
731/** 787/**
732 * vgic_handle_mmio_range - handle an in-kernel MMIO access 788 * vgic_handle_mmio_access - handle an in-kernel MMIO access
789 * This is called by the read/write KVM IO device wrappers below.
733 * @vcpu: pointer to the vcpu performing the access 790 * @vcpu: pointer to the vcpu performing the access
734 * @run: pointer to the kvm_run structure 791 * @this: pointer to the KVM IO device in charge
735 * @mmio: pointer to the data describing the access 792 * @addr: guest physical address of the access
736 * @ranges: array of MMIO ranges in a given region 793 * @len: size of the access
737 * @mmio_base: base address of that region 794 * @val: pointer to the data region
795 * @is_write: read or write access
738 * 796 *
739 * returns true if the MMIO access could be performed 797 * returns true if the MMIO access could be performed
740 */ 798 */
741bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, 799static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
742 struct kvm_exit_mmio *mmio, 800 struct kvm_io_device *this, gpa_t addr,
743 const struct kvm_mmio_range *ranges, 801 int len, void *val, bool is_write)
744 unsigned long mmio_base)
745{ 802{
746 const struct kvm_mmio_range *range;
747 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 803 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
804 struct vgic_io_device *iodev = container_of(this,
805 struct vgic_io_device, dev);
806 struct kvm_run *run = vcpu->run;
807 const struct vgic_io_range *range;
808 struct kvm_exit_mmio mmio;
748 bool updated_state; 809 bool updated_state;
749 unsigned long offset; 810 gpa_t offset;
750 811
751 offset = mmio->phys_addr - mmio_base; 812 offset = addr - iodev->addr;
752 range = vgic_find_range(ranges, mmio, offset); 813 range = vgic_find_range(iodev->reg_ranges, len, offset);
753 if (unlikely(!range || !range->handle_mmio)) { 814 if (unlikely(!range || !range->handle_mmio)) {
754 pr_warn("Unhandled access %d %08llx %d\n", 815 pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
755 mmio->is_write, mmio->phys_addr, mmio->len); 816 return -ENXIO;
756 return false;
757 } 817 }
758 818
759 spin_lock(&vcpu->kvm->arch.vgic.lock); 819 mmio.phys_addr = addr;
820 mmio.len = len;
821 mmio.is_write = is_write;
822 mmio.data = val;
823 mmio.private = iodev->redist_vcpu;
824
825 spin_lock(&dist->lock);
760 offset -= range->base; 826 offset -= range->base;
761 if (vgic_validate_access(dist, range, offset)) { 827 if (vgic_validate_access(dist, range, offset)) {
762 updated_state = call_range_handler(vcpu, mmio, offset, range); 828 updated_state = call_range_handler(vcpu, &mmio, offset, range);
763 } else { 829 } else {
764 if (!mmio->is_write) 830 if (!is_write)
765 memset(mmio->data, 0, mmio->len); 831 memset(val, 0, len);
766 updated_state = false; 832 updated_state = false;
767 } 833 }
768 spin_unlock(&vcpu->kvm->arch.vgic.lock); 834 spin_unlock(&dist->lock);
769 kvm_prepare_mmio(run, mmio); 835 run->mmio.is_write = is_write;
836 run->mmio.len = len;
837 run->mmio.phys_addr = addr;
838 memcpy(run->mmio.data, val, len);
839
770 kvm_handle_mmio_return(vcpu, run); 840 kvm_handle_mmio_return(vcpu, run);
771 841
772 if (updated_state) 842 if (updated_state)
773 vgic_kick_vcpus(vcpu->kvm); 843 vgic_kick_vcpus(vcpu->kvm);
774 844
775 return true; 845 return 0;
846}
847
848static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
849 struct kvm_io_device *this,
850 gpa_t addr, int len, void *val)
851{
852 return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
776} 853}
777 854
855static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
856 struct kvm_io_device *this,
857 gpa_t addr, int len, const void *val)
858{
859 return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
860 true);
861}
862
863struct kvm_io_device_ops vgic_io_ops = {
864 .read = vgic_handle_mmio_read,
865 .write = vgic_handle_mmio_write,
866};
867
778/** 868/**
779 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation 869 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
780 * @vcpu: pointer to the vcpu performing the access 870 * @kvm: The VM structure pointer
781 * @run: pointer to the kvm_run structure 871 * @base: The (guest) base address for the register frame
782 * @mmio: pointer to the data describing the access 872 * @len: Length of the register frame window
873 * @ranges: Describing the handler functions for each register
874 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
875 * @iodev: Points to memory to be passed on to the handler
783 * 876 *
784 * returns true if the MMIO access has been performed in kernel space, 877 * @iodev stores the parameters of this function to be usable by the handler
785 * and false if it needs to be emulated in user space. 878 * respectively the dispatcher function (since the KVM I/O bus framework lacks
786 * Calls the actual handling routine for the selected VGIC model. 879 * an opaque parameter). Initialization is done in this function, but the
880 * reference should be valid and unique for the whole VGIC lifetime.
881 * If the register frame is not mapped for a specific VCPU, pass -1 to
882 * @redist_vcpu_id.
787 */ 883 */
788bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, 884int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
789 struct kvm_exit_mmio *mmio) 885 const struct vgic_io_range *ranges,
886 int redist_vcpu_id,
887 struct vgic_io_device *iodev)
790{ 888{
791 if (!irqchip_in_kernel(vcpu->kvm)) 889 struct kvm_vcpu *vcpu = NULL;
792 return false; 890 int ret;
793 891
794 /* 892 if (redist_vcpu_id >= 0)
795 * This will currently call either vgic_v2_handle_mmio() or 893 vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
796 * vgic_v3_handle_mmio(), which in turn will call 894
797 * vgic_handle_mmio_range() defined above. 895 iodev->addr = base;
798 */ 896 iodev->len = len;
799 return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio); 897 iodev->reg_ranges = ranges;
898 iodev->redist_vcpu = vcpu;
899
900 kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
901
902 mutex_lock(&kvm->slots_lock);
903
904 ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
905 &iodev->dev);
906 mutex_unlock(&kvm->slots_lock);
907
908 /* Mark the iodev as invalid if registration fails. */
909 if (ret)
910 iodev->dev.ops = NULL;
911
912 return ret;
800} 913}
801 914
802static int vgic_nr_shared_irqs(struct vgic_dist *dist) 915static int vgic_nr_shared_irqs(struct vgic_dist *dist)
@@ -804,6 +917,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist)
804 return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; 917 return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
805} 918}
806 919
920static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
921{
922 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
923 unsigned long *active, *enabled, *act_percpu, *act_shared;
924 unsigned long active_private, active_shared;
925 int nr_shared = vgic_nr_shared_irqs(dist);
926 int vcpu_id;
927
928 vcpu_id = vcpu->vcpu_id;
929 act_percpu = vcpu->arch.vgic_cpu.active_percpu;
930 act_shared = vcpu->arch.vgic_cpu.active_shared;
931
932 active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
933 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
934 bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
935
936 active = vgic_bitmap_get_shared_map(&dist->irq_active);
937 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
938 bitmap_and(act_shared, active, enabled, nr_shared);
939 bitmap_and(act_shared, act_shared,
940 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
941 nr_shared);
942
943 active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
944 active_shared = find_first_bit(act_shared, nr_shared);
945
946 return (active_private < VGIC_NR_PRIVATE_IRQS ||
947 active_shared < nr_shared);
948}
949
807static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) 950static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
808{ 951{
809 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 952 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -835,7 +978,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
835 978
836/* 979/*
837 * Update the interrupt state and determine which CPUs have pending 980 * Update the interrupt state and determine which CPUs have pending
838 * interrupts. Must be called with distributor lock held. 981 * or active interrupts. Must be called with distributor lock held.
839 */ 982 */
840void vgic_update_state(struct kvm *kvm) 983void vgic_update_state(struct kvm *kvm)
841{ 984{
@@ -849,10 +992,13 @@ void vgic_update_state(struct kvm *kvm)
849 } 992 }
850 993
851 kvm_for_each_vcpu(c, vcpu, kvm) { 994 kvm_for_each_vcpu(c, vcpu, kvm) {
852 if (compute_pending_for_cpu(vcpu)) { 995 if (compute_pending_for_cpu(vcpu))
853 pr_debug("CPU%d has pending interrupts\n", c);
854 set_bit(c, dist->irq_pending_on_cpu); 996 set_bit(c, dist->irq_pending_on_cpu);
855 } 997
998 if (compute_active_for_cpu(vcpu))
999 set_bit(c, dist->irq_active_on_cpu);
1000 else
1001 clear_bit(c, dist->irq_active_on_cpu);
856 } 1002 }
857} 1003}
858 1004
@@ -955,6 +1101,26 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
955 } 1101 }
956} 1102}
957 1103
1104static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
1105 int lr_nr, struct vgic_lr vlr)
1106{
1107 if (vgic_irq_is_active(vcpu, irq)) {
1108 vlr.state |= LR_STATE_ACTIVE;
1109 kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
1110 vgic_irq_clear_active(vcpu, irq);
1111 vgic_update_state(vcpu->kvm);
1112 } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
1113 vlr.state |= LR_STATE_PENDING;
1114 kvm_debug("Set pending: 0x%x\n", vlr.state);
1115 }
1116
1117 if (!vgic_irq_is_edge(vcpu, irq))
1118 vlr.state |= LR_EOI_INT;
1119
1120 vgic_set_lr(vcpu, lr_nr, vlr);
1121 vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
1122}
1123
958/* 1124/*
959 * Queue an interrupt to a CPU virtual interface. Return true on success, 1125 * Queue an interrupt to a CPU virtual interface. Return true on success,
960 * or false if it wasn't possible to queue it. 1126 * or false if it wasn't possible to queue it.
@@ -982,9 +1148,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
982 if (vlr.source == sgi_source_id) { 1148 if (vlr.source == sgi_source_id) {
983 kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); 1149 kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
984 BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 1150 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
985 vlr.state |= LR_STATE_PENDING; 1151 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
986 vgic_set_lr(vcpu, lr, vlr);
987 vgic_sync_lr_elrsr(vcpu, lr, vlr);
988 return true; 1152 return true;
989 } 1153 }
990 } 1154 }
@@ -1001,12 +1165,8 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
1001 1165
1002 vlr.irq = irq; 1166 vlr.irq = irq;
1003 vlr.source = sgi_source_id; 1167 vlr.source = sgi_source_id;
1004 vlr.state = LR_STATE_PENDING; 1168 vlr.state = 0;
1005 if (!vgic_irq_is_edge(vcpu, irq)) 1169 vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
1006 vlr.state |= LR_EOI_INT;
1007
1008 vgic_set_lr(vcpu, lr, vlr);
1009 vgic_sync_lr_elrsr(vcpu, lr, vlr);
1010 1170
1011 return true; 1171 return true;
1012} 1172}
@@ -1038,39 +1198,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1038{ 1198{
1039 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1199 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1040 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1200 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1201 unsigned long *pa_percpu, *pa_shared;
1041 int i, vcpu_id; 1202 int i, vcpu_id;
1042 int overflow = 0; 1203 int overflow = 0;
1204 int nr_shared = vgic_nr_shared_irqs(dist);
1043 1205
1044 vcpu_id = vcpu->vcpu_id; 1206 vcpu_id = vcpu->vcpu_id;
1045 1207
1208 pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
1209 pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
1210
1211 bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
1212 VGIC_NR_PRIVATE_IRQS);
1213 bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
1214 nr_shared);
1046 /* 1215 /*
1047 * We may not have any pending interrupt, or the interrupts 1216 * We may not have any pending interrupt, or the interrupts
1048 * may have been serviced from another vcpu. In all cases, 1217 * may have been serviced from another vcpu. In all cases,
1049 * move along. 1218 * move along.
1050 */ 1219 */
1051 if (!kvm_vgic_vcpu_pending_irq(vcpu)) { 1220 if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
1052 pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
1053 goto epilog; 1221 goto epilog;
1054 }
1055 1222
1056 /* SGIs */ 1223 /* SGIs */
1057 for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) { 1224 for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
1058 if (!queue_sgi(vcpu, i)) 1225 if (!queue_sgi(vcpu, i))
1059 overflow = 1; 1226 overflow = 1;
1060 } 1227 }
1061 1228
1062 /* PPIs */ 1229 /* PPIs */
1063 for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) { 1230 for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
1064 if (!vgic_queue_hwirq(vcpu, i)) 1231 if (!vgic_queue_hwirq(vcpu, i))
1065 overflow = 1; 1232 overflow = 1;
1066 } 1233 }
1067 1234
1068 /* SPIs */ 1235 /* SPIs */
1069 for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) { 1236 for_each_set_bit(i, pa_shared, nr_shared) {
1070 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) 1237 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
1071 overflow = 1; 1238 overflow = 1;
1072 } 1239 }
1073 1240
1241
1242
1243
1074epilog: 1244epilog:
1075 if (overflow) { 1245 if (overflow) {
1076 vgic_enable_underflow(vcpu); 1246 vgic_enable_underflow(vcpu);
@@ -1089,7 +1259,9 @@ epilog:
1089static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) 1259static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1090{ 1260{
1091 u32 status = vgic_get_interrupt_status(vcpu); 1261 u32 status = vgic_get_interrupt_status(vcpu);
1262 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1092 bool level_pending = false; 1263 bool level_pending = false;
1264 struct kvm *kvm = vcpu->kvm;
1093 1265
1094 kvm_debug("STATUS = %08x\n", status); 1266 kvm_debug("STATUS = %08x\n", status);
1095 1267
@@ -1106,6 +1278,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1106 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); 1278 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1107 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); 1279 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1108 1280
1281 spin_lock(&dist->lock);
1109 vgic_irq_clear_queued(vcpu, vlr.irq); 1282 vgic_irq_clear_queued(vcpu, vlr.irq);
1110 WARN_ON(vlr.state & LR_STATE_MASK); 1283 WARN_ON(vlr.state & LR_STATE_MASK);
1111 vlr.state = 0; 1284 vlr.state = 0;
@@ -1124,6 +1297,17 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1124 */ 1297 */
1125 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); 1298 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1126 1299
1300 /*
1301 * kvm_notify_acked_irq calls kvm_set_irq()
1302 * to reset the IRQ level. Need to release the
1303 * lock for kvm_set_irq to grab it.
1304 */
1305 spin_unlock(&dist->lock);
1306
1307 kvm_notify_acked_irq(kvm, 0,
1308 vlr.irq - VGIC_NR_PRIVATE_IRQS);
1309 spin_lock(&dist->lock);
1310
1127 /* Any additional pending interrupt? */ 1311 /* Any additional pending interrupt? */
1128 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { 1312 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1129 vgic_cpu_irq_set(vcpu, vlr.irq); 1313 vgic_cpu_irq_set(vcpu, vlr.irq);
@@ -1133,6 +1317,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1133 vgic_cpu_irq_clear(vcpu, vlr.irq); 1317 vgic_cpu_irq_clear(vcpu, vlr.irq);
1134 } 1318 }
1135 1319
1320 spin_unlock(&dist->lock);
1321
1136 /* 1322 /*
1137 * Despite being EOIed, the LR may not have 1323 * Despite being EOIed, the LR may not have
1138 * been marked as empty. 1324 * been marked as empty.
@@ -1155,10 +1341,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1155 return level_pending; 1341 return level_pending;
1156} 1342}
1157 1343
1158/* 1344/* Sync back the VGIC state after a guest run */
1159 * Sync back the VGIC state after a guest run. The distributor lock is
1160 * needed so we don't get preempted in the middle of the state processing.
1161 */
1162static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1345static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1163{ 1346{
1164 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1347 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1205,14 +1388,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1205 1388
1206void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 1389void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1207{ 1390{
1208 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1209
1210 if (!irqchip_in_kernel(vcpu->kvm)) 1391 if (!irqchip_in_kernel(vcpu->kvm))
1211 return; 1392 return;
1212 1393
1213 spin_lock(&dist->lock);
1214 __kvm_vgic_sync_hwstate(vcpu); 1394 __kvm_vgic_sync_hwstate(vcpu);
1215 spin_unlock(&dist->lock);
1216} 1395}
1217 1396
1218int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) 1397int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
@@ -1225,6 +1404,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1225 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); 1404 return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
1226} 1405}
1227 1406
1407int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
1408{
1409 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1410
1411 if (!irqchip_in_kernel(vcpu->kvm))
1412 return 0;
1413
1414 return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
1415}
1416
1417
1228void vgic_kick_vcpus(struct kvm *kvm) 1418void vgic_kick_vcpus(struct kvm *kvm)
1229{ 1419{
1230 struct kvm_vcpu *vcpu; 1420 struct kvm_vcpu *vcpu;
@@ -1397,8 +1587,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
1397 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1587 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1398 1588
1399 kfree(vgic_cpu->pending_shared); 1589 kfree(vgic_cpu->pending_shared);
1590 kfree(vgic_cpu->active_shared);
1591 kfree(vgic_cpu->pend_act_shared);
1400 kfree(vgic_cpu->vgic_irq_lr_map); 1592 kfree(vgic_cpu->vgic_irq_lr_map);
1401 vgic_cpu->pending_shared = NULL; 1593 vgic_cpu->pending_shared = NULL;
1594 vgic_cpu->active_shared = NULL;
1595 vgic_cpu->pend_act_shared = NULL;
1402 vgic_cpu->vgic_irq_lr_map = NULL; 1596 vgic_cpu->vgic_irq_lr_map = NULL;
1403} 1597}
1404 1598
@@ -1408,9 +1602,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
1408 1602
1409 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; 1603 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
1410 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); 1604 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
1605 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
1606 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
1411 vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL); 1607 vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
1412 1608
1413 if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) { 1609 if (!vgic_cpu->pending_shared
1610 || !vgic_cpu->active_shared
1611 || !vgic_cpu->pend_act_shared
1612 || !vgic_cpu->vgic_irq_lr_map) {
1414 kvm_vgic_vcpu_destroy(vcpu); 1613 kvm_vgic_vcpu_destroy(vcpu);
1415 return -ENOMEM; 1614 return -ENOMEM;
1416 } 1615 }
@@ -1463,10 +1662,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
1463 kfree(dist->irq_spi_mpidr); 1662 kfree(dist->irq_spi_mpidr);
1464 kfree(dist->irq_spi_target); 1663 kfree(dist->irq_spi_target);
1465 kfree(dist->irq_pending_on_cpu); 1664 kfree(dist->irq_pending_on_cpu);
1665 kfree(dist->irq_active_on_cpu);
1466 dist->irq_sgi_sources = NULL; 1666 dist->irq_sgi_sources = NULL;
1467 dist->irq_spi_cpu = NULL; 1667 dist->irq_spi_cpu = NULL;
1468 dist->irq_spi_target = NULL; 1668 dist->irq_spi_target = NULL;
1469 dist->irq_pending_on_cpu = NULL; 1669 dist->irq_pending_on_cpu = NULL;
1670 dist->irq_active_on_cpu = NULL;
1470 dist->nr_cpus = 0; 1671 dist->nr_cpus = 0;
1471} 1672}
1472 1673
@@ -1502,6 +1703,7 @@ int vgic_init(struct kvm *kvm)
1502 ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); 1703 ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
1503 ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); 1704 ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
1504 ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); 1705 ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
1706 ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
1505 ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); 1707 ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
1506 ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); 1708 ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
1507 1709
@@ -1514,10 +1716,13 @@ int vgic_init(struct kvm *kvm)
1514 GFP_KERNEL); 1716 GFP_KERNEL);
1515 dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), 1717 dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
1516 GFP_KERNEL); 1718 GFP_KERNEL);
1719 dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
1720 GFP_KERNEL);
1517 if (!dist->irq_sgi_sources || 1721 if (!dist->irq_sgi_sources ||
1518 !dist->irq_spi_cpu || 1722 !dist->irq_spi_cpu ||
1519 !dist->irq_spi_target || 1723 !dist->irq_spi_target ||
1520 !dist->irq_pending_on_cpu) { 1724 !dist->irq_pending_on_cpu ||
1725 !dist->irq_active_on_cpu) {
1521 ret = -ENOMEM; 1726 ret = -ENOMEM;
1522 goto out; 1727 goto out;
1523 } 1728 }
@@ -1845,12 +2050,9 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1845 return r; 2050 return r;
1846} 2051}
1847 2052
1848int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) 2053int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
1849{ 2054{
1850 struct kvm_exit_mmio dev_attr_mmio; 2055 if (vgic_find_range(ranges, 4, offset))
1851
1852 dev_attr_mmio.len = 4;
1853 if (vgic_find_range(ranges, &dev_attr_mmio, offset))
1854 return 0; 2056 return 0;
1855 else 2057 else
1856 return -ENXIO; 2058 return -ENXIO;
@@ -1883,8 +2085,10 @@ static struct notifier_block vgic_cpu_nb = {
1883}; 2085};
1884 2086
1885static const struct of_device_id vgic_ids[] = { 2087static const struct of_device_id vgic_ids[] = {
1886 { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, }, 2088 { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
1887 { .compatible = "arm,gic-v3", .data = vgic_v3_probe, }, 2089 { .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
2090 { .compatible = "arm,gic-400", .data = vgic_v2_probe, },
2091 { .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
1888 {}, 2092 {},
1889}; 2093};
1890 2094
@@ -1932,3 +2136,38 @@ out_free_irq:
1932 free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); 2136 free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
1933 return ret; 2137 return ret;
1934} 2138}
2139
2140int kvm_irq_map_gsi(struct kvm *kvm,
2141 struct kvm_kernel_irq_routing_entry *entries,
2142 int gsi)
2143{
2144 return gsi;
2145}
2146
2147int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
2148{
2149 return pin;
2150}
2151
2152int kvm_set_irq(struct kvm *kvm, int irq_source_id,
2153 u32 irq, int level, bool line_status)
2154{
2155 unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
2156
2157 trace_kvm_set_irq(irq, level, irq_source_id);
2158
2159 BUG_ON(!vgic_initialized(kvm));
2160
2161 if (spi > kvm->arch.vgic.nr_irqs)
2162 return -EINVAL;
2163 return kvm_vgic_inject_irq(kvm, 0, spi, level);
2164
2165}
2166
2167/* MSI not implemented yet */
2168int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
2169 struct kvm *kvm, int irq_source_id,
2170 int level, bool line_status)
2171{
2172 return 0;
2173}
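
kvm_set_irq() is the glue that lets the generic irqfd code target the vgic: the GSI is offset by VGIC_NR_PRIVATE_IRQS (32), so GSI 0 lands on the first SPI of the distributor. A hedged usage sketch:

int ret;

/* GSI 0 asserted from the userspace source id ends up as SPI 32; the
 * vgic must already be initialized (see the BUG_ON above) */
ret = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                  0 /* gsi */, 1 /* assert */, false);
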
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 1e83bdf5f499..0df74cbb6200 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -20,6 +20,8 @@
20#ifndef __KVM_VGIC_H__ 20#ifndef __KVM_VGIC_H__
21#define __KVM_VGIC_H__ 21#define __KVM_VGIC_H__
22 22
23#include <kvm/iodev.h>
24
23#define VGIC_ADDR_UNDEF (-1) 25#define VGIC_ADDR_UNDEF (-1)
24#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) 26#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
25 27
@@ -57,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
57bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); 59bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
58void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); 60void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
59 61
62struct kvm_exit_mmio {
63 phys_addr_t phys_addr;
64 void *data;
65 u32 len;
66 bool is_write;
67 void *private;
68};
69
60void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, 70void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
61 phys_addr_t offset, int mode); 71 phys_addr_t offset, int mode);
62bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 72bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -74,7 +84,7 @@ void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
74 *((u32 *)mmio->data) = cpu_to_le32(value) & mask; 84 *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
75} 85}
76 86
77struct kvm_mmio_range { 87struct vgic_io_range {
78 phys_addr_t base; 88 phys_addr_t base;
79 unsigned long len; 89 unsigned long len;
80 int bits_per_irq; 90 int bits_per_irq;
@@ -82,6 +92,11 @@ struct kvm_mmio_range {
82 phys_addr_t offset); 92 phys_addr_t offset);
83}; 93};
84 94
95int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
96 const struct vgic_io_range *ranges,
97 int redist_id,
98 struct vgic_io_device *iodev);
99
85static inline bool is_in_range(phys_addr_t addr, unsigned long len, 100static inline bool is_in_range(phys_addr_t addr, unsigned long len,
86 phys_addr_t baseaddr, unsigned long size) 101 phys_addr_t baseaddr, unsigned long size)
87{ 102{
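
vgic_register_kvm_io_dev() is the new entry point that puts an emulated GIC region on the KVM io bus instead of dispatching MMIO by hand. A hedged call sketch for a GICv2 distributor; the range-table name, the iodev field and the -1 "not a redistributor" id are assumptions, not taken from this diff:

struct vgic_dist *dist = &kvm->arch.vgic;
int ret;

/* -1: assumed to mean "distributor, not a per-vcpu redistributor" */
ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
                               KVM_VGIC_V2_DIST_SIZE,
                               vgic_dist_ranges, -1,
                               &dist->dist_iodev);
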
@@ -89,14 +104,8 @@ static inline bool is_in_range(phys_addr_t addr, unsigned long len,
89} 104}
90 105
91const 106const
92struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, 107struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
93 struct kvm_exit_mmio *mmio, 108 int len, gpa_t offset);
94 phys_addr_t offset);
95
96bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
97 struct kvm_exit_mmio *mmio,
98 const struct kvm_mmio_range *ranges,
99 unsigned long mmio_base);
100 109
101bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 110bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
102 phys_addr_t offset, int vcpu_id, int access); 111 phys_addr_t offset, int vcpu_id, int access);
@@ -107,12 +116,20 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
107bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, 116bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
108 phys_addr_t offset, int vcpu_id); 117 phys_addr_t offset, int vcpu_id);
109 118
119bool vgic_handle_set_active_reg(struct kvm *kvm,
120 struct kvm_exit_mmio *mmio,
121 phys_addr_t offset, int vcpu_id);
122
123bool vgic_handle_clear_active_reg(struct kvm *kvm,
124 struct kvm_exit_mmio *mmio,
125 phys_addr_t offset, int vcpu_id);
126
110bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, 127bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
111 phys_addr_t offset); 128 phys_addr_t offset);
112 129
113void vgic_kick_vcpus(struct kvm *kvm); 130void vgic_kick_vcpus(struct kvm *kvm);
114 131
115int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset); 132int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
116int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); 133int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
117int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); 134int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
118 135
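
For reference, a minimal sketch of a handler plus range-table entry built on the renamed vgic_io_range, using the vgic_reg_access() helper declared above; the names and register offset are illustrative only, and the handle_mmio member is assumed from the partial struct shown here:

static bool handle_mmio_example_reg(struct kvm_vcpu *vcpu,
                                    struct kvm_exit_mmio *mmio,
                                    phys_addr_t offset)
{
        u32 reg = 0;

        /* reads return 0, writes are ignored */
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
        return false;   /* no distributor state changed */
}

static const struct vgic_io_range example_ranges[] = {
        {
                .base           = 0x000,
                .len            = 4,
                .bits_per_irq   = 0,
                .handle_mmio    = handle_mmio_example_reg,
        },
        {},
};
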
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 00d86427af0f..571c1ce37d15 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -8,7 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#include "iodev.h" 11#include <kvm/iodev.h>
12 12
13#include <linux/kvm_host.h> 13#include <linux/kvm_host.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
@@ -60,8 +60,9 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
60 return 1; 60 return 1;
61} 61}
62 62
63static int coalesced_mmio_write(struct kvm_io_device *this, 63static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
64 gpa_t addr, int len, const void *val) 64 struct kvm_io_device *this, gpa_t addr,
65 int len, const void *val)
65{ 66{
66 struct kvm_coalesced_mmio_dev *dev = to_mmio(this); 67 struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
67 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; 68 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 148b2392c762..9ff4193dfa49 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,7 +36,7 @@
36#include <linux/seqlock.h> 36#include <linux/seqlock.h>
37#include <trace/events/kvm.h> 37#include <trace/events/kvm.h>
38 38
39#include "iodev.h" 39#include <kvm/iodev.h>
40 40
41#ifdef CONFIG_HAVE_KVM_IRQFD 41#ifdef CONFIG_HAVE_KVM_IRQFD
42/* 42/*
@@ -311,6 +311,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
311 unsigned int events; 311 unsigned int events;
312 int idx; 312 int idx;
313 313
314 if (!kvm_arch_intc_initialized(kvm))
315 return -EAGAIN;
316
314 irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); 317 irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
315 if (!irqfd) 318 if (!irqfd)
316 return -ENOMEM; 319 return -ENOMEM;
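
The -EAGAIN check relies on the new kvm_arch_intc_initialized() hook so that an irqfd cannot be wired up before the in-kernel interrupt controller exists. On ARM the hook is expected to simply wrap the vgic predicate; a sketch (the arm-side definition is not part of this diff):

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return vgic_initialized(kvm);
}
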
@@ -712,8 +715,8 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
712 715
713/* MMIO/PIO writes trigger an event if the addr/val match */ 716/* MMIO/PIO writes trigger an event if the addr/val match */
714static int 717static int
715ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len, 718ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
716 const void *val) 719 int len, const void *val)
717{ 720{
718 struct _ioeventfd *p = to_ioeventfd(this); 721 struct _ioeventfd *p = to_ioeventfd(this);
719 722
diff --git a/virt/kvm/iodev.h b/virt/kvm/iodev.h
deleted file mode 100644
index 12fd3caffd2b..000000000000
--- a/virt/kvm/iodev.h
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 */
15
16#ifndef __KVM_IODEV_H__
17#define __KVM_IODEV_H__
18
19#include <linux/kvm_types.h>
20#include <asm/errno.h>
21
22struct kvm_io_device;
23
24/**
25 * kvm_io_device_ops are called under kvm slots_lock.
26 * read and write handlers return 0 if the transaction has been handled,
27 * or non-zero to have it passed to the next device.
28 **/
29struct kvm_io_device_ops {
30 int (*read)(struct kvm_io_device *this,
31 gpa_t addr,
32 int len,
33 void *val);
34 int (*write)(struct kvm_io_device *this,
35 gpa_t addr,
36 int len,
37 const void *val);
38 void (*destructor)(struct kvm_io_device *this);
39};
40
41
42struct kvm_io_device {
43 const struct kvm_io_device_ops *ops;
44};
45
46static inline void kvm_iodevice_init(struct kvm_io_device *dev,
47 const struct kvm_io_device_ops *ops)
48{
49 dev->ops = ops;
50}
51
52static inline int kvm_iodevice_read(struct kvm_io_device *dev,
53 gpa_t addr, int l, void *v)
54{
55 return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP;
56}
57
58static inline int kvm_iodevice_write(struct kvm_io_device *dev,
59 gpa_t addr, int l, const void *v)
60{
61 return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP;
62}
63
64static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
65{
66 if (dev->ops->destructor)
67 dev->ops->destructor(dev);
68}
69
70#endif /* __KVM_IODEV_H__ */
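
The header is not dropped outright: the includes above switch to <kvm/iodev.h>, so its contents move out of virt/ and the ops gain a vcpu argument. A sketch of the relocated interface, reconstructed from the call sites in this diff (an assumption, not the verbatim new header):

struct kvm_io_device_ops {
        int (*read)(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                    gpa_t addr, int len, void *val);
        int (*write)(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                     gpa_t addr, int len, const void *val);
        void (*destructor)(struct kvm_io_device *this);
};

static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
                                     struct kvm_io_device *dev,
                                     gpa_t addr, int l, const void *v)
{
        return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
                               : -EOPNOTSUPP;
}
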
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ce7888a15128..efe59ae64dc3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -16,7 +16,7 @@
16 * 16 *
17 */ 17 */
18 18
19#include "iodev.h" 19#include <kvm/iodev.h>
20 20
21#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
22#include <linux/kvm.h> 22#include <linux/kvm.h>
@@ -2994,7 +2994,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
2994 return off; 2994 return off;
2995} 2995}
2996 2996
2997static int __kvm_io_bus_write(struct kvm_io_bus *bus, 2997static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
2998 struct kvm_io_range *range, const void *val) 2998 struct kvm_io_range *range, const void *val)
2999{ 2999{
3000 int idx; 3000 int idx;
@@ -3005,7 +3005,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
3005 3005
3006 while (idx < bus->dev_count && 3006 while (idx < bus->dev_count &&
3007 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3007 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3008 if (!kvm_iodevice_write(bus->range[idx].dev, range->addr, 3008 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
3009 range->len, val)) 3009 range->len, val))
3010 return idx; 3010 return idx;
3011 idx++; 3011 idx++;
@@ -3015,7 +3015,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
3015} 3015}
3016 3016
3017/* kvm_io_bus_write - called under kvm->slots_lock */ 3017/* kvm_io_bus_write - called under kvm->slots_lock */
3018int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3018int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3019 int len, const void *val) 3019 int len, const void *val)
3020{ 3020{
3021 struct kvm_io_bus *bus; 3021 struct kvm_io_bus *bus;
@@ -3027,14 +3027,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3027 .len = len, 3027 .len = len,
3028 }; 3028 };
3029 3029
3030 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3030 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3031 r = __kvm_io_bus_write(bus, &range, val); 3031 r = __kvm_io_bus_write(vcpu, bus, &range, val);
3032 return r < 0 ? r : 0; 3032 return r < 0 ? r : 0;
3033} 3033}
3034 3034
3035/* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3035/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
3036int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3036int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
3037 int len, const void *val, long cookie) 3037 gpa_t addr, int len, const void *val, long cookie)
3038{ 3038{
3039 struct kvm_io_bus *bus; 3039 struct kvm_io_bus *bus;
3040 struct kvm_io_range range; 3040 struct kvm_io_range range;
@@ -3044,12 +3044,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3044 .len = len, 3044 .len = len,
3045 }; 3045 };
3046 3046
3047 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3047 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3048 3048
3049 /* First try the device referenced by cookie. */ 3049 /* First try the device referenced by cookie. */
3050 if ((cookie >= 0) && (cookie < bus->dev_count) && 3050 if ((cookie >= 0) && (cookie < bus->dev_count) &&
3051 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3051 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
3052 if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len, 3052 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
3053 val)) 3053 val))
3054 return cookie; 3054 return cookie;
3055 3055
@@ -3057,11 +3057,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3057 * cookie contained garbage; fall back to search and return the 3057 * cookie contained garbage; fall back to search and return the
3058 * correct cookie value. 3058 * correct cookie value.
3059 */ 3059 */
3060 return __kvm_io_bus_write(bus, &range, val); 3060 return __kvm_io_bus_write(vcpu, bus, &range, val);
3061} 3061}
3062 3062
3063static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range, 3063static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
3064 void *val) 3064 struct kvm_io_range *range, void *val)
3065{ 3065{
3066 int idx; 3066 int idx;
3067 3067
@@ -3071,7 +3071,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
3071 3071
3072 while (idx < bus->dev_count && 3072 while (idx < bus->dev_count &&
3073 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3073 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
3074 if (!kvm_iodevice_read(bus->range[idx].dev, range->addr, 3074 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
3075 range->len, val)) 3075 range->len, val))
3076 return idx; 3076 return idx;
3077 idx++; 3077 idx++;
@@ -3082,7 +3082,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
3082EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3082EXPORT_SYMBOL_GPL(kvm_io_bus_write);
3083 3083
3084/* kvm_io_bus_read - called under kvm->slots_lock */ 3084/* kvm_io_bus_read - called under kvm->slots_lock */
3085int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3085int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
3086 int len, void *val) 3086 int len, void *val)
3087{ 3087{
3088 struct kvm_io_bus *bus; 3088 struct kvm_io_bus *bus;
@@ -3094,8 +3094,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
3094 .len = len, 3094 .len = len,
3095 }; 3095 };
3096 3096
3097 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 3097 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
3098 r = __kvm_io_bus_read(bus, &range, val); 3098 r = __kvm_io_bus_read(vcpu, bus, &range, val);
3099 return r < 0 ? r : 0; 3099 return r < 0 ? r : 0;
3100} 3100}
3101 3101
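
With the bus helpers taking a vcpu, the lookup derives kvm from vcpu->kvm and the device callbacks learn which vcpu triggered the access. A hedged sketch of a caller, e.g. an MMIO exit handler (variable names are illustrative):

int ret;

if (is_write)
        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, data);
else
        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, data);

/* ret == 0: an in-kernel device handled it; otherwise fall back to
 * userspace MMIO emulation */
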