Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/arch_timer.c |  34
-rw-r--r--  virt/kvm/arm/vgic.c       | 584
-rw-r--r--  virt/kvm/ioapic.c         |   2
-rw-r--r--  virt/kvm/ioapic.h         |   1
-rw-r--r--  virt/kvm/kvm_main.c       |  81
-rw-r--r--  virt/kvm/vfio.c           |   6
6 files changed, 617 insertions(+), 91 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index c2e1ef4604e8..5081e809821f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -182,6 +182,40 @@ static void kvm_timer_init_interrupt(void *info)
 	enable_percpu_irq(host_vtimer_irq, 0);
 }
 
+int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	switch (regid) {
+	case KVM_REG_ARM_TIMER_CTL:
+		timer->cntv_ctl = value;
+		break;
+	case KVM_REG_ARM_TIMER_CNT:
+		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
+		break;
+	case KVM_REG_ARM_TIMER_CVAL:
+		timer->cntv_cval = value;
+		break;
+	default:
+		return -1;
+	}
+	return 0;
+}
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	switch (regid) {
+	case KVM_REG_ARM_TIMER_CTL:
+		return timer->cntv_ctl;
+	case KVM_REG_ARM_TIMER_CNT:
+		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+	case KVM_REG_ARM_TIMER_CVAL:
+		return timer->cntv_cval;
+	}
+	return (u64)-1;
+}
+
 static int kvm_timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *cpu)
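The two helpers above back the KVM_REG_ARM_TIMER_* ids exposed through KVM's ONE_REG interface, so user space can save and restore the virtual timer across migration. A minimal sketch of that usage, assuming the KVM_REG_ARM_TIMER_{CTL,CNT,CVAL} encodings from the ARM uapi headers and eliding error handling:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read and write one timer register of a VCPU via ONE_REG.
 * These calls are routed to kvm_arm_timer_get_reg() and
 * kvm_arm_timer_set_reg(); vcpu_fd is the fd from KVM_CREATE_VCPU. */
static uint64_t timer_reg_get(int vcpu_fd, uint64_t id)
{
        uint64_t val = 0;
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        return val;
}

static void timer_reg_set(int vcpu_fd, uint64_t id, uint64_t val)
{
        struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

        ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Note that setting KVM_REG_ARM_TIMER_CNT is not a direct store: the kernel recomputes cntvoff against the physical counter, so the guest's virtual counter resumes from the restored value.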
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 685fc72fc751..be456ce264d0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -71,6 +71,10 @@
 #define VGIC_ADDR_UNDEF		(-1)
 #define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)
 
+#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
+#define IMPLEMENTER_ARM		0x43b
+#define GICC_ARCH_VERSION_V2	0x2
+
 /* Physical address of vgic virtual cpu interface */
 static phys_addr_t vgic_vcpu_base;
 
@@ -312,7 +316,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
 	u32 word_offset = offset & 3;
 
 	switch (offset & ~3) {
-	case 0:			/* CTLR */
+	case 0:			/* GICD_CTLR */
 		reg = vcpu->kvm->arch.vgic.enabled;
 		vgic_reg_access(mmio, &reg, word_offset,
 				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
@@ -323,15 +327,15 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
 		}
 		break;
 
-	case 4:			/* TYPER */
+	case 4:			/* GICD_TYPER */
 		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
 		reg |= (VGIC_NR_IRQS >> 5) - 1;
 		vgic_reg_access(mmio, &reg, word_offset,
 				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
 		break;
 
-	case 8:			/* IIDR */
-		reg = 0x4B00043B;
+	case 8:			/* GICD_IIDR */
+		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
 		vgic_reg_access(mmio, &reg, word_offset,
 				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
 		break;
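Replacing the 0x4B00043B literal with the symbolic form is behavior-preserving; a quick standalone check of the arithmetic, using the GICD_IIDR field layout from the GICv2 spec (ProductID in bits [31:24], Implementer in bits [11:0]):

#include <assert.h>

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b

int main(void)
{
        unsigned int iidr = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);

        /* 0x4b << 24 = 0x4B000000; OR with 0x43b gives the old literal. */
        assert(iidr == 0x4B00043B);
        return 0;
}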
@@ -589,6 +593,156 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+#define LR_CPUID(lr) \
+	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
+#define LR_IRQID(lr) \
+	((lr) & GICH_LR_VIRTUALID)
+
+static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
+{
+	clear_bit(lr_nr, vgic_cpu->lr_used);
+	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
+	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+}
+
+/**
+ * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
+ * @vcpu: pointer to the vcpu struct whose LRs are to be drained
+ *
+ * Move any pending IRQs that have already been assigned to LRs back to the
+ * emulated distributor state so that the complete emulated state can be read
+ * from the main emulation structures without investigating the LRs.
+ *
+ * Note that IRQs in the active state in the LRs get their pending state moved
+ * to the distributor but the active state stays in the LRs, because we don't
+ * track the active state on the distributor side.
+ */
+static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	int vcpu_id = vcpu->vcpu_id;
+	int i, irq, source_cpu;
+	u32 *lr;
+
+	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
+		lr = &vgic_cpu->vgic_lr[i];
+		irq = LR_IRQID(*lr);
+		source_cpu = LR_CPUID(*lr);
+
+		/*
+		 * There are three options for the state bits:
+		 *
+		 * 01: pending
+		 * 10: active
+		 * 11: pending and active
+		 *
+		 * If the LR holds only an active interrupt (not pending) then
+		 * just leave it alone.
+		 */
+		if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
+			continue;
+
+		/*
+		 * Reestablish the pending state on the distributor and the
+		 * CPU interface.  It may have already been pending, but that
+		 * is fine, then we are only setting a few bits that were
+		 * already set.
+		 */
+		vgic_dist_irq_set(vcpu, irq);
+		if (irq < VGIC_NR_SGIS)
+			dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
+		*lr &= ~GICH_LR_PENDING_BIT;
+
+		/*
+		 * If there's no state left on the LR (it could still be
+		 * active), then the LR does not hold any useful info and can
+		 * be marked as free for other use.
+		 */
+		if (!(*lr & GICH_LR_STATE))
+			vgic_retire_lr(i, irq, vgic_cpu);
+
+		/* Finally update the VGIC state. */
+		vgic_update_state(vcpu->kvm);
+	}
+}
+
+/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
+static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int sgi;
+	int min_sgi = (offset & ~0x3);
+	int max_sgi = min_sgi + 3;
+	int vcpu_id = vcpu->vcpu_id;
+	u32 reg = 0;
+
+	/* Copy source SGIs from distributor side */
+	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+		int shift = 8 * (sgi - min_sgi);
+		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
+	}
+
+	mmio_data_write(mmio, ~0, reg);
+	return false;
+}
+
+static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_exit_mmio *mmio,
+					 phys_addr_t offset, bool set)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int sgi;
+	int min_sgi = (offset & ~0x3);
+	int max_sgi = min_sgi + 3;
+	int vcpu_id = vcpu->vcpu_id;
+	u32 reg;
+	bool updated = false;
+
+	reg = mmio_data_read(mmio, ~0);
+
+	/* Set or clear pending SGI sources on the distributor */
+	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
+		u8 mask = reg >> (8 * (sgi - min_sgi));
+		if (set) {
+			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
+				updated = true;
+			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
+		} else {
+			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
+				updated = true;
+			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
+		}
+	}
+
+	if (updated)
+		vgic_update_state(vcpu->kvm);
+
+	return updated;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio,
+				phys_addr_t offset)
+{
+	if (!mmio->is_write)
+		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+	else
+		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
+}
+
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset)
+{
+	if (!mmio->is_write)
+		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
+	else
+		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
+}
+
 /*
  * I would have liked to use the kvm_bus_io_*() API instead, but it
  * cannot cope with banked registers (only the VM pointer is passed
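The LR_CPUID()/LR_IRQID() macros added above unpack the GICv2 list-register layout. A self-contained sketch of that decoding, with the field positions written out (virtual ID in bits [9:0], source CPU in bits [12:10] for SGIs, pending/active state in bits [29:28]; the kernel takes these from <linux/irqchip/arm-gic.h>):

#include <stdint.h>
#include <stdio.h>

#define LR_VIRTUALID_MASK	0x3ffU
#define LR_CPUID_SHIFT		10
#define LR_CPUID_MASK		(0x7U << LR_CPUID_SHIFT)
#define LR_PENDING_BIT		(1U << 28)
#define LR_ACTIVE_BIT		(1U << 29)

int main(void)
{
        /* SGI 5, marked pending, injected by source CPU 2. */
        uint32_t lr = LR_PENDING_BIT | (2U << LR_CPUID_SHIFT) | 5U;

        printf("irq %u from cpu %u, pending=%d active=%d\n",
               (unsigned)(lr & LR_VIRTUALID_MASK),
               (unsigned)((lr & LR_CPUID_MASK) >> LR_CPUID_SHIFT),
               !!(lr & LR_PENDING_BIT), !!(lr & LR_ACTIVE_BIT));
        return 0;
}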
@@ -602,7 +756,7 @@ struct mmio_range {
 			    phys_addr_t offset);
 };
 
-static const struct mmio_range vgic_ranges[] = {
+static const struct mmio_range vgic_dist_ranges[] = {
 	{
 		.base = GIC_DIST_CTRL,
 		.len = 12,
@@ -663,20 +817,29 @@ static const struct mmio_range vgic_ranges[] = {
 		.len = 4,
 		.handle_mmio = handle_mmio_sgi_reg,
 	},
+	{
+		.base = GIC_DIST_SGI_PENDING_CLEAR,
+		.len = VGIC_NR_SGIS,
+		.handle_mmio = handle_mmio_sgi_clear,
+	},
+	{
+		.base = GIC_DIST_SGI_PENDING_SET,
+		.len = VGIC_NR_SGIS,
+		.handle_mmio = handle_mmio_sgi_set,
+	},
 	{}
 };
 
 static const
 struct mmio_range *find_matching_range(const struct mmio_range *ranges,
 					struct kvm_exit_mmio *mmio,
-					phys_addr_t base)
+					phys_addr_t offset)
 {
 	const struct mmio_range *r = ranges;
-	phys_addr_t addr = mmio->phys_addr - base;
 
 	while (r->len) {
-		if (addr >= r->base &&
-		    (addr + mmio->len) <= (r->base + r->len))
+		if (offset >= r->base &&
+		    (offset + mmio->len) <= (r->base + r->len))
 			return r;
 		r++;
 	}
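The two new ranges route GICD_{S,C}PENDSGIR accesses to the handlers above. Those blocks dedicate one byte to each SGI, with one bit per possible source CPU, so a 32-bit access covers four SGIs. A userspace model of the set/clear semantics (names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

static uint8_t sgi_sources[16];	/* per-target-VCPU: byte per SGI, bit per source CPU */

/* Apply a 32-bit write at byte offset 'off' into the SPENDSGIR
 * (set=true) or CPENDSGIR (set=false) block; mirrors
 * write_set_clear_sgi_pend_reg() above. */
static bool sgi_pend_write(unsigned int off, uint32_t val, bool set)
{
        unsigned int min_sgi = off & ~0x3U;	/* four SGIs per register */
        bool updated = false;

        for (unsigned int sgi = min_sgi; sgi <= min_sgi + 3; sgi++) {
                uint8_t mask = val >> (8 * (sgi - min_sgi));

                if (set) {
                        updated |= (sgi_sources[sgi] & mask) != mask;
                        sgi_sources[sgi] |= mask;
                } else {
                        updated |= (sgi_sources[sgi] & mask) != 0;
                        sgi_sources[sgi] &= ~mask;
                }
        }
        return updated;
}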
@@ -713,7 +876,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return true;
 	}
 
-	range = find_matching_range(vgic_ranges, mmio, base);
+	offset = mmio->phys_addr - base;
+	range = find_matching_range(vgic_dist_ranges, mmio, offset);
 	if (unlikely(!range || !range->handle_mmio)) {
 		pr_warn("Unhandled access %d %08llx %d\n",
 			mmio->is_write, mmio->phys_addr, mmio->len);
@@ -824,8 +988,6 @@ static void vgic_update_state(struct kvm *kvm)
 	}
 }
 
-#define LR_CPUID(lr) \
-	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
 #define MK_LR_PEND(src, irq) \
 	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
 
@@ -847,9 +1009,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
 
 		if (!vgic_irq_is_enabled(vcpu, irq)) {
-			vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-			clear_bit(lr, vgic_cpu->lr_used);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
+			vgic_retire_lr(lr, irq, vgic_cpu);
 			if (vgic_irq_is_active(vcpu, irq))
 				vgic_irq_clear_active(vcpu, irq);
 		}
@@ -1243,15 +1403,19 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+/**
+ * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
+ * @vcpu: pointer to the vcpu struct
+ *
+ * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
+ * this vcpu and enable the VGIC for this VCPU.
+ */
 int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	int i;
 
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return 0;
-
 	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
 		return -EBUSY;
 
@@ -1383,10 +1547,22 @@ out:
 	return ret;
 }
 
+/**
+ * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * @kvm: pointer to the kvm struct
+ *
+ * Map the virtual CPU interface into the VM before running any VCPUs.  We
+ * can't do this at creation time, because user space must first set the
+ * virtual CPU interface address in the guest physical address space.  Also
+ * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ */
 int kvm_vgic_init(struct kvm *kvm)
 {
 	int ret = 0, i;
 
+	if (!irqchip_in_kernel(kvm))
+		return 0;
+
 	mutex_lock(&kvm->lock);
 
 	if (vgic_initialized(kvm))
@@ -1409,7 +1585,6 @@ int kvm_vgic_init(struct kvm *kvm)
 	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
 		vgic_set_target_reg(kvm, 0, i);
 
-	kvm_timer_init(kvm);
 	kvm->arch.vgic.ready = true;
 out:
 	mutex_unlock(&kvm->lock);
@@ -1418,20 +1593,47 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-	int ret = 0;
+	int i, vcpu_lock_idx = -1, ret = 0;
+	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm->lock);
 
-	if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
+	if (kvm->arch.vgic.vctrl_base) {
 		ret = -EEXIST;
 		goto out;
 	}
 
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run while we create the vgic.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!mutex_trylock(&vcpu->mutex)) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+		vcpu_lock_idx = i;
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu->arch.has_run_once) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+	}
+
 	spin_lock_init(&kvm->arch.vgic.lock);
 	kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
 	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
 
+out_unlock:
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+		mutex_unlock(&vcpu->mutex);
+	}
+
 out:
 	mutex_unlock(&kvm->lock);
 	return ret;
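The trylock loop plus reverse unlock is a small locking pattern of its own: take every vcpu->mutex or bail out, then release exactly the ones taken. The same idiom in isolation, with pthread mutexes standing in for vcpu->mutex:

#include <pthread.h>
#include <stdbool.h>

/* Try to take all n locks; on failure the caller must release
 * locks[0..*locked_upto] (mirrors the out_unlock path above). */
static bool lock_all(pthread_mutex_t *locks, int n, int *locked_upto)
{
        *locked_upto = -1;
        for (int i = 0; i < n; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0)
                        return false;
                *locked_upto = i;
        }
        return true;
}

static void unlock_taken(pthread_mutex_t *locks, int locked_upto)
{
        for (; locked_upto >= 0; locked_upto--)
                pthread_mutex_unlock(&locks[locked_upto]);
}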
@@ -1455,6 +1655,12 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
 {
 	int ret;
 
+	if (addr & ~KVM_PHYS_MASK)
+		return -E2BIG;
+
+	if (addr & (SZ_4K - 1))
+		return -EINVAL;
+
 	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
 		return -EEXIST;
 	if (addr + size < addr)
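The two new checks encode the contract for user space: the base must fit inside the guest physical address space and be 4K-aligned. The same validation stated standalone (the 40-bit IPA size is a stand-in for KVM_PHYS_MASK on this architecture):

#include <stdbool.h>
#include <stdint.h>

#define SZ_4K		0x1000ULL
#define GUEST_PA_MASK	((1ULL << 40) - 1)	/* stand-in for KVM_PHYS_MASK */

static bool vgic_base_ok(uint64_t addr)
{
        if (addr & ~GUEST_PA_MASK)	/* outside guest PA space: -E2BIG */
                return false;
        if (addr & (SZ_4K - 1))		/* not 4K aligned: -EINVAL */
                return false;
        return true;
}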
@@ -1467,26 +1673,41 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
 	return ret;
 }
 
-int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
+/**
+ * kvm_vgic_addr - set or get vgic VM base addresses
+ * @kvm:   pointer to the vm struct
+ * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
+ * @addr:  pointer to address value
+ * @write: if true set the address in the VM address space, if false read the
+ *         address
+ *
+ * Set or get the vgic base addresses for the distributor and the virtual CPU
+ * interface in the VM physical address space.  These addresses are properties
+ * of the emulated core/SoC and therefore user space initially knows this
+ * information.
+ */
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 {
 	int r = 0;
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 
-	if (addr & ~KVM_PHYS_MASK)
-		return -E2BIG;
-
-	if (addr & (SZ_4K - 1))
-		return -EINVAL;
-
 	mutex_lock(&kvm->lock);
 	switch (type) {
 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
-		r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
-				       addr, KVM_VGIC_V2_DIST_SIZE);
+		if (write) {
+			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
+					       *addr, KVM_VGIC_V2_DIST_SIZE);
+		} else {
+			*addr = vgic->vgic_dist_base;
+		}
 		break;
 	case KVM_VGIC_V2_ADDR_TYPE_CPU:
-		r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
-				       addr, KVM_VGIC_V2_CPU_SIZE);
+		if (write) {
+			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
+					       *addr, KVM_VGIC_V2_CPU_SIZE);
+		} else {
+			*addr = vgic->vgic_cpu_base;
+		}
 		break;
 	default:
 		r = -ENODEV;
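User space reaches this function through the KVM_DEV_ARM_VGIC_GRP_ADDR device attribute. A sketch of the write direction (the base address in the usage comment is an arbitrary example, not a requirement; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: program the guest-physical bases of the distributor and the
 * CPU interface via the vgic device fd from KVM_CREATE_DEVICE. */
static int vgic_set_addr(int vgic_fd, uint32_t type, uint64_t addr)
{
        struct kvm_device_attr attr = {
                .group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
                .attr	= type,	/* KVM_VGIC_V2_ADDR_TYPE_DIST or _CPU */
                .addr	= (uintptr_t)&addr,
        };

        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

/* e.g. vgic_set_addr(vgic_fd, KVM_VGIC_V2_ADDR_TYPE_DIST, 0x08000000);
 * the address must be 4K-aligned and inside the guest PA space, per
 * the checks now done in vgic_ioaddr_assign(). */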
@@ -1495,3 +1716,302 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
 	mutex_unlock(&kvm->lock);
 	return r;
 }
+
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u32 reg, mask = 0, shift = 0;
+	bool updated = false;
+
+	switch (offset & ~0x3) {
+	case GIC_CPU_CTRL:
+		mask = GICH_VMCR_CTRL_MASK;
+		shift = GICH_VMCR_CTRL_SHIFT;
+		break;
+	case GIC_CPU_PRIMASK:
+		mask = GICH_VMCR_PRIMASK_MASK;
+		shift = GICH_VMCR_PRIMASK_SHIFT;
+		break;
+	case GIC_CPU_BINPOINT:
+		mask = GICH_VMCR_BINPOINT_MASK;
+		shift = GICH_VMCR_BINPOINT_SHIFT;
+		break;
+	case GIC_CPU_ALIAS_BINPOINT:
+		mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
+		shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+		break;
+	}
+
+	if (!mmio->is_write) {
+		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+		mmio_data_write(mmio, ~0, reg);
+	} else {
+		reg = mmio_data_read(mmio, ~0);
+		reg = (reg << shift) & mask;
+		if (reg != (vgic_cpu->vgic_vmcr & mask))
+			updated = true;
+		vgic_cpu->vgic_vmcr &= ~mask;
+		vgic_cpu->vgic_vmcr |= reg;
+	}
+	return updated;
+}
+
+static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
+			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
+}
+
+static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset)
+{
+	u32 reg;
+
+	if (mmio->is_write)
+		return false;
+
+	/* GICC_IIDR */
+	reg = (PRODUCT_ID_KVM << 20) |
+	      (GICC_ARCH_VERSION_V2 << 16) |
+	      (IMPLEMENTER_ARM << 0);
+	mmio_data_write(mmio, ~0, reg);
+	return false;
+}
+
+/*
+ * CPU Interface Register accesses - these are not accessed by the VM, but by
+ * user space for saving and restoring VGIC state.
+ */
+static const struct mmio_range vgic_cpu_ranges[] = {
+	{
+		.base = GIC_CPU_CTRL,
+		.len = 12,
+		.handle_mmio = handle_cpu_mmio_misc,
+	},
+	{
+		.base = GIC_CPU_ALIAS_BINPOINT,
+		.len = 4,
+		.handle_mmio = handle_mmio_abpr,
+	},
+	{
+		.base = GIC_CPU_ACTIVEPRIO,
+		.len = 16,
+		.handle_mmio = handle_mmio_raz_wi,
+	},
+	{
+		.base = GIC_CPU_IDENT,
+		.len = 4,
+		.handle_mmio = handle_cpu_mmio_ident,
+	},
+	{}
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+				 struct kvm_device_attr *attr,
+				 u32 *reg, bool is_write)
+{
+	const struct mmio_range *r = NULL, *ranges;
+	phys_addr_t offset;
+	int ret, cpuid, c;
+	struct kvm_vcpu *vcpu, *tmp_vcpu;
+	struct vgic_dist *vgic;
+	struct kvm_exit_mmio mmio;
+
+	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+	mutex_lock(&dev->kvm->lock);
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	vgic = &dev->kvm->arch.vgic;
+
+	mmio.len = 4;
+	mmio.is_write = is_write;
+	if (is_write)
+		mmio_data_write(&mmio, ~0, *reg);
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		mmio.phys_addr = vgic->vgic_dist_base + offset;
+		ranges = vgic_dist_ranges;
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		mmio.phys_addr = vgic->vgic_cpu_base + offset;
+		ranges = vgic_cpu_ranges;
+		break;
+	default:
+		BUG();
+	}
+	r = find_matching_range(ranges, &mmio, offset);
+
+	if (unlikely(!r || !r->handle_mmio)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	spin_lock(&vgic->lock);
+
+	/*
+	 * Ensure that no other VCPU is running by checking the vcpu->cpu
+	 * field.  If no other VCPUs are running we can safely access the VGIC
+	 * state, because even if another VCPU is run after this point, that
+	 * VCPU will not touch the vgic state, because it will block on
+	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+		if (unlikely(tmp_vcpu->cpu != -1)) {
+			ret = -EBUSY;
+			goto out_vgic_unlock;
+		}
+	}
+
+	/*
+	 * Move all pending IRQs from the LRs on all VCPUs so the pending
+	 * state can be properly represented in the register state accessible
+	 * through this API.
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
+		vgic_unqueue_irqs(tmp_vcpu);
+
+	offset -= r->base;
+	r->handle_mmio(vcpu, &mmio, offset);
+
+	if (!is_write)
+		*reg = mmio_data_read(&mmio, ~0);
+
+	ret = 0;
+out_vgic_unlock:
+	spin_unlock(&vgic->lock);
+out:
+	mutex_unlock(&dev->kvm->lock);
+	return ret;
+}
+
+static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+		u64 addr;
+		unsigned long type = (unsigned long)attr->attr;
+
+		if (copy_from_user(&addr, uaddr, sizeof(addr)))
+			return -EFAULT;
+
+		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
+		return (r == -ENODEV) ? -ENXIO : r;
+	}
+
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg;
+
+		if (get_user(reg, uaddr))
+			return -EFAULT;
+
+		return vgic_attr_regs_access(dev, attr, &reg, true);
+	}
+
+	}
+
+	return -ENXIO;
+}
+
+static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	int r = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+		u64 addr;
+		unsigned long type = (unsigned long)attr->attr;
+
+		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+		if (r)
+			return (r == -ENODEV) ? -ENXIO : r;
+
+		if (copy_to_user(uaddr, &addr, sizeof(addr)))
+			return -EFAULT;
+		break;
+	}
+
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg = 0;
+
+		r = vgic_attr_regs_access(dev, attr, &reg, false);
+		if (r)
+			return r;
+		r = put_user(reg, uaddr);
+		break;
+	}
+
+	}
+
+	return r;
+}
+
+static int vgic_has_attr_regs(const struct mmio_range *ranges,
+			      phys_addr_t offset)
+{
+	struct kvm_exit_mmio dev_attr_mmio;
+
+	dev_attr_mmio.len = 4;
+	if (find_matching_range(ranges, &dev_attr_mmio, offset))
+		return 0;
+	else
+		return -ENXIO;
+}
+
+static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+	phys_addr_t offset;
+
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_ADDR:
+		switch (attr->attr) {
+		case KVM_VGIC_V2_ADDR_TYPE_DIST:
+		case KVM_VGIC_V2_ADDR_TYPE_CPU:
+			return 0;
+		}
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+		return vgic_has_attr_regs(vgic_dist_ranges, offset);
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
+	}
+	return -ENXIO;
+}
+
+static void vgic_destroy(struct kvm_device *dev)
+{
+	kfree(dev);
+}
+
+static int vgic_create(struct kvm_device *dev, u32 type)
+{
+	return kvm_vgic_create(dev->kvm);
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+	.name = "kvm-arm-vgic",
+	.create = vgic_create,
+	.destroy = vgic_destroy,
+	.set_attr = vgic_set_attr,
+	.get_attr = vgic_get_attr,
+	.has_attr = vgic_has_attr,
+};
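Taken together, vgic_attr_regs_access() and the two range tables give user space word-granular access to the emulated GIC for save and restore. A sketch of reading one banked distributor register (encodings per the ARM uapi kvm.h; all VCPUs must be stopped or the kernel returns -EBUSY):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read a 32-bit distributor register as seen by VCPU 'cpuid'. */
static int vgic_dist_reg_get(int vgic_fd, uint64_t cpuid,
                             uint32_t offset, uint32_t *val)
{
        struct kvm_device_attr attr = {
                .group	= KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                .attr	= (cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
                          (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
                .addr	= (uintptr_t)val,
        };

        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}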
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 2d682977ce82..ce9ed99ad7dc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -520,7 +520,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	return 0;
 }
 
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
 	int i;
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 615d8c995c3c..90d43e95dcf8 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -91,7 +91,6 @@ void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4f588bc94186..03a0381b1cb7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,6 +95,12 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new, u64 last_generation);
+
+static void kvm_release_pfn_dirty(pfn_t pfn);
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot, gfn_t gfn);
 
 bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -553,7 +559,7 @@ static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
 	free->npages = 0;
 }
 
-void kvm_free_physmem(struct kvm *kvm)
+static void kvm_free_physmem(struct kvm *kvm)
 {
 	struct kvm_memslots *slots = kvm->memslots;
 	struct kvm_memory_slot *memslot;
@@ -675,8 +681,9 @@ static void sort_memslots(struct kvm_memslots *slots)
 		slots->id_to_index[slots->memslots[i].id] = i;
 }
 
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-		     u64 last_generation)
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new,
+			    u64 last_generation)
 {
 	if (new) {
 		int id = new->id;
@@ -924,8 +931,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct kvm_userspace_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+					  struct kvm_userspace_memory_region *mem)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
@@ -1047,7 +1054,7 @@ static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-				gfn_t gfn)
+				 gfn_t gfn)
 {
 	return gfn_to_hva_many(slot, gfn, NULL);
 }
@@ -1387,18 +1394,11 @@ void kvm_release_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-void kvm_release_pfn_dirty(pfn_t pfn)
+static void kvm_release_pfn_dirty(pfn_t pfn)
 {
 	kvm_set_pfn_dirty(pfn);
 	kvm_release_pfn_clean(pfn);
 }
-EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
-
-void kvm_set_page_dirty(struct page *page)
-{
-	kvm_set_pfn_dirty(page_to_pfn(page));
-}
-EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
@@ -1640,8 +1640,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			     gfn_t gfn)
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot,
+				    gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1710,14 +1711,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
-void kvm_resched(struct kvm_vcpu *vcpu)
-{
-	if (!need_resched())
-		return;
-	cond_resched();
-}
-EXPORT_SYMBOL_GPL(kvm_resched);
-
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct pid *pid;
@@ -1742,7 +1735,6 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
-#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 /*
  * Helper that checks whether a VCPU is eligible for directed yield.
  * Most eligible candidate to yield is decided by following heuristics:
@@ -1765,8 +1757,9 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 * locking does not harm.  It may result in trying to yield to same VCPU, fail
 * and continue with next VCPU and so on.
 */
-bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 	bool eligible;
 
 	eligible = !vcpu->spin_loop.in_spin_loop ||
@@ -1777,8 +1770,10 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 	kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
 
 	return eligible;
-}
+#else
+	return true;
 #endif
+}
 
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
@@ -2284,6 +2279,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 		ops = &kvm_vfio_ops;
 		break;
 #endif
+#ifdef CONFIG_KVM_ARM_VGIC
+	case KVM_DEV_TYPE_ARM_VGIC_V2:
+		ops = &kvm_arm_vgic_v2_ops;
+		break;
+#endif
 	default:
 		return -ENODEV;
 	}
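The new case makes the VGIC a user of the generic device API alongside kvm-vfio. Creating it from user space, sketched (vm_fd is the fd from KVM_CREATE_VM; error handling elided):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: instantiate the in-kernel GICv2; the returned fd is the
 * handle used with KVM_{SET,GET,HAS}_DEVICE_ATTR in the vgic code. */
static int create_vgic(int vm_fd)
{
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                return -1;
        return cd.fd;
}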
@@ -2939,33 +2939,6 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 	return r < 0 ? r : 0;
 }
 
-/* kvm_io_bus_read_cookie - called under kvm->slots_lock */
-int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
-			   int len, void *val, long cookie)
-{
-	struct kvm_io_bus *bus;
-	struct kvm_io_range range;
-
-	range = (struct kvm_io_range) {
-		.addr = addr,
-		.len = len,
-	};
-
-	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
-
-	/* First try the device referenced by cookie. */
-	if ((cookie >= 0) && (cookie < bus->dev_count) &&
-	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
-		if (!kvm_iodevice_read(bus->range[cookie].dev, addr, len,
-				       val))
-			return cookie;
-
-	/*
-	 * cookie contained garbage; fall back to search and return the
-	 * correct cookie value.
-	 */
-	return __kvm_io_bus_read(bus, &range, val);
-}
 
 /* Caller must hold slots_lock. */
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index ca4260e35037..b4f9507ae650 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -101,14 +101,14 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 	struct kvm_vfio *kv = dev->private;
 	struct vfio_group *vfio_group;
 	struct kvm_vfio_group *kvg;
-	void __user *argp = (void __user *)arg;
+	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
 	struct fd f;
 	int32_t fd;
 	int ret;
 
 	switch (attr) {
 	case KVM_DEV_VFIO_GROUP_ADD:
-		if (get_user(fd, (int32_t __user *)argp))
+		if (get_user(fd, argp))
 			return -EFAULT;
 
 		f = fdget(fd);
@@ -148,7 +148,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		return 0;
 
 	case KVM_DEV_VFIO_GROUP_DEL:
-		if (get_user(fd, (int32_t __user *)argp))
+		if (get_user(fd, argp))
 			return -EFAULT;
 
 		f = fdget(fd);
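For completeness, this is the call path the corrected get_user() services: user space hands the kvm-vfio device an open VFIO group fd through the same device-attr plumbing. A sketch (assumes a kvm-vfio device already created with KVM_CREATE_DEVICE of type KVM_DEV_TYPE_VFIO):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: attach a VFIO group to a VM via the kvm-vfio device. */
static int kvm_vfio_group_add(int kvm_vfio_fd, int32_t group_fd)
{
        struct kvm_device_attr attr = {
                .group	= KVM_DEV_VFIO_GROUP,
                .attr	= KVM_DEV_VFIO_GROUP_ADD,
                .addr	= (uintptr_t)&group_fd,	/* the int32_t the kernel get_user()s */
        };

        return ioctl(kvm_vfio_fd, KVM_SET_DEVICE_ATTR, &attr);
}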