aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndre Przywara <andre.przywara@arm.com>2015-03-26 10:39:36 -0400
committerMarc Zyngier <marc.zyngier@arm.com>2015-03-30 12:07:08 -0400
commit0ba10d53920d030cd7772a9553b13b5ea1aa4115 (patch)
tree9269c2fe2a7e0bb926ba6a68d0aae0a09157516d
parenta9cf86f62b785202684c3ba92895946f03d910c8 (diff)
KVM: arm/arm64: merge GICv3 RD_base and SGI_base register frames
Currently we handle the redistributor registers in two separate MMIO regions, one for the overall behaviour and SPIs and one for the SGIs/PPIs. The latter forces the creation of _two_ KVM I/O bus devices for each redistributor. Since the spec mandates those two pages to be contiguous, we could as well merge them and save the churn with the second KVM I/O bus device. Signed-off-by: Andre Przywara <andre.przywara@arm.com> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--virt/kvm/arm/vgic-v3-emul.c174
1 file changed, 83 insertions, 91 deletions
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index 14943e3f7248..2f03a36a9312 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -502,6 +502,43 @@ static const struct vgic_io_range vgic_v3_dist_ranges[] = {
502 {}, 502 {},
503}; 503};
504 504
505static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
506 struct kvm_exit_mmio *mmio,
507 phys_addr_t offset)
508{
509 /* since we don't support LPIs, this register is zero for now */
510 vgic_reg_access(mmio, NULL, offset,
511 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
512 return false;
513}
514
515static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
516 struct kvm_exit_mmio *mmio,
517 phys_addr_t offset)
518{
519 u32 reg;
520 u64 mpidr;
521 struct kvm_vcpu *redist_vcpu = mmio->private;
522 int target_vcpu_id = redist_vcpu->vcpu_id;
523
524 /* the upper 32 bits contain the affinity value */
525 if ((offset & ~3) == 4) {
526 mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
527 reg = compress_mpidr(mpidr);
528
529 vgic_reg_access(mmio, &reg, offset,
530 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
531 return false;
532 }
533
534 reg = redist_vcpu->vcpu_id << 8;
535 if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
536 reg |= GICR_TYPER_LAST;
537 vgic_reg_access(mmio, &reg, offset,
538 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
539 return false;
540}
541
505static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu, 542static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
506 struct kvm_exit_mmio *mmio, 543 struct kvm_exit_mmio *mmio,
507 phys_addr_t offset) 544 phys_addr_t offset)
@@ -570,146 +607,107 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
570 return vgic_handle_cfg_reg(reg, mmio, offset); 607 return vgic_handle_cfg_reg(reg, mmio, offset);
571} 608}
572 609
573static const struct vgic_io_range vgic_redist_sgi_ranges[] = { 610#define SGI_base(x) ((x) + SZ_64K)
611
612static const struct vgic_io_range vgic_redist_ranges[] = {
613 {
614 .base = GICR_CTLR,
615 .len = 0x04,
616 .bits_per_irq = 0,
617 .handle_mmio = handle_mmio_ctlr_redist,
618 },
574 { 619 {
575 .base = GICR_IGROUPR0, 620 .base = GICR_TYPER,
621 .len = 0x08,
622 .bits_per_irq = 0,
623 .handle_mmio = handle_mmio_typer_redist,
624 },
625 {
626 .base = GICR_IIDR,
627 .len = 0x04,
628 .bits_per_irq = 0,
629 .handle_mmio = handle_mmio_iidr,
630 },
631 {
632 .base = GICR_WAKER,
633 .len = 0x04,
634 .bits_per_irq = 0,
635 .handle_mmio = handle_mmio_raz_wi,
636 },
637 {
638 .base = GICR_IDREGS,
639 .len = 0x30,
640 .bits_per_irq = 0,
641 .handle_mmio = handle_mmio_idregs,
642 },
643 {
644 .base = SGI_base(GICR_IGROUPR0),
576 .len = 0x04, 645 .len = 0x04,
577 .bits_per_irq = 1, 646 .bits_per_irq = 1,
578 .handle_mmio = handle_mmio_rao_wi, 647 .handle_mmio = handle_mmio_rao_wi,
579 }, 648 },
580 { 649 {
581 .base = GICR_ISENABLER0, 650 .base = SGI_base(GICR_ISENABLER0),
582 .len = 0x04, 651 .len = 0x04,
583 .bits_per_irq = 1, 652 .bits_per_irq = 1,
584 .handle_mmio = handle_mmio_set_enable_reg_redist, 653 .handle_mmio = handle_mmio_set_enable_reg_redist,
585 }, 654 },
586 { 655 {
587 .base = GICR_ICENABLER0, 656 .base = SGI_base(GICR_ICENABLER0),
588 .len = 0x04, 657 .len = 0x04,
589 .bits_per_irq = 1, 658 .bits_per_irq = 1,
590 .handle_mmio = handle_mmio_clear_enable_reg_redist, 659 .handle_mmio = handle_mmio_clear_enable_reg_redist,
591 }, 660 },
592 { 661 {
593 .base = GICR_ISPENDR0, 662 .base = SGI_base(GICR_ISPENDR0),
594 .len = 0x04, 663 .len = 0x04,
595 .bits_per_irq = 1, 664 .bits_per_irq = 1,
596 .handle_mmio = handle_mmio_set_pending_reg_redist, 665 .handle_mmio = handle_mmio_set_pending_reg_redist,
597 }, 666 },
598 { 667 {
599 .base = GICR_ICPENDR0, 668 .base = SGI_base(GICR_ICPENDR0),
600 .len = 0x04, 669 .len = 0x04,
601 .bits_per_irq = 1, 670 .bits_per_irq = 1,
602 .handle_mmio = handle_mmio_clear_pending_reg_redist, 671 .handle_mmio = handle_mmio_clear_pending_reg_redist,
603 }, 672 },
604 { 673 {
605 .base = GICR_ISACTIVER0, 674 .base = SGI_base(GICR_ISACTIVER0),
606 .len = 0x04, 675 .len = 0x04,
607 .bits_per_irq = 1, 676 .bits_per_irq = 1,
608 .handle_mmio = handle_mmio_raz_wi, 677 .handle_mmio = handle_mmio_raz_wi,
609 }, 678 },
610 { 679 {
611 .base = GICR_ICACTIVER0, 680 .base = SGI_base(GICR_ICACTIVER0),
612 .len = 0x04, 681 .len = 0x04,
613 .bits_per_irq = 1, 682 .bits_per_irq = 1,
614 .handle_mmio = handle_mmio_raz_wi, 683 .handle_mmio = handle_mmio_raz_wi,
615 }, 684 },
616 { 685 {
617 .base = GICR_IPRIORITYR0, 686 .base = SGI_base(GICR_IPRIORITYR0),
618 .len = 0x20, 687 .len = 0x20,
619 .bits_per_irq = 8, 688 .bits_per_irq = 8,
620 .handle_mmio = handle_mmio_priority_reg_redist, 689 .handle_mmio = handle_mmio_priority_reg_redist,
621 }, 690 },
622 { 691 {
623 .base = GICR_ICFGR0, 692 .base = SGI_base(GICR_ICFGR0),
624 .len = 0x08, 693 .len = 0x08,
625 .bits_per_irq = 2, 694 .bits_per_irq = 2,
626 .handle_mmio = handle_mmio_cfg_reg_redist, 695 .handle_mmio = handle_mmio_cfg_reg_redist,
627 }, 696 },
628 { 697 {
629 .base = GICR_IGRPMODR0, 698 .base = SGI_base(GICR_IGRPMODR0),
630 .len = 0x04, 699 .len = 0x04,
631 .bits_per_irq = 1, 700 .bits_per_irq = 1,
632 .handle_mmio = handle_mmio_raz_wi, 701 .handle_mmio = handle_mmio_raz_wi,
633 }, 702 },
634 { 703 {
635 .base = GICR_NSACR, 704 .base = SGI_base(GICR_NSACR),
636 .len = 0x04, 705 .len = 0x04,
637 .handle_mmio = handle_mmio_raz_wi, 706 .handle_mmio = handle_mmio_raz_wi,
638 }, 707 },
639 {}, 708 {},
640}; 709};
641 710
642static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
643 struct kvm_exit_mmio *mmio,
644 phys_addr_t offset)
645{
646 /* since we don't support LPIs, this register is zero for now */
647 vgic_reg_access(mmio, NULL, offset,
648 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
649 return false;
650}
651
652static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
653 struct kvm_exit_mmio *mmio,
654 phys_addr_t offset)
655{
656 u32 reg;
657 u64 mpidr;
658 struct kvm_vcpu *redist_vcpu = mmio->private;
659 int target_vcpu_id = redist_vcpu->vcpu_id;
660
661 /* the upper 32 bits contain the affinity value */
662 if ((offset & ~3) == 4) {
663 mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
664 reg = compress_mpidr(mpidr);
665
666 vgic_reg_access(mmio, &reg, offset,
667 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
668 return false;
669 }
670
671 reg = redist_vcpu->vcpu_id << 8;
672 if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
673 reg |= GICR_TYPER_LAST;
674 vgic_reg_access(mmio, &reg, offset,
675 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
676 return false;
677}
678
679static const struct vgic_io_range vgic_redist_ranges[] = {
680 {
681 .base = GICR_CTLR,
682 .len = 0x04,
683 .bits_per_irq = 0,
684 .handle_mmio = handle_mmio_ctlr_redist,
685 },
686 {
687 .base = GICR_TYPER,
688 .len = 0x08,
689 .bits_per_irq = 0,
690 .handle_mmio = handle_mmio_typer_redist,
691 },
692 {
693 .base = GICR_IIDR,
694 .len = 0x04,
695 .bits_per_irq = 0,
696 .handle_mmio = handle_mmio_iidr,
697 },
698 {
699 .base = GICR_WAKER,
700 .len = 0x04,
701 .bits_per_irq = 0,
702 .handle_mmio = handle_mmio_raz_wi,
703 },
704 {
705 .base = GICR_IDREGS,
706 .len = 0x30,
707 .bits_per_irq = 0,
708 .handle_mmio = handle_mmio_idregs,
709 },
710 {},
711};
712
713/* 711/*
714 * This function splits accesses between the distributor and the two 712 * This function splits accesses between the distributor and the two
715 * redistributor parts (private/SPI). As each redistributor is accessible 713 * redistributor parts (private/SPI). As each redistributor is accessible
@@ -726,7 +724,6 @@ static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
726 unsigned long rdbase = dist->vgic_redist_base; 724 unsigned long rdbase = dist->vgic_redist_base;
727 int nrcpus = atomic_read(&vcpu->kvm->online_vcpus); 725 int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
728 int vcpu_id; 726 int vcpu_id;
729 const struct vgic_io_range *mmio_range;
730 727
731 if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) { 728 if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
732 return vgic_handle_mmio_range(vcpu, run, mmio, 729 return vgic_handle_mmio_range(vcpu, run, mmio,
@@ -741,13 +738,8 @@ static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
741 rdbase += (vcpu_id * GIC_V3_REDIST_SIZE); 738 rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
742 mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id); 739 mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
743 740
744 if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) { 741 return vgic_handle_mmio_range(vcpu, run, mmio, vgic_redist_ranges,
745 rdbase += SGI_BASE_OFFSET; 742 rdbase);
746 mmio_range = vgic_redist_sgi_ranges;
747 } else {
748 mmio_range = vgic_redist_ranges;
749 }
750 return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
751} 743}
752 744
753static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq) 745static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)