author	Andre Przywara <andre.przywara@arm.com>	2014-06-11 08:11:49 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2015-01-20 12:25:29 -0500
commit	d97f683d0f4b2e63e68869f81ba2ce4ccbb6e5d8 (patch)
tree	47ab5c6644d35a6a5feca2dad59b75be173d796a /virt/kvm
parent	2f5fa41a7a7f47f3109a6596b0ec96258dbf06e6 (diff)
arm/arm64: KVM: refactor MMIO accessors
The MMIO accessors for GICD_I[CS]ENABLER, GICD_I[CS]PENDR and GICD_ICFGR
behave very similarly for GICv2 and GICv3, although the way the affected
VCPU is determined differs. Since we need them to access the registers
from three different places in the future, we factor out a generic,
backend-facing implementation and use small wrappers in the current GICv2
emulation. This will ease adding GICv3 accessors later.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
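For illustration, a later GICv3 frontend could reuse the new backend-facing
helper with nothing more than a thin wrapper of its own, much like the GICv2
wrappers in the diff below. The function here is a hypothetical sketch: its
name and any GICv3-specific VCPU resolution are assumptions, not part of this
patch.

	static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
						    struct kvm_exit_mmio *mmio,
						    phys_addr_t offset)
	{
		/* Hypothetical GICv3 wrapper: determine the affected VCPU as
		 * the GICv3 distributor requires, then defer to the shared
		 * backend introduced by this patch. */
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
	}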
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/vgic.c	126
1 file changed, 74 insertions(+), 52 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2126bf5b0035..7589e2c82db2 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -492,64 +492,66 @@ static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
 	return false;
 }
 
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
-				       struct kvm_exit_mmio *mmio,
-				       phys_addr_t offset)
+static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+				   phys_addr_t offset, int vcpu_id, int access)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	u32 *reg;
+	int mode = ACCESS_READ_VALUE | access;
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
+
+	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
-		vgic_update_state(vcpu->kvm);
+		if (access & ACCESS_WRITE_CLEARBIT) {
+			if (offset < 4) /* Force SGI enabled */
+				*reg |= 0xffff;
+			vgic_retire_disabled_irqs(target_vcpu);
+		}
+		vgic_update_state(kvm);
 		return true;
 	}
 
 	return false;
 }
 
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
+}
+
 static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
 					 struct kvm_exit_mmio *mmio,
 					 phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-	if (mmio->is_write) {
-		if (offset < 4) /* Force SGI enabled */
-			*reg |= 0xffff;
-		vgic_retire_disabled_irqs(vcpu);
-		vgic_update_state(vcpu->kvm);
-		return true;
-	}
-
-	return false;
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
 }
 
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
-					struct kvm_exit_mmio *mmio,
-					phys_addr_t offset)
+static bool vgic_handle_set_pending_reg(struct kvm *kvm,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset, int vcpu_id)
 {
 	u32 *reg, orig;
 	u32 level_mask;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
+	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
 	level_mask = (~(*reg));
 
 	/* Mark both level and edge triggered irqs as pending */
-	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 	orig = *reg;
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	vgic_reg_access(mmio, reg, offset, mode);
 
 	if (mmio->is_write) {
 		/* Set the soft-pending flag only for level-triggered irqs */
 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
-					  vcpu->vcpu_id, offset);
-		vgic_reg_access(mmio, reg, offset,
-				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+					  vcpu_id, offset);
+		vgic_reg_access(mmio, reg, offset, mode);
 		*reg &= level_mask;
 
 		/* Ignore writes to SGIs */
@@ -558,31 +560,30 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
 			*reg |= orig & 0xffff;
 		}
 
-		vgic_update_state(vcpu->kvm);
+		vgic_update_state(kvm);
 		return true;
 	}
 
 	return false;
 }
 
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+static bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 					  struct kvm_exit_mmio *mmio,
-					  phys_addr_t offset)
+					  phys_addr_t offset, int vcpu_id)
 {
 	u32 *level_active;
 	u32 *reg, orig;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
+	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 	orig = *reg;
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
 		/* Re-set level triggered level-active interrupts */
 		level_active = vgic_bitmap_get_reg(&dist->irq_level,
-						   vcpu->vcpu_id, offset);
-		reg = vgic_bitmap_get_reg(&dist->irq_pending,
-					  vcpu->vcpu_id, offset);
+						   vcpu_id, offset);
+		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 		*reg |= *level_active;
 
 		/* Ignore writes to SGIs */
@@ -593,17 +594,31 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
 
 		/* Clear soft-pending flags */
 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
-					  vcpu->vcpu_id, offset);
-		vgic_reg_access(mmio, reg, offset,
-				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+					  vcpu_id, offset);
+		vgic_reg_access(mmio, reg, offset, mode);
 
-		vgic_update_state(vcpu->kvm);
+		vgic_update_state(kvm);
 		return true;
 	}
-
 	return false;
 }
 
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset)
+{
+	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+					   vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+					  struct kvm_exit_mmio *mmio,
+					  phys_addr_t offset)
+{
+	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+					     vcpu->vcpu_id);
+}
+
 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
 				     struct kvm_exit_mmio *mmio,
 				     phys_addr_t offset)
@@ -726,14 +741,10 @@ static u16 vgic_cfg_compress(u32 val)
  * LSB is always 0. As such, we only keep the upper bit, and use the
  * two above functions to compress/expand the bits
  */
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
-				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+				phys_addr_t offset)
 {
 	u32 val;
-	u32 *reg;
-
-	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-				  vcpu->vcpu_id, offset >> 1);
 
 	if (offset & 4)
 		val = *reg >> 16;
@@ -762,6 +773,17 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 *reg;
+
+	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+				  vcpu->vcpu_id, offset >> 1);
+
+	return vgic_handle_cfg_reg(reg, mmio, offset);
+}
+
 static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 				struct kvm_exit_mmio *mmio, phys_addr_t offset)
 {
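As a standalone sketch of the register semantics the shared helpers handle:
GICD_ISENABLER words are write-1-to-set, GICD_ICENABLER words are
write-1-to-clear, and a clear access to the first word must leave the SGI bits
set (the "Force SGI enabled" fixup above). The ACCESS_* values and
apply_write() below are illustrative stand-ins, not the kernel's definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define ACCESS_WRITE_SETBIT	(1 << 1)	/* illustrative value */
	#define ACCESS_WRITE_CLEARBIT	(1 << 2)	/* illustrative value */

	/* Apply a guest write to one 32-bit enable word, composing the
	 * write mode the way the refactored helpers do. */
	static void apply_write(uint32_t *reg, uint32_t wval, int access,
				unsigned int offset)
	{
		if (access & ACCESS_WRITE_SETBIT)
			*reg |= wval;		/* W1S: set the written 1s */
		if (access & ACCESS_WRITE_CLEARBIT) {
			*reg &= ~wval;		/* W1C: clear the written 1s */
			if (offset < 4)		/* word 0 covers SGIs/PPIs */
				*reg |= 0xffff;	/* SGIs can never be disabled */
		}
	}

	int main(void)
	{
		uint32_t enable = 0xffffffff;

		/* A guest tries to disable SGIs 0-3 and PPIs 16-19 at once. */
		apply_write(&enable, 0x000f000f, ACCESS_WRITE_CLEARBIT, 0);
		/* Prints fff0ffff: the PPIs go down, the SGI bits survive. */
		printf("enable = %08x\n", (unsigned int)enable);
		return 0;
	}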