author    Christoffer Dall <christoffer.dall@linaro.org>    2013-10-25 16:17:31 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>    2013-12-21 13:01:39 -0500
commit    c07a0191ef2de1f9510f12d1f88e3b0b5cd8d66f (patch)
tree      0f598e9d286a8d48d0c4aeeb43163cb7fff9825f /virt
parent    e9b152cb957cb194437f37e79f0f3c9d34fe53d6 (diff)
KVM: arm-vgic: Add vgic reg access from dev attr
Add infrastructure to handle distributor and cpu interface register
accesses through the KVM_{GET/SET}_DEVICE_ATTR interface by adding the
KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS groups
and defining the semantics of the attr field to be the MMIO offset as
specified in the GICv2 specs.

Missing register accesses and other changes to individual register access
functions needed to support save/restore of the VGIC state are added in
subsequent patches.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt')
-rw-r--r--    virt/kvm/arm/vgic.c | 178
1 file changed, 178 insertions(+), 0 deletions(-)
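For reference, a user-space VMM drives this interface through the generic
KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR ioctls on the VGIC device fd
(obtained via KVM_CREATE_DEVICE). The sketch below is illustrative only and
not part of this patch; the vgic_fd variable, the helper name, and the choice
of GICD_ISENABLER0 (offset 0x100 in the GICv2 spec) as the accessed register
are assumptions.

/* Illustrative sketch (not from this patch): read one distributor
 * register of VCPU 'cpuid' through the new DIST_REGS group.
 * 'vgic_fd' is assumed to be a VGIC device fd from KVM_CREATE_DEVICE. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_dist_reg_read(int vgic_fd, uint32_t cpuid,
			      uint64_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		/* attr encodes the target VCPU in the upper bits and the
		 * GICv2 MMIO offset in the lower bits */
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

A restore path would use the same attr encoding with KVM_SET_DEVICE_ATTR to
write the value back, e.g. vgic_dist_reg_read(vgic_fd, 0, 0x100, &val) to
read GICD_ISENABLER0 as seen by VCPU 0.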
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index e2596f618281..88599b585362 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -589,6 +589,20 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset)
+{
+	return false;
+}
+
+static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio,
+				phys_addr_t offset)
+{
+	return false;
+}
+
 /*
  * I would have liked to use the kvm_bus_io_*() API instead, but it
  * cannot cope with banked registers (only the VM pointer is passed
@@ -663,6 +677,16 @@ static const struct mmio_range vgic_dist_ranges[] = {
 		.len = 4,
 		.handle_mmio = handle_mmio_sgi_reg,
 	},
+	{
+		.base = GIC_DIST_SGI_PENDING_CLEAR,
+		.len = VGIC_NR_SGIS,
+		.handle_mmio = handle_mmio_sgi_clear,
+	},
+	{
+		.base = GIC_DIST_SGI_PENDING_SET,
+		.len = VGIC_NR_SGIS,
+		.handle_mmio = handle_mmio_sgi_set,
+	},
 	{}
 };
 
@@ -1557,6 +1581,114 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 	return r;
 }
 
+static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
+				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	return true;
+}
+
+static const struct mmio_range vgic_cpu_ranges[] = {
+	{
+		.base = GIC_CPU_CTRL,
+		.len = 12,
+		.handle_mmio = handle_cpu_mmio_misc,
+	},
+	{
+		.base = GIC_CPU_ALIAS_BINPOINT,
+		.len = 4,
+		.handle_mmio = handle_cpu_mmio_misc,
+	},
+	{
+		.base = GIC_CPU_ACTIVEPRIO,
+		.len = 16,
+		.handle_mmio = handle_cpu_mmio_misc,
+	},
+	{
+		.base = GIC_CPU_IDENT,
+		.len = 4,
+		.handle_mmio = handle_cpu_mmio_misc,
+	},
+};
+
+static int vgic_attr_regs_access(struct kvm_device *dev,
+				 struct kvm_device_attr *attr,
+				 u32 *reg, bool is_write)
+{
+	const struct mmio_range *r = NULL, *ranges;
+	phys_addr_t offset;
+	int ret, cpuid, c;
+	struct kvm_vcpu *vcpu, *tmp_vcpu;
+	struct vgic_dist *vgic;
+	struct kvm_exit_mmio mmio;
+
+	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+	mutex_lock(&dev->kvm->lock);
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	vgic = &dev->kvm->arch.vgic;
+
+	mmio.len = 4;
+	mmio.is_write = is_write;
+	if (is_write)
+		mmio_data_write(&mmio, ~0, *reg);
+	switch (attr->group) {
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		mmio.phys_addr = vgic->vgic_dist_base + offset;
+		ranges = vgic_dist_ranges;
+		break;
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		mmio.phys_addr = vgic->vgic_cpu_base + offset;
+		ranges = vgic_cpu_ranges;
+		break;
+	default:
+		BUG();
+	}
+	r = find_matching_range(ranges, &mmio, offset);
+
+	if (unlikely(!r || !r->handle_mmio)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+
+	spin_lock(&vgic->lock);
+
+	/*
+	 * Ensure that no other VCPU is running by checking the vcpu->cpu
+	 * field. If no other VCPUs are running we can safely access the VGIC
+	 * state, because even if another VCPU runs after this point, that
+	 * VCPU will not touch the vgic state, because it will block on
+	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+		if (unlikely(tmp_vcpu->cpu != -1)) {
+			ret = -EBUSY;
+			goto out_vgic_unlock;
+		}
+	}
+
+	offset -= r->base;
+	r->handle_mmio(vcpu, &mmio, offset);
+
+	if (!is_write)
+		*reg = mmio_data_read(&mmio, ~0);
+
+	ret = 0;
+out_vgic_unlock:
+	spin_unlock(&vgic->lock);
+out:
+	mutex_unlock(&dev->kvm->lock);
+	return ret;
+}
+
 static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
 	int r;
@@ -1573,6 +1705,18 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
 		return (r == -ENODEV) ? -ENXIO : r;
 	}
+
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg;
+
+		if (get_user(reg, uaddr))
+			return -EFAULT;
+
+		return vgic_attr_regs_access(dev, attr, &reg, true);
+	}
+
 	}
 
 	return -ENXIO;
@@ -1594,14 +1738,42 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
 		if (copy_to_user(uaddr, &addr, sizeof(addr)))
 			return -EFAULT;
+		break;
+	}
+
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+		u32 reg = 0;
+
+		r = vgic_attr_regs_access(dev, attr, &reg, false);
+		if (r)
+			return r;
+		r = put_user(reg, uaddr);
+		break;
 	}
+
 	}
 
 	return r;
 }
 
+static int vgic_has_attr_regs(const struct mmio_range *ranges,
+			      phys_addr_t offset)
+{
+	struct kvm_exit_mmio dev_attr_mmio;
+
+	dev_attr_mmio.len = 4;
+	if (find_matching_range(ranges, &dev_attr_mmio, offset))
+		return 0;
+	else
+		return -ENXIO;
+}
+
 static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
+	phys_addr_t offset;
+
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_ADDR:
 		switch (attr->attr) {
@@ -1610,6 +1782,12 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 			return 0;
 		}
 		break;
+	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+		return vgic_has_attr_regs(vgic_dist_ranges, offset);
+	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
 	}
 	return -ENXIO;
 }
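Tying the pieces together: vgic_attr_regs_access() only services an access
while no VCPU is currently running (every tmp_vcpu->cpu is -1) and returns
-EBUSY otherwise, so a save path has to stop the guest first. A hypothetical
user-space snapshot loop over the CPU interface is sketched below; the helper
name and arguments are assumptions, it reuses the includes from the earlier
sketch, and offset 0x0 is GICC_CTLR per the GICv2 spec.

/* Hypothetical save loop (not from this patch): read GICC_CTLR for each
 * VCPU through the new CPU_REGS group. All VCPUs must be stopped, or the
 * kernel side fails the ioctl with -EBUSY. */
static int vgic_cpu_save_ctlr(int vgic_fd, int nr_vcpus, uint32_t *ctlr)
{
	int c;

	for (c = 0; c < nr_vcpus; c++) {
		struct kvm_device_attr attr = {
			.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
			.attr  = ((uint64_t)c << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x0,
			.addr  = (uint64_t)(unsigned long)&ctlr[c],
		};

		if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
			return -1;	/* e.g. a VCPU was still running */
	}
	return 0;
}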