author:    Marc Zyngier <marc.zyngier@arm.com>  2014-02-04 12:48:10 -0500
committer: Christoffer Dall <christoffer.dall@linaro.org>  2014-07-11 07:57:33 -0400
commit:    beee38b9d0c0ea6cf2a7f35c3108f7d8281d4545
tree:      49da6608d9b99da17c297eecbf36ecc43ce525c9 /virt/kvm
parent:    909d9b5025f149af6cfc304a76ad6218e6622cc0
KVM: ARM: vgic: abstract VMCR access
Instead of directly messing with the GICH_VMCR bits for the CPU
interface save/restore code, add accessors that encode/decode the
entire set of registers exposed by VMCR.

Not the most efficient thing, but given that this code is only used
by the save/restore code, performance is far from critical.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/arm/vgic.c   69
1 file changed, 53 insertions(+), 16 deletions(-)
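Note: the diff below is limited to 'virt/kvm', so the definition of struct vgic_vmcr and the new get_vmcr/set_vmcr hooks in struct vgic_ops (added by the same patch outside this directory, presumably in include/kvm/arm_vgic.h) do not appear in it. As a rough sketch inferred from the patch, with the field names taken from the code and the u32 types assumed:

/* Sketch only: inferred from the patch below, not the exact upstream
 * definitions. Field names match the code; the u32 types are assumed.
 */
struct vgic_vmcr {
	u32	ctlr;	/* CPU interface control bits */
	u32	abpr;	/* aliased binary point */
	u32	bpr;	/* binary point */
	u32	pmr;	/* priority mask */
};

struct vgic_ops {
	/* ... existing hooks: get_lr, set_lr, ... */
	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
};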
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 6d618e0b08a1..5c706393956d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -100,8 +100,10 @@ static void vgic_kick_vcpus(struct kvm *kvm);
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static u32 vgic_nr_lr;
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 
+static u32 vgic_nr_lr;
 static unsigned int vgic_maint_irq;
 
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
@@ -1073,6 +1075,28 @@ static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
 	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
 }
 
+static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
+	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
+	vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
+}
+
+static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr;
+
+	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
+	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
+	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
+
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
 static const struct vgic_ops vgic_ops = {
 	.get_lr = vgic_v2_get_lr,
 	.set_lr = vgic_v2_set_lr,
@@ -1082,6 +1106,8 @@ static const struct vgic_ops vgic_ops = {
 	.get_interrupt_status = vgic_v2_get_interrupt_status,
 	.enable_underflow = vgic_v2_enable_underflow,
 	.disable_underflow = vgic_v2_disable_underflow,
+	.get_vmcr = vgic_v2_get_vmcr,
+	.set_vmcr = vgic_v2_set_vmcr,
 };
 
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -1126,6 +1152,16 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
 	vgic_ops.disable_underflow(vcpu);
 }
 
+static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	vgic_ops.get_vmcr(vcpu, vmcr);
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	vgic_ops.set_vmcr(vcpu, vmcr);
+}
+
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1879,39 +1915,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
 				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u32 reg, mask = 0, shift = 0;
 	bool updated = false;
+	struct vgic_vmcr vmcr;
+	u32 *vmcr_field;
+	u32 reg;
+
+	vgic_get_vmcr(vcpu, &vmcr);
 
 	switch (offset & ~0x3) {
 	case GIC_CPU_CTRL:
-		mask = GICH_VMCR_CTRL_MASK;
-		shift = GICH_VMCR_CTRL_SHIFT;
+		vmcr_field = &vmcr.ctlr;
 		break;
 	case GIC_CPU_PRIMASK:
-		mask = GICH_VMCR_PRIMASK_MASK;
-		shift = GICH_VMCR_PRIMASK_SHIFT;
+		vmcr_field = &vmcr.pmr;
 		break;
 	case GIC_CPU_BINPOINT:
-		mask = GICH_VMCR_BINPOINT_MASK;
-		shift = GICH_VMCR_BINPOINT_SHIFT;
+		vmcr_field = &vmcr.bpr;
 		break;
 	case GIC_CPU_ALIAS_BINPOINT:
-		mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
-		shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+		vmcr_field = &vmcr.abpr;
 		break;
+	default:
+		BUG();
 	}
 
 	if (!mmio->is_write) {
-		reg = (vgic_cpu->vgic_v2.vgic_vmcr & mask) >> shift;
+		reg = *vmcr_field;
 		mmio_data_write(mmio, ~0, reg);
 	} else {
 		reg = mmio_data_read(mmio, ~0);
-		reg = (reg << shift) & mask;
-		if (reg != (vgic_cpu->vgic_v2.vgic_vmcr & mask))
+		if (reg != *vmcr_field) {
+			*vmcr_field = reg;
+			vgic_set_vmcr(vcpu, &vmcr);
 			updated = true;
-		vgic_cpu->vgic_v2.vgic_vmcr &= ~mask;
-		vgic_cpu->vgic_v2.vgic_vmcr |= reg;
+		}
 	}
 	return updated;
 }
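For illustration, a hypothetical caller (not part of this patch) showing how the save/restore path mentioned in the commit message is expected to go through the new accessors instead of poking GICH_VMCR bits directly:

/* Hypothetical example only; the function name is made up. */
static void example_vmcr_save_restore(struct kvm_vcpu *vcpu)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);	/* decode GICH_VMCR into fields */
	/* ... copy vmcr.ctlr/abpr/bpr/pmr to or from the saved state ... */
	vgic_set_vmcr(vcpu, &vmcr);	/* re-encode the fields into GICH_VMCR */
}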