author		Paolo Bonzini <pbonzini@redhat.com>	2014-08-05 03:47:45 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-08-05 03:47:45 -0400
commit		5d5768660539b6d0da0d46113ffb0676540579a6 (patch)
tree		c0cd7a918fc7371c5f5b1b9b04c6358966850277 /virt
parent		5167d09ffad5b16b574d35ce3047ed34caf1e837 (diff)
parent		dedf97e8ff2c7513b1370e36b56e08b6bd0f0290 (diff)
Merge tag 'kvm-arm-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm
KVM/ARM New features for 3.17 include:
 - Fixes and code refactoring for stage2 kvm MMU unmap_range
 - Support unmapping IPAs on deleting memslots for arm and arm64
 - Support MMIO mappings in stage2 faults
 - KVM VGIC v2 emulation on GICv3 hardware
 - Big-Endian support for arm/arm64 (guest and host)
 - Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

Conflicts:
	virt/kvm/arm/vgic.c [last minute cherry-pick from 3.17 to 3.16]
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic-v2.c	265
-rw-r--r--	virt/kvm/arm/vgic-v3.c	247
-rw-r--r--	virt/kvm/arm/vgic.c	389
3 files changed, 725 insertions(+), 176 deletions(-)
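
The common thread in the three files below is indirection through a table of function pointers: the generic vgic code now calls the hardware via struct vgic_ops, chosen once at probe time, which is what allows a virtual GICv2 CPU interface to be backed by either GICv2 or GICv3 hardware. Here is a minimal, self-contained userspace sketch of that dispatch pattern; the demo_* names are purely illustrative stand-ins, not the kernel's own types:

/* Ops-table dispatch: common code calls a backend through function
 * pointers selected once at "probe" time, as the vgic code below does. */
#include <stdio.h>

struct demo_lr { int irq; int state; };

struct demo_ops {
	const char *name;
	struct demo_lr (*get_lr)(int lr);
	void (*set_lr)(int lr, struct demo_lr val);
};

static struct demo_lr regs[4];

static struct demo_lr v2_get_lr(int lr) { return regs[lr]; }
static void v2_set_lr(int lr, struct demo_lr val) { regs[lr] = val; }

static const struct demo_ops demo_v2_ops = {
	.name = "demo-v2", .get_lr = v2_get_lr, .set_lr = v2_set_lr,
};

static const struct demo_ops *ops;	/* like the file-scope vgic_ops */

int main(void)
{
	ops = &demo_v2_ops;		/* "probe" picks the backend once */
	ops->set_lr(0, (struct demo_lr){ .irq = 27, .state = 1 });
	printf("%s: LR0 irq=%d state=%d\n", ops->name,
	       ops->get_lr(0).irq, ops->get_lr(0).state);
	return 0;
}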
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
new file mode 100644
index 000000000000..01124ef3690a
--- /dev/null
+++ b/virt/kvm/arm/vgic-v2.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+	struct vgic_lr lr_desc;
+	u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
+
+	lr_desc.irq = val & GICH_LR_VIRTUALID;
+	if (lr_desc.irq <= 15)
+		lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+	else
+		lr_desc.source = 0;
+	lr_desc.state = 0;
+
+	if (val & GICH_LR_PENDING_BIT)
+		lr_desc.state |= LR_STATE_PENDING;
+	if (val & GICH_LR_ACTIVE_BIT)
+		lr_desc.state |= LR_STATE_ACTIVE;
+	if (val & GICH_LR_EOI)
+		lr_desc.state |= LR_EOI_INT;
+
+	return lr_desc;
+}
+
+static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
+			   struct vgic_lr lr_desc)
+{
+	u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
+
+	if (lr_desc.state & LR_STATE_PENDING)
+		lr_val |= GICH_LR_PENDING_BIT;
+	if (lr_desc.state & LR_STATE_ACTIVE)
+		lr_val |= GICH_LR_ACTIVE_BIT;
+	if (lr_desc.state & LR_EOI_INT)
+		lr_val |= GICH_LR_EOI;
+
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
+}
+
+static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+				  struct vgic_lr lr_desc)
+{
+	if (!(lr_desc.state & LR_STATE_MASK))
+		set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+}
+
+static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+	u64 val;
+
+#if BITS_PER_LONG == 64
+	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
+	val <<= 32;
+	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
+#else
+	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
+#endif
+	return val;
+}
+
+static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
+{
+	u64 val;
+
+#if BITS_PER_LONG == 64
+	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
+	val <<= 32;
+	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
+#else
+	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
+#endif
+	return val;
+}
+
+static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
+	u32 ret = 0;
+
+	if (misr & GICH_MISR_EOI)
+		ret |= INT_STATUS_EOI;
+	if (misr & GICH_MISR_U)
+		ret |= INT_STATUS_UNDERFLOW;
+
+	return ret;
+}
+
+static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
+}
+
+static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
+}
+
+static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
+	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
+	vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
+}
+
+static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr;
+
+	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
+	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
+	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
+
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+static void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * By forcing VMCR to zero, the GIC will restore the binary
+	 * points to their reset values. Anything else resets to zero
+	 * anyway.
+	 */
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+
+	/* Get the show on the road... */
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v2_ops = {
+	.get_lr = vgic_v2_get_lr,
+	.set_lr = vgic_v2_set_lr,
+	.sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
+	.get_elrsr = vgic_v2_get_elrsr,
+	.get_eisr = vgic_v2_get_eisr,
+	.get_interrupt_status = vgic_v2_get_interrupt_status,
+	.enable_underflow = vgic_v2_enable_underflow,
+	.disable_underflow = vgic_v2_disable_underflow,
+	.get_vmcr = vgic_v2_get_vmcr,
+	.set_vmcr = vgic_v2_set_vmcr,
+	.enable = vgic_v2_enable,
+};
+
+static struct vgic_params vgic_v2_params;
+
+/**
+ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
+ * @node:	pointer to the DT node
+ * @ops:	address of a pointer to the GICv2 operations
+ * @params:	address of a pointer to HW-specific parameters
+ *
+ * Returns 0 if a GICv2 has been found, with the low level operations
+ * in *ops and the HW parameters in *params. Returns an error code
+ * otherwise.
+ */
+int vgic_v2_probe(struct device_node *vgic_node,
+		  const struct vgic_ops **ops,
+		  const struct vgic_params **params)
+{
+	int ret;
+	struct resource vctrl_res;
+	struct resource vcpu_res;
+	struct vgic_params *vgic = &vgic_v2_params;
+
+	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+	if (!vgic->maint_irq) {
+		kvm_err("error getting vgic maintenance irq from DT\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
+	if (ret) {
+		kvm_err("Cannot obtain GICH resource\n");
+		goto out;
+	}
+
+	vgic->vctrl_base = of_iomap(vgic_node, 2);
+	if (!vgic->vctrl_base) {
+		kvm_err("Cannot ioremap GICH\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
+	vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
+
+	ret = create_hyp_io_mappings(vgic->vctrl_base,
+				     vgic->vctrl_base + resource_size(&vctrl_res),
+				     vctrl_res.start);
+	if (ret) {
+		kvm_err("Cannot map VCTRL into hyp\n");
+		goto out_unmap;
+	}
+
+	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
+		kvm_err("Cannot obtain GICV resource\n");
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	if (!PAGE_ALIGNED(vcpu_res.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)vcpu_res.start);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&vcpu_res),
+			PAGE_SIZE);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	vgic->vcpu_base = vcpu_res.start;
+
+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vctrl_res.start, vgic->maint_irq);
+
+	vgic->type = VGIC_V2;
+	*ops = &vgic_v2_ops;
+	*params = vgic;
+	goto out;
+
+out_unmap:
+	iounmap(vgic->vctrl_base);
+out:
+	of_node_put(vgic_node);
+	return ret;
+}
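
For reference, here is a standalone round-trip of the list-register packing that vgic_v2_get_lr()/vgic_v2_set_lr() above implement. The bit positions (virtual ID in bits [9:0], source CPUID in bits [12:10], EOI maintenance in bit 19, pending/active in bits 28/29) follow the GICv2 architecture; the LR_* macros below are local stand-ins for the kernel's GICH_LR_* definitions:

#include <assert.h>
#include <stdint.h>

#define LR_VIRTUALID	(0x3ffU << 0)	/* virtual interrupt ID */
#define LR_CPUID_SHIFT	10		/* source CPU for SGIs */
#define LR_EOI		(1U << 19)	/* EOI maintenance interrupt */
#define LR_PENDING	(1U << 28)
#define LR_ACTIVE	(1U << 29)

static uint32_t pack_lr(uint32_t irq, uint32_t src, int pending)
{
	uint32_t val = (src << LR_CPUID_SHIFT) | (irq & LR_VIRTUALID);

	if (pending)
		val |= LR_PENDING;
	return val;
}

int main(void)
{
	/* SGI 3 from CPU 2, pending: the kind of value an LR holds */
	uint32_t val = pack_lr(3, 2, 1);

	assert((val & LR_VIRTUALID) == 3);
	assert(((val >> LR_CPUID_SHIFT) & 0x7) == 2);
	assert(val & LR_PENDING);
	assert(!(val & LR_ACTIVE));
	return 0;
}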
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
new file mode 100644
index 000000000000..1c2c8eef0599
--- /dev/null
+++ b/virt/kvm/arm/vgic-v3.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2013 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
+#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
+
+/*
+ * LRs are stored in reverse order in memory. Make sure we index them
+ * correctly.
+ */
+#define LR_INDEX(lr)			(VGIC_V3_MAX_LRS - 1 - lr)
+
+static u32 ich_vtr_el2;
+
+static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+	struct vgic_lr lr_desc;
+	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
+
+	lr_desc.irq = val & GICH_LR_VIRTUALID;
+	if (lr_desc.irq <= 15)
+		lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
+	else
+		lr_desc.source = 0;
+	lr_desc.state = 0;
+
+	if (val & ICH_LR_PENDING_BIT)
+		lr_desc.state |= LR_STATE_PENDING;
+	if (val & ICH_LR_ACTIVE_BIT)
+		lr_desc.state |= LR_STATE_ACTIVE;
+	if (val & ICH_LR_EOI)
+		lr_desc.state |= LR_EOI_INT;
+
+	return lr_desc;
+}
+
+static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
+			   struct vgic_lr lr_desc)
+{
+	u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) |
+		      lr_desc.irq);
+
+	if (lr_desc.state & LR_STATE_PENDING)
+		lr_val |= ICH_LR_PENDING_BIT;
+	if (lr_desc.state & LR_STATE_ACTIVE)
+		lr_val |= ICH_LR_ACTIVE_BIT;
+	if (lr_desc.state & LR_EOI_INT)
+		lr_val |= ICH_LR_EOI;
+
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
+}
+
+static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+				  struct vgic_lr lr_desc)
+{
+	if (!(lr_desc.state & LR_STATE_MASK))
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+}
+
+static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
+}
+
+static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
+}
+
+static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
+{
+	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
+	u32 ret = 0;
+
+	if (misr & ICH_MISR_EOI)
+		ret |= INT_STATUS_EOI;
+	if (misr & ICH_MISR_U)
+		ret |= INT_STATUS_UNDERFLOW;
+
+	return ret;
+}
+
+static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
+
+	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
+	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+}
+
+static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE;
+}
+
+static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE;
+}
+
+static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+	u32 vmcr;
+
+	vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
+	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
+	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
+	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
+
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
+}
+
+static void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * By forcing VMCR to zero, the GIC will restore the binary
+	 * points to their reset values. Anything else resets to zero
+	 * anyway.
+	 */
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0;
+
+	/* Get the show on the road... */
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN;
+}
+
+static const struct vgic_ops vgic_v3_ops = {
+	.get_lr = vgic_v3_get_lr,
+	.set_lr = vgic_v3_set_lr,
+	.sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
+	.get_elrsr = vgic_v3_get_elrsr,
+	.get_eisr = vgic_v3_get_eisr,
+	.get_interrupt_status = vgic_v3_get_interrupt_status,
+	.enable_underflow = vgic_v3_enable_underflow,
+	.disable_underflow = vgic_v3_disable_underflow,
+	.get_vmcr = vgic_v3_get_vmcr,
+	.set_vmcr = vgic_v3_set_vmcr,
+	.enable = vgic_v3_enable,
+};
+
+static struct vgic_params vgic_v3_params;
+
+/**
+ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
+ * @node:	pointer to the DT node
+ * @ops:	address of a pointer to the GICv3 operations
+ * @params:	address of a pointer to HW-specific parameters
+ *
+ * Returns 0 if a GICv3 has been found, with the low level operations
+ * in *ops and the HW parameters in *params. Returns an error code
+ * otherwise.
+ */
+int vgic_v3_probe(struct device_node *vgic_node,
+		  const struct vgic_ops **ops,
+		  const struct vgic_params **params)
+{
+	int ret = 0;
+	u32 gicv_idx;
+	struct resource vcpu_res;
+	struct vgic_params *vgic = &vgic_v3_params;
+
+	vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0);
+	if (!vgic->maint_irq) {
+		kvm_err("error getting vgic maintenance irq from DT\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+
+	/*
+	 * The ListRegs field is 5 bits, but there is an architectural
+	 * maximum of 16 list registers. Just ignore bit 4...
+	 */
+	vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
+
+	if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx))
+		gicv_idx = 1;
+
+	gicv_idx += 3; /* Also skip GICD, GICC, GICH */
+	if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) {
+		kvm_err("Cannot obtain GICV region\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!PAGE_ALIGNED(vcpu_res.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)vcpu_res.start);
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&vcpu_res),
+			PAGE_SIZE);
+		ret = -ENXIO;
+		goto out;
+	}
+
+	vgic->vcpu_base = vcpu_res.start;
+	vgic->vctrl_base = NULL;
+	vgic->type = VGIC_V3;
+
+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vcpu_res.start, vgic->maint_irq);
+
+	*ops = &vgic_v3_ops;
+	*params = vgic;
+
+out:
+	of_node_put(vgic_node);
+	return ret;
+}
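
The LR_INDEX() macro above compensates for the save/restore code storing list registers in reverse order in memory. A small self-checking sketch of that mapping, assuming the architectural maximum of 16 LRs (MAX_LRS is a stand-in for the kernel's VGIC_V3_MAX_LRS):

#include <assert.h>

#define MAX_LRS		16			/* stand-in for VGIC_V3_MAX_LRS */
#define LR_INDEX(lr)	(MAX_LRS - 1 - (lr))

int main(void)
{
	/* logical LR 0 lives in the last array slot, LR 15 in the first */
	assert(LR_INDEX(0) == 15);
	assert(LR_INDEX(15) == 0);

	/* the mapping is its own inverse, so get/set stay consistent */
	for (int lr = 0; lr < MAX_LRS; lr++)
		assert(LR_INDEX(LR_INDEX(lr)) == lr);
	return 0;
}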
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 476d3bf540a8..73eba793b17f 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -76,14 +76,6 @@
 #define IMPLEMENTER_ARM		0x43b
 #define GICC_ARCH_VERSION_V2	0x2
 
-/* Physical address of vgic virtual cpu interface */
-static phys_addr_t vgic_vcpu_base;
-
-/* Virtual control interface base address */
-static void __iomem *vgic_vctrl_base;
-
-static struct device_node *vgic_node;
-
 #define ACCESS_READ_VALUE	(1 << 0)
 #define ACCESS_READ_RAZ		(0 << 0)
 #define ACCESS_READ_MASK(x)	((x) & (1 << 0))
@@ -94,21 +86,46 @@ static struct device_node *vgic_node;
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
 static void vgic_kick_vcpus(struct kvm *kvm);
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
-static u32 vgic_nr_lr;
+static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
+static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 
-static unsigned int vgic_maint_irq;
+static const struct vgic_ops *vgic_ops;
+static const struct vgic_params *vgic;
+
+/*
+ * struct vgic_bitmap contains unions that provide two views of
+ * the same data. In one case it is an array of registers of
+ * u32's, and in the other case it is a bitmap of unsigned
+ * longs.
+ *
+ * This does not work on 64-bit BE systems, because the bitmap access
+ * will store two consecutive 32-bit words with the higher-addressed
+ * register's bits at the lower index and the lower-addressed register's
+ * bits at the higher index.
+ *
+ * Therefore, swizzle the register index when accessing the 32-bit word
+ * registers to access the right register's value.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
+#define REG_OFFSET_SWIZZLE	1
+#else
+#define REG_OFFSET_SWIZZLE	0
+#endif
 
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
 				int cpuid, u32 offset)
 {
 	offset >>= 2;
 	if (!offset)
-		return x->percpu[cpuid].reg;
+		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
 	else
-		return x->shared.reg + offset - 1;
+		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
 }
 
 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
@@ -241,12 +258,12 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
 
 static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
 {
-	return *((u32 *)mmio->data) & mask;
+	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
 }
 
 static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
 {
-	*((u32 *)mmio->data) = value & mask;
+	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
 }
 
 /**
@@ -593,18 +610,6 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
 	return false;
 }
 
-#define LR_CPUID(lr)	\
-	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
-#define LR_IRQID(lr)	\
-	((lr) & GICH_LR_VIRTUALID)
-
-static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
-{
-	clear_bit(lr_nr, vgic_cpu->lr_used);
-	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
-	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
-}
-
 /**
  * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
  * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
@@ -622,13 +627,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	int vcpu_id = vcpu->vcpu_id;
-	int i, irq, source_cpu;
-	u32 *lr;
+	int i;
 
 	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		lr = &vgic_cpu->vgic_lr[i];
-		irq = LR_IRQID(*lr);
-		source_cpu = LR_CPUID(*lr);
+		struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
 		/*
 		 * There are three options for the state bits:
@@ -640,7 +642,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * If the LR holds only an active interrupt (not pending) then
 		 * just leave it alone.
 		 */
-		if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
+		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
 			continue;
 
 		/*
@@ -649,18 +651,19 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * is fine, then we are only setting a few bits that were
 		 * already set.
 		 */
-		vgic_dist_irq_set(vcpu, irq);
-		if (irq < VGIC_NR_SGIS)
-			dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
-		*lr &= ~GICH_LR_PENDING_BIT;
+		vgic_dist_irq_set(vcpu, lr.irq);
+		if (lr.irq < VGIC_NR_SGIS)
+			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
+		lr.state &= ~LR_STATE_PENDING;
+		vgic_set_lr(vcpu, i, lr);
 
 		/*
 		 * If there's no state left on the LR (it could still be
 		 * active), then the LR does not hold any useful info and can
 		 * be marked as free for other use.
 		 */
-		if (!(*lr & GICH_LR_STATE))
-			vgic_retire_lr(i, irq, vgic_cpu);
+		if (!(lr.state & LR_STATE_MASK))
+			vgic_retire_lr(i, lr.irq, vcpu);
 
 		/* Finally update the VGIC state. */
 		vgic_update_state(vcpu->kvm);
@@ -989,8 +992,73 @@ static void vgic_update_state(struct kvm *kvm)
 	}
 }
 
-#define MK_LR_PEND(src, irq)	\
-	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
+static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
+{
+	return vgic_ops->get_lr(vcpu, lr);
+}
+
+static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
+			struct vgic_lr vlr)
+{
+	vgic_ops->set_lr(vcpu, lr, vlr);
+}
+
+static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
+			       struct vgic_lr vlr)
+{
+	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
+}
+
+static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
+{
+	return vgic_ops->get_elrsr(vcpu);
+}
+
+static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
+{
+	return vgic_ops->get_eisr(vcpu);
+}
+
+static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
+{
+	return vgic_ops->get_interrupt_status(vcpu);
+}
+
+static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->enable_underflow(vcpu);
+}
+
+static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->disable_underflow(vcpu);
+}
+
+static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	vgic_ops->get_vmcr(vcpu, vmcr);
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+	vgic_ops->set_vmcr(vcpu, vmcr);
+}
+
+static inline void vgic_enable(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->enable(vcpu);
+}
+
+static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
+
+	vlr.state = 0;
+	vgic_set_lr(vcpu, lr_nr, vlr);
+	clear_bit(lr_nr, vgic_cpu->lr_used);
+	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+}
 
 /*
  * An interrupt may have been disabled after being made pending on the
@@ -1006,13 +1074,13 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	int lr;
 
-	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
-		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
+		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-		if (!vgic_irq_is_enabled(vcpu, irq)) {
-			vgic_retire_lr(lr, irq, vgic_cpu);
-			if (vgic_irq_is_active(vcpu, irq))
-				vgic_irq_clear_active(vcpu, irq);
+		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
+			vgic_retire_lr(lr, vlr.irq, vcpu);
+			if (vgic_irq_is_active(vcpu, vlr.irq))
+				vgic_irq_clear_active(vcpu, vlr.irq);
 		}
 	}
 }
@@ -1024,6 +1092,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_lr vlr;
 	int lr;
 
 	/* Sanitize the input... */
@@ -1036,28 +1105,34 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
 	lr = vgic_cpu->vgic_irq_lr_map[irq];
 
 	/* Do we have an active interrupt for the same CPUID? */
-	if (lr != LR_EMPTY &&
-	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
-		kvm_debug("LR%d piggyback for IRQ%d %x\n",
-			  lr, irq, vgic_cpu->vgic_lr[lr]);
-		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-		return true;
+	if (lr != LR_EMPTY) {
+		vlr = vgic_get_lr(vcpu, lr);
+		if (vlr.source == sgi_source_id) {
+			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
+			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
+			vlr.state |= LR_STATE_PENDING;
+			vgic_set_lr(vcpu, lr, vlr);
+			return true;
+		}
 	}
 
 	/* Try to use another LR for this interrupt */
 	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
-			       vgic_cpu->nr_lr);
-	if (lr >= vgic_cpu->nr_lr)
+			       vgic->nr_lr);
+	if (lr >= vgic->nr_lr)
 		return false;
 
 	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
 	vgic_cpu->vgic_irq_lr_map[irq] = lr;
 	set_bit(lr, vgic_cpu->lr_used);
 
+	vlr.irq = irq;
+	vlr.source = sgi_source_id;
+	vlr.state = LR_STATE_PENDING;
 	if (!vgic_irq_is_edge(vcpu, irq))
-		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
+		vlr.state |= LR_EOI_INT;
+
+	vgic_set_lr(vcpu, lr, vlr);
 
 	return true;
 }
@@ -1155,9 +1230,9 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 epilog:
 	if (overflow) {
-		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
+		vgic_enable_underflow(vcpu);
 	} else {
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+		vgic_disable_underflow(vcpu);
 		/*
 		 * We're about to run this VCPU, and we've consumed
 		 * everything the distributor had in store for
@@ -1170,44 +1245,46 @@ epilog:
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	u32 status = vgic_get_interrupt_status(vcpu);
 	bool level_pending = false;
 
-	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+	kvm_debug("STATUS = %08x\n", status);
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
+	if (status & INT_STATUS_EOI) {
 		/*
 		 * Some level interrupts have been EOIed. Clear their
 		 * active bit.
 		 */
-		int lr, irq;
+		u64 eisr = vgic_get_eisr(vcpu);
+		unsigned long *eisr_ptr = (unsigned long *)&eisr;
+		int lr;
 
-		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
-				 vgic_cpu->nr_lr) {
-			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
+			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-			vgic_irq_clear_active(vcpu, irq);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
+			vgic_irq_clear_active(vcpu, vlr.irq);
+			WARN_ON(vlr.state & LR_STATE_MASK);
+			vlr.state = 0;
+			vgic_set_lr(vcpu, lr, vlr);
 
 			/* Any additional pending interrupt? */
-			if (vgic_dist_irq_is_pending(vcpu, irq)) {
-				vgic_cpu_irq_set(vcpu, irq);
+			if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
+				vgic_cpu_irq_set(vcpu, vlr.irq);
 				level_pending = true;
 			} else {
-				vgic_cpu_irq_clear(vcpu, irq);
+				vgic_cpu_irq_clear(vcpu, vlr.irq);
 			}
 
 			/*
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
 			 */
-			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
-			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
+			vgic_sync_lr_elrsr(vcpu, lr, vlr);
 		}
 	}
 
-	if (vgic_cpu->vgic_misr & GICH_MISR_U)
-		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
+	if (status & INT_STATUS_UNDERFLOW)
+		vgic_disable_underflow(vcpu);
 
 	return level_pending;
 }
@@ -1220,29 +1297,31 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	u64 elrsr;
+	unsigned long *elrsr_ptr;
 	int lr, pending;
 	bool level_pending;
 
 	level_pending = vgic_process_maintenance(vcpu);
+	elrsr = vgic_get_elrsr(vcpu);
+	elrsr_ptr = (unsigned long *)&elrsr;
 
 	/* Clear mappings for empty LRs */
-	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
-			 vgic_cpu->nr_lr) {
-		int irq;
+	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
+		struct vgic_lr vlr;
 
 		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
 			continue;
 
-		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
+		vlr = vgic_get_lr(vcpu, lr);
 
-		BUG_ON(irq >= VGIC_NR_IRQS);
-		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
+		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
+		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
 	}
 
 	/* Check if we still have something up our sleeve... */
-	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
-				      vgic_cpu->nr_lr);
-	if (level_pending || pending < vgic_cpu->nr_lr)
+	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
+	if (level_pending || pending < vgic->nr_lr)
 		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
 }
 
@@ -1432,21 +1511,20 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	}
 
 	/*
-	 * By forcing VMCR to zero, the GIC will restore the binary
-	 * points to their reset values. Anything else resets to zero
-	 * anyway.
+	 * Store the number of LRs per vcpu, so we don't have to go
+	 * all the way to the distributor structure to find out. Only
+	 * assembly code should use this one.
 	 */
-	vgic_cpu->vgic_vmcr = 0;
+	vgic_cpu->nr_lr = vgic->nr_lr;
 
-	vgic_cpu->nr_lr = vgic_nr_lr;
-	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
+	vgic_enable(vcpu);
 
 	return 0;
 }
 
 static void vgic_init_maintenance_interrupt(void *info)
 {
-	enable_percpu_irq(vgic_maint_irq, 0);
+	enable_percpu_irq(vgic->maint_irq, 0);
 }
 
 static int vgic_cpu_notify(struct notifier_block *self,
@@ -1459,7 +1537,7 @@ static int vgic_cpu_notify(struct notifier_block *self,
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
-		disable_percpu_irq(vgic_maint_irq);
+		disable_percpu_irq(vgic->maint_irq);
 		break;
 	}
 
@@ -1470,30 +1548,37 @@ static struct notifier_block vgic_cpu_nb = {
 	.notifier_call = vgic_cpu_notify,
 };
 
+static const struct of_device_id vgic_ids[] = {
+	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
+	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
+	{},
+};
+
 int kvm_vgic_hyp_init(void)
 {
+	const struct of_device_id *matched_id;
+	int (*vgic_probe)(struct device_node *,const struct vgic_ops **,
+			  const struct vgic_params **);
+	struct device_node *vgic_node;
 	int ret;
-	struct resource vctrl_res;
-	struct resource vcpu_res;
 
-	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
+	vgic_node = of_find_matching_node_and_match(NULL,
+						    vgic_ids, &matched_id);
 	if (!vgic_node) {
-		kvm_err("error: no compatible vgic node in DT\n");
+		kvm_err("error: no compatible GIC node found\n");
 		return -ENODEV;
 	}
 
-	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
-	if (!vgic_maint_irq) {
-		kvm_err("error getting vgic maintenance irq from DT\n");
-		ret = -ENXIO;
-		goto out;
-	}
+	vgic_probe = matched_id->data;
+	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
+	if (ret)
+		return ret;
 
-	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
+	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
 				 "vgic", kvm_get_running_vcpus());
 	if (ret) {
-		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
-		goto out;
+		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
+		return ret;
 	}
 
 	ret = __register_cpu_notifier(&vgic_cpu_nb);
@@ -1502,65 +1587,15 @@ int kvm_vgic_hyp_init(void)
 		goto out_free_irq;
 	}
 
-	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
-	if (ret) {
-		kvm_err("Cannot obtain VCTRL resource\n");
-		goto out_free_irq;
-	}
+	/* Callback into arch code for setup */
+	vgic_arch_setup(vgic);
 
-	vgic_vctrl_base = of_iomap(vgic_node, 2);
-	if (!vgic_vctrl_base) {
-		kvm_err("Cannot ioremap VCTRL\n");
-		ret = -ENOMEM;
-		goto out_free_irq;
-	}
-
-	vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
-	vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
-
-	ret = create_hyp_io_mappings(vgic_vctrl_base,
-				     vgic_vctrl_base + resource_size(&vctrl_res),
-				     vctrl_res.start);
-	if (ret) {
-		kvm_err("Cannot map VCTRL into hyp\n");
-		goto out_unmap;
-	}
-
-	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
-		kvm_err("Cannot obtain VCPU resource\n");
-		ret = -ENXIO;
-		goto out_unmap;
-	}
-
-	if (!PAGE_ALIGNED(vcpu_res.start)) {
-		kvm_err("GICV physical address 0x%llx not page aligned\n",
-			(unsigned long long)vcpu_res.start);
-		ret = -ENXIO;
-		goto out_unmap;
-	}
-
-	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
-		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&vcpu_res),
-			PAGE_SIZE);
-		ret = -ENXIO;
-		goto out_unmap;
-	}
-
-	vgic_vcpu_base = vcpu_res.start;
-
-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vctrl_res.start, vgic_maint_irq);
 	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
 
-	goto out;
+	return 0;
 
-out_unmap:
-	iounmap(vgic_vctrl_base);
 out_free_irq:
-	free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
-out:
-	of_node_put(vgic_node);
+	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
 	return ret;
 }
 
@@ -1593,7 +1628,7 @@ int kvm_vgic_init(struct kvm *kvm)
 	}
 
 	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
-				    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
+				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
 	if (ret) {
 		kvm_err("Unable to remap VGIC CPU to VCPU\n");
 		goto out;
@@ -1639,7 +1674,8 @@ int kvm_vgic_create(struct kvm *kvm)
 	}
 
 	spin_lock_init(&kvm->arch.vgic.lock);
-	kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
+	kvm->arch.vgic.in_kernel = true;
+	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
 	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
 
@@ -1738,39 +1774,40 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
 				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	u32 reg, mask = 0, shift = 0;
 	bool updated = false;
+	struct vgic_vmcr vmcr;
+	u32 *vmcr_field;
+	u32 reg;
+
+	vgic_get_vmcr(vcpu, &vmcr);
 
 	switch (offset & ~0x3) {
 	case GIC_CPU_CTRL:
-		mask = GICH_VMCR_CTRL_MASK;
-		shift = GICH_VMCR_CTRL_SHIFT;
+		vmcr_field = &vmcr.ctlr;
 		break;
 	case GIC_CPU_PRIMASK:
-		mask = GICH_VMCR_PRIMASK_MASK;
-		shift = GICH_VMCR_PRIMASK_SHIFT;
+		vmcr_field = &vmcr.pmr;
 		break;
 	case GIC_CPU_BINPOINT:
-		mask = GICH_VMCR_BINPOINT_MASK;
-		shift = GICH_VMCR_BINPOINT_SHIFT;
+		vmcr_field = &vmcr.bpr;
 		break;
 	case GIC_CPU_ALIAS_BINPOINT:
-		mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
-		shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+		vmcr_field = &vmcr.abpr;
 		break;
+	default:
+		BUG();
 	}
 
 	if (!mmio->is_write) {
-		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
+		reg = *vmcr_field;
 		mmio_data_write(mmio, ~0, reg);
 	} else {
 		reg = mmio_data_read(mmio, ~0);
-		reg = (reg << shift) & mask;
-		if (reg != (vgic_cpu->vgic_vmcr & mask))
+		if (reg != *vmcr_field) {
+			*vmcr_field = reg;
+			vgic_set_vmcr(vcpu, &vmcr);
 			updated = true;
-		vgic_cpu->vgic_vmcr &= ~mask;
-		vgic_cpu->vgic_vmcr |= reg;
+		}
 	}
 	return updated;
 }
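
The REG_OFFSET_SWIZZLE change in the vgic.c diff is the subtlest part of the big-endian support. On a 64-bit big-endian host, a bitmap operation on an unsigned long word stores bits 0-31 in the higher-addressed 32-bit half, so the u32 register view must XOR its index with 1 to land on the same bits. A self-contained model of that behaviour, with REG_OFFSET_SWIZZLE hardcoded to 1 to simulate the 64-bit big-endian case rather than derived from the build configuration as the kernel does:

#include <assert.h>
#include <stdint.h>

#define REG_OFFSET_SWIZZLE 1	/* pretend we are 64-bit big-endian */

static uint32_t regs[4];	/* u32 "register" view of the storage */

/* Set bit 'n' the way a 64-bit bitmap op lands in memory on a BE
 * host: bits 0-31 of each u64 word occupy the higher-addressed u32. */
static void be64_set_bit(int n)
{
	int word = n / 64;
	int bit = n % 64;

	regs[word * 2 + (bit < 32 ? 1 : 0)] |= 1U << (bit % 32);
}

/* Register view: swizzle the u32 index to reach the same bits */
static uint32_t read_reg(int offset)
{
	return regs[offset ^ REG_OFFSET_SWIZZLE];
}

int main(void)
{
	be64_set_bit(5);			/* bitmap view sets bit 5   */
	assert(read_reg(0) & (1U << 5));	/* register 0 still sees it */
	return 0;
}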