author     Paolo Bonzini <pbonzini@redhat.com>  2014-09-27 05:03:33 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-09-27 05:03:33 -0400
commit     e77d99d4a4ec761ad061f1ec890c71040a92efe3 (patch)
tree       aea6fe2ee5bb6e699045a3629b48208f3e2a26b6 /virt
parent     bb0ca6acd466af55c95b7ce508f29e23a24cabd9 (diff)
parent     0496daa5cf99741ce8db82686b4c7446a37feabb (diff)
Merge tag 'kvm-arm-for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next
Changes for KVM for arm/arm64 for 3.18
This includes a bunch of changes:
- Support read-only memory slots on arm/arm64
- Various changes to fix Sparse warnings
- Correctly detect write vs. read Stage-2 faults
- Various VGIC cleanups and fixes
- Dynamic VGIC data structure sizing
- Fix SGI set_clear_pend offset bug
- Fix VTTBR_BADDR Mask
- Correctly report the FSC on Stage-2 faults
Conflicts:
virt/kvm/eventfd.c
[duplicate fix applied as two different patches; the kvm-arm
version broke x86, so the kvm tree's version is the right one]
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/vgic.c   631
-rw-r--r--  virt/kvm/kvm_main.c    11
2 files changed, 520 insertions, 122 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3ee3ce06bbec..862967852d5a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -36,21 +36,22 @@ | |||
36 | * How the whole thing works (courtesy of Christoffer Dall): | 36 | * How the whole thing works (courtesy of Christoffer Dall): |
37 | * | 37 | * |
38 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if | 38 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if |
39 | * something is pending | 39 | * something is pending on the CPU interface. |
40 | * - VGIC pending interrupts are stored on the vgic.irq_state vgic | 40 | * - Interrupts that are pending on the distributor are stored on the |
41 | * bitmap (this bitmap is updated by both user land ioctls and guest | 41 | * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land |
42 | * mmio ops, and other in-kernel peripherals such as the | 42 | * ioctls and guest mmio ops, and other in-kernel peripherals such as the |
43 | * arch. timers) and indicate the 'wire' state. | 43 | * arch. timers). |
44 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is | 44 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is |
45 | * recalculated | 45 | * recalculated |
46 | * - To calculate the oracle, we need info for each cpu from | 46 | * - To calculate the oracle, we need info for each cpu from |
47 | * compute_pending_for_cpu, which considers: | 47 | * compute_pending_for_cpu, which considers: |
48 | * - PPI: dist->irq_state & dist->irq_enable | 48 | * - PPI: dist->irq_pending & dist->irq_enable |
49 | * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target | 49 | * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target |
50 | * - irq_spi_target is a 'formatted' version of the GICD_ICFGR | 50 | * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn |
51 | * registers, stored on each vcpu. We only keep one bit of | 51 | * registers, stored on each vcpu. We only keep one bit of |
52 | * information per interrupt, making sure that only one vcpu can | 52 | * information per interrupt, making sure that only one vcpu can |
53 | * accept the interrupt. | 53 | * accept the interrupt. |
54 | * - If any of the above state changes, we must recalculate the oracle. | ||
54 | * - The same is true when injecting an interrupt, except that we only | 55 | * - The same is true when injecting an interrupt, except that we only |
55 | * consider a single interrupt at a time. The irq_spi_cpu array | 56 | * consider a single interrupt at a time. The irq_spi_cpu array |
56 | * contains the target CPU for each SPI. | 57 | * contains the target CPU for each SPI. |
@@ -60,13 +61,18 @@ | |||
60 | * the 'line' again. This is achieved as such: | 61 | * the 'line' again. This is achieved as such: |
61 | * | 62 | * |
62 | * - When a level interrupt is moved onto a vcpu, the corresponding | 63 | * - When a level interrupt is moved onto a vcpu, the corresponding |
63 | * bit in irq_active is set. As long as this bit is set, the line | 64 | * bit in irq_queued is set. As long as this bit is set, the line |
64 | * will be ignored for further interrupts. The interrupt is injected | 65 | * will be ignored for further interrupts. The interrupt is injected |
65 | * into the vcpu with the GICH_LR_EOI bit set (generate a | 66 | * into the vcpu with the GICH_LR_EOI bit set (generate a |
66 | * maintenance interrupt on EOI). | 67 | * maintenance interrupt on EOI). |
67 | * - When the interrupt is EOIed, the maintenance interrupt fires, | 68 | * - When the interrupt is EOIed, the maintenance interrupt fires, |
68 | * and clears the corresponding bit in irq_active. This allow the | 69 | * and clears the corresponding bit in irq_queued. This allows the |
69 | * interrupt line to be sampled again. | 70 | * interrupt line to be sampled again. |
71 | * - Note that level-triggered interrupts can also be set to pending from | ||
72 | * writes to GICD_ISPENDRn and lowering the external input line does not | ||
73 | * cause the interrupt to become inactive in such a situation. | ||
74 | * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become | ||
75 | * inactive as long as the external input line is held high. | ||
70 | */ | 76 | */ |
71 | 77 | ||
72 | #define VGIC_ADDR_UNDEF (-1) | 78 | #define VGIC_ADDR_UNDEF (-1) |
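In code, the oracle described above reduces to a couple of bitmap ANDs per vcpu. A minimal sketch using the helpers defined in this file (vcpu_has_pending is a hypothetical name; the real compute_pending_for_cpu() appears later in this patch and uses preallocated per-vcpu result bitmaps instead of a stack-local one):

	/*
	 * Sketch: derive the irq_pending_on_cpu oracle bit for one vcpu.
	 * Locking and the per-vcpu result bitmaps are omitted for brevity.
	 */
	static bool vcpu_has_pending(struct kvm_vcpu *vcpu)
	{
		struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
		int id = vcpu->vcpu_id;
		int nr_shared = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
		DECLARE_BITMAP(pend, VGIC_MAX_IRQS);

		/* PPI/SGI: pending & enabled, per-cpu bits only */
		bitmap_and(pend,
			   vgic_bitmap_get_cpu_map(&dist->irq_pending, id),
			   vgic_bitmap_get_cpu_map(&dist->irq_enabled, id),
			   VGIC_NR_PRIVATE_IRQS);
		if (!bitmap_empty(pend, VGIC_NR_PRIVATE_IRQS))
			return true;

		/* SPI: pending & enabled & targeted at this vcpu */
		bitmap_and(pend,
			   vgic_bitmap_get_shared_map(&dist->irq_pending),
			   vgic_bitmap_get_shared_map(&dist->irq_enabled),
			   nr_shared);
		bitmap_and(pend, pend,
			   vgic_bitmap_get_shared_map(&dist->irq_spi_target[id]),
			   nr_shared);
		return !bitmap_empty(pend, nr_shared);
	}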
@@ -89,6 +95,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); | |||
89 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); | 95 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); |
90 | static void vgic_update_state(struct kvm *kvm); | 96 | static void vgic_update_state(struct kvm *kvm); |
91 | static void vgic_kick_vcpus(struct kvm *kvm); | 97 | static void vgic_kick_vcpus(struct kvm *kvm); |
98 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); | ||
92 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | 99 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); |
93 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); | 100 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); |
94 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | 101 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); |
@@ -99,10 +106,8 @@ static const struct vgic_ops *vgic_ops; | |||
99 | static const struct vgic_params *vgic; | 106 | static const struct vgic_params *vgic; |
100 | 107 | ||
101 | /* | 108 | /* |
102 | * struct vgic_bitmap contains unions that provide two views of | 109 | * struct vgic_bitmap contains a bitmap made of unsigned longs, but |
103 | * the same data. In one case it is an array of registers of | 110 | * extracts u32s out of them. |
104 | * u32's, and in the other case it is a bitmap of unsigned | ||
105 | * longs. | ||
106 | * | 111 | * |
107 | * This does not work on 64-bit BE systems, because the bitmap access | 112 | * This does not work on 64-bit BE systems, because the bitmap access |
108 | * will store two consecutive 32-bit words with the higher-addressed | 113 | * will store two consecutive 32-bit words with the higher-addressed |
@@ -118,23 +123,45 @@ static const struct vgic_params *vgic; | |||
118 | #define REG_OFFSET_SWIZZLE 0 | 123 | #define REG_OFFSET_SWIZZLE 0 |
119 | #endif | 124 | #endif |
120 | 125 | ||
126 | static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs) | ||
127 | { | ||
128 | int nr_longs; | ||
129 | |||
130 | nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS); | ||
131 | |||
132 | b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL); | ||
133 | if (!b->private) | ||
134 | return -ENOMEM; | ||
135 | |||
136 | b->shared = b->private + nr_cpus; | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static void vgic_free_bitmap(struct vgic_bitmap *b) | ||
142 | { | ||
143 | kfree(b->private); | ||
144 | b->private = NULL; | ||
145 | b->shared = NULL; | ||
146 | } | ||
147 | |||
121 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, | 148 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, |
122 | int cpuid, u32 offset) | 149 | int cpuid, u32 offset) |
123 | { | 150 | { |
124 | offset >>= 2; | 151 | offset >>= 2; |
125 | if (!offset) | 152 | if (!offset) |
126 | return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE); | 153 | return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE; |
127 | else | 154 | else |
128 | return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE); | 155 | return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE); |
129 | } | 156 | } |
130 | 157 | ||
131 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, | 158 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, |
132 | int cpuid, int irq) | 159 | int cpuid, int irq) |
133 | { | 160 | { |
134 | if (irq < VGIC_NR_PRIVATE_IRQS) | 161 | if (irq < VGIC_NR_PRIVATE_IRQS) |
135 | return test_bit(irq, x->percpu[cpuid].reg_ul); | 162 | return test_bit(irq, x->private + cpuid); |
136 | 163 | ||
137 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul); | 164 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); |
138 | } | 165 | } |
139 | 166 | ||
140 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | 167 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, |
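The allocation above gives every vcpu exactly one unsigned long for its 32 private interrupts (SGIs + PPIs), with the shared SPI bits packed immediately behind them. A worked example, assuming 4 vcpus, 256 interrupts and BITS_PER_LONG == 64:

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS)
	         = 4 + BITS_TO_LONGS(256 - 32)
	         = 4 + 4 = 8 unsigned longs

	b->private[0..3]: one long per vcpu (private IRQs 0-31)
	b->shared == b->private + 4: bits for the 224 SPIs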
@@ -143,9 +170,9 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | |||
143 | unsigned long *reg; | 170 | unsigned long *reg; |
144 | 171 | ||
145 | if (irq < VGIC_NR_PRIVATE_IRQS) { | 172 | if (irq < VGIC_NR_PRIVATE_IRQS) { |
146 | reg = x->percpu[cpuid].reg_ul; | 173 | reg = x->private + cpuid; |
147 | } else { | 174 | } else { |
148 | reg = x->shared.reg_ul; | 175 | reg = x->shared; |
149 | irq -= VGIC_NR_PRIVATE_IRQS; | 176 | irq -= VGIC_NR_PRIVATE_IRQS; |
150 | } | 177 | } |
151 | 178 | ||
@@ -157,24 +184,49 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | |||
157 | 184 | ||
158 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) | 185 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) |
159 | { | 186 | { |
160 | if (unlikely(cpuid >= VGIC_MAX_CPUS)) | 187 | return x->private + cpuid; |
161 | return NULL; | ||
162 | return x->percpu[cpuid].reg_ul; | ||
163 | } | 188 | } |
164 | 189 | ||
165 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) | 190 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) |
166 | { | 191 | { |
167 | return x->shared.reg_ul; | 192 | return x->shared; |
193 | } | ||
194 | |||
195 | static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs) | ||
196 | { | ||
197 | int size; | ||
198 | |||
199 | size = nr_cpus * VGIC_NR_PRIVATE_IRQS; | ||
200 | size += nr_irqs - VGIC_NR_PRIVATE_IRQS; | ||
201 | |||
202 | x->private = kzalloc(size, GFP_KERNEL); | ||
203 | if (!x->private) | ||
204 | return -ENOMEM; | ||
205 | |||
206 | x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32); | ||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static void vgic_free_bytemap(struct vgic_bytemap *b) | ||
211 | { | ||
212 | kfree(b->private); | ||
213 | b->private = NULL; | ||
214 | b->shared = NULL; | ||
168 | } | 215 | } |
169 | 216 | ||
170 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) | 217 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) |
171 | { | 218 | { |
172 | offset >>= 2; | 219 | u32 *reg; |
173 | BUG_ON(offset > (VGIC_NR_IRQS / 4)); | 220 | |
174 | if (offset < 8) | 221 | if (offset < VGIC_NR_PRIVATE_IRQS) { |
175 | return x->percpu[cpuid] + offset; | 222 | reg = x->private; |
176 | else | 223 | offset += cpuid * VGIC_NR_PRIVATE_IRQS; |
177 | return x->shared + offset - 8; | 224 | } else { |
225 | reg = x->shared; | ||
226 | offset -= VGIC_NR_PRIVATE_IRQS; | ||
227 | } | ||
228 | |||
229 | return reg + (offset / sizeof(u32)); | ||
178 | } | 230 | } |
179 | 231 | ||
180 | #define VGIC_CFG_LEVEL 0 | 232 | #define VGIC_CFG_LEVEL 0 |
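vgic_bytemap_get_reg() takes a byte offset into a one-byte-per-IRQ register block (GIC_DIST_PRI, GIC_DIST_TARGET). A worked example of the new indexing, assuming cpuid == 2 and an access at byte offset 4 (private IRQs 4-7):

	offset (4) < VGIC_NR_PRIVATE_IRQS (32), so:
	reg    = x->private
	offset = 4 + 2 * VGIC_NR_PRIVATE_IRQS = 68
	result = reg + 68 / sizeof(u32)   /* u32 index 17 of the private area */

Each vcpu thus owns a 32-byte private slice, followed by one shared slice covering the SPIs.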
@@ -196,46 +248,81 @@ static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) | |||
196 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); | 248 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); |
197 | } | 249 | } |
198 | 250 | ||
199 | static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) | 251 | static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq) |
252 | { | ||
253 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
254 | |||
255 | return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq); | ||
256 | } | ||
257 | |||
258 | static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq) | ||
259 | { | ||
260 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
261 | |||
262 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1); | ||
263 | } | ||
264 | |||
265 | static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq) | ||
266 | { | ||
267 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
268 | |||
269 | vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); | ||
270 | } | ||
271 | |||
272 | static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq) | ||
273 | { | ||
274 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
275 | |||
276 | return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq); | ||
277 | } | ||
278 | |||
279 | static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq) | ||
280 | { | ||
281 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
282 | |||
283 | vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1); | ||
284 | } | ||
285 | |||
286 | static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq) | ||
200 | { | 287 | { |
201 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 288 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
202 | 289 | ||
203 | return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); | 290 | vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0); |
204 | } | 291 | } |
205 | 292 | ||
206 | static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) | 293 | static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq) |
207 | { | 294 | { |
208 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 295 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
209 | 296 | ||
210 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); | 297 | return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq); |
211 | } | 298 | } |
212 | 299 | ||
213 | static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) | 300 | static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) |
214 | { | 301 | { |
215 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 302 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
216 | 303 | ||
217 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); | 304 | vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); |
218 | } | 305 | } |
219 | 306 | ||
220 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | 307 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) |
221 | { | 308 | { |
222 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 309 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
223 | 310 | ||
224 | return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq); | 311 | return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); |
225 | } | 312 | } |
226 | 313 | ||
227 | static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq) | 314 | static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) |
228 | { | 315 | { |
229 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 316 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
230 | 317 | ||
231 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1); | 318 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); |
232 | } | 319 | } |
233 | 320 | ||
234 | static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq) | 321 | static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) |
235 | { | 322 | { |
236 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 323 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
237 | 324 | ||
238 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0); | 325 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0); |
239 | } | 326 | } |
240 | 327 | ||
241 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) | 328 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) |
@@ -256,6 +343,11 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) | |||
256 | vcpu->arch.vgic_cpu.pending_shared); | 343 | vcpu->arch.vgic_cpu.pending_shared); |
257 | } | 344 | } |
258 | 345 | ||
346 | static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) | ||
347 | { | ||
348 | return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); | ||
349 | } | ||
350 | |||
259 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) | 351 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) |
260 | { | 352 | { |
261 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; | 353 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; |
@@ -347,7 +439,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | |||
347 | 439 | ||
348 | case 4: /* GICD_TYPER */ | 440 | case 4: /* GICD_TYPER */ |
349 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | 441 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; |
350 | reg |= (VGIC_NR_IRQS >> 5) - 1; | 442 | reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; |
351 | vgic_reg_access(mmio, ®, word_offset, | 443 | vgic_reg_access(mmio, ®, word_offset, |
352 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | 444 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); |
353 | break; | 445 | break; |
@@ -409,11 +501,33 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | |||
409 | struct kvm_exit_mmio *mmio, | 501 | struct kvm_exit_mmio *mmio, |
410 | phys_addr_t offset) | 502 | phys_addr_t offset) |
411 | { | 503 | { |
412 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | 504 | u32 *reg, orig; |
413 | vcpu->vcpu_id, offset); | 505 | u32 level_mask; |
506 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
507 | |||
508 | reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset); | ||
509 | level_mask = (~(*reg)); | ||
510 | |||
511 | /* Mark both level and edge triggered irqs as pending */ | ||
512 | reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset); | ||
513 | orig = *reg; | ||
414 | vgic_reg_access(mmio, reg, offset, | 514 | vgic_reg_access(mmio, reg, offset, |
415 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | 515 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); |
516 | |||
416 | if (mmio->is_write) { | 517 | if (mmio->is_write) { |
518 | /* Set the soft-pending flag only for level-triggered irqs */ | ||
519 | reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, | ||
520 | vcpu->vcpu_id, offset); | ||
521 | vgic_reg_access(mmio, reg, offset, | ||
522 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | ||
523 | *reg &= level_mask; | ||
524 | |||
525 | /* Ignore writes to SGIs */ | ||
526 | if (offset < 2) { | ||
527 | *reg &= ~0xffff; | ||
528 | *reg |= orig & 0xffff; | ||
529 | } | ||
530 | |||
417 | vgic_update_state(vcpu->kvm); | 531 | vgic_update_state(vcpu->kvm); |
418 | return true; | 532 | return true; |
419 | } | 533 | } |
@@ -425,11 +539,34 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | |||
425 | struct kvm_exit_mmio *mmio, | 539 | struct kvm_exit_mmio *mmio, |
426 | phys_addr_t offset) | 540 | phys_addr_t offset) |
427 | { | 541 | { |
428 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | 542 | u32 *level_active; |
429 | vcpu->vcpu_id, offset); | 543 | u32 *reg, orig; |
544 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
545 | |||
546 | reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset); | ||
547 | orig = *reg; | ||
430 | vgic_reg_access(mmio, reg, offset, | 548 | vgic_reg_access(mmio, reg, offset, |
431 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | 549 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); |
432 | if (mmio->is_write) { | 550 | if (mmio->is_write) { |
551 | /* Re-set level triggered level-active interrupts */ | ||
552 | level_active = vgic_bitmap_get_reg(&dist->irq_level, | ||
553 | vcpu->vcpu_id, offset); | ||
554 | reg = vgic_bitmap_get_reg(&dist->irq_pending, | ||
555 | vcpu->vcpu_id, offset); | ||
556 | *reg |= *level_active; | ||
557 | |||
558 | /* Ignore writes to SGIs */ | ||
559 | if (offset < 2) { | ||
560 | *reg &= ~0xffff; | ||
561 | *reg |= orig & 0xffff; | ||
562 | } | ||
563 | |||
564 | /* Clear soft-pending flags */ | ||
565 | reg = vgic_bitmap_get_reg(&dist->irq_soft_pend, | ||
566 | vcpu->vcpu_id, offset); | ||
567 | vgic_reg_access(mmio, reg, offset, | ||
568 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | ||
569 | |||
433 | vgic_update_state(vcpu->kvm); | 570 | vgic_update_state(vcpu->kvm); |
434 | return true; | 571 | return true; |
435 | } | 572 | } |
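Taken together, the two pending-register handlers above maintain a simple invariant for level-triggered interrupts: the interrupt stays pending while either the external line (irq_level) is high or the soft-pending flag is set, so a write to GICD_ICPENDRn can never de-assert a held-high line. A minimal sketch of that invariant (hypothetical helper, not part of the patch):

	/*
	 * Effective pending state of a level-triggered interrupt as
	 * maintained by the ISPENDR/ICPENDR handlers above.
	 */
	static bool level_irq_should_be_pending(struct kvm_vcpu *vcpu, int irq)
	{
		return vgic_dist_irq_get_level(vcpu, irq) ||
		       vgic_dist_irq_soft_pend(vcpu, irq);
	}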
@@ -651,9 +788,9 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
651 | * is fine, then we are only setting a few bits that were | 788 | * is fine, then we are only setting a few bits that were |
652 | * already set. | 789 | * already set. |
653 | */ | 790 | */ |
654 | vgic_dist_irq_set(vcpu, lr.irq); | 791 | vgic_dist_irq_set_pending(vcpu, lr.irq); |
655 | if (lr.irq < VGIC_NR_SGIS) | 792 | if (lr.irq < VGIC_NR_SGIS) |
656 | dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source; | 793 | *vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source; |
657 | lr.state &= ~LR_STATE_PENDING; | 794 | lr.state &= ~LR_STATE_PENDING; |
658 | vgic_set_lr(vcpu, i, lr); | 795 | vgic_set_lr(vcpu, i, lr); |
659 | 796 | ||
@@ -662,8 +799,10 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |||
662 | * active), then the LR does not hold any useful info and can | 799 | * active), then the LR does not hold any useful info and can |
663 | * be marked as free for other use. | 800 | * be marked as free for other use. |
664 | */ | 801 | */ |
665 | if (!(lr.state & LR_STATE_MASK)) | 802 | if (!(lr.state & LR_STATE_MASK)) { |
666 | vgic_retire_lr(i, lr.irq, vcpu); | 803 | vgic_retire_lr(i, lr.irq, vcpu); |
804 | vgic_irq_clear_queued(vcpu, lr.irq); | ||
805 | } | ||
667 | 806 | ||
668 | /* Finally update the VGIC state. */ | 807 | /* Finally update the VGIC state. */ |
669 | vgic_update_state(vcpu->kvm); | 808 | vgic_update_state(vcpu->kvm); |
@@ -677,7 +816,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
677 | { | 816 | { |
678 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 817 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
679 | int sgi; | 818 | int sgi; |
680 | int min_sgi = (offset & ~0x3) * 4; | 819 | int min_sgi = (offset & ~0x3); |
681 | int max_sgi = min_sgi + 3; | 820 | int max_sgi = min_sgi + 3; |
682 | int vcpu_id = vcpu->vcpu_id; | 821 | int vcpu_id = vcpu->vcpu_id; |
683 | u32 reg = 0; | 822 | u32 reg = 0; |
@@ -685,7 +824,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
685 | /* Copy source SGIs from distributor side */ | 824 | /* Copy source SGIs from distributor side */ |
686 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | 825 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { |
687 | int shift = 8 * (sgi - min_sgi); | 826 | int shift = 8 * (sgi - min_sgi); |
688 | reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift; | 827 | reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift; |
689 | } | 828 | } |
690 | 829 | ||
691 | mmio_data_write(mmio, ~0, reg); | 830 | mmio_data_write(mmio, ~0, reg); |
@@ -698,7 +837,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
698 | { | 837 | { |
699 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 838 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
700 | int sgi; | 839 | int sgi; |
701 | int min_sgi = (offset & ~0x3) * 4; | 840 | int min_sgi = (offset & ~0x3); |
702 | int max_sgi = min_sgi + 3; | 841 | int max_sgi = min_sgi + 3; |
703 | int vcpu_id = vcpu->vcpu_id; | 842 | int vcpu_id = vcpu->vcpu_id; |
704 | u32 reg; | 843 | u32 reg; |
@@ -709,14 +848,15 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |||
709 | /* Clear pending SGIs on the distributor */ | 848 | /* Clear pending SGIs on the distributor */ |
710 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | 849 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { |
711 | u8 mask = reg >> (8 * (sgi - min_sgi)); | 850 | u8 mask = reg >> (8 * (sgi - min_sgi)); |
851 | u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi); | ||
712 | if (set) { | 852 | if (set) { |
713 | if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask) | 853 | if ((*src & mask) != mask) |
714 | updated = true; | 854 | updated = true; |
715 | dist->irq_sgi_sources[vcpu_id][sgi] |= mask; | 855 | *src |= mask; |
716 | } else { | 856 | } else { |
717 | if (dist->irq_sgi_sources[vcpu_id][sgi] & mask) | 857 | if (*src & mask) |
718 | updated = true; | 858 | updated = true; |
719 | dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask; | 859 | *src &= ~mask; |
720 | } | 860 | } |
721 | } | 861 | } |
722 | 862 | ||
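The min_sgi change in the two handlers above is the "SGI set_clear_pend offset bug" from the merge description: offset is already a byte offset into the byte-per-SGI GICD_SPENDSGIR/GICD_CPENDSGIR banks, so the extra multiply pushed the window past the 16 architected SGIs. For an access at offset 4:

	old: min_sgi = (4 & ~0x3) * 4 = 16   -> SGIs 16-19 (out of range)
	new: min_sgi = (4 & ~0x3)     =  4   -> SGIs 4-7, as intended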
@@ -755,6 +895,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | |||
755 | struct mmio_range { | 895 | struct mmio_range { |
756 | phys_addr_t base; | 896 | phys_addr_t base; |
757 | unsigned long len; | 897 | unsigned long len; |
898 | int bits_per_irq; | ||
758 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | 899 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, |
759 | phys_addr_t offset); | 900 | phys_addr_t offset); |
760 | }; | 901 | }; |
@@ -763,56 +904,67 @@ static const struct mmio_range vgic_dist_ranges[] = { | |||
763 | { | 904 | { |
764 | .base = GIC_DIST_CTRL, | 905 | .base = GIC_DIST_CTRL, |
765 | .len = 12, | 906 | .len = 12, |
907 | .bits_per_irq = 0, | ||
766 | .handle_mmio = handle_mmio_misc, | 908 | .handle_mmio = handle_mmio_misc, |
767 | }, | 909 | }, |
768 | { | 910 | { |
769 | .base = GIC_DIST_IGROUP, | 911 | .base = GIC_DIST_IGROUP, |
770 | .len = VGIC_NR_IRQS / 8, | 912 | .len = VGIC_MAX_IRQS / 8, |
913 | .bits_per_irq = 1, | ||
771 | .handle_mmio = handle_mmio_raz_wi, | 914 | .handle_mmio = handle_mmio_raz_wi, |
772 | }, | 915 | }, |
773 | { | 916 | { |
774 | .base = GIC_DIST_ENABLE_SET, | 917 | .base = GIC_DIST_ENABLE_SET, |
775 | .len = VGIC_NR_IRQS / 8, | 918 | .len = VGIC_MAX_IRQS / 8, |
919 | .bits_per_irq = 1, | ||
776 | .handle_mmio = handle_mmio_set_enable_reg, | 920 | .handle_mmio = handle_mmio_set_enable_reg, |
777 | }, | 921 | }, |
778 | { | 922 | { |
779 | .base = GIC_DIST_ENABLE_CLEAR, | 923 | .base = GIC_DIST_ENABLE_CLEAR, |
780 | .len = VGIC_NR_IRQS / 8, | 924 | .len = VGIC_MAX_IRQS / 8, |
925 | .bits_per_irq = 1, | ||
781 | .handle_mmio = handle_mmio_clear_enable_reg, | 926 | .handle_mmio = handle_mmio_clear_enable_reg, |
782 | }, | 927 | }, |
783 | { | 928 | { |
784 | .base = GIC_DIST_PENDING_SET, | 929 | .base = GIC_DIST_PENDING_SET, |
785 | .len = VGIC_NR_IRQS / 8, | 930 | .len = VGIC_MAX_IRQS / 8, |
931 | .bits_per_irq = 1, | ||
786 | .handle_mmio = handle_mmio_set_pending_reg, | 932 | .handle_mmio = handle_mmio_set_pending_reg, |
787 | }, | 933 | }, |
788 | { | 934 | { |
789 | .base = GIC_DIST_PENDING_CLEAR, | 935 | .base = GIC_DIST_PENDING_CLEAR, |
790 | .len = VGIC_NR_IRQS / 8, | 936 | .len = VGIC_MAX_IRQS / 8, |
937 | .bits_per_irq = 1, | ||
791 | .handle_mmio = handle_mmio_clear_pending_reg, | 938 | .handle_mmio = handle_mmio_clear_pending_reg, |
792 | }, | 939 | }, |
793 | { | 940 | { |
794 | .base = GIC_DIST_ACTIVE_SET, | 941 | .base = GIC_DIST_ACTIVE_SET, |
795 | .len = VGIC_NR_IRQS / 8, | 942 | .len = VGIC_MAX_IRQS / 8, |
943 | .bits_per_irq = 1, | ||
796 | .handle_mmio = handle_mmio_raz_wi, | 944 | .handle_mmio = handle_mmio_raz_wi, |
797 | }, | 945 | }, |
798 | { | 946 | { |
799 | .base = GIC_DIST_ACTIVE_CLEAR, | 947 | .base = GIC_DIST_ACTIVE_CLEAR, |
800 | .len = VGIC_NR_IRQS / 8, | 948 | .len = VGIC_MAX_IRQS / 8, |
949 | .bits_per_irq = 1, | ||
801 | .handle_mmio = handle_mmio_raz_wi, | 950 | .handle_mmio = handle_mmio_raz_wi, |
802 | }, | 951 | }, |
803 | { | 952 | { |
804 | .base = GIC_DIST_PRI, | 953 | .base = GIC_DIST_PRI, |
805 | .len = VGIC_NR_IRQS, | 954 | .len = VGIC_MAX_IRQS, |
955 | .bits_per_irq = 8, | ||
806 | .handle_mmio = handle_mmio_priority_reg, | 956 | .handle_mmio = handle_mmio_priority_reg, |
807 | }, | 957 | }, |
808 | { | 958 | { |
809 | .base = GIC_DIST_TARGET, | 959 | .base = GIC_DIST_TARGET, |
810 | .len = VGIC_NR_IRQS, | 960 | .len = VGIC_MAX_IRQS, |
961 | .bits_per_irq = 8, | ||
811 | .handle_mmio = handle_mmio_target_reg, | 962 | .handle_mmio = handle_mmio_target_reg, |
812 | }, | 963 | }, |
813 | { | 964 | { |
814 | .base = GIC_DIST_CONFIG, | 965 | .base = GIC_DIST_CONFIG, |
815 | .len = VGIC_NR_IRQS / 4, | 966 | .len = VGIC_MAX_IRQS / 4, |
967 | .bits_per_irq = 2, | ||
816 | .handle_mmio = handle_mmio_cfg_reg, | 968 | .handle_mmio = handle_mmio_cfg_reg, |
817 | }, | 969 | }, |
818 | { | 970 | { |
@@ -850,6 +1002,22 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges, | |||
850 | return NULL; | 1002 | return NULL; |
851 | } | 1003 | } |
852 | 1004 | ||
1005 | static bool vgic_validate_access(const struct vgic_dist *dist, | ||
1006 | const struct mmio_range *range, | ||
1007 | unsigned long offset) | ||
1008 | { | ||
1009 | int irq; | ||
1010 | |||
1011 | if (!range->bits_per_irq) | ||
1012 | return true; /* Not an irq-based access */ | ||
1013 | |||
1014 | irq = offset * 8 / range->bits_per_irq; | ||
1015 | if (irq >= dist->nr_irqs) | ||
1016 | return false; | ||
1017 | |||
1018 | return true; | ||
1019 | } | ||
1020 | |||
853 | /** | 1021 | /** |
854 | * vgic_handle_mmio - handle an in-kernel MMIO access | 1022 | * vgic_handle_mmio - handle an in-kernel MMIO access |
855 | * @vcpu: pointer to the vcpu performing the access | 1023 | * @vcpu: pointer to the vcpu performing the access |
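vgic_validate_access() scales a byte offset back to an interrupt number via the new per-range bits_per_irq field. For example, with dist->nr_irqs == 128, an access at offset 0x10 in the GIC_DIST_ENABLE_SET range (bits_per_irq == 1) gives:

	irq = 0x10 * 8 / 1 = 128 >= nr_irqs  -> reads as zero, writes ignored

while the same offset in GIC_DIST_PRI (bits_per_irq == 8) maps to irq 16 and is handled normally.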
@@ -889,7 +1057,13 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
889 | 1057 | ||
890 | spin_lock(&vcpu->kvm->arch.vgic.lock); | 1058 | spin_lock(&vcpu->kvm->arch.vgic.lock); |
891 | offset = mmio->phys_addr - range->base - base; | 1059 | offset = mmio->phys_addr - range->base - base; |
892 | updated_state = range->handle_mmio(vcpu, mmio, offset); | 1060 | if (vgic_validate_access(dist, range, offset)) { |
1061 | updated_state = range->handle_mmio(vcpu, mmio, offset); | ||
1062 | } else { | ||
1063 | vgic_reg_access(mmio, NULL, offset, | ||
1064 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | ||
1065 | updated_state = false; | ||
1066 | } | ||
893 | spin_unlock(&vcpu->kvm->arch.vgic.lock); | 1067 | spin_unlock(&vcpu->kvm->arch.vgic.lock); |
894 | kvm_prepare_mmio(run, mmio); | 1068 | kvm_prepare_mmio(run, mmio); |
895 | kvm_handle_mmio_return(vcpu, run); | 1069 | kvm_handle_mmio_return(vcpu, run); |
@@ -900,6 +1074,11 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
900 | return true; | 1074 | return true; |
901 | } | 1075 | } |
902 | 1076 | ||
1077 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi) | ||
1078 | { | ||
1079 | return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi; | ||
1080 | } | ||
1081 | |||
903 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | 1082 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) |
904 | { | 1083 | { |
905 | struct kvm *kvm = vcpu->kvm; | 1084 | struct kvm *kvm = vcpu->kvm; |
@@ -932,8 +1111,8 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |||
932 | kvm_for_each_vcpu(c, vcpu, kvm) { | 1111 | kvm_for_each_vcpu(c, vcpu, kvm) { |
933 | if (target_cpus & 1) { | 1112 | if (target_cpus & 1) { |
934 | /* Flag the SGI as pending */ | 1113 | /* Flag the SGI as pending */ |
935 | vgic_dist_irq_set(vcpu, sgi); | 1114 | vgic_dist_irq_set_pending(vcpu, sgi); |
936 | dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id; | 1115 | *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id; |
937 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | 1116 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); |
938 | } | 1117 | } |
939 | 1118 | ||
@@ -941,32 +1120,38 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |||
941 | } | 1120 | } |
942 | } | 1121 | } |
943 | 1122 | ||
1123 | static int vgic_nr_shared_irqs(struct vgic_dist *dist) | ||
1124 | { | ||
1125 | return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS; | ||
1126 | } | ||
1127 | |||
944 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | 1128 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) |
945 | { | 1129 | { |
946 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1130 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
947 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; | 1131 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; |
948 | unsigned long pending_private, pending_shared; | 1132 | unsigned long pending_private, pending_shared; |
1133 | int nr_shared = vgic_nr_shared_irqs(dist); | ||
949 | int vcpu_id; | 1134 | int vcpu_id; |
950 | 1135 | ||
951 | vcpu_id = vcpu->vcpu_id; | 1136 | vcpu_id = vcpu->vcpu_id; |
952 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; | 1137 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; |
953 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; | 1138 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; |
954 | 1139 | ||
955 | pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id); | 1140 | pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); |
956 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); | 1141 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); |
957 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); | 1142 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); |
958 | 1143 | ||
959 | pending = vgic_bitmap_get_shared_map(&dist->irq_state); | 1144 | pending = vgic_bitmap_get_shared_map(&dist->irq_pending); |
960 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); | 1145 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); |
961 | bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS); | 1146 | bitmap_and(pend_shared, pending, enabled, nr_shared); |
962 | bitmap_and(pend_shared, pend_shared, | 1147 | bitmap_and(pend_shared, pend_shared, |
963 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), | 1148 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), |
964 | VGIC_NR_SHARED_IRQS); | 1149 | nr_shared); |
965 | 1150 | ||
966 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); | 1151 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); |
967 | pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS); | 1152 | pending_shared = find_first_bit(pend_shared, nr_shared); |
968 | return (pending_private < VGIC_NR_PRIVATE_IRQS || | 1153 | return (pending_private < VGIC_NR_PRIVATE_IRQS || |
969 | pending_shared < VGIC_NR_SHARED_IRQS); | 1154 | pending_shared < vgic_nr_shared_irqs(dist)); |
970 | } | 1155 | } |
971 | 1156 | ||
972 | /* | 1157 | /* |
@@ -980,14 +1165,14 @@ static void vgic_update_state(struct kvm *kvm) | |||
980 | int c; | 1165 | int c; |
981 | 1166 | ||
982 | if (!dist->enabled) { | 1167 | if (!dist->enabled) { |
983 | set_bit(0, &dist->irq_pending_on_cpu); | 1168 | set_bit(0, dist->irq_pending_on_cpu); |
984 | return; | 1169 | return; |
985 | } | 1170 | } |
986 | 1171 | ||
987 | kvm_for_each_vcpu(c, vcpu, kvm) { | 1172 | kvm_for_each_vcpu(c, vcpu, kvm) { |
988 | if (compute_pending_for_cpu(vcpu)) { | 1173 | if (compute_pending_for_cpu(vcpu)) { |
989 | pr_debug("CPU%d has pending interrupts\n", c); | 1174 | pr_debug("CPU%d has pending interrupts\n", c); |
990 | set_bit(c, &dist->irq_pending_on_cpu); | 1175 | set_bit(c, dist->irq_pending_on_cpu); |
991 | } | 1176 | } |
992 | } | 1177 | } |
993 | } | 1178 | } |
@@ -1079,8 +1264,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
1079 | 1264 | ||
1080 | if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { | 1265 | if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { |
1081 | vgic_retire_lr(lr, vlr.irq, vcpu); | 1266 | vgic_retire_lr(lr, vlr.irq, vcpu); |
1082 | if (vgic_irq_is_active(vcpu, vlr.irq)) | 1267 | if (vgic_irq_is_queued(vcpu, vlr.irq)) |
1083 | vgic_irq_clear_active(vcpu, vlr.irq); | 1268 | vgic_irq_clear_queued(vcpu, vlr.irq); |
1084 | } | 1269 | } |
1085 | } | 1270 | } |
1086 | } | 1271 | } |
@@ -1092,13 +1277,14 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
1092 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | 1277 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) |
1093 | { | 1278 | { |
1094 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1279 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1280 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1095 | struct vgic_lr vlr; | 1281 | struct vgic_lr vlr; |
1096 | int lr; | 1282 | int lr; |
1097 | 1283 | ||
1098 | /* Sanitize the input... */ | 1284 | /* Sanitize the input... */ |
1099 | BUG_ON(sgi_source_id & ~7); | 1285 | BUG_ON(sgi_source_id & ~7); |
1100 | BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); | 1286 | BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); |
1101 | BUG_ON(irq >= VGIC_NR_IRQS); | 1287 | BUG_ON(irq >= dist->nr_irqs); |
1102 | 1288 | ||
1103 | kvm_debug("Queue IRQ%d\n", irq); | 1289 | kvm_debug("Queue IRQ%d\n", irq); |
1104 | 1290 | ||
@@ -1144,14 +1330,14 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
1144 | int vcpu_id = vcpu->vcpu_id; | 1330 | int vcpu_id = vcpu->vcpu_id; |
1145 | int c; | 1331 | int c; |
1146 | 1332 | ||
1147 | sources = dist->irq_sgi_sources[vcpu_id][irq]; | 1333 | sources = *vgic_get_sgi_sources(dist, vcpu_id, irq); |
1148 | 1334 | ||
1149 | for_each_set_bit(c, &sources, VGIC_MAX_CPUS) { | 1335 | for_each_set_bit(c, &sources, dist->nr_cpus) { |
1150 | if (vgic_queue_irq(vcpu, c, irq)) | 1336 | if (vgic_queue_irq(vcpu, c, irq)) |
1151 | clear_bit(c, &sources); | 1337 | clear_bit(c, &sources); |
1152 | } | 1338 | } |
1153 | 1339 | ||
1154 | dist->irq_sgi_sources[vcpu_id][irq] = sources; | 1340 | *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources; |
1155 | 1341 | ||
1156 | /* | 1342 | /* |
1157 | * If the sources bitmap has been cleared it means that we | 1343 | * If the sources bitmap has been cleared it means that we |
@@ -1160,7 +1346,7 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
1160 | * our emulated gic and can get rid of them. | 1346 | * our emulated gic and can get rid of them. |
1161 | */ | 1347 | */ |
1162 | if (!sources) { | 1348 | if (!sources) { |
1163 | vgic_dist_irq_clear(vcpu, irq); | 1349 | vgic_dist_irq_clear_pending(vcpu, irq); |
1164 | vgic_cpu_irq_clear(vcpu, irq); | 1350 | vgic_cpu_irq_clear(vcpu, irq); |
1165 | return true; | 1351 | return true; |
1166 | } | 1352 | } |
@@ -1170,15 +1356,15 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |||
1170 | 1356 | ||
1171 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) | 1357 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) |
1172 | { | 1358 | { |
1173 | if (vgic_irq_is_active(vcpu, irq)) | 1359 | if (!vgic_can_sample_irq(vcpu, irq)) |
1174 | return true; /* level interrupt, already queued */ | 1360 | return true; /* level interrupt, already queued */ |
1175 | 1361 | ||
1176 | if (vgic_queue_irq(vcpu, 0, irq)) { | 1362 | if (vgic_queue_irq(vcpu, 0, irq)) { |
1177 | if (vgic_irq_is_edge(vcpu, irq)) { | 1363 | if (vgic_irq_is_edge(vcpu, irq)) { |
1178 | vgic_dist_irq_clear(vcpu, irq); | 1364 | vgic_dist_irq_clear_pending(vcpu, irq); |
1179 | vgic_cpu_irq_clear(vcpu, irq); | 1365 | vgic_cpu_irq_clear(vcpu, irq); |
1180 | } else { | 1366 | } else { |
1181 | vgic_irq_set_active(vcpu, irq); | 1367 | vgic_irq_set_queued(vcpu, irq); |
1182 | } | 1368 | } |
1183 | 1369 | ||
1184 | return true; | 1370 | return true; |
@@ -1223,7 +1409,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1223 | } | 1409 | } |
1224 | 1410 | ||
1225 | /* SPIs */ | 1411 | /* SPIs */ |
1226 | for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) { | 1412 | for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) { |
1227 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) | 1413 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) |
1228 | overflow = 1; | 1414 | overflow = 1; |
1229 | } | 1415 | } |
@@ -1239,7 +1425,7 @@ epilog: | |||
1239 | * us. Claim we don't have anything pending. We'll | 1425 | * us. Claim we don't have anything pending. We'll |
1240 | * adjust that if needed while exiting. | 1426 | * adjust that if needed while exiting. |
1241 | */ | 1427 | */ |
1242 | clear_bit(vcpu_id, &dist->irq_pending_on_cpu); | 1428 | clear_bit(vcpu_id, dist->irq_pending_on_cpu); |
1243 | } | 1429 | } |
1244 | } | 1430 | } |
1245 | 1431 | ||
@@ -1261,17 +1447,32 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1261 | 1447 | ||
1262 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { | 1448 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { |
1263 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); | 1449 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
1450 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); | ||
1264 | 1451 | ||
1265 | vgic_irq_clear_active(vcpu, vlr.irq); | 1452 | vgic_irq_clear_queued(vcpu, vlr.irq); |
1266 | WARN_ON(vlr.state & LR_STATE_MASK); | 1453 | WARN_ON(vlr.state & LR_STATE_MASK); |
1267 | vlr.state = 0; | 1454 | vlr.state = 0; |
1268 | vgic_set_lr(vcpu, lr, vlr); | 1455 | vgic_set_lr(vcpu, lr, vlr); |
1269 | 1456 | ||
1457 | /* | ||
1458 | * If the IRQ was EOIed it was also ACKed and we | ||
1459 | * therefore assume we can clear the soft pending | ||
1460 | * state (should it have been set) for this interrupt. | ||
1461 | * | ||
1462 | * Note: if the IRQ soft pending state was set after | ||
1463 | * the IRQ was acked, it actually shouldn't be | ||
1464 | * cleared, but we have no way of knowing that unless | ||
1465 | * we start trapping ACKs when the soft-pending state | ||
1466 | * is set. | ||
1467 | */ | ||
1468 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); | ||
1469 | |||
1270 | /* Any additional pending interrupt? */ | 1470 | /* Any additional pending interrupt? */ |
1271 | if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { | 1471 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { |
1272 | vgic_cpu_irq_set(vcpu, vlr.irq); | 1472 | vgic_cpu_irq_set(vcpu, vlr.irq); |
1273 | level_pending = true; | 1473 | level_pending = true; |
1274 | } else { | 1474 | } else { |
1475 | vgic_dist_irq_clear_pending(vcpu, vlr.irq); | ||
1275 | vgic_cpu_irq_clear(vcpu, vlr.irq); | 1476 | vgic_cpu_irq_clear(vcpu, vlr.irq); |
1276 | } | 1477 | } |
1277 | 1478 | ||
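For level-triggered interrupts the EOI path above closes the loop sketched in the header comment: clear the queued bit so the line can be sampled again, drop the soft-pending flag, then re-sample the line. A condensed sketch of the per-LR handling (hypothetical helper; the real loop also rewrites the LR itself):

	static bool level_irq_eoi(struct kvm_vcpu *vcpu, int irq)
	{
		vgic_irq_clear_queued(vcpu, irq);         /* allow re-sampling */
		vgic_dist_irq_clear_soft_pend(vcpu, irq); /* EOI implies ACK */

		if (vgic_dist_irq_get_level(vcpu, irq)) {
			vgic_cpu_irq_set(vcpu, irq);      /* line still high */
			return true;                      /* level_pending */
		}
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return false;
	}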
@@ -1315,14 +1516,14 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | |||
1315 | 1516 | ||
1316 | vlr = vgic_get_lr(vcpu, lr); | 1517 | vlr = vgic_get_lr(vcpu, lr); |
1317 | 1518 | ||
1318 | BUG_ON(vlr.irq >= VGIC_NR_IRQS); | 1519 | BUG_ON(vlr.irq >= dist->nr_irqs); |
1319 | vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; | 1520 | vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; |
1320 | } | 1521 | } |
1321 | 1522 | ||
1322 | /* Check if we still have something up our sleeve... */ | 1523 | /* Check if we still have something up our sleeve... */ |
1323 | pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); | 1524 | pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); |
1324 | if (level_pending || pending < vgic->nr_lr) | 1525 | if (level_pending || pending < vgic->nr_lr) |
1325 | set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | 1526 | set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
1326 | } | 1527 | } |
1327 | 1528 | ||
1328 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | 1529 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) |
@@ -1356,7 +1557,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
1356 | if (!irqchip_in_kernel(vcpu->kvm)) | 1557 | if (!irqchip_in_kernel(vcpu->kvm)) |
1357 | return 0; | 1558 | return 0; |
1358 | 1559 | ||
1359 | return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | 1560 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
1360 | } | 1561 | } |
1361 | 1562 | ||
1362 | static void vgic_kick_vcpus(struct kvm *kvm) | 1563 | static void vgic_kick_vcpus(struct kvm *kvm) |
@@ -1376,34 +1577,36 @@ static void vgic_kick_vcpus(struct kvm *kvm) | |||
1376 | 1577 | ||
1377 | static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) | 1578 | static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) |
1378 | { | 1579 | { |
1379 | int is_edge = vgic_irq_is_edge(vcpu, irq); | 1580 | int edge_triggered = vgic_irq_is_edge(vcpu, irq); |
1380 | int state = vgic_dist_irq_is_pending(vcpu, irq); | ||
1381 | 1581 | ||
1382 | /* | 1582 | /* |
1383 | * Only inject an interrupt if: | 1583 | * Only inject an interrupt if: |
1384 | * - edge triggered and we have a rising edge | 1584 | * - edge triggered and we have a rising edge |
1385 | * - level triggered and we change level | 1585 | * - level triggered and we change level |
1386 | */ | 1586 | */ |
1387 | if (is_edge) | 1587 | if (edge_triggered) { |
1588 | int state = vgic_dist_irq_is_pending(vcpu, irq); | ||
1388 | return level > state; | 1589 | return level > state; |
1389 | else | 1590 | } else { |
1591 | int state = vgic_dist_irq_get_level(vcpu, irq); | ||
1390 | return level != state; | 1592 | return level != state; |
1593 | } | ||
1391 | } | 1594 | } |
1392 | 1595 | ||
1393 | static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | 1596 | static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid, |
1394 | unsigned int irq_num, bool level) | 1597 | unsigned int irq_num, bool level) |
1395 | { | 1598 | { |
1396 | struct vgic_dist *dist = &kvm->arch.vgic; | 1599 | struct vgic_dist *dist = &kvm->arch.vgic; |
1397 | struct kvm_vcpu *vcpu; | 1600 | struct kvm_vcpu *vcpu; |
1398 | int is_edge, is_level; | 1601 | int edge_triggered, level_triggered; |
1399 | int enabled; | 1602 | int enabled; |
1400 | bool ret = true; | 1603 | bool ret = true; |
1401 | 1604 | ||
1402 | spin_lock(&dist->lock); | 1605 | spin_lock(&dist->lock); |
1403 | 1606 | ||
1404 | vcpu = kvm_get_vcpu(kvm, cpuid); | 1607 | vcpu = kvm_get_vcpu(kvm, cpuid); |
1405 | is_edge = vgic_irq_is_edge(vcpu, irq_num); | 1608 | edge_triggered = vgic_irq_is_edge(vcpu, irq_num); |
1406 | is_level = !is_edge; | 1609 | level_triggered = !edge_triggered; |
1407 | 1610 | ||
1408 | if (!vgic_validate_injection(vcpu, irq_num, level)) { | 1611 | if (!vgic_validate_injection(vcpu, irq_num, level)) { |
1409 | ret = false; | 1612 | ret = false; |
@@ -1417,10 +1620,19 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
1417 | 1620 | ||
1418 | kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); | 1621 | kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); |
1419 | 1622 | ||
1420 | if (level) | 1623 | if (level) { |
1421 | vgic_dist_irq_set(vcpu, irq_num); | 1624 | if (level_triggered) |
1422 | else | 1625 | vgic_dist_irq_set_level(vcpu, irq_num); |
1423 | vgic_dist_irq_clear(vcpu, irq_num); | 1626 | vgic_dist_irq_set_pending(vcpu, irq_num); |
1627 | } else { | ||
1628 | if (level_triggered) { | ||
1629 | vgic_dist_irq_clear_level(vcpu, irq_num); | ||
1630 | if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) | ||
1631 | vgic_dist_irq_clear_pending(vcpu, irq_num); | ||
1632 | } else { | ||
1633 | vgic_dist_irq_clear_pending(vcpu, irq_num); | ||
1634 | } | ||
1635 | } | ||
1424 | 1636 | ||
1425 | enabled = vgic_irq_is_enabled(vcpu, irq_num); | 1637 | enabled = vgic_irq_is_enabled(vcpu, irq_num); |
1426 | 1638 | ||
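The split between irq_level and irq_pending in vgic_update_irq_pending() can be summarized per trigger type (a derivation from the code above, not text from the patch):

	edge,  level=1: accepted only if not already pending; irq_pending := 1
	edge,  level=0: rejected by vgic_validate_injection(), no state change
	level, level=1: accepted if irq_level was 0; irq_level := 1, irq_pending := 1
	level, level=0: accepted if irq_level was 1; irq_level := 0, and
	                irq_pending := 0 only if irq_soft_pend is clear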
@@ -1429,7 +1641,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
1429 | goto out; | 1641 | goto out; |
1430 | } | 1642 | } |
1431 | 1643 | ||
1432 | if (is_level && vgic_irq_is_active(vcpu, irq_num)) { | 1644 | if (!vgic_can_sample_irq(vcpu, irq_num)) { |
1433 | /* | 1645 | /* |
1434 | * Level interrupt in progress, will be picked up | 1646 | * Level interrupt in progress, will be picked up |
1435 | * when EOId. | 1647 | * when EOId. |
@@ -1440,7 +1652,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |||
1440 | 1652 | ||
1441 | if (level) { | 1653 | if (level) { |
1442 | vgic_cpu_irq_set(vcpu, irq_num); | 1654 | vgic_cpu_irq_set(vcpu, irq_num); |
1443 | set_bit(cpuid, &dist->irq_pending_on_cpu); | 1655 | set_bit(cpuid, dist->irq_pending_on_cpu); |
1444 | } | 1656 | } |
1445 | 1657 | ||
1446 | out: | 1658 | out: |
@@ -1466,7 +1678,8 @@ out: | |||
1466 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | 1678 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, |
1467 | bool level) | 1679 | bool level) |
1468 | { | 1680 | { |
1469 | if (vgic_update_irq_state(kvm, cpuid, irq_num, level)) | 1681 | if (likely(vgic_initialized(kvm)) && |
1682 | vgic_update_irq_pending(kvm, cpuid, irq_num, level)) | ||
1470 | vgic_kick_vcpus(kvm); | 1683 | vgic_kick_vcpus(kvm); |
1471 | 1684 | ||
1472 | return 0; | 1685 | return 0; |
@@ -1483,6 +1696,32 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) | |||
1483 | return IRQ_HANDLED; | 1696 | return IRQ_HANDLED; |
1484 | } | 1697 | } |
1485 | 1698 | ||
1699 | void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
1700 | { | ||
1701 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
1702 | |||
1703 | kfree(vgic_cpu->pending_shared); | ||
1704 | kfree(vgic_cpu->vgic_irq_lr_map); | ||
1705 | vgic_cpu->pending_shared = NULL; | ||
1706 | vgic_cpu->vgic_irq_lr_map = NULL; | ||
1707 | } | ||
1708 | |||
1709 | static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) | ||
1710 | { | ||
1711 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
1712 | |||
1713 | int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; | ||
1714 | vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); | ||
1715 | vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL); | ||
1716 | |||
1717 | if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) { | ||
1718 | kvm_vgic_vcpu_destroy(vcpu); | ||
1719 | return -ENOMEM; | ||
1720 | } | ||
1721 | |||
1722 | return 0; | ||
1723 | } | ||
1724 | |||
1486 | /** | 1725 | /** |
1487 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state | 1726 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state |
1488 | * @vcpu: pointer to the vcpu struct | 1727 | * @vcpu: pointer to the vcpu struct |
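The per-vcpu allocations in vgic_vcpu_init_maps() scale with the configured interrupt count; for instance, with nr_irqs == 128:

	pending_shared:  (128 - 32) / 8 = 12 bytes (one bit per SPI)
	vgic_irq_lr_map: 128 bytes (one LR index per interrupt)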
@@ -1490,16 +1729,13 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) | |||
1490 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to | 1729 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to |
1491 | * this vcpu and enable the VGIC for this VCPU | 1730 | * this vcpu and enable the VGIC for this VCPU |
1492 | */ | 1731 | */ |
1493 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | 1732 | static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
1494 | { | 1733 | { |
1495 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1734 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1496 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1735 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1497 | int i; | 1736 | int i; |
1498 | 1737 | ||
1499 | if (vcpu->vcpu_id >= VGIC_MAX_CPUS) | 1738 | for (i = 0; i < dist->nr_irqs; i++) { |
1500 | return -EBUSY; | ||
1501 | |||
1502 | for (i = 0; i < VGIC_NR_IRQS; i++) { | ||
1503 | if (i < VGIC_NR_PPIS) | 1739 | if (i < VGIC_NR_PPIS) |
1504 | vgic_bitmap_set_irq_val(&dist->irq_enabled, | 1740 | vgic_bitmap_set_irq_val(&dist->irq_enabled, |
1505 | vcpu->vcpu_id, i, 1); | 1741 | vcpu->vcpu_id, i, 1); |
@@ -1518,8 +1754,113 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
1518 | vgic_cpu->nr_lr = vgic->nr_lr; | 1754 | vgic_cpu->nr_lr = vgic->nr_lr; |
1519 | 1755 | ||
1520 | vgic_enable(vcpu); | 1756 | vgic_enable(vcpu); |
1757 | } | ||
1521 | 1758 | ||
1522 | return 0; | 1759 | void kvm_vgic_destroy(struct kvm *kvm) |
1760 | { | ||
1761 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
1762 | struct kvm_vcpu *vcpu; | ||
1763 | int i; | ||
1764 | |||
1765 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
1766 | kvm_vgic_vcpu_destroy(vcpu); | ||
1767 | |||
1768 | vgic_free_bitmap(&dist->irq_enabled); | ||
1769 | vgic_free_bitmap(&dist->irq_level); | ||
1770 | vgic_free_bitmap(&dist->irq_pending); | ||
1771 | vgic_free_bitmap(&dist->irq_soft_pend); | ||
1772 | vgic_free_bitmap(&dist->irq_queued); | ||
1773 | vgic_free_bitmap(&dist->irq_cfg); | ||
1774 | vgic_free_bytemap(&dist->irq_priority); | ||
1775 | if (dist->irq_spi_target) { | ||
1776 | for (i = 0; i < dist->nr_cpus; i++) | ||
1777 | vgic_free_bitmap(&dist->irq_spi_target[i]); | ||
1778 | } | ||
1779 | kfree(dist->irq_sgi_sources); | ||
1780 | kfree(dist->irq_spi_cpu); | ||
1781 | kfree(dist->irq_spi_target); | ||
1782 | kfree(dist->irq_pending_on_cpu); | ||
1783 | dist->irq_sgi_sources = NULL; | ||
1784 | dist->irq_spi_cpu = NULL; | ||
1785 | dist->irq_spi_target = NULL; | ||
1786 | dist->irq_pending_on_cpu = NULL; | ||
1787 | } | ||
1788 | |||
1789 | /* | ||
1790 | * Allocate and initialize the various data structures. Must be called | ||
1791 | * with kvm->lock held! | ||
1792 | */ | ||
1793 | static int vgic_init_maps(struct kvm *kvm) | ||
1794 | { | ||
1795 | struct vgic_dist *dist = &kvm->arch.vgic; | ||
1796 | struct kvm_vcpu *vcpu; | ||
1797 | int nr_cpus, nr_irqs; | ||
1798 | int ret, i; | ||
1799 | |||
1800 | if (dist->nr_cpus) /* Already allocated */ | ||
1801 | return 0; | ||
1802 | |||
1803 | nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus); | ||
1804 | if (!nr_cpus) /* No vcpus? Can't be good... */ | ||
1805 | return -EINVAL; | ||
1806 | |||
1807 | /* | ||
1808 | * If nobody configured the number of interrupts, use the | ||
1809 | * legacy one. | ||
1810 | */ | ||
1811 | if (!dist->nr_irqs) | ||
1812 | dist->nr_irqs = VGIC_NR_IRQS_LEGACY; | ||
1813 | |||
1814 | nr_irqs = dist->nr_irqs; | ||
1815 | |||
1816 | ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs); | ||
1817 | ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs); | ||
1818 | ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs); | ||
1819 | ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs); | ||
1820 | ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs); | ||
1821 | ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs); | ||
1822 | ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs); | ||
1823 | |||
1824 | if (ret) | ||
1825 | goto out; | ||
1826 | |||
1827 | dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL); | ||
1828 | dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL); | ||
1829 | dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus, | ||
1830 | GFP_KERNEL); | ||
1831 | dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long), | ||
1832 | GFP_KERNEL); | ||
1833 | if (!dist->irq_sgi_sources || | ||
1834 | !dist->irq_spi_cpu || | ||
1835 | !dist->irq_spi_target || | ||
1836 | !dist->irq_pending_on_cpu) { | ||
1837 | ret = -ENOMEM; | ||
1838 | goto out; | ||
1839 | } | ||
1840 | |||
1841 | for (i = 0; i < nr_cpus; i++) | ||
1842 | ret |= vgic_init_bitmap(&dist->irq_spi_target[i], | ||
1843 | nr_cpus, nr_irqs); | ||
1844 | |||
1845 | if (ret) | ||
1846 | goto out; | ||
1847 | |||
1848 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
1849 | ret = vgic_vcpu_init_maps(vcpu, nr_irqs); | ||
1850 | if (ret) { | ||
1851 | kvm_err("VGIC: Failed to allocate vcpu memory\n"); | ||
1852 | break; | ||
1853 | } | ||
1854 | } | ||
1855 | |||
1856 | for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4) | ||
1857 | vgic_set_target_reg(kvm, 0, i); | ||
1858 | |||
1859 | out: | ||
1860 | if (ret) | ||
1861 | kvm_vgic_destroy(kvm); | ||
1862 | |||
1863 | return ret; | ||
1523 | } | 1864 | } |
1524 | 1865 | ||
1525 | /** | 1866 | /** |
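For a sense of scale, with two vcpus and the legacy default interrupt count (VGIC_NR_IRQS_LEGACY, assumed 256 here), vgic_init_maps() allocates per VM roughly:

	irq_sgi_sources:    2 * 16 = 32 bytes (one source byte per SGI per vcpu)
	irq_spi_cpu:        256 - 32 = 224 bytes (one target byte per SPI)
	irq_spi_target:     2 vgic_bitmaps of 2 + BITS_TO_LONGS(224) = 6 longs each
	irq_pending_on_cpu: BITS_TO_LONGS(2) = 1 unsigned long

(long counts assume BITS_PER_LONG == 64).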
@@ -1533,6 +1874,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
1533 | */ | 1874 | */ |
1534 | int kvm_vgic_init(struct kvm *kvm) | 1875 | int kvm_vgic_init(struct kvm *kvm) |
1535 | { | 1876 | { |
1877 | struct kvm_vcpu *vcpu; | ||
1536 | int ret = 0, i; | 1878 | int ret = 0, i; |
1537 | 1879 | ||
1538 | if (!irqchip_in_kernel(kvm)) | 1880 | if (!irqchip_in_kernel(kvm)) |
@@ -1550,6 +1892,12 @@ int kvm_vgic_init(struct kvm *kvm) | |||
1550 | goto out; | 1892 | goto out; |
1551 | } | 1893 | } |
1552 | 1894 | ||
1895 | ret = vgic_init_maps(kvm); | ||
1896 | if (ret) { | ||
1897 | kvm_err("Unable to allocate maps\n"); | ||
1898 | goto out; | ||
1899 | } | ||
1900 | |||
1553 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | 1901 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, |
1554 | vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); | 1902 | vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); |
1555 | if (ret) { | 1903 | if (ret) { |
@@ -1557,11 +1905,13 @@ int kvm_vgic_init(struct kvm *kvm) | |||
1557 | goto out; | 1905 | goto out; |
1558 | } | 1906 | } |
1559 | 1907 | ||
1560 | for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4) | 1908 | kvm_for_each_vcpu(i, vcpu, kvm) |
1561 | vgic_set_target_reg(kvm, 0, i); | 1909 | kvm_vgic_vcpu_init(vcpu); |
1562 | 1910 | ||
1563 | kvm->arch.vgic.ready = true; | 1911 | kvm->arch.vgic.ready = true; |
1564 | out: | 1912 | out: |
1913 | if (ret) | ||
1914 | kvm_vgic_destroy(kvm); | ||
1565 | mutex_unlock(&kvm->lock); | 1915 | mutex_unlock(&kvm->lock); |
1566 | return ret; | 1916 | return ret; |
1567 | } | 1917 | } |
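
The reworked kvm_vgic_init() above is a locked, run-once initializer: it bails out early if already done, only marks the vgic ready after every step succeeded, and on failure tears down partial state via kvm_vgic_destroy() so the next caller can retry. Here is a user-space analogue of that pattern, a sketch only — the demo_* names are invented and the resource step is a stub.

#include <pthread.h>

struct demo_vgic {
    pthread_mutex_t lock;  /* init with PTHREAD_MUTEX_INITIALIZER */
    int ready;
};

static int demo_map_resources(struct demo_vgic *v) { (void)v; return 0; }
static void demo_teardown(struct demo_vgic *v) { (void)v; /* free partial state */ }

static int demo_init_once(struct demo_vgic *v)
{
    int ret = 0;

    pthread_mutex_lock(&v->lock);
    if (v->ready)              /* already initialized: idempotent */
        goto out;

    ret = demo_map_resources(v);
    if (ret) {
        demo_teardown(v);      /* undo partial init, allow a retry */
        goto out;
    }
    v->ready = 1;              /* only set once everything succeeded */
out:
    pthread_mutex_unlock(&v->lock);
    return ret;
}
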
@@ -1613,7 +1963,7 @@ out: | |||
1613 | return ret; | 1963 | return ret; |
1614 | } | 1964 | } |
1615 | 1965 | ||
1616 | static bool vgic_ioaddr_overlap(struct kvm *kvm) | 1966 | static int vgic_ioaddr_overlap(struct kvm *kvm) |
1617 | { | 1967 | { |
1618 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; | 1968 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; |
1619 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; | 1969 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; |
@@ -1802,6 +2152,10 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
1802 | 2152 | ||
1803 | mutex_lock(&dev->kvm->lock); | 2153 | mutex_lock(&dev->kvm->lock); |
1804 | 2154 | ||
2155 | ret = vgic_init_maps(dev->kvm); | ||
2156 | if (ret) | ||
2157 | goto out; | ||
2158 | |||
1805 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | 2159 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { |
1806 | ret = -EINVAL; | 2160 | ret = -EINVAL; |
1807 | goto out; | 2161 | goto out; |
@@ -1899,6 +2253,36 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1899 | 2253 | ||
1900 | return vgic_attr_regs_access(dev, attr, ®, true); | 2254 | return vgic_attr_regs_access(dev, attr, ®, true); |
1901 | } | 2255 | } |
2256 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { | ||
2257 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
2258 | u32 val; | ||
2259 | int ret = 0; | ||
2260 | |||
2261 | if (get_user(val, uaddr)) | ||
2262 | return -EFAULT; | ||
2263 | |||
2264 | /* | ||
2265 | * We require: | ||
2266 | * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs | ||
2267 | * - at most 1024 interrupts | ||
2268 | * - a multiple of 32 interrupts | ||
2269 | */ | ||
2270 | if (val < (VGIC_NR_PRIVATE_IRQS + 32) || | ||
2271 | val > VGIC_MAX_IRQS || | ||
2272 | (val & 31)) | ||
2273 | return -EINVAL; | ||
2274 | |||
2275 | mutex_lock(&dev->kvm->lock); | ||
2276 | |||
2277 | if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs) | ||
2278 | ret = -EBUSY; | ||
2279 | else | ||
2280 | dev->kvm->arch.vgic.nr_irqs = val; | ||
2281 | |||
2282 | mutex_unlock(&dev->kvm->lock); | ||
2283 | |||
2284 | return ret; | ||
2285 | } | ||
1902 | 2286 | ||
1903 | } | 2287 | } |
1904 | 2288 | ||
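
The new KVM_DEV_ARM_VGIC_GRP_NR_IRQS group above lets userspace size the distributor before the vgic goes live: with VGIC_NR_PRIVATE_IRQS being 32, the checks accept totals from 64 to 1024 in multiples of 32, and return -EBUSY once nr_irqs is fixed — which also happens implicitly, since the vgic_attr_regs_access() hunk earlier allocates the maps on first register access. A hedged sketch of the userspace side, assuming an arm/arm64 uapi that exports KVM_DEV_ARM_VGIC_GRP_NR_IRQS and a vgic_fd obtained from KVM_CREATE_DEVICE with type KVM_DEV_TYPE_ARM_VGIC_V2:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_nr_irqs(int vgic_fd, __u32 nr_irqs)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
        .attr  = 0,
        .addr  = (__u64)(unsigned long)&nr_irqs,
    };

    /* the kernel rejects values outside 64..1024 or not a multiple
     * of 32 with -EINVAL, and reconfiguration with -EBUSY */
    return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

For instance, set_nr_irqs(vgic_fd, 128) would reserve the 32 private interrupts plus 96 SPIs per the constraints quoted in the comment above.
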
@@ -1935,6 +2319,11 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1935 | r = put_user(reg, uaddr); | 2319 | r = put_user(reg, uaddr); |
1936 | break; | 2320 | break; |
1937 | } | 2321 | } |
2322 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: { | ||
2323 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
2324 | r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr); | ||
2325 | break; | ||
2326 | } | ||
1938 | 2327 | ||
1939 | } | 2328 | } |
1940 | 2329 | ||
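
The get side above simply mirrors the setter. Reading it back returns whatever nr_irqs holds at that point: 0 before any configuration, the value userspace set, or the legacy default once vgic_init_maps() has filled it in. A sketch of the counterpart call, under the same assumptions as set_nr_irqs() above:

static int get_nr_irqs(int vgic_fd, __u32 *nr_irqs)
{
    struct kvm_device_attr attr = {
        .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
        .addr  = (__u64)(unsigned long)nr_irqs,
    };

    return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}
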
@@ -1971,6 +2360,8 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
1971 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | 2360 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: |
1972 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | 2361 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; |
1973 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | 2362 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); |
2363 | case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: | ||
2364 | return 0; | ||
1974 | } | 2365 | } |
1975 | return -ENXIO; | 2366 | return -ENXIO; |
1976 | } | 2367 | } |
@@ -2029,8 +2420,8 @@ static const struct of_device_id vgic_ids[] = { | |||
2029 | int kvm_vgic_hyp_init(void) | 2420 | int kvm_vgic_hyp_init(void) |
2030 | { | 2421 | { |
2031 | const struct of_device_id *matched_id; | 2422 | const struct of_device_id *matched_id; |
2032 | int (*vgic_probe)(struct device_node *,const struct vgic_ops **, | 2423 | const int (*vgic_probe)(struct device_node *,const struct vgic_ops **, |
2033 | const struct vgic_params **); | 2424 | const struct vgic_params **); |
2034 | struct device_node *vgic_node; | 2425 | struct device_node *vgic_node; |
2035 | int ret; | 2426 | int ret; |
2036 | 2427 | ||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1cf53ee0d28..39a02fbdb572 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1095,9 +1095,9 @@ EXPORT_SYMBOL_GPL(gfn_to_hva); | |||
1095 | * If writable is set to false, the hva returned by this function is only | 1095 | * If writable is set to false, the hva returned by this function is only |
1096 | * allowed to be read. | 1096 | * allowed to be read. |
1097 | */ | 1097 | */ |
1098 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | 1098 | unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, |
1099 | gfn_t gfn, bool *writable) | ||
1099 | { | 1100 | { |
1100 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | ||
1101 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); | 1101 | unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); |
1102 | 1102 | ||
1103 | if (!kvm_is_error_hva(hva) && writable) | 1103 | if (!kvm_is_error_hva(hva) && writable) |
@@ -1106,6 +1106,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | |||
1106 | return hva; | 1106 | return hva; |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) | ||
1110 | { | ||
1111 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | ||
1112 | |||
1113 | return gfn_to_hva_memslot_prot(slot, gfn, writable); | ||
1114 | } | ||
1115 | |||
1109 | static int kvm_read_hva(void *data, void __user *hva, int len) | 1116 | static int kvm_read_hva(void *data, void __user *hva, int len) |
1110 | { | 1117 | { |
1111 | return __copy_from_user(data, hva, len); | 1118 | return __copy_from_user(data, hva, len); |
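
The kvm_main.c change above splits the slot lookup out of gfn_to_hva_prot(), leaving it a thin wrapper: callers that already resolved the memslot (as the arm/arm64 stage-2 fault path does when honoring read-only slots) can call gfn_to_hva_memslot_prot() directly and skip a second gfn_to_memslot() walk, while still learning whether the hva may be written. A kernel-context sketch of such a caller — not buildable on its own, and the error values are demo policy, not KVM's:

/* resolve the memslot once, then get hva + writability in one step */
static int demo_resolve_write_fault(struct kvm *kvm, gfn_t gfn)
{
    struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
    bool writable;
    unsigned long hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

    if (kvm_is_error_hva(hva))
        return -EFAULT;  /* gfn not backed by any memslot */
    if (!writable)
        return -EPERM;   /* read-only memslot: forward the fault, don't fix up */
    return 0;
}
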