author	Christoffer Dall <christoffer.dall@linaro.org>	2016-08-16 10:48:20 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-09-08 06:53:00 -0400
commit	ba7b9169b52038ce017782479c6a24cd37f34ae5 (patch)
tree	8557f62c3312667d99e429285f0ecb26d7e9ba12 /virt/kvm
parent	714848026531043ae06ca7bcd4a852c8bb8348c7 (diff)
KVM: arm/arm64: Factor out vgic_attr_regs_access functionality
As we are about to deal with multiple data types and situations where the vgic should not be initialized when doing userspace accesses on the register attributes, factor out the functionality of vgic_attr_regs_access into smaller bits which can be reused by a new function later.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
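The split leaves three reusable pieces: parse_vgic_v2_attr() decodes the vcpu/offset encoding from the attr field, while lock_all_vcpus()/unlock_all_vcpus() stop and resume all VCPUs around the access. A minimal sketch of how a later accessor could compose them, mirroring the restructured vgic_attr_regs_access() in the diff below; the function name and the u64 register type here are hypothetical, not part of this patch:

static int example_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    u64 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	int ret;

	/* decode the target vcpu and register offset from attr->attr */
	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	mutex_lock(&dev->kvm->lock);

	/* stop all VCPUs so nothing races with the vgic state */
	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	/* ... perform the access at reg_attr.addr on reg_attr.vcpu ... */

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}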
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/arm/vgic/vgic-kvm-device.c | 100
1 file changed, 73 insertions(+), 27 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93b5cde..19fa331f2d11 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -233,6 +233,67 @@ int kvm_register_vgic_device(unsigned long type)
 	return ret;
 }
 
+struct vgic_reg_attr {
+	struct kvm_vcpu *vcpu;
+	gpa_t addr;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+			      struct kvm_device_attr *attr,
+			      struct vgic_reg_attr *reg_attr)
+{
+	int cpuid;
+
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+		return -EINVAL;
+
+	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+	struct kvm_vcpu *tmp_vcpu;
+
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+	struct kvm_vcpu *tmp_vcpu;
+	int c;
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run and fiddle with the vgic state while we
+	 * access it.
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+		if (!mutex_trylock(&tmp_vcpu->mutex)) {
+			unlock_vcpus(kvm, c - 1);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /** vgic_attr_regs_access: allows user space to read/write VGIC registers
  *
  * @dev:      kvm device handle
@@ -245,15 +306,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 				 struct kvm_device_attr *attr,
 				 u32 *reg, bool is_write)
 {
+	struct vgic_reg_attr reg_attr;
 	gpa_t addr;
-	int cpuid, ret, c;
-	struct kvm_vcpu *vcpu, *tmp_vcpu;
-	int vcpu_lock_idx = -1;
+	struct kvm_vcpu *vcpu;
+	int ret;
 
-	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
-	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
+	if (ret)
+		return ret;
+
+	vcpu = reg_attr.vcpu;
+	addr = reg_attr.addr;
 
 	mutex_lock(&dev->kvm->lock);
 
@@ -261,24 +324,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	if (ret)
 		goto out;
 
-	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
-		ret = -EINVAL;
+	if (!lock_all_vcpus(dev->kvm)) {
+		ret = -EBUSY;
 		goto out;
 	}
 
-	/*
-	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
-	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
-	 * that no other VCPUs are run and fiddle with the vgic state while we
-	 * access it.
-	 */
-	ret = -EBUSY;
-	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-		if (!mutex_trylock(&tmp_vcpu->mutex))
-			goto out;
-		vcpu_lock_idx = c;
-	}
-
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
@@ -291,12 +341,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
+	unlock_all_vcpus(dev->kvm);
 out:
-	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
-		mutex_unlock(&tmp_vcpu->mutex);
-	}
-
 	mutex_unlock(&dev->kvm->lock);
 	return ret;
 }
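For reference, this function sits behind the generic KVM device-attribute ioctls. A hedged userspace sketch of the call it services (reading one GICv2 distributor register), assuming an arm/arm64 build and a vgic_fd previously obtained via KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_V2; error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vgic_v2_read_dist_reg(int vgic_fd, uint64_t cpuid,
				 uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		/*
		 * attr packs the target VCPU id and the register offset,
		 * exactly what parse_vgic_v2_attr() decodes kernel-side.
		 */
		.attr  = (cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		/* the kernel copies the 32-bit value through this pointer */
		.addr  = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

On failure the ioctl returns -1 with errno set; EBUSY corresponds to lock_all_vcpus() failing to stop every VCPU, and EINVAL to an out-of-range cpuid rejected by parse_vgic_v2_attr().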