Diffstat (limited to 'virt/kvm/arm/vgic/vgic-mmio.c')
 -rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index c1e4bdd66131..deb51ee16a3d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -69,13 +69,14 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
+	unsigned long flags;
 
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->enabled = true;
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
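A note on the recurring pattern above: the handler now takes the per-IRQ lock
with spin_lock_irqsave() and, on the queueing path, passes the saved interrupt
state to vgic_queue_irq_unlock(). The caller never unlocks afterwards, so the
helper (as its name and the new flags argument suggest) is expected to drop
irq_lock with spin_unlock_irqrestore() on every path. A condensed sketch of
the contract, assuming exactly that behavior:

	unsigned long flags;

	/* Disable local interrupts, remembering whether they were on. */
	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->enabled = true;

	/*
	 * Ownership of irq_lock and of 'flags' passes to the helper,
	 * which releases the lock with spin_unlock_irqrestore() before
	 * returning. The caller must not unlock again.
	 */
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);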
@@ -87,15 +88,16 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
+	unsigned long flags;
 
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->enabled = false;
 
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -126,14 +128,15 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
+	unsigned long flags;
 
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = true;
 
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -144,15 +147,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
+	unsigned long flags;
 
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->pending_latch = false;
 
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
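The clear-side handlers (cenable above, cpending here, and the priority and
config writers below) never queue the interrupt, so their conversion is a
plain symmetric pair and nothing changes hands:

	spin_lock_irqsave(&irq->irq_lock, flags);	/* save state, IRQs off */
	irq->pending_latch = false;			/* mutate under the lock */
	spin_unlock_irqrestore(&irq->irq_lock, flags);	/* restore saved state */
	vgic_put_irq(vcpu->kvm, irq);			/* drop the reference */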
@@ -181,7 +185,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool new_active_state)
 {
 	struct kvm_vcpu *requester_vcpu;
-	spin_lock(&irq->irq_lock);
+	unsigned long flags;
+	spin_lock_irqsave(&irq->irq_lock, flags);
 
 	/*
 	 * The vcpu parameter here can mean multiple things depending on how
@@ -216,9 +221,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 
 	irq->active = new_active_state;
 	if (new_active_state)
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 	else
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
 /*
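vgic_mmio_change_active() combines both patterns: which path releases the
lock depends on new_active_state, so ownership of 'flags' is conditional.
The tail of the function now reads:

	irq->active = new_active_state;
	if (new_active_state)
		/* Queueing path: the helper unlocks and restores flags. */
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		/* No queueing: restore the saved state here instead. */
		spin_unlock_irqrestore(&irq->irq_lock, flags);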
@@ -352,14 +357,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
 	int i;
+	unsigned long flags;
 
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -390,6 +396,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
 	int i;
+	unsigned long flags;
 
 	for (i = 0; i < len * 4; i++) {
 		struct vgic_irq *irq;
@@ -404,14 +411,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 			continue;
 
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (test_bit(i * 2 + 1, &val))
 			irq->config = VGIC_CONFIG_EDGE;
 		else
 			irq->config = VGIC_CONFIG_LEVEL;
 
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -443,6 +450,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
 {
 	int i;
 	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+	unsigned long flags;
 
 	for (i = 0; i < 32; i++) {
 		struct vgic_irq *irq;
@@ -459,12 +467,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
 		 * restore irq config before line level.
 		 */
 		new_level = !!(val & (1U << i));
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->line_level = new_level;
 		if (new_level)
-			vgic_queue_irq_unlock(vcpu->kvm, irq);
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		else
-			spin_unlock(&irq->irq_lock);
+			spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
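Why _irqsave rather than the plain spin_lock() that was there before? This
page does not say, but the conversion is the standard one for making a lock
safe to take from contexts that may already run with interrupts disabled,
such as hardirq handlers: spin_unlock_irqrestore() puts the interrupt flag
back exactly as it was found instead of unconditionally re-enabling it. A
generic illustration with a hypothetical lock, not taken from this file:

	static DEFINE_SPINLOCK(lock);
	unsigned long flags;

	local_irq_disable();			/* caller already has IRQs off */

	spin_lock_irqsave(&lock, flags);	/* records "IRQs were off" */
	/* ... critical section ... */
	spin_unlock_irqrestore(&lock, flags);	/* IRQs stay off, as found */

	/*
	 * By contrast, spin_unlock_irq() would re-enable interrupts here
	 * unconditionally, corrupting the caller's context.
	 */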