Diffstat (limited to 'virt')
 -rw-r--r--  virt/kvm/arm/arm.c                 10
 -rw-r--r--  virt/kvm/arm/mmu.c                  9
 -rw-r--r--  virt/kvm/arm/psci.c                36
 -rw-r--r--  virt/kvm/arm/vgic/vgic-debug.c      4
 -rw-r--r--  virt/kvm/arm/vgic/vgic-init.c      30
 -rw-r--r--  virt/kvm/arm/vgic/vgic-its.c       22
 -rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v2.c   14
 -rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v3.c   12
 -rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c      34
 -rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c         4
 -rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c         8
 -rw-r--r--  virt/kvm/arm/vgic/vgic.c          118
12 files changed, 158 insertions, 143 deletions
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9e350fd34504..9c486fad3f9f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 		/* Awaken to handle a signal, request we sleep again later. */
 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
 	}
+
+	/*
+	 * Make sure we will observe a potential reset request if we've
+	 * observed a change to the power state. Pairs with the smp_wmb() in
+	 * kvm_psci_vcpu_on().
+	 */
+	smp_rmb();
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 			vcpu_req_sleep(vcpu);
 
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_reset_vcpu(vcpu);
+
 		/*
 		 * Clear IRQ_PENDING requests that were made to guarantee
 		 * that a VCPU sees new virtual interrupts.
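The smp_rmb() added above pairs with the smp_wmb() that the psci.c hunk further down adds to kvm_psci_vcpu_on(): a vCPU that observes the power-state change must also observe the reset request made before it. Below is a minimal, compilable userspace model of that pairing using C11 fences; every name in it is illustrative, standing in for the kernel's WRITE_ONCE()/smp_wmb()/smp_rmb() and KVM request machinery, not the real API.

/* Sketch only: models the publish (psci_cpu_on) / consume (vcpu_wakes)
 * ordering with C11 atomics; build with -std=c11. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool reset_requested;	/* stands in for reset_state->reset */
static atomic_bool powered_on;		/* stands in for !vcpu->arch.power_off */

static void psci_cpu_on(void)		/* caller side, cf. kvm_psci_vcpu_on() */
{
	atomic_store_explicit(&reset_requested, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* cf. smp_wmb() */
	atomic_store_explicit(&powered_on, true, memory_order_relaxed);
}

static bool vcpu_wakes(void)		/* target side, cf. vcpu_req_sleep() */
{
	if (!atomic_load_explicit(&powered_on, memory_order_relaxed))
		return false;				/* still off, sleep again */
	atomic_thread_fence(memory_order_acquire);	/* cf. smp_rmb() */
	/* if powered_on was seen, reset_requested is guaranteed visible */
	return atomic_load_explicit(&reset_requested, memory_order_relaxed);
}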
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index fbdf3ac2f001..30251e288629 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	vma_pagesize = vma_kernel_pagesize(vma);
 	/*
-	 * PUD level may not exist for a VM but PMD is guaranteed to
-	 * exist.
+	 * The stage2 has a minimum of 2 level table (For arm64 see
+	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
+	 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
+	 * As for PUD huge maps, we must make sure that we have at least
+	 * 3 levels, i.e., PMD is not folded.
 	 */
 	if ((vma_pagesize == PMD_SIZE ||
-	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
+	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
 	    !force_pte) {
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
 	}
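For orientation: with 4K pages, PMD_SIZE is 2MiB and PUD_SIZE is 1GiB. A two-level stage2 walk still provides PMD block mappings (the PMD is folded into the PGD), but a 1GiB block needs a real PUD entry and therefore at least three levels. A compilable sketch of the size arithmetic behind the guard, with generic constants and a hypothetical level count standing in for the kernel's kvm_stage2_has_pmd():

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (1UL << 21)	/* 2 MiB block, 4K pages assumed */
#define PUD_SIZE (1UL << 30)	/* 1 GiB block, 4K pages assumed */

/* Illustrative stand-in for the check in user_mem_abort(). */
static bool can_use_block_mapping(unsigned long vma_pagesize,
				  int stage2_levels, bool force_pte)
{
	if (force_pte)
		return false;
	if (vma_pagesize == PMD_SIZE)	/* >= 2 levels always hold */
		return true;
	return vma_pagesize == PUD_SIZE && stage2_levels >= 3;
}

int main(void)
{
	printf("2M, 2 levels: %d\n", can_use_block_mapping(PMD_SIZE, 2, false));
	printf("1G, 2 levels: %d\n", can_use_block_mapping(PUD_SIZE, 2, false));
	printf("1G, 3 levels: %d\n", can_use_block_mapping(PUD_SIZE, 3, false));
	return 0;
}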
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index 9b73d3ad918a..34d08ee63747 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
+	struct vcpu_reset_state *reset_state;
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL;
-	struct swait_queue_head *wq;
 	unsigned long cpu_id;
-	unsigned long context_id;
-	phys_addr_t target_pc;
 
 	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = smccc_get_arg2(source_vcpu);
-	context_id = smccc_get_arg3(source_vcpu);
+	reset_state = &vcpu->arch.reset_state;
 
-	kvm_reset_vcpu(vcpu);
-
-	/* Gracefully handle Thumb2 entry point */
-	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
-		target_pc &= ~((phys_addr_t) 1);
-		vcpu_set_thumb(vcpu);
-	}
+	reset_state->pc = smccc_get_arg2(source_vcpu);
 
 	/* Propagate caller endianness */
-	if (kvm_vcpu_is_be(source_vcpu))
-		kvm_vcpu_set_be(vcpu);
+	reset_state->be = kvm_vcpu_is_be(source_vcpu);
 
-	*vcpu_pc(vcpu) = target_pc;
 	/*
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general purpose registers are undefined upon CPU_ON.
 	 */
-	smccc_set_retval(vcpu, context_id, 0, 0, 0);
-	vcpu->arch.power_off = false;
-	smp_mb();		/* Make sure the above is visible */
+	reset_state->r0 = smccc_get_arg3(source_vcpu);
+
+	WRITE_ONCE(reset_state->reset, true);
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
 
-	wq = kvm_arch_vcpu_wq(vcpu);
-	swake_up_one(wq);
+	/*
+	 * Make sure the reset request is observed if the change to
+	 * power_state is observed.
+	 */
+	smp_wmb();
+
+	vcpu->arch.power_off = false;
+	kvm_vcpu_wake_up(vcpu);
 
 	return PSCI_RET_SUCCESS;
 }
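Taken together, the psci.c and arm.c hunks turn CPU_ON into a deferred reset: the caller records the entry point, context id and endianness in the target's reset_state, raises KVM_REQ_VCPU_RESET, and only then powers the target on; the target applies the reset to itself from check_vcpu_requests(). A stripped-down sketch of that handshake (field and function names are illustrative, not the kernel's):

#include <stdbool.h>

struct reset_state {
	unsigned long pc;	/* entry point from CPU_ON arg2 */
	unsigned long r0;	/* PSCI context_id, lands in r0/x0 */
	bool be;		/* propagate caller endianness */
	bool pending;		/* cf. WRITE_ONCE(reset_state->reset, true) */
};

struct vcpu { struct reset_state reset; };

/* Caller side: record the state, then request the reset (the barriers
 * and the wake-up are elided here; see the hunks above). */
static void cpu_on(struct vcpu *target, unsigned long pc, unsigned long ctx,
		   bool caller_is_be)
{
	target->reset.pc = pc;
	target->reset.r0 = ctx;
	target->reset.be = caller_is_be;
	target->reset.pending = true;
}

/* Target side: runs in the target's own thread before guest entry,
 * cf. the KVM_REQ_VCPU_RESET branch added to check_vcpu_requests(). */
static void handle_requests(struct vcpu *me)
{
	if (me->reset.pending) {
		me->reset.pending = false;
		/* apply pc/r0/be to the registers, i.e. kvm_reset_vcpu() */
	}
}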
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		return 0;
 	}
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	vgic_put_irq(kvm, irq);
 	return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..3bdb31eaed64 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
 	INIT_LIST_HEAD(&dist->lpi_list_head);
-	spin_lock_init(&dist->lpi_list_lock);
+	raw_spin_lock_init(&dist->lpi_list_lock);
 }
 
 /* CREATION */
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 
 		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
+	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
 	/*
 	 * Enable and configure all SGIs to be edge-triggered and
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
 
 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
 
-		/*
-		 * GICv3 can only be created via the KVM_DEVICE_CREATE API and
-		 * so we always know the emulation type at this point as it's
-		 * either explicitly configured as GICv3, or explicitly
-		 * configured as GICv2, or not configured yet which also
-		 * implies GICv2.
-		 */
 		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
 			irq->group = 1;
 		else
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
-	int ret = 0, i;
+	int ret = 0, i, idx;
 
 	if (vgic_initialized(kvm))
 		return 0;
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm)
 	if (ret)
 		goto out;
 
+	/* Initialize groups on CPUs created before the VGIC type was known */
+	kvm_for_each_vcpu(idx, vcpu, kvm) {
+		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+				irq->group = 1;
+			else
+				irq->group = 0;
+		}
+	}
+
 	if (vgic_has_its(kvm)) {
 		ret = vgic_v4_init(kvm);
 		if (ret)
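The vgic-init.c change makes group initialization two-phase: kvm_vgic_vcpu_init() can run before userspace has chosen a GIC model, so the group it assigns is only provisional, and vgic_init() re-walks every vCPU's private IRQs once the model is fixed. A small sketch of the pattern, with illustrative types in place of struct vgic_cpu and dist->vgic_model:

enum gic_model { GIC_UNKNOWN, GIC_V2, GIC_V3 };

#define NR_PRIVATE_IRQS 32

struct virq { int group; };

/* Phase 1, per-vCPU init: the model may still be GIC_UNKNOWN. */
static void vcpu_init_irqs(struct virq irqs[NR_PRIVATE_IRQS])
{
	for (int i = 0; i < NR_PRIVATE_IRQS; i++)
		irqs[i].group = 0;	/* provisional GICv2-style default */
}

/* Phase 2, vgic_init(): the model is now fixed, finalize the groups. */
static void vgic_finalize_groups(struct virq irqs[NR_PRIVATE_IRQS],
				 enum gic_model model)
{
	for (int i = 0; i < NR_PRIVATE_IRQS; i++)
		irqs[i].group = (model == GIC_V3) ? 1 : 0;
}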
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..ab3f47745d9c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
-	spin_lock_init(&irq->irq_lock);
+	raw_spin_lock_init(&irq->irq_lock);
 
 	irq->config = VGIC_CONFIG_EDGE;
 	kref_init(&irq->refcount);
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	irq->target_vcpu = vcpu;
 	irq->group = 1;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * There could be a race with another vgic_add_lpi(), so we need to
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	dist->lpi_list_count++;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We "cache" the configuration table entries in our struct vgic_irq's.
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
 		irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw)
 		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	if (!intids)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (i == irq_count)
 			break;
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 			continue;
 		intids[i++] = irq->intid;
 	}
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	*intid_ptr = intids;
 	return i;
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 	int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		}
 
 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = pendmask & (1U << bit_nr);
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 		return irq_set_irqchip_state(irq->host_irq,
 					     IRQCHIP_STATE_PENDING, true);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
 
 	irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	irq->source |= 1U << source_vcpu->vcpu_id;
 
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
 		int target;
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->targets = (val >> (i * 8)) & cpu_mask;
 		target = irq->targets ? __ffs(irq->targets) : 0;
 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->source &= ~((val >> (i * 8)) & 0xff);
 		if (!irq->source)
 			irq->pending_latch = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->source |= (val >> (i * 8)) & 0xff;
 
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 		vgic_put_irq(vcpu->kvm, irq);
 	}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
 	if (!irq)
 		return;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	/* We only care about and preserve Aff0, Aff1 and Aff2. */
 	irq->mpidr = val & GENMASK(23, 0);
 	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
 
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 }
 
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (test_bit(i, &val)) {
 			/*
 			 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
 			irq->pending_latch = false;
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 
 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		/*
 		 * An access targeting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 
 		vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->group = !!(val & BIT(i));
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->enabled = true;
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->enabled = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		unsigned long flags;
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
 		else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (irq->hw)
 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
 		else
 			irq->pending_latch = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	unsigned long flags;
 	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 	else
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
 /*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 			continue;
 
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (test_bit(i * 2 + 1, &val))
 			irq->config = VGIC_CONFIG_EDGE;
 		else
 			irq->config = VGIC_CONFIG_LEVEL;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
 		 * restore irq config before line level.
 		 */
 		new_level = !!(val & (1U << i));
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->line_level = new_level;
 		if (new_level)
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		else
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		if (!irq)	/* An LPI could have been unmapped. */
 			continue;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
@@ -347,9 +347,9 @@ retry:
 
 	status = val & (1 << bit_nr);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->target_vcpu != vcpu) {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		goto retry;
 	}
 	irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
 
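The comment above fixes a total order on ap_list_locks (lowest vcpu_id first), which vgic_prune_ap_list() further down follows whenever it must hold two of them at once. A self-contained pthread sketch of the same deadlock-avoidance pattern; types and names are illustrative, not the kernel's:

#include <pthread.h>

struct vcpu {
	int vcpu_id;
	pthread_mutex_t ap_list_lock;	/* assumed pthread_mutex_init()ed */
};

/* Always acquire the lower-numbered vCPU's lock first: since every
 * thread agrees on the order, the classic AB/BA deadlock cannot occur. */
static void lock_pair(struct vcpu *x, struct vcpu *y)
{
	struct vcpu *first = (x->vcpu_id < y->vcpu_id) ? x : y;
	struct vcpu *second = (x->vcpu_id < y->vcpu_id) ? y : x;

	pthread_mutex_lock(&first->ap_list_lock);
	pthread_mutex_lock(&second->ap_list_lock);	/* cf. raw_spin_lock_nested() */
}

static void unlock_pair(struct vcpu *x, struct vcpu *y)
{
	pthread_mutex_unlock(&x->ap_list_lock);		/* release order is unconstrained */
	pthread_mutex_unlock(&y->ap_list_lock);
}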
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	struct vgic_irq *irq = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		if (irq->intid != intid)
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 	irq = NULL;
 
 out_unlock:
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
 }
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 	if (irq->intid < VGIC_MIN_LPI)
 		return;
 
-	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
-		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+		raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 		return;
 	};
 
 	list_del(&irq->lpi_list);
 	dist->lpi_list_count--;
-	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	kfree(irq);
 }
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	bool penda, pendb;
 	int ret;
 
-	spin_lock(&irqa->irq_lock);
-	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&irqa->irq_lock);
+	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
 	if (irqa->active || irqb->active) {
 		ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	/* Both pending and enabled, sort by priority */
 	ret = irqa->priority - irqb->priority;
 out:
-	spin_unlock(&irqb->irq_lock);
-	spin_unlock(&irqa->irq_lock);
+	raw_spin_unlock(&irqb->irq_lock);
+	raw_spin_unlock(&irqa->irq_lock);
 	return ret;
 }
 
@@ -325,7 +325,7 @@ retry:
 		 * not need to be inserted into an ap_list and there is also
 		 * no more work for us to do.
 		 */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		/*
 		 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
 	 * We must unlock the irq lock to take the ap_list_lock where
 	 * we are going to insert this new pending interrupt.
 	 */
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	/* someone can do stuff here, which we re-check below */
 
-	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
-	spin_lock(&irq->irq_lock);
+	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_lock(&irq->irq_lock);
 
 	/*
 	 * Did something change behind our backs?
@@ -367,10 +367,11 @@ retry:
 	 */
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
-		spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+					   flags);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
 	}
 
@@ -382,8 +383,8 @@ retry:
 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
 	irq->vcpu = vcpu;
 
-	spin_unlock(&irq->irq_lock);
-	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_unlock(&irq->irq_lock);
+	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 	if (!irq)
 		return -EINVAL;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (!vgic_validate_injection(irq, level, owner)) {
 		/* Nothing to see here, move along... */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(kvm, irq);
 		return 0;
 	}
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 
 	BUG_ON(!irq);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return ret;
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
 	if (!irq->hw)
 		goto out;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->active = false;
 	irq->pending_latch = false;
 	irq->line_level = false;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	vgic_put_irq(vcpu->kvm, irq);
 }
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	BUG_ON(!irq);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	kvm_vgic_unmap_irq(irq);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return 0;
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
 		bool target_vcpu_needs_kick = false;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		BUG_ON(vcpu != irq->vcpu);
 
@@ -616,7 +617,7 @@ retry:
 			 */
 			list_del(&irq->ap_list);
 			irq->vcpu = NULL;
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 
 			/*
 			 * This vgic_put_irq call matches the
@@ -631,14 +632,14 @@ retry:
 
 		if (target_vcpu == vcpu) {
 			/* We're on the right CPU */
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 			continue;
 		}
 
 		/* This interrupt looks like it has to be migrated. */
 
-		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -652,10 +653,10 @@ retry:
 			vcpuB = vcpu;
 		}
 
-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-				 SINGLE_DEPTH_NESTING);
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				     SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&irq->irq_lock);
 
 		/*
 		 * If the affinity has been preserved, move the
@@ -675,9 +676,9 @@ retry:
 			target_vcpu_needs_kick = true;
 		}
 
-		spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
 		goto retry;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
 		w = vgic_irq_get_lr_count(irq);
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		count += w;
 		*multi_sgi |= (w > 1);
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	count = 0;
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/*
 		 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		 * the AP list has been sorted already.
 		 */
 		if (multi_sgi && irq->priority > prio) {
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 			break;
 		}
 
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 			prio = irq->priority;
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		if (count == kvm_vgic_global_state.nr_lr) {
 			if (!list_is_last(&irq->ap_list,
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
 	vgic_get_vmcr(vcpu, &vmcr);
 
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 		pending = irq_is_pending(irq) && irq->enabled &&
 			  !irq->active &&
 			  irq->priority < vmcr.pmr;
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		if (pending)
 			break;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 	return pending;
 }
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 		return false;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return map_is_active;
 }
-