path: root/virt/kvm/ioapic.c
author	Gleb Natapov <gleb@redhat.com>	2009-08-24 04:54:25 -0400
committer	Avi Kivity <avi@redhat.com>	2009-12-03 02:32:08 -0500
commit	eba0226bdfffe262e72b8360e4d0d12070e9a0f0 (patch)
tree	93da785e3bba63a9232e529a2572541ef87c0615 /virt/kvm/ioapic.c
parent	280aa177dcd1edc718d8a92f17f235b783ec6307 (diff)
KVM: Move IO APIC to its own lock
This allows removal of irq_lock from the injection path.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'virt/kvm/ioapic.c')
-rw-r--r--	virt/kvm/ioapic.c | 80
1 file changed, 61 insertions(+), 19 deletions(-)
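As background for the diff below, here is a minimal userspace sketch of the locking scheme this patch moves to: each emulated IOAPIC carries its own mutex, so the injection path locks only the device it touches instead of the VM-wide irq_lock. All names and types in the sketch (model_ioapic, model_ioapic_set_irq) are simplified stand-ins, and pthread mutexes stand in for kernel mutexes; this is not the kernel code itself.

/*
 * Userspace model of the per-device lock the patch introduces
 * (assumed, simplified names; not the kernel's types).
 */
#include <pthread.h>
#include <stdio.h>

#define IOAPIC_NUM_PINS 24

struct model_ioapic {			/* hypothetical stand-in for struct kvm_ioapic */
	pthread_mutex_t lock;		/* per-device lock, like ioapic->lock */
	unsigned int irr;		/* pending-interrupt bits */
};

/* Injection path: only the device's own lock is taken, no global irq_lock. */
static int model_ioapic_set_irq(struct model_ioapic *ioapic, int irq, int level)
{
	int ret = 1;

	pthread_mutex_lock(&ioapic->lock);
	if (irq >= 0 && irq < IOAPIC_NUM_PINS && level)
		ioapic->irr |= 1u << irq;	/* latch the request; the real code also services it */
	pthread_mutex_unlock(&ioapic->lock);
	return ret;
}

int main(void)
{
	struct model_ioapic ioapic = { .irr = 0 };

	pthread_mutex_init(&ioapic.lock, NULL);	/* mirrors mutex_init(&ioapic->lock) below */
	model_ioapic_set_irq(&ioapic, 3, 1);
	printf("irr = %#x\n", ioapic.irr);
	pthread_mutex_destroy(&ioapic.lock);
	return 0;
}

Built as an ordinary C program (cc sketch.c -lpthread), this only demonstrates the lock scoping; the real kvm_ioapic_set_irq also evaluates polarity, trigger mode and delivery, as the first two hunks show.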
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 9fe140bb38ec..38a2d20b89de 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -182,6 +182,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;
 
+	mutex_lock(&ioapic->lock);
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
 		level ^= entry.fields.polarity;
@@ -198,34 +199,51 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 		}
 		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	}
+	mutex_unlock(&ioapic->lock);
+
 	return ret;
 }
 
-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin,
+static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 				     int trigger_mode)
 {
-	union kvm_ioapic_redirect_entry *ent;
+	int i;
+
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
 
-	ent = &ioapic->redirtbl[pin];
+		if (ent->fields.vector != vector)
+			continue;
 
-	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
+		/*
+		 * We are dropping lock while calling ack notifiers because ack
+		 * notifier callbacks for assigned devices call into IOAPIC
+		 * recursively. Since remote_irr is cleared only after call
+		 * to notifiers if the same vector will be delivered while lock
+		 * is dropped it will be put into irr and will be delivered
+		 * after ack notifier returns.
+		 */
+		mutex_unlock(&ioapic->lock);
+		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
+		mutex_lock(&ioapic->lock);
+
+		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+			continue;
 
-	if (trigger_mode == IOAPIC_LEVEL_TRIG) {
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << pin)))
-			ioapic_service(ioapic, pin);
+		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
+			ioapic_service(ioapic, i);
 	}
 }
 
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	int i;
 
-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
-		if (ioapic->redirtbl[i].fields.vector == vector)
-			__kvm_ioapic_update_eoi(ioapic, i, trigger_mode);
+	mutex_lock(&ioapic->lock);
+	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
+	mutex_unlock(&ioapic->lock);
 }
 
 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
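The comment in the hunk above explains why the EOI path drops ioapic->lock around kvm_notify_acked_irq: an ack notifier for an assigned device re-enters the IOAPIC and takes the same per-device lock, so holding the non-recursive mutex across the callback would self-deadlock, and because remote_irr is cleared only after the notifier returns, anything injected while the lock is dropped is simply latched in irr and delivered afterwards. A small userspace model of that reasoning, with assumed simplified names (set_irq, ack_notifier, update_eoi) and pthread mutexes standing in for kernel mutexes:

/*
 * Why the EOI path must not hold the device lock across the ack notifier
 * (userspace model, assumed names; not the kernel code).
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioapic_lock = PTHREAD_MUTEX_INITIALIZER;

/* Injection path: takes the same per-device lock the EOI path uses. */
static void set_irq(int irq, int level)
{
	pthread_mutex_lock(&ioapic_lock);
	printf("set_irq(%d, %d)\n", irq, level);
	pthread_mutex_unlock(&ioapic_lock);
}

/* Ack notifier: re-enters the IOAPIC, as assigned-device notifiers do. */
static void ack_notifier(int pin)
{
	set_irq(pin, 1);
}

static void update_eoi(int pin)
{
	pthread_mutex_lock(&ioapic_lock);
	/* ... find the matching redirection entry ... */
	pthread_mutex_unlock(&ioapic_lock);	/* drop before the callback */
	ack_notifier(pin);			/* safe: may take ioapic_lock itself */
	pthread_mutex_lock(&ioapic_lock);
	/* ... clear remote_irr, re-deliver anything latched in irr ... */
	pthread_mutex_unlock(&ioapic_lock);
}

int main(void)
{
	update_eoi(3);
	return 0;
}

Keeping the lock held across ack_notifier() in this model would deadlock on the first EOI, which is the situation the unlock/lock pair in __kvm_ioapic_update_eoi avoids.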
@@ -250,8 +268,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	ioapic_debug("addr %lx\n", (unsigned long)addr);
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
-	mutex_lock(&ioapic->kvm->irq_lock);
 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		result = ioapic->ioregsel;
@@ -265,6 +283,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 		result = 0;
 		break;
 	}
+	mutex_unlock(&ioapic->lock);
+
 	switch (len) {
 	case 8:
 		*(u64 *) val = result;
@@ -277,7 +297,6 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	default:
 		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
 	}
-	mutex_unlock(&ioapic->kvm->irq_lock);
 	return 0;
 }
 
@@ -293,15 +312,15 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		     (void*)addr, len, val);
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
-	mutex_lock(&ioapic->kvm->irq_lock);
 	if (len == 4 || len == 8)
 		data = *(u32 *) val;
 	else {
 		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
-		goto unlock;
+		return 0;
 	}
 
 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		ioapic->ioregsel = data;
@@ -312,15 +331,14 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		break;
 #ifdef CONFIG_IA64
 	case IOAPIC_REG_EOI:
-		kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG);
+		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
 		break;
 #endif
 
 	default:
 		break;
 	}
-unlock:
-	mutex_unlock(&ioapic->kvm->irq_lock);
+	mutex_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -349,6 +367,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
+	mutex_init(&ioapic->lock);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -360,3 +379,26 @@ int kvm_ioapic_init(struct kvm *kvm)
 	return ret;
 }
 
+int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}
+
+int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}