-rw-r--r--   arch/ia64/kvm/kvm-ia64.c |  7
-rw-r--r--   arch/x86/kvm/i8259.c     | 22
-rw-r--r--   arch/x86/kvm/lapic.c     |  5
-rw-r--r--   arch/x86/kvm/x86.c       | 10
-rw-r--r--   virt/kvm/ioapic.c        | 80
-rw-r--r--   virt/kvm/ioapic.h        |  4
-rw-r--r--   virt/kvm/irq_comm.c      | 23
7 files changed, 100 insertions(+), 51 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..4a983147f6eb 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -851,8 +851,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -868,9 +867,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(ioapic_irqchip(kvm),
-			&chip->chip.ioapic,
-			sizeof(struct kvm_ioapic_state));
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index ccc941af4eaf..d057c0cbd245 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -38,7 +38,15 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 	s->isr_ack |= (1 << irq);
 	if (s != &s->pics_state->pics[0])
 		irq += 8;
+	/*
+	 * We are dropping lock while calling ack notifiers since ack
+	 * notifier callbacks for assigned devices call into PIC recursively.
+	 * Other interrupt may be delivered to PIC while lock is dropped but
+	 * it should be safe since PIC state is already updated at this stage.
+	 */
+	spin_unlock(&s->pics_state->lock);
 	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
+	spin_lock(&s->pics_state->lock);
 }
 
 void kvm_pic_clear_isr_ack(struct kvm *kvm)
@@ -176,16 +184,18 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
 static inline void pic_intack(struct kvm_kpic_state *s, int irq)
 {
 	s->isr |= 1 << irq;
-	if (s->auto_eoi) {
-		if (s->rotate_on_auto_eoi)
-			s->priority_add = (irq + 1) & 7;
-		pic_clear_isr(s, irq);
-	}
 	/*
 	 * We don't clear a level sensitive interrupt here
 	 */
 	if (!(s->elcr & (1 << irq)))
 		s->irr &= ~(1 << irq);
+
+	if (s->auto_eoi) {
+		if (s->rotate_on_auto_eoi)
+			s->priority_add = (irq + 1) & 7;
+		pic_clear_isr(s, irq);
+	}
+
 }
 
 int kvm_pic_read_irq(struct kvm *kvm)
@@ -294,9 +304,9 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
 			priority = get_priority(s, s->isr);
 			if (priority != 8) {
 				irq = (priority + s->priority_add) & 7;
-				pic_clear_isr(s, irq);
 				if (cmd == 5)
 					s->priority_add = (irq + 1) & 7;
+				pic_clear_isr(s, irq);
 				pic_update_irq(s->pics_state);
 			}
 			break;
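
Note (illustration only, not part of the patch): the pic_clear_isr() hunk above updates the PIC state first and only then drops pics_state->lock around kvm_notify_acked_irq(), so an ack notifier that re-enters the PIC cannot deadlock on the same lock. A minimal userspace sketch of that pattern, with invented names (struct pic, ack_notifier) and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>

struct pic {
	pthread_mutex_t lock;
	unsigned int isr;				/* in-service bits */
	void (*ack_notifier)(struct pic *s, int irq);	/* may re-enter code that takes s->lock */
};

/* Called with s->lock held, mirroring the pattern in the hunk above. */
static void pic_clear_isr_sketch(struct pic *s, int irq)
{
	s->isr &= ~(1u << irq);			/* state is fully updated before the lock drops */

	pthread_mutex_unlock(&s->lock);		/* drop the lock around the re-entrant callback */
	s->ack_notifier(s, irq);
	pthread_mutex_lock(&s->lock);		/* retake it before returning to the caller */
}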
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 23c217692ea9..df8bcb0f66d8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -471,11 +471,8 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_LEVEL_TRIG;
 	else
 		trigger_mode = IOAPIC_EDGE_TRIG;
-	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) {
-		mutex_lock(&apic->vcpu->kvm->irq_lock);
+	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
-		mutex_unlock(&apic->vcpu->kvm->irq_lock);
-	}
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1687d12b122a..fdf989f17a61 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2038,9 +2038,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic,
-			ioapic_irqchip(kvm),
-			sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -2070,11 +2068,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 		spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		mutex_lock(&kvm->irq_lock);
-		memcpy(ioapic_irqchip(kvm),
-			&chip->chip.ioapic,
-			sizeof(struct kvm_ioapic_state));
-		mutex_unlock(&kvm->irq_lock);
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 9fe140bb38ec..38a2d20b89de 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -182,6 +182,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;
 
+	mutex_lock(&ioapic->lock);
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
 		level ^= entry.fields.polarity;
@@ -198,34 +199,51 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 		}
 		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	}
+	mutex_unlock(&ioapic->lock);
+
 	return ret;
 }
 
-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin,
+static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 				     int trigger_mode)
 {
-	union kvm_ioapic_redirect_entry *ent;
+	int i;
+
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
 
-	ent = &ioapic->redirtbl[pin];
+		if (ent->fields.vector != vector)
+			continue;
 
-	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
+		/*
+		 * We are dropping lock while calling ack notifiers because ack
+		 * notifier callbacks for assigned devices call into IOAPIC
+		 * recursively. Since remote_irr is cleared only after call
+		 * to notifiers if the same vector will be delivered while lock
+		 * is dropped it will be put into irr and will be delivered
+		 * after ack notifier returns.
+		 */
+		mutex_unlock(&ioapic->lock);
+		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
+		mutex_lock(&ioapic->lock);
+
+		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+			continue;
 
-	if (trigger_mode == IOAPIC_LEVEL_TRIG) {
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << pin)))
-			ioapic_service(ioapic, pin);
+		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
+			ioapic_service(ioapic, i);
 	}
 }
 
 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	int i;
 
-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
-		if (ioapic->redirtbl[i].fields.vector == vector)
-			__kvm_ioapic_update_eoi(ioapic, i, trigger_mode);
+	mutex_lock(&ioapic->lock);
+	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
+	mutex_unlock(&ioapic->lock);
 }
 
 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
@@ -250,8 +268,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	ioapic_debug("addr %lx\n", (unsigned long)addr);
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
-	mutex_lock(&ioapic->kvm->irq_lock);
 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		result = ioapic->ioregsel;
@@ -265,6 +283,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 		result = 0;
 		break;
 	}
+	mutex_unlock(&ioapic->lock);
+
 	switch (len) {
 	case 8:
 		*(u64 *) val = result;
@@ -277,7 +297,6 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	default:
 		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
 	}
-	mutex_unlock(&ioapic->kvm->irq_lock);
 	return 0;
 }
 
@@ -293,15 +312,15 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		     (void*)addr, len, val);
 	ASSERT(!(addr & 0xf));	/* check alignment */
 
-	mutex_lock(&ioapic->kvm->irq_lock);
 	if (len == 4 || len == 8)
 		data = *(u32 *) val;
 	else {
 		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
-		goto unlock;
+		return 0;
 	}
 
 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		ioapic->ioregsel = data;
@@ -312,15 +331,14 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		break;
 #ifdef CONFIG_IA64
 	case IOAPIC_REG_EOI:
-		kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG);
+		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
 		break;
 #endif
 
 	default:
 		break;
 	}
-unlock:
-	mutex_unlock(&ioapic->kvm->irq_lock);
+	mutex_unlock(&ioapic->lock);
 	return 0;
 }
 
@@ -349,6 +367,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
+	mutex_init(&ioapic->lock);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
@@ -360,3 +379,26 @@ int kvm_ioapic_init(struct kvm *kvm)
 	return ret;
 }
 
+int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}
+
+int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}
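
Note (illustration only): kvm_get_ioapic()/kvm_set_ioapic() added above copy the userspace-visible register state while holding the IOAPIC's own mutex, so concurrent MMIO handlers never observe a half-written table; the kernel versions memcpy() straight from/to the kvm_ioapic struct, whose leading fields match kvm_ioapic_state. A rough userspace analogue with invented names and an explicit state member:

#include <pthread.h>
#include <string.h>

struct ioapic_state {
	unsigned int ioregsel;
	unsigned long long redirtbl[24];	/* one redirection entry per pin */
};

struct ioapic {
	struct ioapic_state state;		/* what get/set exposes to userspace */
	pthread_mutex_t lock;			/* protects state, as ioapic->lock does in the patch */
};

static int ioapic_get_state(struct ioapic *ioapic, struct ioapic_state *out)
{
	if (!ioapic)
		return -1;
	pthread_mutex_lock(&ioapic->lock);
	memcpy(out, &ioapic->state, sizeof(*out));	/* consistent snapshot under the lock */
	pthread_mutex_unlock(&ioapic->lock);
	return 0;
}

static int ioapic_set_state(struct ioapic *ioapic, const struct ioapic_state *in)
{
	if (!ioapic)
		return -1;
	pthread_mutex_lock(&ioapic->lock);
	memcpy(&ioapic->state, in, sizeof(ioapic->state));
	pthread_mutex_unlock(&ioapic->lock);
	return 0;
}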
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 6e461ade6365..419c43b667ab 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -45,6 +45,7 @@ struct kvm_ioapic {
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
+	struct mutex lock;
 };
 
 #ifdef DEBUG
@@ -74,4 +75,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq);
+int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 6c946141dbcc..fadf4408a820 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -146,8 +146,8 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
  */
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 {
-	struct kvm_kernel_irq_routing_entry *e;
-	int ret = -1;
+	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
+	int ret = -1, i = 0;
 	struct kvm_irq_routing_table *irq_rt;
 	struct hlist_node *n;
 
@@ -162,14 +162,19 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	rcu_read_lock();
 	irq_rt = rcu_dereference(kvm->irq_routing);
 	if (irq < irq_rt->nr_rt_entries)
-		hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
-			int r = e->set(e, kvm, irq_source_id, level);
-			if (r < 0)
-				continue;
-
-			ret = r + ((ret < 0) ? 0 : ret);
-		}
+		hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
+			irq_set[i++] = *e;
 	rcu_read_unlock();
+
+	while(i--) {
+		int r;
+		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
+		if (r < 0)
+			continue;
+
+		ret = r + ((ret < 0) ? 0 : ret);
+	}
+
 	return ret;
 }
 
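
Note (illustration only): the kvm_set_irq() change above copies the matching routing entries into a local array under rcu_read_lock() and calls their ->set handlers only after rcu_read_unlock(), so the handlers are free to take the new PIC/IOAPIC locks. A rough userspace sketch of the same copy-then-call idea, with a rwlock standing in for RCU and all names invented:

#include <pthread.h>

#define MAX_ROUTES 3	/* stand-in for KVM_NR_IRQCHIPS */

struct route_entry {
	unsigned int gsi;
	int (*set)(struct route_entry *e, int level);	/* may sleep or take locks */
};

struct routing_table {
	pthread_rwlock_t lock;		/* read side plays the role of rcu_read_lock() */
	struct route_entry map[16];
	int nr;
};

static int set_irq_sketch(struct routing_table *rt, unsigned int gsi, int level)
{
	struct route_entry irq_set[MAX_ROUTES];
	int i, n = 0, ret = -1;

	pthread_rwlock_rdlock(&rt->lock);
	for (i = 0; i < rt->nr && n < MAX_ROUTES; i++)
		if (rt->map[i].gsi == gsi)
			irq_set[n++] = rt->map[i];	/* copy the entry, do not call yet */
	pthread_rwlock_unlock(&rt->lock);

	while (n--) {					/* callbacks run with no lock held */
		int r = irq_set[n].set(&irq_set[n], level);
		if (r < 0)
			continue;
		ret = r + ((ret < 0) ? 0 : ret);	/* same coalescing rule as kvm_set_irq() */
	}
	return ret;
}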