author     Jan Kiszka <jan.kiszka@siemens.com>   2010-02-24 04:41:58 -0500
committer  Avi Kivity <avi@redhat.com>           2010-04-25 05:39:28 -0400
commit     50a085bdd48af08cc7e3178ba0d7c1d5d8191698
tree       145a9a99b9e5c240c816b727a6669c8b01e07b4a  /arch/x86/kvm/i8259.c
parent     a595405df9efb89710cd555d29df0e4902f90613
KVM: x86: Kick VCPU outside PIC lock again
This restores the deferred VCPU kicking that was in place before commit
956f97cf. We need it on -rt, where wake_up* requires non-atomic context.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
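
The point of the patch: kvm_vcpu_kick() may end up in wake_up*, which must not
run in atomic context on -rt, while the PIC state is protected by a raw
spinlock. The new pic_lock()/pic_unlock() helpers therefore record the pending
kick under the lock and issue it only after the lock has been dropped. As a
rough userspace analogue of that pattern (the pthread mutex and the pic_like
type are illustrative stand-ins, not KVM code):

/* Toy sketch of the deferred-wakeup pattern: note the wakeup while holding
 * the lock, drop the lock, then do the potentially sleeping wakeup outside
 * the critical section. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pic_like {
	pthread_mutex_t lock;
	bool wakeup_needed;
};

static struct pic_like s = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wakeup_needed = false,
};

static void pic_like_unlock(struct pic_like *p)
{
	bool wakeup = p->wakeup_needed;

	p->wakeup_needed = false;
	pthread_mutex_unlock(&p->lock);	/* leave the critical section first */
	if (wakeup)
		printf("kick the VCPU here, outside the lock\n");
}

int main(void)
{
	pthread_mutex_lock(&s.lock);
	s.wakeup_needed = true;		/* what pic_irq_request() now does */
	pic_like_unlock(&s);		/* the "kick" happens after unlock */
	return 0;
}

In the patch itself the lock is a raw_spinlock_t and the deferred wakeup is
kvm_vcpu_kick() on kvm->bsp_vcpu, as the diff below shows.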
Diffstat (limited to 'arch/x86/kvm/i8259.c')
 arch/x86/kvm/i8259.c | 53 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 38 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index a790fa128a9f..93825ff3338f 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -33,6 +33,29 @@
 #include <linux/kvm_host.h>
 #include "trace.h"
 
+static void pic_lock(struct kvm_pic *s)
+	__acquires(&s->lock)
+{
+	raw_spin_lock(&s->lock);
+}
+
+static void pic_unlock(struct kvm_pic *s)
+	__releases(&s->lock)
+{
+	bool wakeup = s->wakeup_needed;
+	struct kvm_vcpu *vcpu;
+
+	s->wakeup_needed = false;
+
+	raw_spin_unlock(&s->lock);
+
+	if (wakeup) {
+		vcpu = s->kvm->bsp_vcpu;
+		if (vcpu)
+			kvm_vcpu_kick(vcpu);
+	}
+}
+
 static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 {
 	s->isr &= ~(1 << irq);
@@ -45,19 +68,19 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 	 * Other interrupt may be delivered to PIC while lock is dropped but
 	 * it should be safe since PIC state is already updated at this stage.
 	 */
-	raw_spin_unlock(&s->pics_state->lock);
+	pic_unlock(s->pics_state);
 	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
-	raw_spin_lock(&s->pics_state->lock);
+	pic_lock(s->pics_state);
 }
 
 void kvm_pic_clear_isr_ack(struct kvm *kvm)
 {
 	struct kvm_pic *s = pic_irqchip(kvm);
 
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	s->pics[0].isr_ack = 0xff;
 	s->pics[1].isr_ack = 0xff;
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 }
 
 /*
@@ -158,9 +181,9 @@ static void pic_update_irq(struct kvm_pic *s)
 
 void kvm_pic_update_irq(struct kvm_pic *s)
 {
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	pic_update_irq(s);
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 }
 
 int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -168,14 +191,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
 	struct kvm_pic *s = opaque;
 	int ret = -1;
 
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	if (irq >= 0 && irq < PIC_NUM_PINS) {
 		ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
 		pic_update_irq(s);
 		trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
 				      s->pics[irq >> 3].imr, ret == 0);
 	}
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 
 	return ret;
 }
@@ -205,7 +228,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
 	int irq, irq2, intno;
 	struct kvm_pic *s = pic_irqchip(kvm);
 
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	irq = pic_get_irq(&s->pics[0]);
 	if (irq >= 0) {
 		pic_intack(&s->pics[0], irq);
@@ -230,7 +253,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
 		intno = s->pics[0].irq_base + irq;
 	}
 	pic_update_irq(s);
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 
 	return intno;
 }
@@ -444,7 +467,7 @@ static int picdev_write(struct kvm_io_device *this,
 		printk(KERN_ERR "PIC: non byte write\n");
 		return 0;
 	}
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	switch (addr) {
 	case 0x20:
 	case 0x21:
@@ -457,7 +480,7 @@ static int picdev_write(struct kvm_io_device *this,
 		elcr_ioport_write(&s->pics[addr & 1], addr, data);
 		break;
 	}
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 	return 0;
 }
 
@@ -474,7 +497,7 @@ static int picdev_read(struct kvm_io_device *this,
 		printk(KERN_ERR "PIC: non byte read\n");
 		return 0;
 	}
-	raw_spin_lock(&s->lock);
+	pic_lock(s);
 	switch (addr) {
 	case 0x20:
 	case 0x21:
@@ -488,7 +511,7 @@ static int picdev_read(struct kvm_io_device *this,
 		break;
 	}
 	*(unsigned char *)val = data;
-	raw_spin_unlock(&s->lock);
+	pic_unlock(s);
 	return 0;
 }
 
@@ -505,7 +528,7 @@ static void pic_irq_request(void *opaque, int level)
 	s->output = level;
 	if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
 		s->pics[0].isr_ack &= ~(1 << irq);
-		kvm_vcpu_kick(vcpu);
+		s->wakeup_needed = true;
 	}
 }
 
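
Two side notes on the diff above. First, because this view is limited to
i8259.c, the wakeup_needed flag consumed by pic_unlock() is declared elsewhere
(as a field of struct kvm_pic, presumably added by the same change in
arch/x86/kvm/irq.h, which is not shown here). Second, the __acquires()/
__releases() markers on the new helpers are sparse lock-context annotations;
when not building with sparse, the kernel defines them away, so the helpers
compile as ordinary functions. An illustrative stub of that behaviour, not a
verbatim copy of compiler.h:

/* Illustrative only: with __CHECKER__ unset (i.e. no sparse run),
 * the lock-context annotations expand to nothing. */
#ifndef __CHECKER__
#define __acquires(x)
#define __releases(x)
#endif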