Diffstat (limited to 'virt/kvm/ioapic.c')
-rw-r--r--  virt/kvm/ioapic.c | 153
1 files changed, 22 insertions, 131 deletions

diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index c3b99def9cbc..1eddae94bab3 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -85,7 +85,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 {
-	union ioapic_redir_entry *pent;
+	union kvm_ioapic_redirect_entry *pent;
 	int injected = -1;
 
 	pent = &ioapic->redirtbl[idx];
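Note: the only change in this hunk is the rename of the redirection-table entry type. The union it refers to is defined in the KVM ioapic header and is not part of this diff; a rough sketch is shown below. The bitfield layout mirrors the 82093AA I/O APIC redirection-table entry format, but the member names and padding here are quoted from memory, not from this commit, so treat them as an approximation.

union kvm_ioapic_redirect_entry {
	u64 bits;
	struct {
		u8 vector;
		u8 delivery_mode:3;	/* fixed, lowest priority, NMI, ... */
		u8 dest_mode:1;		/* 0 = physical, 1 = logical */
		u8 delivery_status:1;
		u8 polarity:1;
		u8 remote_irr:1;
		u8 trig_mode:1;		/* 0 = edge, 1 = level */
		u8 mask:1;
		u8 reserve:7;
		u8 reserved[4];
		u8 dest_id;
	} fields;
};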
@@ -142,149 +142,40 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
-			  struct kvm_vcpu *vcpu,
-			  u8 vector, u8 trig_mode, u8 delivery_mode)
-{
-	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
-		     delivery_mode);
-
-	ASSERT((delivery_mode == IOAPIC_FIXED) ||
-	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));
-
-	return kvm_apic_set_irq(vcpu, vector, trig_mode);
-}
-
-static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
-{
-	kvm_inject_nmi(vcpu);
-	kvm_vcpu_kick(vcpu);
-}
-
-u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				    u8 dest_mode)
-{
-	u32 mask = 0;
-	int i;
-	struct kvm *kvm = ioapic->kvm;
-	struct kvm_vcpu *vcpu;
-
-	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
-
-	if (dest_mode == 0) {	/* Physical mode. */
-		if (dest == 0xFF) {	/* Broadcast. */
-			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
-					mask |= 1 << i;
-			return mask;
-		}
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
-				if (vcpu->arch.apic)
-					mask = 1 << i;
-				break;
-			}
-		}
-	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (vcpu->arch.apic &&
-			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
-				mask |= 1 << vcpu->vcpu_id;
-		}
-	ioapic_debug("mask %x\n", mask);
-	return mask;
-}
-
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
-	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
-	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
-	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
-	u8 vector = ioapic->redirtbl[irq].fields.vector;
-	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
-	u32 deliver_bitmask;
-	struct kvm_vcpu *vcpu;
-	int vcpu_id, r = -1;
+	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
+	struct kvm_lapic_irq irqe;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
-		     dest, dest_mode, delivery_mode, vector, trig_mode);
-
-	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
-							  dest_mode);
-	if (!deliver_bitmask) {
-		ioapic_debug("no target on destination\n");
-		return 0;
-	}
+		     entry->fields.dest, entry->fields.dest_mode,
+		     entry->fields.delivery_mode, entry->fields.vector,
+		     entry->fields.trig_mode);
+
+	irqe.dest_id = entry->fields.dest_id;
+	irqe.vector = entry->fields.vector;
+	irqe.dest_mode = entry->fields.dest_mode;
+	irqe.trig_mode = entry->fields.trig_mode;
+	irqe.delivery_mode = entry->fields.delivery_mode << 8;
+	irqe.level = 1;
+	irqe.shorthand = 0;
 
-	switch (delivery_mode) {
-	case IOAPIC_LOWEST_PRIORITY:
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
-				deliver_bitmask);
 #ifdef CONFIG_X86
-		if (irq == 0)
-			vcpu = ioapic->kvm->vcpus[0];
-#endif
-		if (vcpu != NULL)
-			r = ioapic_inj_irq(ioapic, vcpu, vector,
-				       trig_mode, delivery_mode);
-		else
-			ioapic_debug("null lowest prio vcpu: "
-					"mask=%x vector=%x delivery_mode=%x\n",
-					deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
-		break;
-	case IOAPIC_FIXED:
-#ifdef CONFIG_X86
-		if (irq == 0)
-			deliver_bitmask = 1;
-#endif
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu) {
-				if (r < 0)
-					r = 0;
-				r += ioapic_inj_irq(ioapic, vcpu, vector,
-					       trig_mode, delivery_mode);
-			}
-		}
-		break;
-	case IOAPIC_NMI:
-		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
-			if (!(deliver_bitmask & (1 << vcpu_id)))
-				continue;
-			deliver_bitmask &= ~(1 << vcpu_id);
-			vcpu = ioapic->kvm->vcpus[vcpu_id];
-			if (vcpu) {
-				ioapic_inj_nmi(vcpu);
-				r = 1;
-			}
-			else
-				ioapic_debug("NMI to vcpu %d failed\n",
-						vcpu->vcpu_id);
-		}
-		break;
-	default:
-		printk(KERN_WARNING "Unsupported delivery mode %d\n",
-		       delivery_mode);
-		break;
+	/* Always delivery PIT interrupt to vcpu 0 */
+	if (irq == 0) {
+		irqe.dest_mode = 0; /* Physical mode. */
+		irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
 	}
-	return r;
+#endif
+	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
 	u32 old_irr = ioapic->irr;
 	u32 mask = 1 << irq;
-	union ioapic_redir_entry entry;
+	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;
 
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
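Note: the rewritten ioapic_deliver() no longer computes a per-vcpu delivery bitmask and walks it per delivery mode; it packs the redirection entry into a struct kvm_lapic_irq and hands delivery to the shared kvm_irq_delivery_to_apic() helper (with NULL as the source LAPIC). A rough sketch of that structure as the new code appears to assume it; the field names and types are quoted from memory, not from this diff:

struct kvm_lapic_irq {
	u32 vector;
	u32 delivery_mode;	/* APIC ICR encoding, i.e. already shifted into bits 8-10 */
	u32 dest_mode;
	u32 level;
	u32 trig_mode;
	u32 shorthand;
	u32 dest_id;
};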
@@ -305,7 +196,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin,
 				    int trigger_mode)
 {
-	union ioapic_redir_entry *ent;
+	union kvm_ioapic_redirect_entry *ent;
 
 	ent = &ioapic->redirtbl[pin];
 
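Note: the `irqe.delivery_mode = entry->fields.delivery_mode << 8;` line above is what lets the IOAPIC path reuse the local-APIC delivery code. The 3-bit delivery mode sits at bits 0-2 of a redirection-table entry but at bits 8-10 of the APIC ICR, so shifting by 8 turns the IOAPIC encoding into the APIC_DM_* values that kvm_irq_delivery_to_apic() compares against. A small self-contained check of that correspondence; the constant values are copied from the kernel's ioapic.h and apicdef.h as remembered, so treat them as an assumption rather than part of this commit:

#include <assert.h>
#include <stdio.h>

/* IOAPIC redirection-entry delivery modes (virt/kvm/ioapic.h, assumed values) */
#define IOAPIC_FIXED		0x0
#define IOAPIC_LOWEST_PRIORITY	0x1
#define IOAPIC_NMI		0x4

/* APIC ICR delivery modes, bits 8-10 (arch/x86/include/asm/apicdef.h, assumed values) */
#define APIC_DM_FIXED		0x00000
#define APIC_DM_LOWEST		0x00100
#define APIC_DM_NMI		0x00400

int main(void)
{
	/* The << 8 in ioapic_deliver() maps one encoding onto the other. */
	assert((IOAPIC_FIXED << 8) == APIC_DM_FIXED);
	assert((IOAPIC_LOWEST_PRIORITY << 8) == APIC_DM_LOWEST);
	assert((IOAPIC_NMI << 8) == APIC_DM_NMI);
	printf("IOAPIC delivery modes shifted by 8 match the APIC_DM_* encoding\n");
	return 0;
}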