author     Sheng Yang <sheng@linux.intel.com>   2009-03-04 00:33:02 -0500
committer  Avi Kivity <avi@redhat.com>          2009-06-10 04:48:26 -0400
commit     74a3a8f152053394a016518cc2f2fee216897fa4 (patch)
tree       96e2d2a9a8583836280063fde54323715ff8d90a /virt
parent     3f5e06f8799adca3e7e1bbafe1cd780a3e69f604 (diff)
KVM: Merge kvm_ioapic_get_delivery_bitmask into kvm_get_intr_delivery_bitmask
Gleb fixed bitmap ops usage in kvm_ioapic_get_delivery_bitmask.

Sheng merged the two functions and fixed several issues in kvm_get_intr_delivery_bitmask:

1. deliver_bitmask is a bitmap rather than an unsigned long integer.
2. The lowest priority target bitmap was calculated incorrectly.
3. Prevent a potential NULL dereference.
4. The declaration in include/kvm_host.h caused a powerpc compilation warning.
5. Add a warning for a guest broadcast interrupt with lowest priority delivery mode.
6. Remove the duplicate bitmap clearing in callers of kvm_get_intr_delivery_bitmask.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
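A minimal userspace sketch (not part of the patch) of what item 1 above refers to: treating deliver_bitmask as one unsigned long versus handling it with the bitmap helpers. The KVM_MAX_VCPUS value and the local DECLARE_BITMAP()/bitmap_zero()/__set_bit() definitions below are hypothetical stand-ins for the kernel's own, chosen only so the example compiles on its own:

/*
 * Sketch only: assumes KVM_MAX_VCPUS > BITS_PER_LONG to make the point
 * visible.  The helpers mirror the kernel bitmap API in shape but are
 * local stand-ins for this illustration.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#define KVM_MAX_VCPUS	128	/* hypothetical value for the sketch */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits)	unsigned long name[BITS_TO_LONGS(bits)]

static void bitmap_zero(unsigned long *map, unsigned int bits)
{
	memset(map, 0, BITS_TO_LONGS(bits) * sizeof(unsigned long));
}

static void __set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);

	/*
	 * The old style treated the mask as a single integer:
	 *	*deliver_bitmask = 0;
	 *	*deliver_bitmask |= 1 << i;
	 * which only ever touches the first word and misbehaves once
	 * i reaches BITS_PER_LONG.
	 */

	/* The new style operates on the whole bitmap. */
	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
	__set_bit(0, deliver_bitmask);			/* e.g. PIT -> vcpu 0 */
	__set_bit(KVM_MAX_VCPUS - 1, deliver_bitmask);	/* lands in the last word */

	printf("first word=%#lx last word=%#lx\n",
	       deliver_bitmask[0],
	       deliver_bitmask[BITS_TO_LONGS(KVM_MAX_VCPUS) - 1]);
	return 0;
}

Once the mask can span more than one unsigned long, only the bitmap helpers touch every word, which is why the patch replaces "*deliver_bitmask = 0" with bitmap_zero() and the shift-or assignments with __set_bit().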
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/ioapic.c    46
-rw-r--r--  virt/kvm/ioapic.h     5
-rw-r--r--  virt/kvm/irq_comm.c  49
3 files changed, 49 insertions, 51 deletions
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 7c2cb2bd1199..ea268a8c37da 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -161,45 +161,6 @@ static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
 	kvm_vcpu_kick(vcpu);
 }
 
-void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				     u8 dest_mode, unsigned long *mask)
-{
-	int i;
-	struct kvm *kvm = ioapic->kvm;
-	struct kvm_vcpu *vcpu;
-
-	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);
-
-	*mask = 0;
-	if (dest_mode == 0) {	/* Physical mode. */
-		if (dest == 0xFF) {	/* Broadcast. */
-			for (i = 0; i < KVM_MAX_VCPUS; ++i)
-				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
-					*mask |= 1 << i;
-			return;
-		}
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
-				if (vcpu->arch.apic)
-					*mask = 1 << i;
-				break;
-			}
-		}
-	} else if (dest != 0) /* Logical mode, MDA non-zero. */
-		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-			vcpu = kvm->vcpus[i];
-			if (!vcpu)
-				continue;
-			if (vcpu->arch.apic &&
-			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
-				*mask |= 1 << vcpu->vcpu_id;
-		}
-	ioapic_debug("mask %x\n", *mask);
-}
-
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
@@ -213,13 +174,12 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 		     entry.fields.delivery_mode, entry.fields.vector,
 		     entry.fields.trig_mode);
 
-	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-
 	/* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
-	if (irq == 0)
+	if (irq == 0) {
+		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 		__set_bit(0, deliver_bitmask);
-	else
+	} else
 #endif
 		kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 7275f87a11cd..c8032ab2a4e2 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -70,7 +70,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
-				u8 dest_mode, unsigned long *mask);
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   unsigned long *deliver_bitmask);
 
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index d165e056f79b..1c6ff6d1b842 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -47,15 +47,54 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 			   union kvm_ioapic_redirect_entry *entry,
 			   unsigned long *deliver_bitmask)
 {
+	int i;
+	struct kvm *kvm = ioapic->kvm;
 	struct kvm_vcpu *vcpu;
 
-	kvm_ioapic_get_delivery_bitmask(ioapic, entry->fields.dest_id,
-					entry->fields.dest_mode,
-					deliver_bitmask);
+	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
+
+	if (entry->fields.dest_mode == 0) {	/* Physical mode. */
+		if (entry->fields.dest_id == 0xFF) {	/* Broadcast. */
+			for (i = 0; i < KVM_MAX_VCPUS; ++i)
+				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
+					__set_bit(i, deliver_bitmask);
+			/* Lowest priority shouldn't combine with broadcast */
+			if (entry->fields.delivery_mode ==
+			    IOAPIC_LOWEST_PRIORITY && printk_ratelimit())
+				printk(KERN_INFO "kvm: apic: phys broadcast "
+						 "and lowest prio\n");
+			return;
+		}
+		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+			vcpu = kvm->vcpus[i];
+			if (!vcpu)
+				continue;
+			if (kvm_apic_match_physical_addr(vcpu->arch.apic,
+					entry->fields.dest_id)) {
+				if (vcpu->arch.apic)
+					__set_bit(i, deliver_bitmask);
+				break;
+			}
+		}
+	} else if (entry->fields.dest_id != 0) /* Logical mode, MDA non-zero. */
+		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+			vcpu = kvm->vcpus[i];
+			if (!vcpu)
+				continue;
+			if (vcpu->arch.apic &&
+			    kvm_apic_match_logical_addr(vcpu->arch.apic,
+					entry->fields.dest_id))
+				__set_bit(i, deliver_bitmask);
+		}
+
 	switch (entry->fields.delivery_mode) {
 	case IOAPIC_LOWEST_PRIORITY:
+		/* Select one in deliver_bitmask */
 		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
 				entry->fields.vector, deliver_bitmask);
+		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
+		if (!vcpu)
+			return;
 		__set_bit(vcpu->vcpu_id, deliver_bitmask);
 		break;
 	case IOAPIC_FIXED:
@@ -65,7 +104,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 		if (printk_ratelimit())
 			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
 				entry->fields.delivery_mode);
-		*deliver_bitmask = 0;
+		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 	}
 }
 
@@ -80,8 +119,6 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 
 	BUG_ON(!ioapic);
 
-	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-
 	entry.bits = 0;
 	entry.fields.dest_id = (e->msi.address_lo &
 			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;