commit    a53c17d21c46a752f5ac6695376481bc27865b04 (patch)
author    Gleb Natapov <gleb@redhat.com>   2009-03-05 09:34:49 -0500
committer Avi Kivity <avi@redhat.com>      2009-06-10 04:48:27 -0400
tree      f7ece20fcb0adbf4cabc580fb5a16cf5eec0a897
parent    6da7e3f643cf7099965d75fda8606b9d3a8650b9 (diff)
KVM: ioapic/msi interrupt delivery consolidation
ioapic_deliver() and kvm_set_msi() have duplicated delivery code. Move
the code into a new ioapic_deliver_entry() function and call it from
both places.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
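
[Editor's note] For orientation before the hunks: the whole patch is the classic "hoist the duplicated loop into one helper" refactoring. Below is a toy sketch of that shape in miniature, not the kernel code; the struct, the bitmask layout, and every name in it are invented for illustration. In the real patch the shared helper is ioapic_deliver_entry(), the destination resolver is kvm_get_intr_delivery_bitmask(), and per-vcpu injection goes through kvm_apic_set_irq().

    #include <stdio.h>

    #define MAX_VCPUS 4

    /* Invented stand-in for union kvm_ioapic_redirect_entry. */
    struct entry {
        int vector;
        unsigned dest_mask;
    };

    /* Shared helper: walk the destination mask once, in one place.
     * Returns -1 if nothing was targeted, else the delivery count. */
    static int deliver_entry(const struct entry *e)
    {
        int i, r = -1;
        for (i = 0; i < MAX_VCPUS; i++) {
            if (e->dest_mask & (1u << i)) {
                if (r < 0)
                    r = 0;
                printf("deliver vector %d to vcpu %d\n", e->vector, i);
                r++;
            }
        }
        return r;
    }

    /* ioapic path: the entry comes from a redirection table (faked here). */
    static int ioapic_deliver(int irq)
    {
        struct entry e = { .vector = 32 + irq, .dest_mask = 1u << 0 };
        return deliver_entry(&e);
    }

    /* msi path: the entry is decoded from the message address/data pair. */
    static int set_msi(unsigned address, unsigned data)
    {
        struct entry e = { .vector = (int)(data & 0xff),
                           .dest_mask = (address >> 12) & 0xf };
        return deliver_entry(&e);
    }

    int main(void)
    {
        ioapic_deliver(0);
        set_msi(0xfee01000u, 0x30);
        return 0;
    }

Both entry points shrink to "build an entry, delegate", which is exactly what the diffs below do to ioapic_deliver() and kvm_set_msi().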
-rw-r--r--  include/linux/kvm_host.h |  2
-rw-r--r--  virt/kvm/ioapic.c        | 61
-rw-r--r--  virt/kvm/ioapic.h        |  4
-rw-r--r--  virt/kvm/irq_comm.c      | 32
4 files changed, 38 insertions(+), 61 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3b91ec9982c2..ec9d078b1e8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -364,7 +364,7 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
 #ifdef __KVM_HAVE_IOAPIC
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask);
 #endif
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index d4a7948b010c..b71c0442cecf 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -142,54 +142,57 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
-static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
 {
-	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
 	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-	struct kvm_vcpu *vcpu;
-	int vcpu_id, r = -1;
+	int i, r = -1;
 
-	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
-		     "vector=%x trig_mode=%x\n",
-		     entry.fields.dest, entry.fields.dest_mode,
-		     entry.fields.delivery_mode, entry.fields.vector,
-		     entry.fields.trig_mode);
-
-	/* Always delivery PIT interrupt to vcpu 0 */
-#ifdef CONFIG_X86
-	if (irq == 0) {
-		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-		__set_bit(0, deliver_bitmask);
-	} else
-#endif
-		kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
+	kvm_get_intr_delivery_bitmask(kvm, e, deliver_bitmask);
 
 	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
 		ioapic_debug("no target on destination\n");
-		return 0;
+		return r;
 	}
 
-	while ((vcpu_id = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
 			< KVM_MAX_VCPUS) {
-		__clear_bit(vcpu_id, deliver_bitmask);
-		vcpu = ioapic->kvm->vcpus[vcpu_id];
+		struct kvm_vcpu *vcpu = kvm->vcpus[i];
+		__clear_bit(i, deliver_bitmask);
 		if (vcpu) {
 			if (r < 0)
 				r = 0;
-			r += kvm_apic_set_irq(vcpu,
-					entry.fields.vector,
-					entry.fields.trig_mode,
-					entry.fields.delivery_mode);
+			r += kvm_apic_set_irq(vcpu, e->fields.vector,
+					e->fields.delivery_mode,
+					e->fields.trig_mode);
 		} else
 			ioapic_debug("null destination vcpu: "
 				     "mask=%x vector=%x delivery_mode=%x\n",
-				     entry.fields.deliver_bitmask,
-				     entry.fields.vector,
-				     entry.fields.delivery_mode);
+				     e->fields.deliver_bitmask,
+				     e->fields.vector, e->fields.delivery_mode);
 	}
 	return r;
 }
 
+static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
+{
+	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
+
+	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
+		     "vector=%x trig_mode=%x\n",
+		     entry.fields.dest, entry.fields.dest_mode,
+		     entry.fields.delivery_mode, entry.fields.vector,
+		     entry.fields.trig_mode);
+
+#ifdef CONFIG_X86
+	/* Always delivery PIT interrupt to vcpu 0 */
+	if (irq == 0) {
+		entry.fields.dest_mode = 0; /* Physical mode. */
+		entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+	}
+#endif
+	return ioapic_deliver_entry(ioapic->kvm, &entry);
+}
+
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 {
 	u32 old_irr = ioapic->irr;
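
[Editor's note] One semantic detail in the hunk above is easy to miss: the x86 PIT special case (irq 0) no longer forces bit 0 of the delivery bitmask by hand. The rebuilt ioapic_deliver() instead rewrites the redirection entry itself, physical destination mode with vcpu 0's id, and lets the shared resolver reach the same single target. A minimal sketch of that "rewrite the request, not the result" idea, with invented names and a deliberately simplistic resolver:

    #include <assert.h>

    /* Invented stand-in for the redirection-entry destination fields. */
    struct redirect {
        int dest_mode; /* 0 = physical, nonzero = logical (simplified) */
        int dest_id;
    };

    /* Toy resolver: in physical mode the id selects exactly one vcpu;
     * in logical mode pretend the id is already a destination bitmask. */
    static unsigned resolve_mask(const struct redirect *e)
    {
        return e->dest_mode == 0 ? 1u << e->dest_id : (unsigned)e->dest_id;
    }

    int main(void)
    {
        struct redirect e = { .dest_mode = 1, .dest_id = 0xc };

        /* PIT rerouting: rewrite the entry instead of the result. */
        e.dest_mode = 0; /* physical */
        e.dest_id = 0;   /* vcpu 0's id */

        assert(resolve_mask(&e) == 1u); /* only vcpu 0 is targeted */
        return 0;
    }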
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index c8032ab2a4e2..bedeea59cc1c 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -70,8 +70,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask);
-
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
 #endif
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 325c6685f206..35397a569b24 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -43,12 +43,11 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask)
 {
 	int i;
-	struct kvm *kvm = ioapic->kvm;
 	struct kvm_vcpu *vcpu;
 
 	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
@@ -90,7 +89,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 	switch (entry->fields.delivery_mode) {
 	case IOAPIC_LOWEST_PRIORITY:
 		/* Select one in deliver_bitmask */
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+		vcpu = kvm_get_lowest_prio_vcpu(kvm,
 				entry->fields.vector, deliver_bitmask);
 		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 		if (!vcpu)
@@ -111,13 +110,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		       struct kvm *kvm, int level)
 {
-	int vcpu_id, r = -1;
-	struct kvm_vcpu *vcpu;
-	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	union kvm_ioapic_redirect_entry entry;
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-
-	BUG_ON(!ioapic);
 
 	entry.bits = 0;
 	entry.fields.dest_id = (e->msi.address_lo &
@@ -133,26 +126,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 			(unsigned long *)&e->msi.data);
 
 	/* TODO Deal with RH bit of MSI message address */
-
-	kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
-
-	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-		printk(KERN_WARNING "kvm: no destination for MSI delivery!");
-		return -1;
-	}
-	while ((vcpu_id = find_first_bit(deliver_bitmask,
-				KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
-		__clear_bit(vcpu_id, deliver_bitmask);
-		vcpu = ioapic->kvm->vcpus[vcpu_id];
-		if (vcpu) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu, entry.fields.vector,
-					entry.fields.dest_mode,
-					entry.fields.trig_mode);
-		}
-	}
-	return r;
+	return ioapic_deliver_entry(kvm, &entry);
 }
 
 /* This should be called with the kvm->lock mutex held
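
[Editor's note] After the last hunk, kvm_set_msi() is reduced to decoding the MSI address/data pair into a kvm_ioapic_redirect_entry and delegating to ioapic_deliver_entry(); both interrupt sources now speak the same entry format, which is what made the consolidation possible. As a reminder of what that decode extracts, here is a standalone sketch using the architectural MSI field positions rather than the kernel's masks and shift macros (the example address/data values are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t address_lo = 0xfee01000u; /* example MSI message address */
        uint32_t data       = 0x00000030u; /* example MSI message data */

        unsigned dest_id    = (address_lo >> 12) & 0xffu; /* addr bits 19:12 */
        unsigned dest_mode  = (address_lo >> 2)  & 0x1u;  /* addr bit 2, 0=physical */
        unsigned redir_hint = (address_lo >> 3)  & 0x1u;  /* addr bit 3, the RH TODO */
        unsigned vector     = data & 0xffu;               /* data bits 7:0 */
        unsigned delivery   = (data >> 8) & 0x7u;         /* data bits 10:8 */
        unsigned trig_mode  = (data >> 15) & 0x1u;        /* data bit 15, 0=edge */

        printf("dest_id=%u dest_mode=%u rh=%u vector=%u delivery=%u trig=%u\n",
               dest_id, dest_mode, redir_hint, vector, delivery, trig_mode);
        return 0;
    }

The "RH bit" left as a TODO in the patch is bit 3 of the message address, shown above as redir_hint.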