path: root/virt/kvm/irq_comm.c
author		Alexander Graf <agraf@suse.de>	2013-04-15 17:04:10 -0400
committer	Alexander Graf <agraf@suse.de>	2013-04-26 14:27:17 -0400
commit		1c9f8520bda73c07fed9bcdb307854b45a3a60c4 (patch)
tree		c9cdfb21218b33982781d0dadb2e4455a14c0ca4 /virt/kvm/irq_comm.c
parent		aa8d5944b8b2809e574581abbf41894089b7def2 (diff)
KVM: Extract generic irqchip logic into irqchip.c
The current irq_comm.c file contains pieces of code that are generic
across different irqchip implementations, as well as code that is fully
IOAPIC specific. Split the generic bits out into irqchip.c.

Signed-off-by: Alexander Graf <agraf@suse.de>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
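For context, the heart of the generic logic being moved is the kvm_set_irq() dispatch loop visible in the diff below: snapshot the routing entries for a GSI under rcu_read_lock(), then invoke each entry's ->set callback and fold the results into a single return value (< 0 ignored, 0 coalesced, > 0 number of CPUs delivered to). The following is a simplified, user-space sketch of that pattern for illustration only; the struct, function, and callback names are stand-ins, not the kernel's own API.

/* Illustrative sketch only -- mirrors the snapshot-then-dispatch and
 * result-aggregation logic of kvm_set_irq(); not the kernel code. */
#include <stdio.h>

#define NR_IRQCHIPS 3

struct routing_entry {
	int gsi;
	int (*set)(struct routing_entry *e, int irq, int level);
};

/* Toy stand-in for an irqchip's ->set hook; reports one CPU hit. */
static int demo_set(struct routing_entry *e, int irq, int level)
{
	printf("deliver gsi %d (irq %d, level %d)\n", e->gsi, irq, level);
	return 1;
}

static int set_irq(struct routing_entry *map, int nr, int irq, int level)
{
	struct routing_entry snapshot[NR_IRQCHIPS];
	int ret = -1, i = 0;

	/* The kernel takes this snapshot under rcu_read_lock(). */
	for (int j = 0; j < nr && i < NR_IRQCHIPS; j++)
		if (map[j].gsi == irq)
			snapshot[i++] = map[j];

	/* Callbacks run outside the lookup; sum the delivered CPU counts,
	 * ignoring any chip that reports a negative (masked) result. */
	while (i--) {
		int r = snapshot[i].set(&snapshot[i], irq, level);
		if (r < 0)
			continue;
		ret = r + ((ret < 0) ? 0 : ret);
	}
	return ret;
}

int main(void)
{
	struct routing_entry map[] = {
		{ .gsi = 4, .set = demo_set },
		{ .gsi = 4, .set = demo_set },
	};

	printf("result: %d\n", set_irq(map, 2, 4, 1));
	return 0;
}

Because the entries are copied before any callback runs, the dispatch does not depend on how a particular irqchip implements ->set, which is what makes this code irqchip-agnostic and suitable for the new irqchip.c.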
Diffstat (limited to 'virt/kvm/irq_comm.c')
-rw-r--r--	virt/kvm/irq_comm.c	118
1 file changed, 0 insertions, 118 deletions
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 7c0071de9e85..d5008f4aade7 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -151,59 +151,6 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
 	return -EWOULDBLOCK;
 }
 
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
-{
-	struct kvm_kernel_irq_routing_entry route;
-
-	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
-		return -EINVAL;
-
-	route.msi.address_lo = msi->address_lo;
-	route.msi.address_hi = msi->address_hi;
-	route.msi.data = msi->data;
-
-	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
-}
-
-/*
- * Return value:
- *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
- *  = 0   Interrupt was coalesced (previous irq is still pending)
- *  > 0   Number of CPUs interrupt was delivered to
- */
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
-		bool line_status)
-{
-	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
-	int ret = -1, i = 0;
-	struct kvm_irq_routing_table *irq_rt;
-
-	trace_kvm_set_irq(irq, level, irq_source_id);
-
-	/* Not possible to detect if the guest uses the PIC or the
-	 * IOAPIC.  So set the bit in both. The guest will ignore
-	 * writes to the unused one.
-	 */
-	rcu_read_lock();
-	irq_rt = rcu_dereference(kvm->irq_routing);
-	if (irq < irq_rt->nr_rt_entries)
-		hlist_for_each_entry(e, &irq_rt->map[irq], link)
-			irq_set[i++] = *e;
-	rcu_read_unlock();
-
-	while(i--) {
-		int r;
-		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
-				line_status);
-		if (r < 0)
-			continue;
-
-		ret = r + ((ret < 0) ? 0 : ret);
-	}
-
-	return ret;
-}
-
 /*
  * Deliver an IRQ in an atomic context if we can, or return a failure,
  * user can retry in a process context.
@@ -241,63 +188,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	return ret;
 }
 
-bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
-	struct kvm_irq_ack_notifier *kian;
-	int gsi;
-
-	rcu_read_lock();
-	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-	if (gsi != -1)
-		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-					 link)
-			if (kian->gsi == gsi) {
-				rcu_read_unlock();
-				return true;
-			}
-
-	rcu_read_unlock();
-
-	return false;
-}
-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
-
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
-	struct kvm_irq_ack_notifier *kian;
-	int gsi;
-
-	trace_kvm_ack_irq(irqchip, pin);
-
-	rcu_read_lock();
-	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-	if (gsi != -1)
-		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-					 link)
-			if (kian->gsi == gsi)
-				kian->irq_acked(kian);
-	rcu_read_unlock();
-}
-
-void kvm_register_irq_ack_notifier(struct kvm *kvm,
-				   struct kvm_irq_ack_notifier *kian)
-{
-	mutex_lock(&kvm->irq_lock);
-	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
-	mutex_unlock(&kvm->irq_lock);
-	kvm_vcpu_request_scan_ioapic(kvm);
-}
-
-void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
-				    struct kvm_irq_ack_notifier *kian)
-{
-	mutex_lock(&kvm->irq_lock);
-	hlist_del_init_rcu(&kian->link);
-	mutex_unlock(&kvm->irq_lock);
-	synchronize_rcu();
-	kvm_vcpu_request_scan_ioapic(kvm);
-}
-
 int kvm_request_irq_source_id(struct kvm *kvm)
 {
 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
@@ -381,13 +271,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
 	rcu_read_unlock();
 }
 
-void kvm_free_irq_routing(struct kvm *kvm)
-{
-	/* Called only during vm destruction. Nobody can use the pointer
-	   at this stage */
-	kfree(kvm->irq_routing);
-}
-
 static int setup_routing_entry(struct kvm_irq_routing_table *rt,
 			       struct kvm_kernel_irq_routing_entry *e,
 			       const struct kvm_irq_routing_entry *ue)
@@ -451,7 +334,6 @@ out:
 	return r;
 }
 
-
 int kvm_set_irq_routing(struct kvm *kvm,
 			const struct kvm_irq_routing_entry *ue,
 			unsigned nr,