author     Michael S. Tsirkin <mst@redhat.com>  2012-10-17 12:06:02 -0400
committer  Gleb Natapov <gleb@redhat.com>       2012-12-05 08:10:45 -0500
commit     01f218803757c9ec1152ac2fd39d03c27c452634 (patch)
tree       e64afc26da5c1a1b1b434cf7fda755ddaf436892 /virt/kvm
parent     45e3cc7d9fe69844cd12d51c511e1e98d156bbe1 (diff)
kvm: add kvm_set_irq_inatomic
Add an API to inject an IRQ from atomic context.

Return -EWOULDBLOCK if atomic delivery is impossible (e.g. for multicast).
Only MSI is supported at the moment.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
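The intended call pattern, as a hedged sketch (this caller is not part of the patch; my_irq_ctx, inject_work and my_irq_handler are hypothetical names): code running in atomic context, e.g. a host interrupt handler, tries kvm_set_irq_inatomic() first and falls back to a process-context kvm_set_irq() only when it returns -EWOULDBLOCK.

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/kvm_host.h>

/* Hypothetical per-interrupt context; not part of this patch. */
struct my_irq_ctx {
	struct kvm *kvm;
	int irq_source_id;
	u32 gsi;
	struct work_struct inject_work;	/* worker calls kvm_set_irq() in process context */
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_irq_ctx *ctx = data;

	/* Atomic fast path: only MSI routes (1:1 GSI mapping) can succeed here. */
	if (kvm_set_irq_inatomic(ctx->kvm, ctx->irq_source_id,
				 ctx->gsi, 1) != -EWOULDBLOCK)
		return IRQ_HANDLED;

	/* PIC/IOAPIC (possibly multicast) routes: retry from process context. */
	schedule_work(&ctx->inject_work);
	return IRQ_HANDLED;
}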
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/irq_comm.c  83
1 file changed, 71 insertions(+), 12 deletions(-)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 2eb58af7ee99..656fa455e154 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -102,6 +102,23 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	return r;
 }
 
+static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+				   struct kvm_lapic_irq *irq)
+{
+	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+
+	irq->dest_id = (e->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+	irq->vector = (e->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+	irq->delivery_mode = e->msi.data & 0x700;
+	irq->level = 1;
+	irq->shorthand = 0;
+	/* TODO Deal with RH bit of MSI message address */
+}
+
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		struct kvm *kvm, int irq_source_id, int level)
 {
@@ -110,22 +127,26 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	if (!level)
 		return -1;
 
-	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+	kvm_set_msi_irq(e, &irq);
 
-	irq.dest_id = (e->msi.address_lo &
-			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-	irq.vector = (e->msi.data &
-			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
-	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
-	irq.delivery_mode = e->msi.data & 0x700;
-	irq.level = 1;
-	irq.shorthand = 0;
-
-	/* TODO Deal with RH bit of MSI message address */
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+
+static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
+				struct kvm *kvm)
+{
+	struct kvm_lapic_irq irq;
+	int r;
+
+	kvm_set_msi_irq(e, &irq);
+
+	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+		return r;
+	else
+		return -EWOULDBLOCK;
+}
+
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
 	struct kvm_kernel_irq_routing_entry route;
@@ -178,6 +199,44 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	return ret;
 }
 
+/*
+ * Deliver an IRQ in an atomic context if we can, or return a failure,
+ * user can retry in a process context.
+ * Return value:
+ *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
+ *  Other values - No need to retry.
+ */
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	int ret = -EINVAL;
+	struct kvm_irq_routing_table *irq_rt;
+	struct hlist_node *n;
+
+	trace_kvm_set_irq(irq, level, irq_source_id);
+
+	/*
+	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
+	 * which would need to be retried from thread context; when same GSI
+	 * is connected to both PIC and IOAPIC, we'd have to report a
+	 * partial failure here.
+	 * Since there's no easy way to do this, we only support injecting MSI
+	 * which is limited to 1:1 GSI mapping.
+	 */
+	rcu_read_lock();
+	irq_rt = rcu_dereference(kvm->irq_routing);
+	if (irq < irq_rt->nr_rt_entries)
+		hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+				ret = kvm_set_msi_inatomic(e, kvm);
+			else
+				ret = -EWOULDBLOCK;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	struct kvm_irq_ack_notifier *kian;
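Callers outside irq_comm.c also need a prototype for the new entry point next to kvm_set_irq(). A minimal sketch, assuming the declaration lives in include/linux/kvm_host.h alongside the existing irq-routing helpers; the diff above is limited to virt/kvm, so any header change is not visible here:

/* include/linux/kvm_host.h -- sketch, not part of the diff shown above */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);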