-rw-r--r--  drivers/virtio/virtio_pci_common.c  117
-rw-r--r--  drivers/virtio/virtio_pci_common.h   25
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c    6
-rw-r--r--  drivers/virtio/virtio_pci_modern.c    6
4 files changed, 39 insertions(+), 115 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 186cbab327b8..a33767318cbf 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -62,16 +62,13 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
-	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
-	unsigned long flags;
+	struct virtqueue *vq;
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_for_each_entry(info, &vp_dev->virtqueues, node) {
-		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
+		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
@@ -167,55 +164,6 @@ error:
 	return err;
 }
 
-static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
-				     void (*callback)(struct virtqueue *vq),
-				     const char *name,
-				     u16 msix_vec)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
-	struct virtqueue *vq;
-	unsigned long flags;
-
-	/* fill out our structure that represents an active queue */
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
-	if (IS_ERR(vq))
-		goto out_info;
-
-	info->vq = vq;
-	if (callback) {
-		spin_lock_irqsave(&vp_dev->lock, flags);
-		list_add(&info->node, &vp_dev->virtqueues);
-		spin_unlock_irqrestore(&vp_dev->lock, flags);
-	} else {
-		INIT_LIST_HEAD(&info->node);
-	}
-
-	vp_dev->vqs[index] = info;
-	return vq;
-
-out_info:
-	kfree(info);
-	return vq;
-}
-
-static void vp_del_vq(struct virtqueue *vq)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
-	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
-	unsigned long flags;
-
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
-
-	vp_dev->del_vq(info);
-	kfree(info);
-}
-
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
@@ -224,16 +172,15 @@ void vp_del_vqs(struct virtio_device *vdev)
 	int i;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		if (vp_dev->per_vq_vectors) {
-			int v = vp_dev->vqs[vq->index]->msix_vector;
+		if (vp_dev->msix_vector_map) {
+			int v = vp_dev->msix_vector_map[vq->index];
 
 			if (v != VIRTIO_MSI_NO_VECTOR)
 				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
 					 vq);
 		}
-		vp_del_vq(vq);
+		vp_dev->del_vq(vq);
 	}
-	vp_dev->per_vq_vectors = false;
 
 	if (vp_dev->intx_enabled) {
 		free_irq(vp_dev->pci_dev->irq, vp_dev);
@@ -261,8 +208,8 @@ void vp_del_vqs(struct virtio_device *vdev)
 	vp_dev->msix_names = NULL;
 	kfree(vp_dev->msix_affinity_masks);
 	vp_dev->msix_affinity_masks = NULL;
-	kfree(vp_dev->vqs);
-	vp_dev->vqs = NULL;
+	kfree(vp_dev->msix_vector_map);
+	vp_dev->msix_vector_map = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
@@ -275,10 +222,6 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 	u16 msix_vec;
 	int i, err, nvectors, allocated_vectors;
 
-	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
-	if (!vp_dev->vqs)
-		return -ENOMEM;
-
 	if (per_vq_vectors) {
 		/* Best option: one for change interrupt, one per vq. */
 		nvectors = 1;
@@ -294,7 +237,13 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 	if (err)
 		goto error_find;
 
-	vp_dev->per_vq_vectors = per_vq_vectors;
+	if (per_vq_vectors) {
+		vp_dev->msix_vector_map = kmalloc_array(nvqs,
+				sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
+		if (!vp_dev->msix_vector_map)
+			goto error_find;
+	}
+
 	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
@@ -304,19 +253,25 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 
 		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-		else if (vp_dev->per_vq_vectors)
+		else if (per_vq_vectors)
 			msix_vec = allocated_vectors++;
 		else
 			msix_vec = VP_MSIX_VQ_VECTOR;
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
+		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
+				msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
 			goto error_find;
 		}
 
-		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
+		if (!per_vq_vectors)
 			continue;
 
+		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
+			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+			continue;
+		}
+
 		/* allocate per-vq irq if available and necessary */
 		snprintf(vp_dev->msix_names[msix_vec],
 			 sizeof *vp_dev->msix_names,
@@ -326,8 +281,12 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 				 vring_interrupt, 0,
 				 vp_dev->msix_names[msix_vec],
 				 vqs[i]);
-		if (err)
+		if (err) {
+			/* don't free this irq on error */
+			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
 			goto error_find;
+		}
+		vp_dev->msix_vector_map[i] = msix_vec;
 	}
 	return 0;
 
@@ -343,23 +302,18 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i, err;
 
-	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
-	if (!vp_dev->vqs)
-		return -ENOMEM;
-
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
 		goto out_del_vqs;
 
 	vp_dev->intx_enabled = 1;
-	vp_dev->per_vq_vectors = false;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
 				VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
@@ -409,16 +363,15 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
-	struct cpumask *mask;
-	unsigned int irq;
 
 	if (!vq->callback)
 		return -EINVAL;
 
 	if (vp_dev->msix_enabled) {
-		mask = vp_dev->msix_affinity_masks[info->msix_vector];
-		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
+		int vec = vp_dev->msix_vector_map[vq->index];
+		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
+		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
+
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
@@ -498,8 +451,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
-	INIT_LIST_HEAD(&vp_dev->virtqueues);
-	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */
 	rc = pci_enable_device(pci_dev);
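
Note for readers tracing the new control flow (not part of the patch): the per-queue struct virtio_pci_vq_info used to carry the virtqueue pointer, a list node for interrupt dispatch, and the queue's MSI-X vector. After this change the dispatch loop walks the vdev.vqs list the core already maintains, and the vector is kept in a flat array indexed by queue number. The standalone userspace sketch below is purely illustrative; all toy_* names are made up and only model the bookkeeping pattern, not the kernel APIs.

/* Toy model of "flat map instead of per-queue info object". */
#include <stdio.h>
#include <stdlib.h>

#define TOY_NO_VECTOR 0xffffu		/* stands in for VIRTIO_MSI_NO_VECTOR */

struct toy_vq {
	unsigned index;
	void (*callback)(struct toy_vq *vq);
};

struct toy_device {
	unsigned nvqs;
	struct toy_vq *vqs;		/* the queues themselves */
	unsigned *msix_vector_map;	/* queue index -> vector, or TOY_NO_VECTOR */
};

/* Dispatch: visit every queue that has a callback, the way
 * vp_vring_interrupt() now walks vdev.vqs instead of a private list. */
static void toy_vring_interrupt(struct toy_device *dev)
{
	for (unsigned i = 0; i < dev->nvqs; i++)
		if (dev->vqs[i].callback)
			dev->vqs[i].callback(&dev->vqs[i]);
}

/* Teardown: the vector to release is found by queue index, the way
 * vp_del_vqs() now consults msix_vector_map. */
static void toy_del_vqs(struct toy_device *dev)
{
	for (unsigned i = 0; i < dev->nvqs; i++) {
		unsigned v = dev->msix_vector_map[i];

		if (v != TOY_NO_VECTOR)
			printf("free irq for vector %u (vq %u)\n", v, i);
	}
	free(dev->msix_vector_map);
	dev->msix_vector_map = NULL;
}

static void rx_done(struct toy_vq *vq)
{
	printf("rx callback on vq %u\n", vq->index);
}

int main(void)
{
	struct toy_vq queues[2] = { { 0, rx_done }, { 1, NULL } };
	unsigned *map = malloc(2 * sizeof(*map));
	struct toy_device dev = { 2, queues, map };

	if (!map)
		return 1;
	map[0] = 1;			/* vq 0 was given MSI-X vector 1 */
	map[1] = TOY_NO_VECTOR;		/* vq 1 has no callback, no vector */

	toy_vring_interrupt(&dev);
	toy_del_vqs(&dev);
	return 0;
}
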
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index b2f666250ae0..2038887bdf23 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -31,17 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
-struct virtio_pci_vq_info {
-	/* the actual virtqueue */
-	struct virtqueue *vq;
-
-	/* the list node for the virtqueues list */
-	struct list_head node;
-
-	/* MSI-X vector (or none) */
-	unsigned msix_vector;
-};
-
 /* Our device structure */
 struct virtio_pci_device {
 	struct virtio_device vdev;
@@ -75,13 +64,6 @@ struct virtio_pci_device {
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
 
-	/* a list of queues so we can dispatch IRQs */
-	spinlock_t lock;
-	struct list_head virtqueues;
-
-	/* array of all queues for house-keeping */
-	struct virtio_pci_vq_info **vqs;
-
 	/* MSI-X support */
 	int msix_enabled;
 	int intx_enabled;
@@ -94,16 +76,15 @@ struct virtio_pci_device {
 	/* Vectors allocated, excluding per-vq vectors if any */
 	unsigned msix_used_vectors;
 
-	/* Whether we have vector per vq */
-	bool per_vq_vectors;
+	/* Map of per-VQ MSI-X vectors, may be NULL */
+	unsigned *msix_vector_map;
 
 	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
-				      struct virtio_pci_vq_info *info,
 				      unsigned idx,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name,
 				      u16 msix_vec);
-	void (*del_vq)(struct virtio_pci_vq_info *info);
+	void (*del_vq)(struct virtqueue *vq);
 
 	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6d9e5173d5fa..47292dad0ff9 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -112,7 +112,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
-				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -130,8 +129,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
-	info->msix_vector = msix_vec;
-
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -162,9 +159,8 @@ out_deactivate:
 	return ERR_PTR(err);
 }
 
-static void del_vq(struct virtio_pci_vq_info *info)
+static void del_vq(struct virtqueue *vq)
 {
-	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 4bf7ab375894..00e6fc1df407 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -293,7 +293,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
-				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -323,8 +322,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
-	info->msix_vector = msix_vec;
-
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -409,9 +406,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 }
 
-static void del_vq(struct virtio_pci_vq_info *info)
+static void del_vq(struct virtqueue *vq)
 {
-	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
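
Background on the per_vq_vectors split that vp_find_vqs_msix() keeps: with per-queue vectors the driver budgets one MSI-X vector for configuration changes plus one per virtqueue that has a callback ("Best option: one for change interrupt, one per vq"); the fallback shares a single vector (VP_MSIX_VQ_VECTOR) among all queues next to the config vector. The small standalone sketch below only illustrates that accounting under those assumptions; the queue mix and numbers are hypothetical, and it is not code from the patch.

/* Illustrative vector budget for the two MSI-X modes. */
#include <stdio.h>
#include <stdbool.h>

static unsigned vectors_needed(unsigned nvqs, const bool *has_callback,
			       bool per_vq_vectors)
{
	unsigned n;

	if (per_vq_vectors) {
		/* one vector for config changes, one per vq with a callback */
		n = 1;
		for (unsigned i = 0; i < nvqs; i++)
			if (has_callback[i])
				n++;
	} else {
		/* shared mode: one for config changes, one for all vqs */
		n = 2;
	}
	return n;
}

int main(void)
{
	/* e.g. rx and tx with callbacks, a third queue without one */
	bool cbs[3] = { true, true, false };

	printf("per-vq vectors: %u\n", vectors_needed(3, cbs, true));	/* 3 */
	printf("shared vectors: %u\n", vectors_needed(3, cbs, false));	/* 2 */
	return 0;
}
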