author    Andy Lutomirski <luto@kernel.org>    2016-02-03 00:46:39 -0500
committer Michael S. Tsirkin <mst@redhat.com>  2016-03-02 10:01:58 -0500
commit    7a5589b240b405d55b2b395554082ec284f414bb (patch)
tree      9f33193d968818ec232015ab60b191142bcb14ed
parent    b42111382f0e677e2e227c5c4894423cbdaed1f1 (diff)
virtio_pci: Use the DMA API if enabled
This switches to vring_create_virtqueue, simplifying the driver and
adding DMA API support.

This fixes virtio-pci on platforms and busses that have IOMMUs. This
will break the experimental QEMU Q35 IOMMU support until QEMU is
fixed. In exchange, it fixes physical virtio hardware as well as
virtio-pci running under Xen.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
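For context, the heart of the change is the allocation call: instead of
allocating the ring pages by hand and passing them to
vring_new_virtqueue(), the driver lets the vring core allocate the ring
through the DMA API and reads the bus address back. A condensed
before/after sketch of the legacy path (assembled from the hunks below,
not verbatim; error handling omitted):

	/* Before: the driver allocates the ring pages itself. */
	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
	vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN,
				 &vp_dev->vdev, true /* weak_barriers */,
				 info->queue, vp_notify, callback, name);
	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* After: the vring core allocates the ring via the DMA API.
	 * The new bool is may_reduce_num: false for legacy virtio-pci,
	 * whose ring size is fixed by the device, true for modern. */
	vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN,
				    &vp_dev->vdev, true /* weak_barriers */,
				    false /* may_reduce_num */,
				    vp_notify, callback, name);
	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);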
-rw-r--r--  drivers/virtio/virtio_pci_common.h |  6
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 42
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 61
3 files changed, 33 insertions(+), 76 deletions(-)
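Both probe functions below also gain the same DMA mask negotiation:
try a 64-bit mask first, fall back to 32-bit, and warn (but continue)
if neither can be set. A minimal sketch of that pattern as it appears
in both files (the helper name vp_set_dma_masks is illustrative, not
part of the patch):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper condensing the probe-time mask setup added
	 * to virtio_pci_legacy_probe() and virtio_pci_modern_probe(). */
	static void vp_set_dma_masks(struct pci_dev *pci_dev)
	{
		int rc;

		/* Prefer full 64-bit DMA addressing... */
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
		if (rc)
			/* ...fall back to 32-bit if the platform refuses. */
			rc = dma_set_mask_and_coherent(&pci_dev->dev,
						       DMA_BIT_MASK(32));
		if (rc)
			dev_warn(&pci_dev->dev,
				 "Failed to enable 64-bit or 32-bit DMA\n");
		/* Probe continues either way; the device may still work. */
	}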
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 2cc252270b2d..28263200ed42 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -35,12 +35,6 @@ struct virtio_pci_vq_info {
 	/* the actual virtqueue */
 	struct virtqueue *vq;
 
-	/* the number of entries in the queue */
-	int num;
-
-	/* the virtual address of the ring queue */
-	void *queue;
-
 	/* the list node for the virtqueues list */
 	struct list_head node;
 
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 48bc9797e530..8c4e61783441 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -119,7 +119,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 				  u16 msix_vec)
 {
 	struct virtqueue *vq;
-	unsigned long size;
 	u16 num;
 	int err;
 
@@ -131,27 +130,19 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
-	info->num = num;
 	info->msix_vector = msix_vec;
 
-	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
-	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
-	if (info->queue == NULL)
+	/* create the vring */
+	vq = vring_create_virtqueue(index, num,
+				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
+				    true, false, vp_notify, callback, name);
+	if (!vq)
 		return ERR_PTR(-ENOMEM);
 
 	/* activate the queue */
-	iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
 		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
-	/* create the vring */
-	vq = vring_new_virtqueue(index, info->num,
-				 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
-				 true, info->queue, vp_notify, callback, name);
-	if (!vq) {
-		err = -ENOMEM;
-		goto out_activate_queue;
-	}
-
 	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
 
 	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
@@ -159,17 +150,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
 		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
 			err = -EBUSY;
-			goto out_assign;
+			goto out_deactivate;
 		}
 	}
 
 	return vq;
 
-out_assign:
-	vring_del_virtqueue(vq);
-out_activate_queue:
+out_deactivate:
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
-	free_pages_exact(info->queue, size);
+	vring_del_virtqueue(vq);
 	return ERR_PTR(err);
 }
 
@@ -177,7 +166,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 {
 	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
-	unsigned long size;
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
@@ -188,13 +176,10 @@ static void del_vq(struct virtio_pci_vq_info *info)
 		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
 	}
 
-	vring_del_virtqueue(vq);
-
 	/* Select and deactivate the queue */
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
-	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
-	free_pages_exact(info->queue, size);
+	vring_del_virtqueue(vq);
 }
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -227,6 +212,13 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
 		return -ENODEV;
 	}
 
+	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pci_dev->dev,
+					       DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
 	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
 	if (rc)
 		return rc;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7760fc1a2218..f6f28cc7eb45 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -287,31 +287,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 	return vp_ioread16(&vp_dev->common->msix_config);
 }
 
-static size_t vring_pci_size(u16 num)
-{
-	/* We only need a cacheline separation. */
-	return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
-}
-
-static void *alloc_virtqueue_pages(int *num)
-{
-	void *pages;
-
-	/* TODO: allocate each queue chunk individually */
-	for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
-		pages = alloc_pages_exact(vring_pci_size(*num),
-					  GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
-		if (pages)
-			return pages;
-	}
-
-	if (!*num)
-		return NULL;
-
-	/* Try to get a single page. You are my only hope! */
-	return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
-}
-
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 				  struct virtio_pci_vq_info *info,
 				  unsigned index,
@@ -343,29 +318,22 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
-	info->num = num;
 	info->msix_vector = msix_vec;
 
-	info->queue = alloc_virtqueue_pages(&info->num);
-	if (info->queue == NULL)
-		return ERR_PTR(-ENOMEM);
-
 	/* create the vring */
-	vq = vring_new_virtqueue(index, info->num,
-				 SMP_CACHE_BYTES, &vp_dev->vdev,
-				 true, info->queue, vp_notify, callback, name);
-	if (!vq) {
-		err = -ENOMEM;
-		goto err_new_queue;
-	}
+	vq = vring_create_virtqueue(index, num,
+				    SMP_CACHE_BYTES, &vp_dev->vdev,
+				    true, true, vp_notify, callback, name);
+	if (!vq)
+		return ERR_PTR(-ENOMEM);
 
 	/* activate the queue */
-	vp_iowrite16(num, &cfg->queue_size);
-	vp_iowrite64_twopart(virt_to_phys(info->queue),
+	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
+	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
 			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
-	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
 			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
-	vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
 			     &cfg->queue_used_lo, &cfg->queue_used_hi);
 
 	if (vp_dev->notify_base) {
@@ -410,8 +378,6 @@ err_assign_vector:
 	pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
 err_map_notify:
 	vring_del_virtqueue(vq);
-err_new_queue:
-	free_pages_exact(info->queue, vring_pci_size(info->num));
 	return ERR_PTR(err);
 }
 
@@ -456,8 +422,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
 	pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
 
 	vring_del_virtqueue(vq);
-
-	free_pages_exact(info->queue, vring_pci_size(info->num));
 }
 
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
@@ -641,6 +605,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 		return -EINVAL;
 	}
 
+	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&pci_dev->dev,
+					       DMA_BIT_MASK(32));
+	if (err)
+		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
 	/* Device capability is only mandatory for devices that have
 	 * device-specific configuration.
 	 */