author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 22:44:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-06 22:44:27 -0400
commit		f605ba97fb80522656c7dce9825a908f1e765b57 (patch)
tree		cd7d7e1ebd2d59562a61ee45c350392ab00c7ed6
parent		016c6f25d11af187f93324d5c591f5d6f63dfb75 (diff)
parent		da9147140fe3de5a3a3fe5fe7f69739d4f39bea1 (diff)
Merge tag 'vfio-v4.17-rc1' of git://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson:

 - Adopt iommu_unmap_fast() interface to type1 backend (Suravee
   Suthikulpanit)

 - mdev sample driver fixup (Shunyong Yang)

 - More efficient PFN mapping handling in type1 backend (Jason Cai)

 - VFIO device ioeventfd interface (Alex Williamson)

 - Tag new vfio-platform sub-maintainer (Alex Williamson)

* tag 'vfio-v4.17-rc1' of git://github.com/awilliam/linux-vfio:
  MAINTAINERS: vfio/platform: Update sub-maintainer
  vfio/pci: Add ioeventfd support
  vfio/pci: Use endian neutral helpers
  vfio/pci: Pull BAR mapping setup from read-write path
  vfio/type1: Improve memory pinning process for raw PFN mapping
  vfio-mdev/samples: change RDI interrupt condition
  vfio/type1: Adopt fast IOTLB flush interface when unmap IOVAs
-rw-r--r--	MAINTAINERS				|   2
-rw-r--r--	drivers/vfio/pci/vfio_pci.c		|  35
-rw-r--r--	drivers/vfio/pci/vfio_pci_private.h	|  19
-rw-r--r--	drivers/vfio/pci/vfio_pci_rdwr.c	| 184
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c		| 151
-rw-r--r--	include/uapi/linux/vfio.h		|  27
-rw-r--r--	samples/vfio-mdev/mtty.c		|   2
7 files changed, 377 insertions, 43 deletions
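
The headline addition in this pull is the VFIO device ioeventfd interface (see the include/uapi/linux/vfio.h and vfio/pci hunks below). As a rough, hypothetical illustration only — this series adds just the kernel side — userspace could register a 4-byte doorbell write along these lines; the BAR index, register offset, data value, and helper name are placeholders, and error handling is minimal:

/*
 * Hypothetical userspace sketch: ask vfio-pci to write 'doorbell_val' to
 * BAR0 + 'doorbell_off' whenever the returned eventfd is signaled.
 * 'device_fd' is an already-open VFIO device fd; names and offsets here
 * are illustrative, not taken from this series.
 */
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int register_doorbell_ioeventfd(int device_fd, uint64_t doorbell_off,
					uint32_t doorbell_val)
{
	struct vfio_region_info reg = {
		.argsz = sizeof(reg),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};
	struct vfio_device_ioeventfd ioeventfd = { .argsz = sizeof(ioeventfd) };
	int efd;

	/* Learn where BAR0 sits in the device fd's offset space. */
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg))
		return -1;

	efd = eventfd(0, 0);
	if (efd < 0)
		return -1;

	ioeventfd.flags  = VFIO_DEVICE_IOEVENTFD_32;	/* 4-byte write */
	ioeventfd.offset = reg.offset + doorbell_off;	/* device fd offset */
	ioeventfd.data   = doorbell_val;
	ioeventfd.fd     = efd;				/* -1 would de-assign */

	if (ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioeventfd))
		return -1;

	return efd;
}

The returned eventfd could then be signaled directly, or plausibly wired into KVM's KVM_IOEVENTFD so that a guest MMIO write triggers the registered host write without bouncing through userspace; de-assigning uses the same ioctl with fd = -1, as the uapi comment below notes.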
diff --git a/MAINTAINERS b/MAINTAINERS
index 83f5a6fd7de3..7e48624f4f9f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14797,7 +14797,7 @@ F: include/linux/mdev.h
 F:	samples/vfio-mdev/
 
 VFIO PLATFORM DRIVER
-M:	Baptiste Reynal <b.reynal@virtualopensystems.com>
+M:	Eric Auger <eric.auger@redhat.com>
 L:	kvm@vger.kernel.org
 S:	Maintained
 F:	drivers/vfio/platform/
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 8a1508a8e481..b423a309a6e0 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -302,6 +302,7 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 {
 	struct pci_dev *pdev = vdev->pdev;
 	struct vfio_pci_dummy_resource *dummy_res, *tmp;
+	struct vfio_pci_ioeventfd *ioeventfd, *ioeventfd_tmp;
 	int i, bar;
 
 	/* Stop the device from further DMA */
@@ -311,6 +312,15 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 				VFIO_IRQ_SET_ACTION_TRIGGER,
 				vdev->irq_type, 0, 0, NULL);
 
+	/* Device closed, don't need mutex here */
+	list_for_each_entry_safe(ioeventfd, ioeventfd_tmp,
+				 &vdev->ioeventfds_list, next) {
+		vfio_virqfd_disable(&ioeventfd->virqfd);
+		list_del(&ioeventfd->next);
+		kfree(ioeventfd);
+	}
+	vdev->ioeventfds_nr = 0;
+
 	vdev->virq_disabled = false;
 
 	for (i = 0; i < vdev->num_regions; i++)
@@ -1009,6 +1019,28 @@ hot_reset_release:
 
 		kfree(groups);
 		return ret;
+	} else if (cmd == VFIO_DEVICE_IOEVENTFD) {
+		struct vfio_device_ioeventfd ioeventfd;
+		int count;
+
+		minsz = offsetofend(struct vfio_device_ioeventfd, fd);
+
+		if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (ioeventfd.argsz < minsz)
+			return -EINVAL;
+
+		if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+			return -EINVAL;
+
+		count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+
+		if (hweight8(count) != 1 || ioeventfd.fd < -1)
+			return -EINVAL;
+
+		return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
+					  ioeventfd.data, count, ioeventfd.fd);
 	}
 
 	return -ENOTTY;
@@ -1171,6 +1203,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	vdev->irq_type = VFIO_PCI_NUM_IRQS;
 	mutex_init(&vdev->igate);
 	spin_lock_init(&vdev->irqlock);
+	mutex_init(&vdev->ioeventfds_lock);
+	INIT_LIST_HEAD(&vdev->ioeventfds_list);
 
 	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
 	if (ret) {
@@ -1212,6 +1246,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
 
 	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
 	kfree(vdev->region);
+	mutex_destroy(&vdev->ioeventfds_lock);
 	kfree(vdev);
 
 	if (vfio_pci_is_vga(pdev)) {
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index f561ac1c78a0..cde3b5d3441a 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -29,6 +29,19 @@
 #define PCI_CAP_ID_INVALID		0xFF	/* default raw access */
 #define PCI_CAP_ID_INVALID_VIRT	0xFE	/* default virt access */
 
+/* Cap maximum number of ioeventfds per device (arbitrary) */
+#define VFIO_PCI_IOEVENTFD_MAX		1000
+
+struct vfio_pci_ioeventfd {
+	struct list_head	next;
+	struct virqfd		*virqfd;
+	void __iomem		*addr;
+	uint64_t		data;
+	loff_t			pos;
+	int			bar;
+	int			count;
+};
+
 struct vfio_pci_irq_ctx {
 	struct eventfd_ctx	*trigger;
 	struct virqfd		*unmask;
@@ -92,9 +105,12 @@ struct vfio_pci_device {
 	bool			nointx;
 	struct pci_saved_state	*pci_saved_state;
 	int			refcnt;
+	int			ioeventfds_nr;
 	struct eventfd_ctx	*err_trigger;
 	struct eventfd_ctx	*req_trigger;
 	struct list_head	dummy_resources_list;
+	struct mutex		ioeventfds_lock;
+	struct list_head	ioeventfds_list;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -120,6 +136,9 @@ extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
 			       size_t count, loff_t *ppos, bool iswrite);
 
+extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
+			       uint64_t data, int count, int fd);
+
 extern int vfio_pci_init_perm_bits(void);
 extern void vfio_pci_uninit_perm_bits(void);
 
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 357243d76f10..a6029d0a5524 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -17,10 +17,29 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/vfio.h>
 #include <linux/vgaarb.h>
 
 #include "vfio_pci_private.h"
 
+#ifdef __LITTLE_ENDIAN
+#define vfio_ioread64	ioread64
+#define vfio_iowrite64	iowrite64
+#define vfio_ioread32	ioread32
+#define vfio_iowrite32	iowrite32
+#define vfio_ioread16	ioread16
+#define vfio_iowrite16	iowrite16
+#else
+#define vfio_ioread64	ioread64be
+#define vfio_iowrite64	iowrite64be
+#define vfio_ioread32	ioread32be
+#define vfio_iowrite32	iowrite32be
+#define vfio_ioread16	ioread16be
+#define vfio_iowrite16	iowrite16be
+#endif
+#define vfio_ioread8	ioread8
+#define vfio_iowrite8	iowrite8
+
 /*
  * Read or write from an __iomem region (MMIO or I/O port) with an excluded
  * range which is inaccessible.  The excluded range drops writes and fills
@@ -44,15 +63,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 			fillable = 0;
 
 		if (fillable >= 4 && !(off % 4)) {
-			__le32 val;
+			u32 val;
 
 			if (iswrite) {
 				if (copy_from_user(&val, buf, 4))
 					return -EFAULT;
 
-				iowrite32(le32_to_cpu(val), io + off);
+				vfio_iowrite32(val, io + off);
 			} else {
-				val = cpu_to_le32(ioread32(io + off));
+				val = vfio_ioread32(io + off);
 
 				if (copy_to_user(buf, &val, 4))
 					return -EFAULT;
@@ -60,15 +79,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 
 			filled = 4;
 		} else if (fillable >= 2 && !(off % 2)) {
-			__le16 val;
+			u16 val;
 
 			if (iswrite) {
 				if (copy_from_user(&val, buf, 2))
 					return -EFAULT;
 
-				iowrite16(le16_to_cpu(val), io + off);
+				vfio_iowrite16(val, io + off);
 			} else {
-				val = cpu_to_le16(ioread16(io + off));
+				val = vfio_ioread16(io + off);
 
 				if (copy_to_user(buf, &val, 2))
 					return -EFAULT;
@@ -82,9 +101,9 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 				if (copy_from_user(&val, buf, 1))
 					return -EFAULT;
 
-				iowrite8(val, io + off);
+				vfio_iowrite8(val, io + off);
 			} else {
-				val = ioread8(io + off);
+				val = vfio_ioread8(io + off);
 
 				if (copy_to_user(buf, &val, 1))
 					return -EFAULT;
@@ -113,6 +132,30 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 	return done;
 }
 
+static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
+{
+	struct pci_dev *pdev = vdev->pdev;
+	int ret;
+	void __iomem *io;
+
+	if (vdev->barmap[bar])
+		return 0;
+
+	ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+	if (ret)
+		return ret;
+
+	io = pci_iomap(pdev, bar, 0);
+	if (!io) {
+		pci_release_selected_regions(pdev, 1 << bar);
+		return -ENOMEM;
+	}
+
+	vdev->barmap[bar] = io;
+
+	return 0;
+}
+
 ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 			size_t count, loff_t *ppos, bool iswrite)
 {
@@ -147,22 +190,13 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 		if (!io)
 			return -ENOMEM;
 		x_end = end;
-	} else if (!vdev->barmap[bar]) {
-		int ret;
-
-		ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+	} else {
+		int ret = vfio_pci_setup_barmap(vdev, bar);
 		if (ret)
 			return ret;
 
-		io = pci_iomap(pdev, bar, 0);
-		if (!io) {
-			pci_release_selected_regions(pdev, 1 << bar);
-			return -ENOMEM;
-		}
-
-		vdev->barmap[bar] = io;
-	} else
 		io = vdev->barmap[bar];
+	}
 
 	if (bar == vdev->msix_bar) {
 		x_start = vdev->msix_offset;
@@ -242,3 +276,113 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
 
 	return done;
 }
+
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+{
+	struct vfio_pci_ioeventfd *ioeventfd = opaque;
+
+	switch (ioeventfd->count) {
+	case 1:
+		vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
+		break;
+	case 2:
+		vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
+		break;
+	case 4:
+		vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
+		break;
+#ifdef iowrite64
+	case 8:
+		vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
+		break;
+#endif
+	}
+
+	return 0;
+}
+
+long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
+			uint64_t data, int count, int fd)
+{
+	struct pci_dev *pdev = vdev->pdev;
+	loff_t pos = offset & VFIO_PCI_OFFSET_MASK;
+	int ret, bar = VFIO_PCI_OFFSET_TO_INDEX(offset);
+	struct vfio_pci_ioeventfd *ioeventfd;
+
+	/* Only support ioeventfds into BARs */
+	if (bar > VFIO_PCI_BAR5_REGION_INDEX)
+		return -EINVAL;
+
+	if (pos + count > pci_resource_len(pdev, bar))
+		return -EINVAL;
+
+	/* Disallow ioeventfds working around MSI-X table writes */
+	if (bar == vdev->msix_bar &&
+	    !(pos + count <= vdev->msix_offset ||
+	      pos >= vdev->msix_offset + vdev->msix_size))
+		return -EINVAL;
+
+#ifndef iowrite64
+	if (count == 8)
+		return -EINVAL;
+#endif
+
+	ret = vfio_pci_setup_barmap(vdev, bar);
+	if (ret)
+		return ret;
+
+	mutex_lock(&vdev->ioeventfds_lock);
+
+	list_for_each_entry(ioeventfd, &vdev->ioeventfds_list, next) {
+		if (ioeventfd->pos == pos && ioeventfd->bar == bar &&
+		    ioeventfd->data == data && ioeventfd->count == count) {
+			if (fd == -1) {
+				vfio_virqfd_disable(&ioeventfd->virqfd);
+				list_del(&ioeventfd->next);
+				vdev->ioeventfds_nr--;
+				kfree(ioeventfd);
+				ret = 0;
+			} else
+				ret = -EEXIST;
+
+			goto out_unlock;
+		}
+	}
+
+	if (fd < 0) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	if (vdev->ioeventfds_nr >= VFIO_PCI_IOEVENTFD_MAX) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
+	ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL);
+	if (!ioeventfd) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	ioeventfd->addr = vdev->barmap[bar] + pos;
+	ioeventfd->data = data;
+	ioeventfd->pos = pos;
+	ioeventfd->bar = bar;
+	ioeventfd->count = count;
+
+	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
+				 NULL, NULL, &ioeventfd->virqfd, fd);
+	if (ret) {
+		kfree(ioeventfd);
+		goto out_unlock;
+	}
+
+	list_add(&ioeventfd->next, &vdev->ioeventfds_list);
+	vdev->ioeventfds_nr++;
+
+out_unlock:
+	mutex_unlock(&vdev->ioeventfds_lock);
+
+	return ret;
+}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 45657e2b1ff7..5c212bf29640 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -102,6 +102,13 @@ struct vfio_pfn {
 	atomic_t		ref_count;
 };
 
+struct vfio_regions {
+	struct list_head list;
+	dma_addr_t iova;
+	phys_addr_t phys;
+	size_t len;
+};
+
 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
 					(!list_empty(&iommu->domain_list))
 
@@ -397,7 +404,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 {
 	unsigned long pfn = 0;
 	long ret, pinned = 0, lock_acct = 0;
-	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
 	/* This code path is only user initiated */
@@ -408,14 +414,23 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	if (ret)
 		return ret;
 
+	if (is_invalid_reserved_pfn(*pfn_base)) {
+		struct vm_area_struct *vma;
+
+		down_read(&current->mm->mmap_sem);
+		vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
+		pinned = min_t(long, npage, vma_pages(vma));
+		up_read(&current->mm->mmap_sem);
+		return pinned;
+	}
+
 	pinned++;
-	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
 	/*
 	 * Reserved pages aren't counted against the user, externally pinned
 	 * pages are already counted against the user.
 	 */
-	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+	if (!vfio_find_vpfn(dma, iova)) {
 		if (!lock_cap && current->mm->locked_vm + 1 > limit) {
 			put_pfn(*pfn_base, dma->prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -435,13 +450,12 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + pinned ||
-		    rsvd != is_invalid_reserved_pfn(pfn)) {
+		if (pfn != *pfn_base + pinned) {
 			put_pfn(pfn, dma->prot);
 			break;
 		}
 
-		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+		if (!vfio_find_vpfn(dma, iova)) {
 			if (!lock_cap &&
 			    current->mm->locked_vm + lock_acct + 1 > limit) {
 				put_pfn(pfn, dma->prot);
@@ -459,10 +473,8 @@ out:
 
 unpin_out:
 	if (ret) {
-		if (!rsvd) {
-			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
-				put_pfn(pfn, dma->prot);
-		}
+		for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+			put_pfn(pfn, dma->prot);
 
 		return ret;
 	}
@@ -660,11 +672,102 @@ unpin_exit:
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
+static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
+			    struct list_head *regions)
+{
+	long unlocked = 0;
+	struct vfio_regions *entry, *next;
+
+	iommu_tlb_sync(domain->domain);
+
+	list_for_each_entry_safe(entry, next, regions, list) {
+		unlocked += vfio_unpin_pages_remote(dma,
+						    entry->iova,
+						    entry->phys >> PAGE_SHIFT,
+						    entry->len >> PAGE_SHIFT,
+						    false);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	cond_resched();
+
+	return unlocked;
+}
+
+/*
+ * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
+ * Therefore, when using IOTLB flush sync interface, VFIO need to keep track
+ * of these regions (currently using a list).
+ *
+ * This value specifies maximum number of regions for each IOTLB flush sync.
+ */
+#define VFIO_IOMMU_TLB_SYNC_MAX		512
+
+static size_t unmap_unpin_fast(struct vfio_domain *domain,
+			       struct vfio_dma *dma, dma_addr_t *iova,
+			       size_t len, phys_addr_t phys, long *unlocked,
+			       struct list_head *unmapped_list,
+			       int *unmapped_cnt)
+{
+	size_t unmapped = 0;
+	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (entry) {
+		unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+
+		if (!unmapped) {
+			kfree(entry);
+		} else {
+			iommu_tlb_range_add(domain->domain, *iova, unmapped);
+			entry->iova = *iova;
+			entry->phys = phys;
+			entry->len = unmapped;
+			list_add_tail(&entry->list, unmapped_list);
+
+			*iova += unmapped;
+			(*unmapped_cnt)++;
+		}
+	}
+
+	/*
+	 * Sync if the number of fast-unmap regions hits the limit
+	 * or in case of errors.
+	 */
+	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
+		*unlocked += vfio_sync_unpin(dma, domain,
+					     unmapped_list);
+		*unmapped_cnt = 0;
+	}
+
+	return unmapped;
+}
+
+static size_t unmap_unpin_slow(struct vfio_domain *domain,
+			       struct vfio_dma *dma, dma_addr_t *iova,
+			       size_t len, phys_addr_t phys,
+			       long *unlocked)
+{
+	size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+
+	if (unmapped) {
+		*unlocked += vfio_unpin_pages_remote(dma, *iova,
+						     phys >> PAGE_SHIFT,
+						     unmapped >> PAGE_SHIFT,
+						     false);
+		*iova += unmapped;
+		cond_resched();
+	}
+	return unmapped;
+}
+
 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 			     bool do_accounting)
 {
 	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
 	struct vfio_domain *domain, *d;
+	LIST_HEAD(unmapped_region_list);
+	int unmapped_region_cnt = 0;
 	long unlocked = 0;
 
 	if (!dma->size)
@@ -710,20 +813,26 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 			break;
 		}
 
-		unmapped = iommu_unmap(domain->domain, iova, len);
-		if (WARN_ON(!unmapped))
-			break;
-
-		unlocked += vfio_unpin_pages_remote(dma, iova,
-						    phys >> PAGE_SHIFT,
-						    unmapped >> PAGE_SHIFT,
-						    false);
-		iova += unmapped;
-
-		cond_resched();
+		/*
+		 * First, try to use fast unmap/unpin. In case of failure,
+		 * switch to slow unmap/unpin path.
+		 */
+		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+					    &unlocked, &unmapped_region_list,
+					    &unmapped_region_cnt);
+		if (!unmapped) {
+			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+						    phys, &unlocked);
+			if (WARN_ON(!unmapped))
+				break;
+		}
 	}
 
 	dma->iommu_mapped = false;
+
+	if (unmapped_region_cnt)
+		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+
 	if (do_accounting) {
 		vfio_lock_acct(dma->task, -unlocked, NULL);
 		return 0;
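
For orientation on the type1 change just above: plain iommu_unmap() implies an IOTLB flush on every call, whereas the new path unmaps with iommu_unmap_fast(), queues each range with iommu_tlb_range_add(), and issues one iommu_tlb_sync() per batch, only then unpinning the queued pages. A condensed, hypothetical sketch of that pattern — not a verbatim excerpt from the patch, and using the IOMMU flush API as it exists in this kernel series — looks like this:

/*
 * Condensed, hypothetical sketch of the deferred-flush pattern adopted above.
 * 'unmap_one_deferred' is an illustrative name; VFIO_IOMMU_TLB_SYNC_MAX is
 * the batch limit introduced by the patch.
 */
static size_t unmap_one_deferred(struct iommu_domain *dom, dma_addr_t iova,
				 size_t len, int *batched)
{
	size_t unmapped = iommu_unmap_fast(dom, iova, len);

	if (unmapped) {
		/* Record the range; the actual IOTLB flush is deferred. */
		iommu_tlb_range_add(dom, iova, unmapped);
		(*batched)++;
	}

	/* Flush once per batch, or immediately on an unmap failure. */
	if (*batched >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
		iommu_tlb_sync(dom);
		/* only now is it safe to unpin the pages behind the batch */
		*batched = 0;
	}

	return unmapped;
}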
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index c74372163ed2..1aa7b82e8169 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -575,6 +575,33 @@ struct vfio_device_gfx_plane_info {
 
 #define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
 
+/**
+ * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
+ *                              struct vfio_device_ioeventfd)
+ *
+ * Perform a write to the device at the specified device fd offset, with
+ * the specified data and width when the provided eventfd is triggered.
+ * vfio bus drivers may not support this for all regions, for all widths,
+ * or at all.  vfio-pci currently only enables support for BAR regions,
+ * excluding the MSI-X vector table.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_ioeventfd {
+	__u32	argsz;
+	__u32	flags;
+#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
+#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
+#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
+#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
+#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
+	__u64	offset;			/* device fd offset of write */
+	__u64	data;			/* data to be written */
+	__s32	fd;			/* -1 for de-assignment */
+};
+
+#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)
+
 /* -------- API for Type1 VFIO IOMMU -------- */
 
 /**
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 09f255bdf3ac..7abb79d8313d 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -534,7 +534,7 @@ static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
 
 	/* Interrupt priority 2: Fifo trigger level reached */
 	if ((ier & UART_IER_RDI) &&
-	    (mdev_state->s[index].rxtx.count ==
+	    (mdev_state->s[index].rxtx.count >=
 	     mdev_state->s[index].intr_trigger_level))
 		*buf |= UART_IIR_RDI;
 