author		Alex Williamson <alex.williamson@redhat.com>	2015-02-06 12:59:16 -0500
committer	Alex Williamson <alex.williamson@redhat.com>	2015-02-06 12:59:16 -0500
commit		babbf1760970f141eb4021288ce0fb7196bc1a23 (patch)
tree		00f84b04dce039df80ed367ca195293ba8194ab3 /drivers/vfio
parent		6fe1010d6d9c02cf3556ab076585104551a6ee7e (diff)
vfio/type1: Chunk contiguous reserved/invalid page mappings
We currently map invalid and reserved pages, such as often occur from
mapping MMIO regions of a VM through the IOMMU, using single pages.
There's really no reason we can't instead follow the methodology we
use for normal pages and find the largest possible physically
contiguous chunk for mapping.  The only difference is that we don't do
locked memory accounting for these, since they're not backed by RAM.

In most applications this will be a very minor improvement, but when
graphics and GPGPU devices are in play, MMIO BARs become non-trivial.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
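
The heart of the change is the chunking loop in vfio_pin_pages(): rather
than returning after a single page whenever the first pfn is
invalid/reserved, the loop now remembers that status in rsvd, extends the
chunk while successive pfns stay physically contiguous and keep the same
status, and charges RLIMIT_MEMLOCK only for RAM-backed chunks.  Below is a
minimal, self-contained userspace sketch of that idea, not kernel code:
the fake_pfn[]/fake_rsvd[] arrays and pin_chunk() are illustrative
assumptions standing in for vaddr_get_pfn() and is_invalid_reserved_pfn().

	#include <stdbool.h>
	#include <stdio.h>

	#define NPAGES 8

	/* Simulated translation: virtual page index -> pfn, plus whether
	 * that pfn is invalid/reserved (e.g. MMIO) or RAM-backed. */
	static const unsigned long fake_pfn[NPAGES] =
		{ 100, 101, 102, 103, 104, 300, 301, 302 };
	static const bool fake_rsvd[NPAGES] =
		{ true, true, true, true, true, false, false, false };

	static long pin_chunk(long first, long npage, long *locked)
	{
		unsigned long pfn_base = fake_pfn[first];
		bool rsvd = fake_rsvd[first]; /* first page's status rules the chunk */
		long i;

		for (i = 1; i < npage; i++) {
			/* Stop when contiguity breaks or the rsvd status flips. */
			if (fake_pfn[first + i] != pfn_base + i ||
			    fake_rsvd[first + i] != rsvd)
				break;
		}

		/* Reserved/invalid pages are not RAM; skip locked-memory accounting. */
		if (!rsvd)
			*locked += i;

		return i;
	}

	int main(void)
	{
		long locked = 0, done = 0;

		while (done < NPAGES) {
			long n = pin_chunk(done, NPAGES - done, &locked);

			printf("chunk: %ld pages at pfn %lu (%s)\n",
			       n, fake_pfn[done],
			       fake_rsvd[done] ? "reserved" : "RAM");
			done += n;
		}
		printf("locked_vm charged: %ld pages\n", locked);
		return 0;
	}

Running this splits the eight pages into a 5-page reserved chunk (pfns
100-104) and a 3-page RAM chunk (pfns 300-302), and only the latter is
charged against locked_vm -- the same behavioral split the patch
introduces in the kernel loop.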
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e6e7f155bdd9..35c90089478e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -265,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
 	long ret, i;
+	bool rsvd;
 
 	if (!current->mm)
 		return -ENODEV;
@@ -273,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	if (ret)
 		return ret;
 
-	if (is_invalid_reserved_pfn(*pfn_base))
-		return 1;
+	rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-	if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
 		put_pfn(*pfn_base, prot);
 		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
 			limit << PAGE_SHIFT);
@@ -284,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 	}
 
 	if (unlikely(disable_hugepages)) {
-		vfio_lock_acct(1);
+		if (!rsvd)
+			vfio_lock_acct(1);
 		return 1;
 	}
 
@@ -296,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		if (ret)
 			break;
 
-		if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+		if (pfn != *pfn_base + i ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
 			put_pfn(pfn, prot);
 			break;
 		}
 
-		if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+		if (!rsvd && !lock_cap &&
+		    current->mm->locked_vm + i + 1 > limit) {
			put_pfn(pfn, prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 				__func__, limit << PAGE_SHIFT);
@@ -309,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		}
 	}
 
-	vfio_lock_acct(i);
+	if (!rsvd)
+		vfio_lock_acct(i);
 
 	return i;
 }
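
For context, the chunk-uniformity test above hinges on
is_invalid_reserved_pfn().  Conceptually, a pfn counts as invalid or
reserved when it has no struct page at all (pfn_valid() is false, as for
most MMIO) or when its page is marked reserved.  A simplified sketch of
that logic, assuming only those two checks; the real helper in
vfio_iommu_type1.c also handles compound/huge page details omitted here:

	#include <linux/mm.h>

	/* Simplified sketch: treat a pfn as invalid/reserved when it has no
	 * backing struct page (e.g. an MMIO BAR) or when its page is
	 * PageReserved().  Such pfns are never charged against
	 * RLIMIT_MEMLOCK. */
	static bool pfn_is_invalid_or_reserved(unsigned long pfn)
	{
		if (!pfn_valid(pfn))	/* no struct page: typical for MMIO */
			return true;

		return PageReserved(pfn_to_page(pfn));
	}

The chunk must be uniform in this status because accounting is applied
once per chunk via vfio_lock_acct(i); mixing RAM and reserved pages in a
single chunk would mis-account locked memory, which is why the loop
compares each page's status against rsvd instead of testing it in
isolation.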