aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlex Williamson <alex.williamson@redhat.com>2018-06-02 10:41:44 -0400
committerAlex Williamson <alex.williamson@redhat.com>2018-06-02 10:41:44 -0400
commit89c29def6b0101fff66a3d74d0178b844f88d732 (patch)
tree8da69b007e6652aa4e5a9750b18a52b5e3b4e55c
parent0512e0134582ef85dee77d51aae77dcd1edec495 (diff)
Revert "vfio/type1: Improve memory pinning process for raw PFN mapping"
Bisection by Amadeusz Sławiński implicates this commit as leading to bad page state issues after VM shutdown, likely due to unbalanced page references. The original commit was intended only as a performance improvement, therefore revert for offline rework. Link: https://lkml.org/lkml/2018/6/2/97 Fixes: 356e88ebe447 ("vfio/type1: Improve memory pinning process for raw PFN mapping") Cc: Jason Cai (Xiang Feng) <jason.cai@linux.alibaba.com> Reported-by: Amadeusz Sławiński <amade@asmblr.net> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
-rw-r--r--drivers/vfio/vfio_iommu_type1.c25
1 file changed, 10 insertions, 15 deletions
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5c212bf29640..3c082451ab1a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
404{ 404{
405 unsigned long pfn = 0; 405 unsigned long pfn = 0;
406 long ret, pinned = 0, lock_acct = 0; 406 long ret, pinned = 0, lock_acct = 0;
407 bool rsvd;
407 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 408 dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
408 409
409 /* This code path is only user initiated */ 410 /* This code path is only user initiated */
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
414 if (ret) 415 if (ret)
415 return ret; 416 return ret;
416 417
417 if (is_invalid_reserved_pfn(*pfn_base)) {
418 struct vm_area_struct *vma;
419
420 down_read(&current->mm->mmap_sem);
421 vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
422 pinned = min_t(long, npage, vma_pages(vma));
423 up_read(&current->mm->mmap_sem);
424 return pinned;
425 }
426
427 pinned++; 418 pinned++;
419 rsvd = is_invalid_reserved_pfn(*pfn_base);
428 420
429 /* 421 /*
430 * Reserved pages aren't counted against the user, externally pinned 422 * Reserved pages aren't counted against the user, externally pinned
431 * pages are already counted against the user. 423 * pages are already counted against the user.
432 */ 424 */
433 if (!vfio_find_vpfn(dma, iova)) { 425 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
434 if (!lock_cap && current->mm->locked_vm + 1 > limit) { 426 if (!lock_cap && current->mm->locked_vm + 1 > limit) {
435 put_pfn(*pfn_base, dma->prot); 427 put_pfn(*pfn_base, dma->prot);
436 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, 428 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
450 if (ret) 442 if (ret)
451 break; 443 break;
452 444
453 if (pfn != *pfn_base + pinned) { 445 if (pfn != *pfn_base + pinned ||
446 rsvd != is_invalid_reserved_pfn(pfn)) {
454 put_pfn(pfn, dma->prot); 447 put_pfn(pfn, dma->prot);
455 break; 448 break;
456 } 449 }
457 450
458 if (!vfio_find_vpfn(dma, iova)) { 451 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
459 if (!lock_cap && 452 if (!lock_cap &&
460 current->mm->locked_vm + lock_acct + 1 > limit) { 453 current->mm->locked_vm + lock_acct + 1 > limit) {
461 put_pfn(pfn, dma->prot); 454 put_pfn(pfn, dma->prot);
@@ -473,8 +466,10 @@ out:
473 466
474unpin_out: 467unpin_out:
475 if (ret) { 468 if (ret) {
476 for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 469 if (!rsvd) {
477 put_pfn(pfn, dma->prot); 470 for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
471 put_pfn(pfn, dma->prot);
472 }
478 473
479 return ret; 474 return ret;
480 } 475 }