about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Alex Williamson <alex.williamson@redhat.com> 2017-04-13 16:10:15 -0400
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2017-05-20 08:28:38 -0400
commit9f43f70dcc56ac294aeb8c0b29766a7a1cb1aec0 (patch)
tree7102213a0eada53a918cd2d46efe56bbb2b7e5bb
parent1773131ec4b945a286b1cdb692bb632958408b3b (diff)
vfio/type1: Remove locked page accounting workqueue
commit 0cfef2b7410b64d7a430947e0b533314c4f97153 upstream. If the mmap_sem is contented then the vfio type1 IOMMU backend will defer locked page accounting updates to a workqueue task. This has a few problems and depending on which side the user tries to play, they might be over-penalized for unmaps that haven't yet been accounted or race the workqueue to enter more mappings than they're allowed. The original intent of this workqueue mechanism seems to be focused on reducing latency through the ioctl, but we cannot do so at the cost of correctness. Remove this workqueue mechanism and update the callers to allow for failure. We can also now recheck the limit under write lock to make sure we don't exceed it. vfio_pin_pages_remote() also now necessarily includes an unwind path which we can jump to directly if the consecutive page pinning finds that we're exceeding the user's memory limits. This avoids the current lazy approach which does accounting and mapping up to the fault, only to return an error on the next iteration to unwind the entire vfio_dma. Reviewed-by: Peter Xu <peterx@redhat.com> Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com> Signed-off-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 102 +++++++++++++++++-----------------
1 file changed, 44 insertions(+), 58 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba19424e4a1..1d48e62f4f52 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -130,57 +130,36 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
130 rb_erase(&old->node, &iommu->dma_list); 130 rb_erase(&old->node, &iommu->dma_list);
131} 131}
132 132
133struct vwork { 133static int vfio_lock_acct(long npage, bool *lock_cap)
134 struct mm_struct *mm;
135 long npage;
136 struct work_struct work;
137};
138
139/* delayed decrement/increment for locked_vm */
140static void vfio_lock_acct_bg(struct work_struct *work)
141{ 134{
142 struct vwork *vwork = container_of(work, struct vwork, work); 135 int ret;
143 struct mm_struct *mm;
144
145 mm = vwork->mm;
146 down_write(&mm->mmap_sem);
147 mm->locked_vm += vwork->npage;
148 up_write(&mm->mmap_sem);
149 mmput(mm);
150 kfree(vwork);
151}
152 136
153static void vfio_lock_acct(long npage) 137 if (!npage)
154{ 138 return 0;
155 struct vwork *vwork;
156 struct mm_struct *mm;
157 139
158 if (!current->mm || !npage) 140 if (!current->mm)
159 return; /* process exited or nothing to do */ 141 return -ESRCH; /* process exited */
142
143 ret = down_write_killable(&current->mm->mmap_sem);
144 if (!ret) {
145 if (npage > 0) {
146 if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
147 unsigned long limit;
148
149 limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
150
151 if (current->mm->locked_vm + npage > limit)
152 ret = -ENOMEM;
153 }
154 }
155
156 if (!ret)
157 current->mm->locked_vm += npage;
160 158
161 if (down_write_trylock(&current->mm->mmap_sem)) {
162 current->mm->locked_vm += npage;
163 up_write(&current->mm->mmap_sem); 159 up_write(&current->mm->mmap_sem);
164 return;
165 } 160 }
166 161
167 /* 162 return ret;
168 * Couldn't get mmap_sem lock, so must setup to update
169 * mm->locked_vm later. If locked_vm were atomic, we
170 * wouldn't need this silliness
171 */
172 vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
173 if (!vwork)
174 return;
175 mm = get_task_mm(current);
176 if (!mm) {
177 kfree(vwork);
178 return;
179 }
180 INIT_WORK(&vwork->work, vfio_lock_acct_bg);
181 vwork->mm = mm;
182 vwork->npage = npage;
183 schedule_work(&vwork->work);
184} 163}
185 164
186/* 165/*
@@ -262,9 +241,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
262static long vfio_pin_pages(unsigned long vaddr, long npage, 241static long vfio_pin_pages(unsigned long vaddr, long npage,
263 int prot, unsigned long *pfn_base) 242 int prot, unsigned long *pfn_base)
264{ 243{
265 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 244 unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
266 bool lock_cap = capable(CAP_IPC_LOCK); 245 bool lock_cap = capable(CAP_IPC_LOCK);
267 long ret, i; 246 long ret, i = 1;
268 bool rsvd; 247 bool rsvd;
269 248
270 if (!current->mm) 249 if (!current->mm)
@@ -283,16 +262,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
283 return -ENOMEM; 262 return -ENOMEM;
284 } 263 }
285 264
286 if (unlikely(disable_hugepages)) { 265 if (unlikely(disable_hugepages))
287 if (!rsvd) 266 goto out;
288 vfio_lock_acct(1);
289 return 1;
290 }
291 267
292 /* Lock all the consecutive pages from pfn_base */ 268 /* Lock all the consecutive pages from pfn_base */
293 for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) { 269 for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
294 unsigned long pfn = 0;
295
296 ret = vaddr_get_pfn(vaddr, prot, &pfn); 270 ret = vaddr_get_pfn(vaddr, prot, &pfn);
297 if (ret) 271 if (ret)
298 break; 272 break;
@@ -308,12 +282,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
308 put_pfn(pfn, prot); 282 put_pfn(pfn, prot);
309 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", 283 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
310 __func__, limit << PAGE_SHIFT); 284 __func__, limit << PAGE_SHIFT);
311 break; 285 ret = -ENOMEM;
286 goto unpin_out;
312 } 287 }
313 } 288 }
314 289
290out:
315 if (!rsvd) 291 if (!rsvd)
316 vfio_lock_acct(i); 292 ret = vfio_lock_acct(i, &lock_cap);
293
294unpin_out:
295 if (ret) {
296 if (!rsvd) {
297 for (pfn = *pfn_base ; i ; pfn++, i--)
298 put_pfn(pfn, prot);
299 }
300
301 return ret;
302 }
317 303
318 return i; 304 return i;
319} 305}
@@ -328,7 +314,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
328 unlocked += put_pfn(pfn++, prot); 314 unlocked += put_pfn(pfn++, prot);
329 315
330 if (do_accounting) 316 if (do_accounting)
331 vfio_lock_acct(-unlocked); 317 vfio_lock_acct(-unlocked, NULL);
332 318
333 return unlocked; 319 return unlocked;
334} 320}
@@ -390,7 +376,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
390 cond_resched(); 376 cond_resched();
391 } 377 }
392 378
393 vfio_lock_acct(-unlocked); 379 vfio_lock_acct(-unlocked, NULL);
394} 380}
395 381
396static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) 382static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)