path: root/fs/userfaultfd.c
Diffstat (limited to 'fs/userfaultfd.c')
-rw-r--r--	fs/userfaultfd.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 2d97952e341a..85959d8324df 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -257,10 +257,9 @@ out:
  * fatal_signal_pending()s, and the mmap_sem must be released before
  * returning it.
  */
-int handle_userfault(struct vm_area_struct *vma, unsigned long address,
-		     unsigned int flags, unsigned long reason)
+int handle_userfault(struct fault_env *fe, unsigned long reason)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct mm_struct *mm = fe->vma->vm_mm;
 	struct userfaultfd_ctx *ctx;
 	struct userfaultfd_wait_queue uwq;
 	int ret;
@@ -269,7 +268,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
 	ret = VM_FAULT_SIGBUS;
-	ctx = vma->vm_userfaultfd_ctx.ctx;
+	ctx = fe->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
 
@@ -302,17 +301,17 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	 * without first stopping userland access to the memory. For
 	 * VM_UFFD_MISSING userfaults this is enough for now.
 	 */
-	if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
 		/*
 		 * Validate the invariant that nowait must allow retry
 		 * to be sure not to return SIGBUS erroneously on
 		 * nowait invocations.
 		 */
-		BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
+		BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
 #ifdef CONFIG_DEBUG_VM
 		if (printk_ratelimit()) {
 			printk(KERN_WARNING
-			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
+			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags);
 			dump_stack();
 		}
 #endif
@@ -324,7 +323,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	 * and wait.
 	 */
 	ret = VM_FAULT_RETRY;
-	if (flags & FAULT_FLAG_RETRY_NOWAIT)
+	if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
 		goto out;
 
 	/* take the reference before dropping the mmap_sem */
@@ -332,10 +331,11 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 
 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
 	uwq.wq.private = current;
-	uwq.msg = userfault_msg(address, flags, reason);
+	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
 	uwq.ctx = ctx;
 
-	return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+	return_to_userland =
+		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
@@ -353,7 +353,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 			  TASK_KILLABLE);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
-	must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
 	up_read(&mm->mmap_sem);
 
 	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
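
The hunks above convert handle_userfault() from taking the fault parameters individually (vma, address, flags) to taking a single struct fault_env descriptor, reading the same values through fe->vma, fe->address and fe->flags. As a minimal sketch, assuming only the fields this diff actually uses (the authoritative definition lives in the mm headers and bundles additional page-table state not visible here), the interface looks roughly like:

/*
 * Sketch only: the fields of struct fault_env that this diff reads.
 * The real kernel definition carries further fault state so it can be
 * threaded through the whole fault path without growing every signature.
 */
struct fault_env {
	struct vm_area_struct *vma;	/* VMA the fault hit; was the old 'vma' argument */
	unsigned long address;		/* faulting virtual address; was 'address' */
	unsigned int flags;		/* FAULT_FLAG_* bits; was 'flags' */
};

/* New entry point: one fault descriptor plus the userfaultfd reason bits. */
int handle_userfault(struct fault_env *fe, unsigned long reason);

Bundling the arguments into one structure means later changes can add per-fault state without touching the signature of every handler in the fault path.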