summary | refs | log | tree | commit | diff | stats
path: root/fs/userfaultfd.c
diff options
context:
space:
mode:
author	Mark Rutland <mark.rutland@arm.com>	2017-10-23 17:07:29 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-10-25 05:01:08 -0400
commit	6aa7de059173a986114ac43b8f50b297a86f09a8 (patch)
tree	77666afe795e022914ca26433d61686c694dc4fd /fs/userfaultfd.c
parent	b03a0fe0c5e4b46dcd400d27395b124499554a71 (diff)
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'fs/userfaultfd.c')
-rw-r--r--	fs/userfaultfd.c	| 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1c713fd5b3e6..f46d133c0949 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * in __get_user_pages if userfaultfd_release waits on the
 	 * caller of handle_userfault to release the mmap_sem.
 	 */
-	if (unlikely(ACCESS_ONCE(ctx->released))) {
+	if (unlikely(READ_ONCE(ctx->released))) {
 		/*
 		 * Don't return VM_FAULT_SIGBUS in this case, so a non
 		 * cooperative manager can close the uffd after the
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 			   vmf->flags, reason);
 	up_read(&mm->mmap_sem);
 
-	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+	if (likely(must_wait && !READ_ONCE(ctx->released) &&
 		   (return_to_userland ? !signal_pending(current) :
 		    !fatal_signal_pending(current)))) {
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 		set_current_state(TASK_KILLABLE);
 		if (ewq->msg.event == 0)
 			break;
-		if (ACCESS_ONCE(ctx->released) ||
+		if (READ_ONCE(ctx->released) ||
 		    fatal_signal_pending(current)) {
 			/*
 			 * &ewq->wq may be queued in fork_event, but
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	struct userfaultfd_wake_range range = { .len = 0, };
 	unsigned long new_flags;
 
-	ACCESS_ONCE(ctx->released) = true;
+	WRITE_ONCE(ctx->released, true);
 
 	if (!mmget_not_zero(mm))
 		goto wakeup;