author     Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 15:38:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-11-13 15:38:26 -0500
commit     8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree       a4ba543649219cbb28d91aab65b785d763f5d069 /fs/userfaultfd.c
parent     6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent     450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle are:
- Another attempt at enabling cross-release lockdep dependency
tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
with better performance and fewer false positives. (Byungchul Park)
- Introduce lockdep_assert_irqs_enabled()/disabled() and convert
open-coded equivalents to lockdep variants. (Frederic Weisbecker)
- Add down_read_killable() and use it in the VFS's iterate_dir()
method. (Kirill Tkhai)
- Convert remaining uses of ACCESS_ONCE() to
READ_ONCE()/WRITE_ONCE(). Most of the conversion was Coccinelle
driven. (Mark Rutland, Paul E. McKenney)
- Get rid of lockless_dereference(), by strengthening Alpha atomics,
strengthening READ_ONCE() with smp_read_barrier_depends() and thus
being able to convert users of lockless_dereference() to
READ_ONCE(). (Will Deacon)
- Various micro-optimizations:
  - better PV qspinlocks (Waiman Long)
  - better x86 barriers (Michael S. Tsirkin)
  - better x86 refcounts (Kees Cook)
- ... plus other fixes and enhancements. (Borislav Petkov, Juergen
Gross, Miguel Bernal Marin)"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
rcu: Use lockdep to assert IRQs are disabled/enabled
netpoll: Use lockdep to assert IRQs are disabled/enabled
timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
irq_work: Use lockdep to assert IRQs are disabled/enabled
irq/timings: Use lockdep to assert IRQs are disabled/enabled
perf/core: Use lockdep to assert IRQs are disabled/enabled
x86: Use lockdep to assert IRQs are disabled/enabled
smp/core: Use lockdep to assert IRQs are disabled/enabled
timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
timers/nohz: Use lockdep to assert IRQs are disabled/enabled
workqueue: Use lockdep to assert IRQs are disabled/enabled
irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
locking/pvqspinlock: Implement hybrid PV queued/unfair locks
locking/rwlocks: Fix comments
x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
...
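A few of the items in the pull message lend themselves to short illustrations; the sketches below are not code from this series. First, the lockdep_assert_irqs_enabled()/disabled() conversion pattern: the helper name is invented, and the open-coded check quoted in the comment is just one of the equivalents being replaced.

#include <linux/irqflags.h>
#include <linux/lockdep.h>

/* Hypothetical helper that must only run with hard IRQs disabled. */
static void example_touch_percpu_state(void)
{
        /*
         * Open-coded form, evaluated on every kernel whether or not
         * any debugging is enabled:
         *
         *         WARN_ON_ONCE(!irqs_disabled());
         *
         * lockdep variant: compiles to nothing unless lockdep's IRQ
         * state tracking is configured in, and reports through the
         * usual lockdep machinery.
         */
        lockdep_assert_irqs_disabled();

        /* ... manipulate per-CPU data here ... */
}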
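Next, the down_read_killable() usage pattern. The helper and the semaphore here are hypothetical, but the return convention (0 on success, -EINTR when a fatal signal arrives) is the new API's; iterate_dir() now takes inode->i_rwsem in the same way, so an OOM-killed task is not left stuck in an unkillable sleep.

#include <linux/errno.h>
#include <linux/rwsem.h>

/* Hypothetical read-side path that should remain killable while
 * waiting for the rwsem rather than sleeping in D state forever. */
static int example_read_op(struct rw_semaphore *sem)
{
        if (down_read_killable(sem))
                return -EINTR;  /* fatal signal arrived, lock not taken */

        /* ... read-side critical section ... */

        up_read(sem);
        return 0;
}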
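Finally, the retirement of lockless_dereference(), as a before/after sketch with invented names. The writer side is assumed to publish the pointer with smp_store_release() or rcu_assign_pointer(), so the reader only needs the address-dependency ordering that READ_ONCE() now provides on its own.

#include <linux/compiler.h>

struct example_cfg {
        int value;
};

/* Pointer published elsewhere with smp_store_release() or
 * rcu_assign_pointer(); the names are illustrative only. */
static struct example_cfg *example_cfg_ptr;

static int example_read_cfg(void)
{
        struct example_cfg *cfg;

        /*
         * Previously this would have been:
         *         cfg = lockless_dereference(example_cfg_ptr);
         * i.e. READ_ONCE() plus smp_read_barrier_depends(). With this
         * series the dependency barrier is folded into READ_ONCE()
         * (and only Alpha emits anything for it), so the plain
         * READ_ONCE() below is sufficient.
         */
        cfg = READ_ONCE(example_cfg_ptr);

        return cfg ? cfg->value : -1;
}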
Diffstat (limited to 'fs/userfaultfd.c')
-rw-r--r--  fs/userfaultfd.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1c713fd5b3e6..f46d133c0949 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
          * in __get_user_pages if userfaultfd_release waits on the
          * caller of handle_userfault to release the mmap_sem.
          */
-        if (unlikely(ACCESS_ONCE(ctx->released))) {
+        if (unlikely(READ_ONCE(ctx->released))) {
                 /*
                  * Don't return VM_FAULT_SIGBUS in this case, so a non
                  * cooperative manager can close the uffd after the
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                                                        vmf->flags, reason);
         up_read(&mm->mmap_sem);
 
-        if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+        if (likely(must_wait && !READ_ONCE(ctx->released) &&
                    (return_to_userland ? !signal_pending(current) :
                     !fatal_signal_pending(current)))) {
                 wake_up_poll(&ctx->fd_wqh, POLLIN);
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                 set_current_state(TASK_KILLABLE);
                 if (ewq->msg.event == 0)
                         break;
-                if (ACCESS_ONCE(ctx->released) ||
+                if (READ_ONCE(ctx->released) ||
                     fatal_signal_pending(current)) {
                         /*
                          * &ewq->wq may be queued in fork_event, but
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
         struct userfaultfd_wake_range range = { .len = 0, };
         unsigned long new_flags;
 
-        ACCESS_ONCE(ctx->released) = true;
+        WRITE_ONCE(ctx->released, true);
 
         if (!mmget_not_zero(mm))
                 goto wakeup;
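For context on the hunks above: ctx->released is a flag that userfaultfd_release() sets once and that the fault and event paths poll without holding a lock, so both sides are marked to keep the compiler from tearing, caching, or reordering the accesses. A simplified sketch of that pattern follows; the names and types are reduced from fs/userfaultfd.c for illustration.

#include <linux/compiler.h>
#include <linux/types.h>

struct example_ctx {
        bool released;
};

/* Release side: publish the flag exactly once; WRITE_ONCE() keeps the
 * store from being torn or optimized away by the compiler. */
static void example_release(struct example_ctx *ctx)
{
        WRITE_ONCE(ctx->released, true);
}

/* Polling side: re-read the flag on every check; READ_ONCE() keeps the
 * compiler from hoisting the load out of a loop or reusing a stale
 * value. */
static bool example_is_released(struct example_ctx *ctx)
{
        return READ_ONCE(ctx->released);
}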