diff options
Diffstat (limited to 'fs')
-rw-r--r-- | fs/aio.c | 2 | ||||
-rw-r--r-- | fs/buffer.c | 3 | ||||
-rw-r--r-- | fs/crypto/keyinfo.c | 2 | ||||
-rw-r--r-- | fs/direct-io.c | 2 | ||||
-rw-r--r-- | fs/exec.c | 2 | ||||
-rw-r--r-- | fs/fcntl.c | 2 | ||||
-rw-r--r-- | fs/fs_pin.c | 4 | ||||
-rw-r--r-- | fs/fuse/dev.c | 2 | ||||
-rw-r--r-- | fs/inode.c | 2 | ||||
-rw-r--r-- | fs/namei.c | 4 | ||||
-rw-r--r-- | fs/namespace.c | 2 | ||||
-rw-r--r-- | fs/nfs/dir.c | 8 | ||||
-rw-r--r-- | fs/proc/array.c | 2 | ||||
-rw-r--r-- | fs/proc_namespace.c | 2 | ||||
-rw-r--r-- | fs/splice.c | 2 | ||||
-rw-r--r-- | fs/userfaultfd.c | 8 | ||||
-rw-r--r-- | fs/xfs/xfs_log_priv.h | 4 |
17 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | |||
576 | * actually has a cancel function, hence the cmpxchg() | 576 | * actually has a cancel function, hence the cmpxchg() |
577 | */ | 577 | */ |
578 | 578 | ||
579 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | 579 | cancel = READ_ONCE(kiocb->ki_cancel); |
580 | do { | 580 | do { |
581 | if (!cancel || cancel == KIOCB_CANCELLED) | 581 | if (!cancel || cancel == KIOCB_CANCELLED) |
582 | return -EINVAL; | 582 | return -EINVAL; |
diff --git a/fs/buffer.c b/fs/buffer.c index 170df856bdb9..32ce01f0f95f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1692,7 +1692,8 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode * | |||
1692 | BUG_ON(!PageLocked(page)); | 1692 | BUG_ON(!PageLocked(page)); |
1693 | 1693 | ||
1694 | if (!page_has_buffers(page)) | 1694 | if (!page_has_buffers(page)) |
1695 | create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); | 1695 | create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits), |
1696 | b_state); | ||
1696 | return page_buffers(page); | 1697 | return page_buffers(page); |
1697 | } | 1698 | } |
1698 | 1699 | ||
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 8e704d12a1cf..0083bd4fcaa5 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -373,7 +373,7 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) | |||
373 | struct fscrypt_info *prev; | 373 | struct fscrypt_info *prev; |
374 | 374 | ||
375 | if (ci == NULL) | 375 | if (ci == NULL) |
376 | ci = ACCESS_ONCE(inode->i_crypt_info); | 376 | ci = READ_ONCE(inode->i_crypt_info); |
377 | if (ci == NULL) | 377 | if (ci == NULL) |
378 | return; | 378 | return; |
379 | 379 | ||
diff --git a/fs/direct-io.c b/fs/direct-io.c index b53e66d9abd7..98fe1325da9d 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1152 | get_block_t get_block, dio_iodone_t end_io, | 1152 | get_block_t get_block, dio_iodone_t end_io, |
1153 | dio_submit_t submit_io, int flags) | 1153 | dio_submit_t submit_io, int flags) |
1154 | { | 1154 | { |
1155 | unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); | 1155 | unsigned i_blkbits = READ_ONCE(inode->i_blkbits); |
1156 | unsigned blkbits = i_blkbits; | 1156 | unsigned blkbits = i_blkbits; |
1157 | unsigned blocksize_mask = (1 << blkbits) - 1; | 1157 | unsigned blocksize_mask = (1 << blkbits) - 1; |
1158 | ssize_t retval = -EINVAL; | 1158 | ssize_t retval = -EINVAL; |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -1911,7 +1911,7 @@ void set_dumpable(struct mm_struct *mm, int value) | |||
1911 | return; | 1911 | return; |
1912 | 1912 | ||
1913 | do { | 1913 | do { |
1914 | old = ACCESS_ONCE(mm->flags); | 1914 | old = READ_ONCE(mm->flags); |
1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; | 1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; |
1916 | } while (cmpxchg(&mm->flags, old, new) != old); | 1916 | } while (cmpxchg(&mm->flags, old, new) != old); |
1917 | } | 1917 | } |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 448a1119f0be..57bf2964bb83 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -724,7 +724,7 @@ static void send_sigio_to_task(struct task_struct *p, | |||
724 | * F_SETSIG can change ->signum lockless in parallel, make | 724 | * F_SETSIG can change ->signum lockless in parallel, make |
725 | * sure we read it once and use the same value throughout. | 725 | * sure we read it once and use the same value throughout. |
726 | */ | 726 | */ |
727 | int signum = ACCESS_ONCE(fown->signum); | 727 | int signum = READ_ONCE(fown->signum); |
728 | 728 | ||
729 | if (!sigio_perm(p, fown, signum)) | 729 | if (!sigio_perm(p, fown, signum)) |
730 | return; | 730 | return; |
diff --git a/fs/fs_pin.c b/fs/fs_pin.c index e747b3d720ee..2d07f292b625 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c | |||
@@ -78,7 +78,7 @@ void mnt_pin_kill(struct mount *m) | |||
78 | while (1) { | 78 | while (1) { |
79 | struct hlist_node *p; | 79 | struct hlist_node *p; |
80 | rcu_read_lock(); | 80 | rcu_read_lock(); |
81 | p = ACCESS_ONCE(m->mnt_pins.first); | 81 | p = READ_ONCE(m->mnt_pins.first); |
82 | if (!p) { | 82 | if (!p) { |
83 | rcu_read_unlock(); | 83 | rcu_read_unlock(); |
84 | break; | 84 | break; |
@@ -92,7 +92,7 @@ void group_pin_kill(struct hlist_head *p) | |||
92 | while (1) { | 92 | while (1) { |
93 | struct hlist_node *q; | 93 | struct hlist_node *q; |
94 | rcu_read_lock(); | 94 | rcu_read_lock(); |
95 | q = ACCESS_ONCE(p->first); | 95 | q = READ_ONCE(p->first); |
96 | if (!q) { | 96 | if (!q) { |
97 | rcu_read_unlock(); | 97 | rcu_read_unlock(); |
98 | break; | 98 | break; |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 13c65dd2d37d..a42d89371748 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file) | |||
33 | * Lockless access is OK, because file->private data is set | 33 | * Lockless access is OK, because file->private data is set |
34 | * once during mount and is valid until the file is released. | 34 | * once during mount and is valid until the file is released. |
35 | */ | 35 | */ |
36 | return ACCESS_ONCE(file->private_data); | 36 | return READ_ONCE(file->private_data); |
37 | } | 37 | } |
38 | 38 | ||
39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, | 39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, |
diff --git a/fs/inode.c b/fs/inode.c index d1e35b53bb23..fd401028a309 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags, | |||
2090 | 2090 | ||
2091 | WARN_ON_ONCE(flags & ~mask); | 2091 | WARN_ON_ONCE(flags & ~mask); |
2092 | do { | 2092 | do { |
2093 | old_flags = ACCESS_ONCE(inode->i_flags); | 2093 | old_flags = READ_ONCE(inode->i_flags); |
2094 | new_flags = (old_flags & ~mask) | flags; | 2094 | new_flags = (old_flags & ~mask) | flags; |
2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, | 2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, |
2096 | new_flags) != old_flags)); | 2096 | new_flags) != old_flags)); |
diff --git a/fs/namei.c b/fs/namei.c index c75ea03ca147..40a0f34bf990 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1209,7 +1209,7 @@ static int follow_managed(struct path *path, struct nameidata *nd) | |||
1209 | /* Given that we're not holding a lock here, we retain the value in a | 1209 | /* Given that we're not holding a lock here, we retain the value in a |
1210 | * local variable for each dentry as we look at it so that we don't see | 1210 | * local variable for each dentry as we look at it so that we don't see |
1211 | * the components of that value change under us */ | 1211 | * the components of that value change under us */ |
1212 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1212 | while (managed = READ_ONCE(path->dentry->d_flags), |
1213 | managed &= DCACHE_MANAGED_DENTRY, | 1213 | managed &= DCACHE_MANAGED_DENTRY, |
1214 | unlikely(managed != 0)) { | 1214 | unlikely(managed != 0)) { |
1215 | /* Allow the filesystem to manage the transit without i_mutex | 1215 | /* Allow the filesystem to manage the transit without i_mutex |
@@ -1394,7 +1394,7 @@ int follow_down(struct path *path) | |||
1394 | unsigned managed; | 1394 | unsigned managed; |
1395 | int ret; | 1395 | int ret; |
1396 | 1396 | ||
1397 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1397 | while (managed = READ_ONCE(path->dentry->d_flags), |
1398 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { | 1398 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { |
1399 | /* Allow the filesystem to manage the transit without i_mutex | 1399 | /* Allow the filesystem to manage the transit without i_mutex |
1400 | * being held. | 1400 | * being held. |
diff --git a/fs/namespace.c b/fs/namespace.c index d18deb4c410b..e158ec6b527b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m) | |||
353 | * incremented count after it has set MNT_WRITE_HOLD. | 353 | * incremented count after it has set MNT_WRITE_HOLD. |
354 | */ | 354 | */ |
355 | smp_mb(); | 355 | smp_mb(); |
356 | while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) | 356 | while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) |
357 | cpu_relax(); | 357 | cpu_relax(); |
358 | /* | 358 | /* |
359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will | 359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5ceaeb1f6fb6..f439f1c45008 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1081 | int error; | 1081 | int error; |
1082 | 1082 | ||
1083 | if (flags & LOOKUP_RCU) { | 1083 | if (flags & LOOKUP_RCU) { |
1084 | parent = ACCESS_ONCE(dentry->d_parent); | 1084 | parent = READ_ONCE(dentry->d_parent); |
1085 | dir = d_inode_rcu(parent); | 1085 | dir = d_inode_rcu(parent); |
1086 | if (!dir) | 1086 | if (!dir) |
1087 | return -ECHILD; | 1087 | return -ECHILD; |
@@ -1168,7 +1168,7 @@ out_set_verifier: | |||
1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); | 1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
1169 | out_valid: | 1169 | out_valid: |
1170 | if (flags & LOOKUP_RCU) { | 1170 | if (flags & LOOKUP_RCU) { |
1171 | if (parent != ACCESS_ONCE(dentry->d_parent)) | 1171 | if (parent != READ_ONCE(dentry->d_parent)) |
1172 | return -ECHILD; | 1172 | return -ECHILD; |
1173 | } else | 1173 | } else |
1174 | dput(parent); | 1174 | dput(parent); |
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1582 | struct inode *dir; | 1582 | struct inode *dir; |
1583 | 1583 | ||
1584 | if (flags & LOOKUP_RCU) { | 1584 | if (flags & LOOKUP_RCU) { |
1585 | parent = ACCESS_ONCE(dentry->d_parent); | 1585 | parent = READ_ONCE(dentry->d_parent); |
1586 | dir = d_inode_rcu(parent); | 1586 | dir = d_inode_rcu(parent); |
1587 | if (!dir) | 1587 | if (!dir) |
1588 | return -ECHILD; | 1588 | return -ECHILD; |
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1596 | ret = -ECHILD; | 1596 | ret = -ECHILD; |
1597 | if (!(flags & LOOKUP_RCU)) | 1597 | if (!(flags & LOOKUP_RCU)) |
1598 | dput(parent); | 1598 | dput(parent); |
1599 | else if (parent != ACCESS_ONCE(dentry->d_parent)) | 1599 | else if (parent != READ_ONCE(dentry->d_parent)) |
1600 | return -ECHILD; | 1600 | return -ECHILD; |
1601 | goto out; | 1601 | goto out; |
1602 | } | 1602 | } |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 77a8eacbe032..375e8bf0dd24 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -453,7 +453,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
453 | cutime = sig->cutime; | 453 | cutime = sig->cutime; |
454 | cstime = sig->cstime; | 454 | cstime = sig->cstime; |
455 | cgtime = sig->cgtime; | 455 | cgtime = sig->cgtime; |
456 | rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); | 456 | rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); |
457 | 457 | ||
458 | /* add up live thread stats at the group level */ | 458 | /* add up live thread stats at the group level */ |
459 | if (whole) { | 459 | if (whole) { |
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 99dff222fe67..03afd5150916 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c | |||
@@ -27,7 +27,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) | |||
27 | 27 | ||
28 | poll_wait(file, &p->ns->poll, wait); | 28 | poll_wait(file, &p->ns->poll, wait); |
29 | 29 | ||
30 | event = ACCESS_ONCE(ns->event); | 30 | event = READ_ONCE(ns->event); |
31 | if (m->poll_event != event) { | 31 | if (m->poll_event != event) { |
32 | m->poll_event = event; | 32 | m->poll_event = event; |
33 | res |= POLLERR | POLLPRI; | 33 | res |= POLLERR | POLLPRI; |
diff --git a/fs/splice.c b/fs/splice.c index f3084cce0ea6..39e2dc01ac12 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe); | |||
253 | */ | 253 | */ |
254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) | 254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) |
255 | { | 255 | { |
256 | unsigned int buffers = ACCESS_ONCE(pipe->buffers); | 256 | unsigned int buffers = READ_ONCE(pipe->buffers); |
257 | 257 | ||
258 | spd->nr_pages_max = buffers; | 258 | spd->nr_pages_max = buffers; |
259 | if (buffers <= PIPE_DEF_BUFFERS) | 259 | if (buffers <= PIPE_DEF_BUFFERS) |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1c713fd5b3e6..f46d133c0949 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
381 | * in __get_user_pages if userfaultfd_release waits on the | 381 | * in __get_user_pages if userfaultfd_release waits on the |
382 | * caller of handle_userfault to release the mmap_sem. | 382 | * caller of handle_userfault to release the mmap_sem. |
383 | */ | 383 | */ |
384 | if (unlikely(ACCESS_ONCE(ctx->released))) { | 384 | if (unlikely(READ_ONCE(ctx->released))) { |
385 | /* | 385 | /* |
386 | * Don't return VM_FAULT_SIGBUS in this case, so a non | 386 | * Don't return VM_FAULT_SIGBUS in this case, so a non |
387 | * cooperative manager can close the uffd after the | 387 | * cooperative manager can close the uffd after the |
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
477 | vmf->flags, reason); | 477 | vmf->flags, reason); |
478 | up_read(&mm->mmap_sem); | 478 | up_read(&mm->mmap_sem); |
479 | 479 | ||
480 | if (likely(must_wait && !ACCESS_ONCE(ctx->released) && | 480 | if (likely(must_wait && !READ_ONCE(ctx->released) && |
481 | (return_to_userland ? !signal_pending(current) : | 481 | (return_to_userland ? !signal_pending(current) : |
482 | !fatal_signal_pending(current)))) { | 482 | !fatal_signal_pending(current)))) { |
483 | wake_up_poll(&ctx->fd_wqh, POLLIN); | 483 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
586 | set_current_state(TASK_KILLABLE); | 586 | set_current_state(TASK_KILLABLE); |
587 | if (ewq->msg.event == 0) | 587 | if (ewq->msg.event == 0) |
588 | break; | 588 | break; |
589 | if (ACCESS_ONCE(ctx->released) || | 589 | if (READ_ONCE(ctx->released) || |
590 | fatal_signal_pending(current)) { | 590 | fatal_signal_pending(current)) { |
591 | /* | 591 | /* |
592 | * &ewq->wq may be queued in fork_event, but | 592 | * &ewq->wq may be queued in fork_event, but |
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
833 | struct userfaultfd_wake_range range = { .len = 0, }; | 833 | struct userfaultfd_wake_range range = { .len = 0, }; |
834 | unsigned long new_flags; | 834 | unsigned long new_flags; |
835 | 835 | ||
836 | ACCESS_ONCE(ctx->released) = true; | 836 | WRITE_ONCE(ctx->released, true); |
837 | 837 | ||
838 | if (!mmget_not_zero(mm)) | 838 | if (!mmget_not_zero(mm)) |
839 | goto wakeup; | 839 | goto wakeup; |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 51bf7b827387..129975970d99 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -592,9 +592,9 @@ xlog_valid_lsn( | |||
592 | * a transiently forward state. Instead, we can see the LSN in a | 592 | * a transiently forward state. Instead, we can see the LSN in a |
593 | * transiently behind state if we happen to race with a cycle wrap. | 593 | * transiently behind state if we happen to race with a cycle wrap. |
594 | */ | 594 | */ |
595 | cur_cycle = ACCESS_ONCE(log->l_curr_cycle); | 595 | cur_cycle = READ_ONCE(log->l_curr_cycle); |
596 | smp_rmb(); | 596 | smp_rmb(); |
597 | cur_block = ACCESS_ONCE(log->l_curr_block); | 597 | cur_block = READ_ONCE(log->l_curr_block); |
598 | 598 | ||
599 | if ((CYCLE_LSN(lsn) > cur_cycle) || | 599 | if ((CYCLE_LSN(lsn) > cur_cycle) || |
600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { | 600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { |