diff options
author | Mark Rutland <mark.rutland@arm.com> | 2017-10-23 17:07:29 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2017-10-25 05:01:08 -0400 |
commit | 6aa7de059173a986114ac43b8f50b297a86f09a8 (patch) | |
tree | 77666afe795e022914ca26433d61686c694dc4fd | |
parent | b03a0fe0c5e4b46dcd400d27395b124499554a71 (diff) |
locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly; instead, please re-run the
coccinelle script shown below and apply its output.
For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.
However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:
----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()
// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch
@ depends on patch @
expression E1, E2;
@@
- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)
@ depends on patch @
expression E;
@@
- ACCESS_ONCE(E)
+ READ_ONCE(E)
----
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
180 files changed, 383 insertions, 385 deletions
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index f46267153ec2..94cabe73664b 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -245,7 +245,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg) | |||
245 | * and read back old value | 245 | * and read back old value |
246 | */ | 246 | */ |
247 | do { | 247 | do { |
248 | new = old = ACCESS_ONCE(*ipi_data_ptr); | 248 | new = old = READ_ONCE(*ipi_data_ptr); |
249 | new |= 1U << msg; | 249 | new |= 1U << msg; |
250 | } while (cmpxchg(ipi_data_ptr, old, new) != old); | 250 | } while (cmpxchg(ipi_data_ptr, old, new) != old); |
251 | 251 | ||
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index daa87212c9a1..77f50ae0aeb4 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h | |||
@@ -71,7 +71,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
71 | 71 | ||
72 | while (lockval.tickets.next != lockval.tickets.owner) { | 72 | while (lockval.tickets.next != lockval.tickets.owner) { |
73 | wfe(); | 73 | wfe(); |
74 | lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); | 74 | lockval.tickets.owner = READ_ONCE(lock->tickets.owner); |
75 | } | 75 | } |
76 | 76 | ||
77 | smp_mb(); | 77 | smp_mb(); |
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c index 76e4c83cd5c8..3f24addd7972 100644 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c | |||
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, | |||
179 | bool entered_lp2 = false; | 179 | bool entered_lp2 = false; |
180 | 180 | ||
181 | if (tegra_pending_sgi()) | 181 | if (tegra_pending_sgi()) |
182 | ACCESS_ONCE(abort_flag) = true; | 182 | WRITE_ONCE(abort_flag, true); |
183 | 183 | ||
184 | cpuidle_coupled_parallel_barrier(dev, &abort_barrier); | 184 | cpuidle_coupled_parallel_barrier(dev, &abort_barrier); |
185 | 185 | ||
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c index 79214d5ff097..a9dd619c6c29 100644 --- a/arch/arm/vdso/vgettimeofday.c +++ b/arch/arm/vdso/vgettimeofday.c | |||
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata) | |||
35 | { | 35 | { |
36 | u32 seq; | 36 | u32 seq; |
37 | repeat: | 37 | repeat: |
38 | seq = ACCESS_ONCE(vdata->seq_count); | 38 | seq = READ_ONCE(vdata->seq_count); |
39 | if (seq & 1) { | 39 | if (seq & 1) { |
40 | cpu_relax(); | 40 | cpu_relax(); |
41 | goto repeat; | 41 | goto repeat; |
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 35b31884863b..e98775be112d 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h | |||
@@ -61,7 +61,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) | |||
61 | 61 | ||
62 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) | 62 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
63 | { | 63 | { |
64 | int tmp = ACCESS_ONCE(lock->lock); | 64 | int tmp = READ_ONCE(lock->lock); |
65 | 65 | ||
66 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) | 66 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) |
67 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; | 67 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; |
@@ -73,19 +73,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) | |||
73 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; | 73 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
74 | 74 | ||
75 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); | 75 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); |
76 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | 76 | WRITE_ONCE(*p, (tmp + 2) & ~1); |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) | 79 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
80 | { | 80 | { |
81 | long tmp = ACCESS_ONCE(lock->lock); | 81 | long tmp = READ_ONCE(lock->lock); |
82 | 82 | ||
83 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); | 83 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) | 86 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
87 | { | 87 | { |
88 | long tmp = ACCESS_ONCE(lock->lock); | 88 | long tmp = READ_ONCE(lock->lock); |
89 | 89 | ||
90 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; | 90 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
91 | } | 91 | } |
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h index b7cd6cf77b83..91bf0c2c265c 100644 --- a/arch/mips/include/asm/vdso.h +++ b/arch/mips/include/asm/vdso.h | |||
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data) | |||
99 | u32 seq; | 99 | u32 seq; |
100 | 100 | ||
101 | while (true) { | 101 | while (true) { |
102 | seq = ACCESS_ONCE(data->seq_count); | 102 | seq = READ_ONCE(data->seq_count); |
103 | if (likely(!(seq & 1))) { | 103 | if (likely(!(seq & 1))) { |
104 | /* Paired with smp_wmb() in vdso_data_write_*(). */ | 104 | /* Paired with smp_wmb() in vdso_data_write_*(). */ |
105 | smp_rmb(); | 105 | smp_rmb(); |
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 4655017f2377..1d2996cd58da 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c | |||
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state) | |||
166 | nc_core_ready_count = nc_addr; | 166 | nc_core_ready_count = nc_addr; |
167 | 167 | ||
168 | /* Ensure ready_count is zero-initialised before the assembly runs */ | 168 | /* Ensure ready_count is zero-initialised before the assembly runs */ |
169 | ACCESS_ONCE(*nc_core_ready_count) = 0; | 169 | WRITE_ONCE(*nc_core_ready_count, 0); |
170 | coupled_barrier(&per_cpu(pm_barrier, core), online); | 170 | coupled_barrier(&per_cpu(pm_barrier, core), online); |
171 | 171 | ||
172 | /* Run the generated entry code */ | 172 | /* Run the generated entry code */ |
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index 7ecf69879e2d..d7ef1232a82a 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c | |||
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port) | |||
543 | 543 | ||
544 | try_again: | 544 | try_again: |
545 | /* pull chars out of the hat */ | 545 | /* pull chars out of the hat */ |
546 | ix = ACCESS_ONCE(port->rx_outp); | 546 | ix = READ_ONCE(port->rx_outp); |
547 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) { | 547 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) { |
548 | if (push && !tport->low_latency) | 548 | if (push && !tport->low_latency) |
549 | tty_flip_buffer_push(tport); | 549 | tty_flip_buffer_push(tport); |
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port) | |||
1724 | if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) { | 1724 | if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) { |
1725 | do { | 1725 | do { |
1726 | /* pull chars out of the hat */ | 1726 | /* pull chars out of the hat */ |
1727 | ix = ACCESS_ONCE(port->rx_outp); | 1727 | ix = READ_ONCE(port->rx_outp); |
1728 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) | 1728 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) |
1729 | return NO_POLL_CHAR; | 1729 | return NO_POLL_CHAR; |
1730 | 1730 | ||
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 17b98a87e5e2..c57d4e8307f2 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h | |||
@@ -260,7 +260,7 @@ atomic64_set(atomic64_t *v, s64 i) | |||
260 | static __inline__ s64 | 260 | static __inline__ s64 |
261 | atomic64_read(const atomic64_t *v) | 261 | atomic64_read(const atomic64_t *v) |
262 | { | 262 | { |
263 | return ACCESS_ONCE((v)->counter); | 263 | return READ_ONCE((v)->counter); |
264 | } | 264 | } |
265 | 265 | ||
266 | #define atomic64_inc(v) (atomic64_add( 1,(v))) | 266 | #define atomic64_inc(v) (atomic64_add( 1,(v))) |
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 7a9cde0cfbd1..acd3206dfae3 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c | |||
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) | |||
43 | if (!opal_memcons) | 43 | if (!opal_memcons) |
44 | return -ENODEV; | 44 | return -ENODEV; |
45 | 45 | ||
46 | out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos)); | 46 | out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos)); |
47 | 47 | ||
48 | /* Now we've read out_pos, put a barrier in before reading the new | 48 | /* Now we've read out_pos, put a barrier in before reading the new |
49 | * data it points to in conbuf. */ | 49 | * data it points to in conbuf. */ |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 9fa855f91e55..66f4160010ef 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -117,14 +117,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp); | |||
117 | 117 | ||
118 | static inline int arch_read_trylock_once(arch_rwlock_t *rw) | 118 | static inline int arch_read_trylock_once(arch_rwlock_t *rw) |
119 | { | 119 | { |
120 | int old = ACCESS_ONCE(rw->lock); | 120 | int old = READ_ONCE(rw->lock); |
121 | return likely(old >= 0 && | 121 | return likely(old >= 0 && |
122 | __atomic_cmpxchg_bool(&rw->lock, old, old + 1)); | 122 | __atomic_cmpxchg_bool(&rw->lock, old, old + 1)); |
123 | } | 123 | } |
124 | 124 | ||
125 | static inline int arch_write_trylock_once(arch_rwlock_t *rw) | 125 | static inline int arch_write_trylock_once(arch_rwlock_t *rw) |
126 | { | 126 | { |
127 | int old = ACCESS_ONCE(rw->lock); | 127 | int old = READ_ONCE(rw->lock); |
128 | return likely(old == 0 && | 128 | return likely(old == 0 && |
129 | __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000)); | 129 | __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000)); |
130 | } | 130 | } |
@@ -211,7 +211,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
211 | int old; | 211 | int old; |
212 | 212 | ||
213 | do { | 213 | do { |
214 | old = ACCESS_ONCE(rw->lock); | 214 | old = READ_ONCE(rw->lock); |
215 | } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1)); | 215 | } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1)); |
216 | } | 216 | } |
217 | 217 | ||
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index b12663d653d8..34e30b9ea234 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
@@ -162,8 +162,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) | |||
162 | smp_yield_cpu(~owner); | 162 | smp_yield_cpu(~owner); |
163 | count = spin_retry; | 163 | count = spin_retry; |
164 | } | 164 | } |
165 | old = ACCESS_ONCE(rw->lock); | 165 | old = READ_ONCE(rw->lock); |
166 | owner = ACCESS_ONCE(rw->owner); | 166 | owner = READ_ONCE(rw->owner); |
167 | if (old < 0) | 167 | if (old < 0) |
168 | continue; | 168 | continue; |
169 | if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) | 169 | if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) |
@@ -178,7 +178,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) | |||
178 | int old; | 178 | int old; |
179 | 179 | ||
180 | while (count-- > 0) { | 180 | while (count-- > 0) { |
181 | old = ACCESS_ONCE(rw->lock); | 181 | old = READ_ONCE(rw->lock); |
182 | if (old < 0) | 182 | if (old < 0) |
183 | continue; | 183 | continue; |
184 | if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) | 184 | if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1)) |
@@ -202,8 +202,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev) | |||
202 | smp_yield_cpu(~owner); | 202 | smp_yield_cpu(~owner); |
203 | count = spin_retry; | 203 | count = spin_retry; |
204 | } | 204 | } |
205 | old = ACCESS_ONCE(rw->lock); | 205 | old = READ_ONCE(rw->lock); |
206 | owner = ACCESS_ONCE(rw->owner); | 206 | owner = READ_ONCE(rw->owner); |
207 | smp_mb(); | 207 | smp_mb(); |
208 | if (old >= 0) { | 208 | if (old >= 0) { |
209 | prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); | 209 | prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); |
@@ -230,8 +230,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) | |||
230 | smp_yield_cpu(~owner); | 230 | smp_yield_cpu(~owner); |
231 | count = spin_retry; | 231 | count = spin_retry; |
232 | } | 232 | } |
233 | old = ACCESS_ONCE(rw->lock); | 233 | old = READ_ONCE(rw->lock); |
234 | owner = ACCESS_ONCE(rw->owner); | 234 | owner = READ_ONCE(rw->owner); |
235 | if (old >= 0 && | 235 | if (old >= 0 && |
236 | __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000)) | 236 | __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000)) |
237 | prev = old; | 237 | prev = old; |
@@ -251,7 +251,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) | |||
251 | int old; | 251 | int old; |
252 | 252 | ||
253 | while (count-- > 0) { | 253 | while (count-- > 0) { |
254 | old = ACCESS_ONCE(rw->lock); | 254 | old = READ_ONCE(rw->lock); |
255 | if (old) | 255 | if (old) |
256 | continue; | 256 | continue; |
257 | if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000)) | 257 | if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000)) |
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7643e979e333..e2f398e9456c 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h | |||
@@ -31,7 +31,7 @@ void atomic_set(atomic_t *, int); | |||
31 | 31 | ||
32 | #define atomic_set_release(v, i) atomic_set((v), (i)) | 32 | #define atomic_set_release(v, i) atomic_set((v), (i)) |
33 | 33 | ||
34 | #define atomic_read(v) ACCESS_ONCE((v)->counter) | 34 | #define atomic_read(v) READ_ONCE((v)->counter) |
35 | 35 | ||
36 | #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) | 36 | #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) |
37 | #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) | 37 | #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) |
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c index baa60357f8ba..b7ba577d82ca 100644 --- a/arch/tile/gxio/dma_queue.c +++ b/arch/tile/gxio/dma_queue.c | |||
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | |||
163 | int64_t completion_slot, int update) | 163 | int64_t completion_slot, int update) |
164 | { | 164 | { |
165 | if (update) { | 165 | if (update) { |
166 | if (ACCESS_ONCE(dma_queue->hw_complete_count) > | 166 | if (READ_ONCE(dma_queue->hw_complete_count) > |
167 | completion_slot) | 167 | completion_slot) |
168 | return 1; | 168 | return 1; |
169 | 169 | ||
170 | __gxio_dma_queue_update_credits(dma_queue); | 170 | __gxio_dma_queue_update_credits(dma_queue); |
171 | } | 171 | } |
172 | 172 | ||
173 | return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot; | 173 | return READ_ONCE(dma_queue->hw_complete_count) > completion_slot; |
174 | } | 174 | } |
175 | 175 | ||
176 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); | 176 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); |
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h index b9e45e37649e..c8fd47edba30 100644 --- a/arch/tile/include/gxio/dma_queue.h +++ b/arch/tile/include/gxio/dma_queue.h | |||
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue, | |||
121 | * if the result is LESS than "hw_complete_count". | 121 | * if the result is LESS than "hw_complete_count". |
122 | */ | 122 | */ |
123 | uint64_t complete; | 123 | uint64_t complete; |
124 | complete = ACCESS_ONCE(dma_queue->hw_complete_count); | 124 | complete = READ_ONCE(dma_queue->hw_complete_count); |
125 | slot |= (complete & 0xffffffffff000000); | 125 | slot |= (complete & 0xffffffffff000000); |
126 | if (slot < complete) | 126 | if (slot < complete) |
127 | slot += 0x1000000; | 127 | slot += 0x1000000; |
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index e1a078e6828e..d516d61751c2 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c | |||
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
255 | 255 | ||
256 | int do_syscall_trace_enter(struct pt_regs *regs) | 256 | int do_syscall_trace_enter(struct pt_regs *regs) |
257 | { | 257 | { |
258 | u32 work = ACCESS_ONCE(current_thread_info()->flags); | 258 | u32 work = READ_ONCE(current_thread_info()->flags); |
259 | 259 | ||
260 | if ((work & _TIF_SYSCALL_TRACE) && | 260 | if ((work & _TIF_SYSCALL_TRACE) && |
261 | tracehook_report_syscall_entry(regs)) { | 261 | tracehook_report_syscall_entry(regs)) { |
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 03505ffbe1b6..eaa0ba66cf96 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c | |||
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs) | |||
75 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) | 75 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) |
76 | BUG_ON(regs != task_pt_regs(current)); | 76 | BUG_ON(regs != task_pt_regs(current)); |
77 | 77 | ||
78 | work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; | 78 | work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; |
79 | 79 | ||
80 | if (unlikely(work & _TIF_SYSCALL_EMU)) | 80 | if (unlikely(work & _TIF_SYSCALL_EMU)) |
81 | emulated = true; | 81 | emulated = true; |
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index fa8dbfcf7ed3..11b13c4b43d5 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c | |||
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *) | |||
318 | notrace time_t __vdso_time(time_t *t) | 318 | notrace time_t __vdso_time(time_t *t) |
319 | { | 319 | { |
320 | /* This is atomic on x86 so we don't need any locks. */ | 320 | /* This is atomic on x86 so we don't need any locks. */ |
321 | time_t result = ACCESS_ONCE(gtod->wall_time_sec); | 321 | time_t result = READ_ONCE(gtod->wall_time_sec); |
322 | 322 | ||
323 | if (t) | 323 | if (t) |
324 | *t = result; | 324 | *t = result; |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 589af1eec7c1..140d33288e78 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event) | |||
2118 | event->destroy(event); | 2118 | event->destroy(event); |
2119 | } | 2119 | } |
2120 | 2120 | ||
2121 | if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) | 2121 | if (READ_ONCE(x86_pmu.attr_rdpmc)) |
2122 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; | 2122 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; |
2123 | 2123 | ||
2124 | return err; | 2124 | return err; |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 022e59714562..53dd162576a8 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -48,7 +48,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s) | |||
48 | unsigned ret; | 48 | unsigned ret; |
49 | 49 | ||
50 | repeat: | 50 | repeat: |
51 | ret = ACCESS_ONCE(s->seq); | 51 | ret = READ_ONCE(s->seq); |
52 | if (unlikely(ret & 1)) { | 52 | if (unlikely(ret & 1)) { |
53 | cpu_relax(); | 53 | cpu_relax(); |
54 | goto repeat; | 54 | goto repeat; |
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 9c4e7ba6870c..7d7715dde901 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c | |||
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu) | |||
155 | page = cpu/ESPFIX_STACKS_PER_PAGE; | 155 | page = cpu/ESPFIX_STACKS_PER_PAGE; |
156 | 156 | ||
157 | /* Did another CPU already set this up? */ | 157 | /* Did another CPU already set this up? */ |
158 | stack_page = ACCESS_ONCE(espfix_pages[page]); | 158 | stack_page = READ_ONCE(espfix_pages[page]); |
159 | if (likely(stack_page)) | 159 | if (likely(stack_page)) |
160 | goto done; | 160 | goto done; |
161 | 161 | ||
162 | mutex_lock(&espfix_init_mutex); | 162 | mutex_lock(&espfix_init_mutex); |
163 | 163 | ||
164 | /* Did we race on the lock? */ | 164 | /* Did we race on the lock? */ |
165 | stack_page = ACCESS_ONCE(espfix_pages[page]); | 165 | stack_page = READ_ONCE(espfix_pages[page]); |
166 | if (stack_page) | 166 | if (stack_page) |
167 | goto unlock_done; | 167 | goto unlock_done; |
168 | 168 | ||
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu) | |||
200 | set_pte(&pte_p[n*PTE_STRIDE], pte); | 200 | set_pte(&pte_p[n*PTE_STRIDE], pte); |
201 | 201 | ||
202 | /* Job is done for this CPU and any CPU which shares this page */ | 202 | /* Job is done for this CPU and any CPU which shares this page */ |
203 | ACCESS_ONCE(espfix_pages[page]) = stack_page; | 203 | WRITE_ONCE(espfix_pages[page], stack_page); |
204 | 204 | ||
205 | unlock_done: | 205 | unlock_done: |
206 | mutex_unlock(&espfix_init_mutex); | 206 | mutex_unlock(&espfix_init_mutex); |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 35aafc95e4b8..18bc9b51ac9b 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w) | |||
105 | { | 105 | { |
106 | struct nmiaction *a = container_of(w, struct nmiaction, irq_work); | 106 | struct nmiaction *a = container_of(w, struct nmiaction, irq_work); |
107 | int remainder_ns, decimal_msecs; | 107 | int remainder_ns, decimal_msecs; |
108 | u64 whole_msecs = ACCESS_ONCE(a->max_duration); | 108 | u64 whole_msecs = READ_ONCE(a->max_duration); |
109 | 109 | ||
110 | remainder_ns = do_div(whole_msecs, (1000 * 1000)); | 110 | remainder_ns = do_div(whole_msecs, (1000 * 1000)); |
111 | decimal_msecs = remainder_ns / 1000; | 111 | decimal_msecs = remainder_ns / 1000; |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7a69cf053711..a119b361b8b7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | |||
443 | 443 | ||
444 | static u64 __get_spte_lockless(u64 *sptep) | 444 | static u64 __get_spte_lockless(u64 *sptep) |
445 | { | 445 | { |
446 | return ACCESS_ONCE(*sptep); | 446 | return READ_ONCE(*sptep); |
447 | } | 447 | } |
448 | #else | 448 | #else |
449 | union split_spte { | 449 | union split_spte { |
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
4819 | * If we don't have indirect shadow pages, it means no page is | 4819 | * If we don't have indirect shadow pages, it means no page is |
4820 | * write-protected, so we can exit simply. | 4820 | * write-protected, so we can exit simply. |
4821 | */ | 4821 | */ |
4822 | if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) | 4822 | if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
4823 | return; | 4823 | return; |
4824 | 4824 | ||
4825 | remote_flush = local_flush = false; | 4825 | remote_flush = local_flush = false; |
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index ea67dc876316..01c1371f39f8 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c | |||
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
157 | return false; | 157 | return false; |
158 | 158 | ||
159 | index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); | 159 | index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); |
160 | return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); | 160 | return !!READ_ONCE(slot->arch.gfn_track[mode][index]); |
161 | } | 161 | } |
162 | 162 | ||
163 | void kvm_page_track_cleanup(struct kvm *kvm) | 163 | void kvm_page_track_cleanup(struct kvm *kvm) |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 6083ba462f35..13b4f19b9131 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn) | |||
547 | if (p2m_top_mfn && pfn < MAX_P2M_PFN) { | 547 | if (p2m_top_mfn && pfn < MAX_P2M_PFN) { |
548 | topidx = p2m_top_index(pfn); | 548 | topidx = p2m_top_index(pfn); |
549 | top_mfn_p = &p2m_top_mfn[topidx]; | 549 | top_mfn_p = &p2m_top_mfn[topidx]; |
550 | mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]); | 550 | mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]); |
551 | 551 | ||
552 | BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); | 552 | BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); |
553 | 553 | ||
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c index 4dc0c1b43f4b..2f7eb66c23ec 100644 --- a/arch/xtensa/platforms/xtfpga/lcd.c +++ b/arch/xtensa/platforms/xtfpga/lcd.c | |||
@@ -34,23 +34,23 @@ | |||
34 | static void lcd_put_byte(u8 *addr, u8 data) | 34 | static void lcd_put_byte(u8 *addr, u8 data) |
35 | { | 35 | { |
36 | #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS | 36 | #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
37 | ACCESS_ONCE(*addr) = data; | 37 | WRITE_ONCE(*addr, data); |
38 | #else | 38 | #else |
39 | ACCESS_ONCE(*addr) = data & 0xf0; | 39 | WRITE_ONCE(*addr, data & 0xf0); |
40 | ACCESS_ONCE(*addr) = (data << 4) & 0xf0; | 40 | WRITE_ONCE(*addr, (data << 4) & 0xf0); |
41 | #endif | 41 | #endif |
42 | } | 42 | } |
43 | 43 | ||
44 | static int __init lcd_init(void) | 44 | static int __init lcd_init(void) |
45 | { | 45 | { |
46 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 46 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
47 | mdelay(5); | 47 | mdelay(5); |
48 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 48 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
49 | udelay(200); | 49 | udelay(200); |
50 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 50 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
51 | udelay(50); | 51 | udelay(50); |
52 | #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS | 52 | #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
53 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT; | 53 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); |
54 | udelay(50); | 54 | udelay(50); |
55 | lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); | 55 | lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); |
56 | udelay(50); | 56 | udelay(50); |
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 6a9a0f03a67b..d822530e6aea 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat) | |||
261 | 261 | ||
262 | static u64 rwb_sync_issue_lat(struct rq_wb *rwb) | 262 | static u64 rwb_sync_issue_lat(struct rq_wb *rwb) |
263 | { | 263 | { |
264 | u64 now, issue = ACCESS_ONCE(rwb->sync_issue); | 264 | u64 now, issue = READ_ONCE(rwb->sync_issue); |
265 | 265 | ||
266 | if (!issue || !rwb->sync_cookie) | 266 | if (!issue || !rwb->sync_cookie) |
267 | return 0; | 267 | return 0; |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..4b8ba2a75a4d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev) | |||
668 | * so be careful about accessing it. dev->bus and dev->class should | 668 | * so be careful about accessing it. dev->bus and dev->class should |
669 | * never change once they are set, so they don't need special care. | 669 | * never change once they are set, so they don't need special care. |
670 | */ | 670 | */ |
671 | drv = ACCESS_ONCE(dev->driver); | 671 | drv = READ_ONCE(dev->driver); |
672 | return drv ? drv->name : | 672 | return drv ? drv->name : |
673 | (dev->bus ? dev->bus->name : | 673 | (dev->bus ? dev->bus->name : |
674 | (dev->class ? dev->class->name : "")); | 674 | (dev->class ? dev->class->name : "")); |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..41d7c2b99f69 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) | |||
134 | if (!dev->power.use_autosuspend) | 134 | if (!dev->power.use_autosuspend) |
135 | goto out; | 135 | goto out; |
136 | 136 | ||
137 | autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); | 137 | autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); |
138 | if (autosuspend_delay < 0) | 138 | if (autosuspend_delay < 0) |
139 | goto out; | 139 | goto out; |
140 | 140 | ||
141 | last_busy = ACCESS_ONCE(dev->power.last_busy); | 141 | last_busy = READ_ONCE(dev->power.last_busy); |
142 | elapsed = jiffies - last_busy; | 142 | elapsed = jiffies - last_busy; |
143 | if (elapsed < 0) | 143 | if (elapsed < 0) |
144 | goto out; /* jiffies has wrapped around. */ | 144 | goto out; /* jiffies has wrapped around. */ |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..6c7ccac2679e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) | |||
641 | return; | 641 | return; |
642 | 642 | ||
643 | retry: | 643 | retry: |
644 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 644 | entropy_count = orig = READ_ONCE(r->entropy_count); |
645 | if (nfrac < 0) { | 645 | if (nfrac < 0) { |
646 | /* Debit */ | 646 | /* Debit */ |
647 | entropy_count += nfrac; | 647 | entropy_count += nfrac; |
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
1265 | 1265 | ||
1266 | /* Can we pull enough? */ | 1266 | /* Can we pull enough? */ |
1267 | retry: | 1267 | retry: |
1268 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 1268 | entropy_count = orig = READ_ONCE(r->entropy_count); |
1269 | ibytes = nbytes; | 1269 | ibytes = nbytes; |
1270 | /* never pull more than available */ | 1270 | /* never pull more than available */ |
1271 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); | 1271 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); |
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 39e489a96ad7..60da2537bef9 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c | |||
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) | |||
71 | if (readl_relaxed(timer->control) & timer->match_mask) { | 71 | if (readl_relaxed(timer->control) & timer->match_mask) { |
72 | writel_relaxed(timer->match_mask, timer->control); | 72 | writel_relaxed(timer->match_mask, timer->control); |
73 | 73 | ||
74 | event_handler = ACCESS_ONCE(timer->evt.event_handler); | 74 | event_handler = READ_ONCE(timer->evt.event_handler); |
75 | if (event_handler) | 75 | if (event_handler) |
76 | event_handler(&timer->evt); | 76 | event_handler(&timer->evt); |
77 | return IRQ_HANDLED; | 77 | return IRQ_HANDLED; |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d258953ff488..f4f258075b89 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
172 | 172 | ||
173 | while (rd_reg32(&jrp->rregs->outring_used)) { | 173 | while (rd_reg32(&jrp->rregs->outring_used)) { |
174 | 174 | ||
175 | head = ACCESS_ONCE(jrp->head); | 175 | head = READ_ONCE(jrp->head); |
176 | 176 | ||
177 | spin_lock(&jrp->outlock); | 177 | spin_lock(&jrp->outlock); |
178 | 178 | ||
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
341 | spin_lock_bh(&jrp->inplock); | 341 | spin_lock_bh(&jrp->inplock); |
342 | 342 | ||
343 | head = jrp->head; | 343 | head = jrp->head; |
344 | tail = ACCESS_ONCE(jrp->tail); | 344 | tail = READ_ONCE(jrp->tail); |
345 | 345 | ||
346 | if (!rd_reg32(&jrp->rregs->inpring_avail) || | 346 | if (!rd_reg32(&jrp->rregs->inpring_avail) || |
347 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { | 347 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { |
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 874ddf5e9087..0f20f5ec9617 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, | |||
193 | ktime_t start = wmem->start, now = ktime_get(); | 193 | ktime_t start = wmem->start, now = ktime_get(); |
194 | ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); | 194 | ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); |
195 | 195 | ||
196 | while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { | 196 | while (!(READ_ONCE(csb->flags) & CSB_V)) { |
197 | cpu_relax(); | 197 | cpu_relax(); |
198 | now = ktime_get(); | 198 | now = ktime_get(); |
199 | if (ktime_after(now, timeout)) | 199 | if (ktime_after(now, timeout)) |
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267dc25..ccf52368a073 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
734 | __le16 res_count, next_res_count; | 734 | __le16 res_count, next_res_count; |
735 | 735 | ||
736 | i = ar_first_buffer_index(ctx); | 736 | i = ar_first_buffer_index(ctx); |
737 | res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); | 737 | res_count = READ_ONCE(ctx->descriptors[i].res_count); |
738 | 738 | ||
739 | /* A buffer that is not yet completely filled must be the last one. */ | 739 | /* A buffer that is not yet completely filled must be the last one. */ |
740 | while (i != last && res_count == 0) { | 740 | while (i != last && res_count == 0) { |
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
742 | /* Peek at the next descriptor. */ | 742 | /* Peek at the next descriptor. */ |
743 | next_i = ar_next_buffer_index(i); | 743 | next_i = ar_next_buffer_index(i); |
744 | rmb(); /* read descriptors in order */ | 744 | rmb(); /* read descriptors in order */ |
745 | next_res_count = ACCESS_ONCE( | 745 | next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); |
746 | ctx->descriptors[next_i].res_count); | ||
747 | /* | 746 | /* |
748 | * If the next descriptor is still empty, we must stop at this | 747 | * If the next descriptor is still empty, we must stop at this |
749 | * descriptor. | 748 | * descriptor. |
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
759 | if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { | 758 | if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { |
760 | next_i = ar_next_buffer_index(next_i); | 759 | next_i = ar_next_buffer_index(next_i); |
761 | rmb(); | 760 | rmb(); |
762 | next_res_count = ACCESS_ONCE( | 761 | next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); |
763 | ctx->descriptors[next_i].res_count); | ||
764 | if (next_res_count != cpu_to_le16(PAGE_SIZE)) | 762 | if (next_res_count != cpu_to_le16(PAGE_SIZE)) |
765 | goto next_buffer_is_active; | 763 | goto next_buffer_is_active; |
766 | } | 764 | } |
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context, | |||
2812 | u32 buffer_dma; | 2810 | u32 buffer_dma; |
2813 | 2811 | ||
2814 | req_count = le16_to_cpu(last->req_count); | 2812 | req_count = le16_to_cpu(last->req_count); |
2815 | res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); | 2813 | res_count = le16_to_cpu(READ_ONCE(last->res_count)); |
2816 | completed = req_count - res_count; | 2814 | completed = req_count - res_count; |
2817 | buffer_dma = le32_to_cpu(last->data_address); | 2815 | buffer_dma = le32_to_cpu(last->data_address); |
2818 | 2816 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 333bad749067..303b5e099a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) | |||
260 | */ | 260 | */ |
261 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | 261 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) |
262 | { | 262 | { |
263 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); | 263 | uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); |
264 | struct dma_fence *fence, **ptr; | 264 | struct dma_fence *fence, **ptr; |
265 | int r; | 265 | int r; |
266 | 266 | ||
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) | |||
300 | amdgpu_fence_process(ring); | 300 | amdgpu_fence_process(ring); |
301 | emitted = 0x100000000ull; | 301 | emitted = 0x100000000ull; |
302 | emitted -= atomic_read(&ring->fence_drv.last_seq); | 302 | emitted -= atomic_read(&ring->fence_drv.last_seq); |
303 | emitted += ACCESS_ONCE(ring->fence_drv.sync_seq); | 303 | emitted += READ_ONCE(ring->fence_drv.sync_seq); |
304 | return lower_32_bits(emitted); | 304 | return lower_32_bits(emitted); |
305 | } | 305 | } |
306 | 306 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..6149a47fe63d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) | |||
788 | seq_printf(m, "\t0x%08x: %12ld byte %s", | 788 | seq_printf(m, "\t0x%08x: %12ld byte %s", |
789 | id, amdgpu_bo_size(bo), placement); | 789 | id, amdgpu_bo_size(bo), placement); |
790 | 790 | ||
791 | offset = ACCESS_ONCE(bo->tbo.mem.start); | 791 | offset = READ_ONCE(bo->tbo.mem.start); |
792 | if (offset != AMDGPU_BO_INVALID_OFFSET) | 792 | if (offset != AMDGPU_BO_INVALID_OFFSET) |
793 | seq_printf(m, " @ 0x%010Lx", offset); | 793 | seq_printf(m, " @ 0x%010Lx", offset); |
794 | 794 | ||
795 | pin_count = ACCESS_ONCE(bo->pin_count); | 795 | pin_count = READ_ONCE(bo->pin_count); |
796 | if (pin_count) | 796 | if (pin_count) |
797 | seq_printf(m, " pin count %d", pin_count); | 797 | seq_printf(m, " pin count %d", pin_count); |
798 | seq_printf(m, "\n"); | 798 | seq_printf(m, "\n"); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..a25f6c72f219 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) | |||
187 | if (kfifo_is_empty(&entity->job_queue)) | 187 | if (kfifo_is_empty(&entity->job_queue)) |
188 | return false; | 188 | return false; |
189 | 189 | ||
190 | if (ACCESS_ONCE(entity->dependency)) | 190 | if (READ_ONCE(entity->dependency)) |
191 | return false; | 191 | return false; |
192 | 192 | ||
193 | return true; | 193 | return true; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..cf3deb283da5 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
451 | else | 451 | else |
452 | r = 0; | 452 | r = 0; |
453 | 453 | ||
454 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); | 454 | cur_placement = READ_ONCE(robj->tbo.mem.mem_type); |
455 | args->domain = radeon_mem_type_to_domain(cur_placement); | 455 | args->domain = radeon_mem_type_to_domain(cur_placement); |
456 | drm_gem_object_put_unlocked(gobj); | 456 | drm_gem_object_put_unlocked(gobj); |
457 | return r; | 457 | return r; |
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
481 | r = ret; | 481 | r = ret; |
482 | 482 | ||
483 | /* Flush HDP cache via MMIO if necessary */ | 483 | /* Flush HDP cache via MMIO if necessary */ |
484 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); | 484 | cur_placement = READ_ONCE(robj->tbo.mem.mem_type); |
485 | if (rdev->asic->mmio_hdp_flush && | 485 | if (rdev->asic->mmio_hdp_flush && |
486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) | 486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
487 | robj->rdev->asic->mmio_hdp_flush(rdev); | 487 | robj->rdev->asic->mmio_hdp_flush(rdev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index a552e4ea5440..6ac094ee8983 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, | |||
904 | if (unlikely(drm_is_render_client(file_priv))) | 904 | if (unlikely(drm_is_render_client(file_priv))) |
905 | require_exist = true; | 905 | require_exist = true; |
906 | 906 | ||
907 | if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { | 907 | if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { |
908 | DRM_ERROR("Locked master refused legacy " | 908 | DRM_ERROR("Locked master refused legacy " |
909 | "surface reference.\n"); | 909 | "surface reference.\n"); |
910 | return -EACCES; | 910 | return -EACCES; |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..97bea2e1aa6a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
380 | if (sc->flags & SCF_FROZEN) { | 380 | if (sc->flags & SCF_FROZEN) { |
381 | wait_event_interruptible_timeout( | 381 | wait_event_interruptible_timeout( |
382 | dd->event_queue, | 382 | dd->event_queue, |
383 | !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), | 383 | !(READ_ONCE(dd->flags) & HFI1_FROZEN), |
384 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); | 384 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); |
385 | if (dd->flags & HFI1_FROZEN) | 385 | if (dd->flags & HFI1_FROZEN) |
386 | return -ENOLCK; | 386 | return -ENOLCK; |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 7108a4b5e94c..75e740780285 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -1423,14 +1423,14 @@ retry: | |||
1423 | goto done; | 1423 | goto done; |
1424 | } | 1424 | } |
1425 | /* copy from receiver cache line and recalculate */ | 1425 | /* copy from receiver cache line and recalculate */ |
1426 | sc->alloc_free = ACCESS_ONCE(sc->free); | 1426 | sc->alloc_free = READ_ONCE(sc->free); |
1427 | avail = | 1427 | avail = |
1428 | (unsigned long)sc->credits - | 1428 | (unsigned long)sc->credits - |
1429 | (sc->fill - sc->alloc_free); | 1429 | (sc->fill - sc->alloc_free); |
1430 | if (blocks > avail) { | 1430 | if (blocks > avail) { |
1431 | /* still no room, actively update */ | 1431 | /* still no room, actively update */ |
1432 | sc_release_update(sc); | 1432 | sc_release_update(sc); |
1433 | sc->alloc_free = ACCESS_ONCE(sc->free); | 1433 | sc->alloc_free = READ_ONCE(sc->free); |
1434 | trycount++; | 1434 | trycount++; |
1435 | goto retry; | 1435 | goto retry; |
1436 | } | 1436 | } |
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc) | |||
1667 | 1667 | ||
1668 | /* call sent buffer callbacks */ | 1668 | /* call sent buffer callbacks */ |
1669 | code = -1; /* code not yet set */ | 1669 | code = -1; /* code not yet set */ |
1670 | head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */ | 1670 | head = READ_ONCE(sc->sr_head); /* snapshot the head */ |
1671 | tail = sc->sr_tail; | 1671 | tail = sc->sr_tail; |
1672 | while (head != tail) { | 1672 | while (head != tail) { |
1673 | pbuf = &sc->sr[tail].pbuf; | 1673 | pbuf = &sc->sr[tail].pbuf; |
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b3291f0fde9a..a7fc664f0d4e 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c | |||
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp) | |||
363 | 363 | ||
364 | again: | 364 | again: |
365 | smp_read_barrier_depends(); /* see post_one_send() */ | 365 | smp_read_barrier_depends(); /* see post_one_send() */ |
366 | if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) | 366 | if (sqp->s_last == READ_ONCE(sqp->s_head)) |
367 | goto clr_busy; | 367 | goto clr_busy; |
368 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); | 368 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); |
369 | 369 | ||
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 6781bcdb10b3..08346d25441c 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
@@ -1725,7 +1725,7 @@ retry: | |||
1725 | 1725 | ||
1726 | swhead = sde->descq_head & sde->sdma_mask; | 1726 | swhead = sde->descq_head & sde->sdma_mask; |
1727 | /* this code is really bad for cache line trading */ | 1727 | /* this code is really bad for cache line trading */ |
1728 | swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 1728 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
1729 | cnt = sde->descq_cnt; | 1729 | cnt = sde->descq_cnt; |
1730 | 1730 | ||
1731 | if (swhead < swtail) | 1731 | if (swhead < swtail) |
@@ -1872,7 +1872,7 @@ retry: | |||
1872 | if ((status & sde->idle_mask) && !idle_check_done) { | 1872 | if ((status & sde->idle_mask) && !idle_check_done) { |
1873 | u16 swtail; | 1873 | u16 swtail; |
1874 | 1874 | ||
1875 | swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 1875 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
1876 | if (swtail != hwhead) { | 1876 | if (swtail != hwhead) { |
1877 | hwhead = (u16)read_sde_csr(sde, SD(HEAD)); | 1877 | hwhead = (u16)read_sde_csr(sde, SD(HEAD)); |
1878 | idle_check_done = 1; | 1878 | idle_check_done = 1; |
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) | |||
2222 | u16 len; | 2222 | u16 len; |
2223 | 2223 | ||
2224 | head = sde->descq_head & sde->sdma_mask; | 2224 | head = sde->descq_head & sde->sdma_mask; |
2225 | tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 2225 | tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
2226 | seq_printf(s, SDE_FMT, sde->this_idx, | 2226 | seq_printf(s, SDE_FMT, sde->this_idx, |
2227 | sde->cpu, | 2227 | sde->cpu, |
2228 | sdma_state_name(sde->state.current_state), | 2228 | sdma_state_name(sde->state.current_state), |
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde) | |||
3305 | return -EINVAL; | 3305 | return -EINVAL; |
3306 | } | 3306 | } |
3307 | while (1) { | 3307 | while (1) { |
3308 | nr = ffz(ACCESS_ONCE(sde->ahg_bits)); | 3308 | nr = ffz(READ_ONCE(sde->ahg_bits)); |
3309 | if (nr > 31) { | 3309 | if (nr > 31) { |
3310 | trace_hfi1_ahg_allocate(sde, -ENOSPC); | 3310 | trace_hfi1_ahg_allocate(sde, -ENOSPC); |
3311 | return -ENOSPC; | 3311 | return -ENOSPC; |
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 107011d8613b..374c59784950 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h | |||
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde) | |||
445 | { | 445 | { |
446 | return sde->descq_cnt - | 446 | return sde->descq_cnt - |
447 | (sde->descq_tail - | 447 | (sde->descq_tail - |
448 | ACCESS_ONCE(sde->descq_head)) - 1; | 448 | READ_ONCE(sde->descq_head)) - 1; |
449 | } | 449 | } |
450 | 450 | ||
451 | static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) | 451 | static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) |
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 0b646173ca22..9a31c585427f 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c | |||
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
80 | goto bail; | 80 | goto bail; |
81 | /* We are in the error state, flush the work request. */ | 81 | /* We are in the error state, flush the work request. */ |
82 | smp_read_barrier_depends(); /* see post_one_send() */ | 82 | smp_read_barrier_depends(); /* see post_one_send() */ |
83 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 83 | if (qp->s_last == READ_ONCE(qp->s_head)) |
84 | goto bail; | 84 | goto bail; |
85 | /* If DMAs are in progress, we can't flush immediately. */ | 85 | /* If DMAs are in progress, we can't flush immediately. */ |
86 | if (iowait_sdma_pending(&priv->s_iowait)) { | 86 | if (iowait_sdma_pending(&priv->s_iowait)) { |
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
121 | goto bail; | 121 | goto bail; |
122 | /* Check if send work queue is empty. */ | 122 | /* Check if send work queue is empty. */ |
123 | smp_read_barrier_depends(); /* see post_one_send() */ | 123 | smp_read_barrier_depends(); /* see post_one_send() */ |
124 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { | 124 | if (qp->s_cur == READ_ONCE(qp->s_head)) { |
125 | clear_ahg(qp); | 125 | clear_ahg(qp); |
126 | goto bail; | 126 | goto bail; |
127 | } | 127 | } |
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 2ba74fdd6f15..7fec6b984e3e 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c | |||
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
487 | goto bail; | 487 | goto bail; |
488 | /* We are in the error state, flush the work request. */ | 488 | /* We are in the error state, flush the work request. */ |
489 | smp_read_barrier_depends(); /* see post_one_send */ | 489 | smp_read_barrier_depends(); /* see post_one_send */ |
490 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 490 | if (qp->s_last == READ_ONCE(qp->s_head)) |
491 | goto bail; | 491 | goto bail; |
492 | /* If DMAs are in progress, we can't flush immediately. */ | 492 | /* If DMAs are in progress, we can't flush immediately. */ |
493 | if (iowait_sdma_pending(&priv->s_iowait)) { | 493 | if (iowait_sdma_pending(&priv->s_iowait)) { |
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
501 | 501 | ||
502 | /* see post_one_send() */ | 502 | /* see post_one_send() */ |
503 | smp_read_barrier_depends(); | 503 | smp_read_barrier_depends(); |
504 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 504 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
505 | goto bail; | 505 | goto bail; |
506 | 506 | ||
507 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); | 507 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c0c0e0445cbf..8ec6e8a8d6f7 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, | |||
276 | /* Wait until all requests have been freed. */ | 276 | /* Wait until all requests have been freed. */ |
277 | wait_event_interruptible( | 277 | wait_event_interruptible( |
278 | pq->wait, | 278 | pq->wait, |
279 | (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); | 279 | (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); |
280 | kfree(pq->reqs); | 280 | kfree(pq->reqs); |
281 | kfree(pq->req_in_use); | 281 | kfree(pq->req_in_use); |
282 | kmem_cache_destroy(pq->txreq_cache); | 282 | kmem_cache_destroy(pq->txreq_cache); |
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, | |||
591 | if (ret != -EBUSY) { | 591 | if (ret != -EBUSY) { |
592 | req->status = ret; | 592 | req->status = ret; |
593 | WRITE_ONCE(req->has_error, 1); | 593 | WRITE_ONCE(req->has_error, 1); |
594 | if (ACCESS_ONCE(req->seqcomp) == | 594 | if (READ_ONCE(req->seqcomp) == |
595 | req->seqsubmitted - 1) | 595 | req->seqsubmitted - 1) |
596 | goto free_req; | 596 | goto free_req; |
597 | return ret; | 597 | return ret; |
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) | |||
825 | */ | 825 | */ |
826 | if (req->data_len) { | 826 | if (req->data_len) { |
827 | iovec = &req->iovs[req->iov_idx]; | 827 | iovec = &req->iovs[req->iov_idx]; |
828 | if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { | 828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { |
829 | if (++req->iov_idx == req->data_iovs) { | 829 | if (++req->iov_idx == req->data_iovs) { |
830 | ret = -EFAULT; | 830 | ret = -EFAULT; |
831 | goto free_txreq; | 831 | goto free_txreq; |
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) | |||
1390 | } else { | 1390 | } else { |
1391 | if (status != SDMA_TXREQ_S_OK) | 1391 | if (status != SDMA_TXREQ_S_OK) |
1392 | req->status = status; | 1392 | req->status = status; |
1393 | if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && | 1393 | if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && |
1394 | (READ_ONCE(req->done) || | 1394 | (READ_ONCE(req->done) || |
1395 | READ_ONCE(req->has_error))) { | 1395 | READ_ONCE(req->has_error))) { |
1396 | user_sdma_free_request(req, false); | 1396 | user_sdma_free_request(req, false); |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 53efbb0b40c4..9a37e844d4c8 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) | |||
368 | 368 | ||
369 | again: | 369 | again: |
370 | smp_read_barrier_depends(); /* see post_one_send() */ | 370 | smp_read_barrier_depends(); /* see post_one_send() */ |
371 | if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) | 371 | if (sqp->s_last == READ_ONCE(sqp->s_head)) |
372 | goto clr_busy; | 372 | goto clr_busy; |
373 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); | 373 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); |
374 | 374 | ||
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 498e2202e72c..bddcc37ace44 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) | |||
61 | goto bail; | 61 | goto bail; |
62 | /* We are in the error state, flush the work request. */ | 62 | /* We are in the error state, flush the work request. */ |
63 | smp_read_barrier_depends(); /* see post_one_send() */ | 63 | smp_read_barrier_depends(); /* see post_one_send() */ |
64 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 64 | if (qp->s_last == READ_ONCE(qp->s_head)) |
65 | goto bail; | 65 | goto bail; |
66 | /* If DMAs are in progress, we can't flush immediately. */ | 66 | /* If DMAs are in progress, we can't flush immediately. */ |
67 | if (atomic_read(&priv->s_dma_busy)) { | 67 | if (atomic_read(&priv->s_dma_busy)) { |
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) | |||
91 | goto bail; | 91 | goto bail; |
92 | /* Check if send work queue is empty. */ | 92 | /* Check if send work queue is empty. */ |
93 | smp_read_barrier_depends(); /* see post_one_send() */ | 93 | smp_read_barrier_depends(); /* see post_one_send() */ |
94 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 94 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
95 | goto bail; | 95 | goto bail; |
96 | /* | 96 | /* |
97 | * Start a new request. | 97 | * Start a new request. |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index be4907453ac4..15962ed193ce 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) | |||
253 | goto bail; | 253 | goto bail; |
254 | /* We are in the error state, flush the work request. */ | 254 | /* We are in the error state, flush the work request. */ |
255 | smp_read_barrier_depends(); /* see post_one_send */ | 255 | smp_read_barrier_depends(); /* see post_one_send */ |
256 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 256 | if (qp->s_last == READ_ONCE(qp->s_head)) |
257 | goto bail; | 257 | goto bail; |
258 | /* If DMAs are in progress, we can't flush immediately. */ | 258 | /* If DMAs are in progress, we can't flush immediately. */ |
259 | if (atomic_read(&priv->s_dma_busy)) { | 259 | if (atomic_read(&priv->s_dma_busy)) { |
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) | |||
267 | 267 | ||
268 | /* see post_one_send() */ | 268 | /* see post_one_send() */ |
269 | smp_read_barrier_depends(); | 269 | smp_read_barrier_depends(); |
270 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 270 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
271 | goto bail; | 271 | goto bail; |
272 | 272 | ||
273 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); | 273 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 22df09ae809e..b670cb9d2006 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) | |||
1073 | rdi->driver_f.notify_error_qp(qp); | 1073 | rdi->driver_f.notify_error_qp(qp); |
1074 | 1074 | ||
1075 | /* Schedule the sending tasklet to drain the send work queue. */ | 1075 | /* Schedule the sending tasklet to drain the send work queue. */ |
1076 | if (ACCESS_ONCE(qp->s_last) != qp->s_head) | 1076 | if (READ_ONCE(qp->s_last) != qp->s_head) |
1077 | rdi->driver_f.schedule_send(qp); | 1077 | rdi->driver_f.schedule_send(qp); |
1078 | 1078 | ||
1079 | rvt_clear_mr_refs(qp, 0); | 1079 | rvt_clear_mr_refs(qp, 0); |
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail( | |||
1686 | if (likely(qp->s_avail)) | 1686 | if (likely(qp->s_avail)) |
1687 | return 0; | 1687 | return 0; |
1688 | smp_read_barrier_depends(); /* see rc.c */ | 1688 | smp_read_barrier_depends(); /* see rc.c */ |
1689 | slast = ACCESS_ONCE(qp->s_last); | 1689 | slast = READ_ONCE(qp->s_last); |
1690 | if (qp->s_head >= slast) | 1690 | if (qp->s_head >= slast) |
1691 | avail = qp->s_size - (qp->s_head - slast); | 1691 | avail = qp->s_size - (qp->s_head - slast); |
1692 | else | 1692 | else |
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1917 | * ahead and kick the send engine into gear. Otherwise we will always | 1917 | * ahead and kick the send engine into gear. Otherwise we will always |
1918 | * just schedule the send to happen later. | 1918 | * just schedule the send to happen later. |
1919 | */ | 1919 | */ |
1920 | call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next; | 1920 | call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; |
1921 | 1921 | ||
1922 | for (; wr; wr = wr->next) { | 1922 | for (; wr; wr = wr->next) { |
1923 | err = rvt_post_one_wr(qp, wr, &call_send); | 1923 | err = rvt_post_one_wr(qp, wr, &call_send); |
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c index 2e8f801932be..a1db1e5040dc 100644 --- a/drivers/input/misc/regulator-haptic.c +++ b/drivers/input/misc/regulator-haptic.c | |||
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev) | |||
233 | 233 | ||
234 | haptic->suspended = false; | 234 | haptic->suspended = false; |
235 | 235 | ||
236 | magnitude = ACCESS_ONCE(haptic->magnitude); | 236 | magnitude = READ_ONCE(haptic->magnitude); |
237 | if (magnitude) | 237 | if (magnitude) |
238 | regulator_haptic_set_voltage(haptic, magnitude); | 238 | regulator_haptic_set_voltage(haptic, magnitude); |
239 | 239 | ||
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..33bb074d6941 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void) | |||
347 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); | 347 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); |
348 | BUG_ON(dm_bufio_client_count < 0); | 348 | BUG_ON(dm_bufio_client_count < 0); |
349 | 349 | ||
350 | dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size); | 350 | dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * Use default if set to 0 and report the actual cache size used. | 353 | * Use default if set to 0 and report the actual cache size used. |
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, | |||
960 | { | 960 | { |
961 | unsigned long buffers; | 961 | unsigned long buffers; |
962 | 962 | ||
963 | if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { | 963 | if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { |
964 | if (mutex_trylock(&dm_bufio_clients_lock)) { | 964 | if (mutex_trylock(&dm_bufio_clients_lock)) { |
965 | __cache_size_refresh(); | 965 | __cache_size_refresh(); |
966 | mutex_unlock(&dm_bufio_clients_lock); | 966 | mutex_unlock(&dm_bufio_clients_lock); |
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) | |||
1600 | 1600 | ||
1601 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) | 1601 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
1602 | { | 1602 | { |
1603 | unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); | 1603 | unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); |
1604 | return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); | 1604 | return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); |
1605 | } | 1605 | } |
1606 | 1606 | ||
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |||
1647 | { | 1647 | { |
1648 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); | 1648 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); |
1649 | 1649 | ||
1650 | return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); | 1650 | return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); |
1651 | } | 1651 | } |
1652 | 1652 | ||
1653 | /* | 1653 | /* |
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); | |||
1818 | 1818 | ||
1819 | static unsigned get_max_age_hz(void) | 1819 | static unsigned get_max_age_hz(void) |
1820 | { | 1820 | { |
1821 | unsigned max_age = ACCESS_ONCE(dm_bufio_max_age); | 1821 | unsigned max_age = READ_ONCE(dm_bufio_max_age); |
1822 | 1822 | ||
1823 | if (max_age > UINT_MAX / HZ) | 1823 | if (max_age > UINT_MAX / HZ) |
1824 | max_age = UINT_MAX / HZ; | 1824 | max_age = UINT_MAX / HZ; |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index cf2c67e35eaf..eb45cc3df31d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t) | |||
107 | try_again: | 107 | try_again: |
108 | spin_lock_irq(&throttle_spinlock); | 108 | spin_lock_irq(&throttle_spinlock); |
109 | 109 | ||
110 | throttle = ACCESS_ONCE(t->throttle); | 110 | throttle = READ_ONCE(t->throttle); |
111 | 111 | ||
112 | if (likely(throttle >= 100)) | 112 | if (likely(throttle >= 100)) |
113 | goto skip_limit; | 113 | goto skip_limit; |
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t) | |||
157 | 157 | ||
158 | t->num_io_jobs--; | 158 | t->num_io_jobs--; |
159 | 159 | ||
160 | if (likely(ACCESS_ONCE(t->throttle) >= 100)) | 160 | if (likely(READ_ONCE(t->throttle) >= 100)) |
161 | goto skip_limit; | 161 | goto skip_limit; |
162 | 162 | ||
163 | if (!t->num_io_jobs) { | 163 | if (!t->num_io_jobs) { |
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 6028d8247f58..a1a5eec783cc 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -431,7 +431,7 @@ do_sync_free: | |||
431 | synchronize_rcu_expedited(); | 431 | synchronize_rcu_expedited(); |
432 | dm_stat_free(&s->rcu_head); | 432 | dm_stat_free(&s->rcu_head); |
433 | } else { | 433 | } else { |
434 | ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; | 434 | WRITE_ONCE(dm_stat_need_rcu_barrier, 1); |
435 | call_rcu(&s->rcu_head, dm_stat_free); | 435 | call_rcu(&s->rcu_head, dm_stat_free); |
436 | } | 436 | } |
437 | return 0; | 437 | return 0; |
@@ -639,12 +639,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, | |||
639 | */ | 639 | */ |
640 | last = raw_cpu_ptr(stats->last); | 640 | last = raw_cpu_ptr(stats->last); |
641 | stats_aux->merged = | 641 | stats_aux->merged = |
642 | (bi_sector == (ACCESS_ONCE(last->last_sector) && | 642 | (bi_sector == (READ_ONCE(last->last_sector) && |
643 | ((bi_rw == WRITE) == | 643 | ((bi_rw == WRITE) == |
644 | (ACCESS_ONCE(last->last_rw) == WRITE)) | 644 | (READ_ONCE(last->last_rw) == WRITE)) |
645 | )); | 645 | )); |
646 | ACCESS_ONCE(last->last_sector) = end_sector; | 646 | WRITE_ONCE(last->last_sector, end_sector); |
647 | ACCESS_ONCE(last->last_rw) = bi_rw; | 647 | WRITE_ONCE(last->last_rw, bi_rw); |
648 | } | 648 | } |
649 | 649 | ||
650 | rcu_read_lock(); | 650 | rcu_read_lock(); |
@@ -693,22 +693,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared | |||
693 | 693 | ||
694 | for_each_possible_cpu(cpu) { | 694 | for_each_possible_cpu(cpu) { |
695 | p = &s->stat_percpu[cpu][x]; | 695 | p = &s->stat_percpu[cpu][x]; |
696 | shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); | 696 | shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); |
697 | shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); | 697 | shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); |
698 | shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]); | 698 | shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]); |
699 | shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]); | 699 | shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]); |
700 | shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]); | 700 | shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]); |
701 | shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]); | 701 | shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]); |
702 | shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]); | 702 | shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]); |
703 | shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]); | 703 | shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]); |
704 | shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]); | 704 | shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]); |
705 | shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]); | 705 | shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]); |
706 | shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total); | 706 | shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total); |
707 | shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue); | 707 | shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue); |
708 | if (s->n_histogram_entries) { | 708 | if (s->n_histogram_entries) { |
709 | unsigned i; | 709 | unsigned i; |
710 | for (i = 0; i < s->n_histogram_entries + 1; i++) | 710 | for (i = 0; i < s->n_histogram_entries + 1; i++) |
711 | shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]); | 711 | shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]); |
712 | } | 712 | } |
713 | } | 713 | } |
714 | } | 714 | } |
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 4c8de1ff78ca..8d0ba879777e 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c | |||
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long | |||
144 | 144 | ||
145 | switch_get_position(sctx, region_nr, ®ion_index, &bit); | 145 | switch_get_position(sctx, region_nr, ®ion_index, &bit); |
146 | 146 | ||
147 | return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & | 147 | return (READ_ONCE(sctx->region_table[region_index]) >> bit) & |
148 | ((1 << sctx->region_table_entry_bits) - 1); | 148 | ((1 << sctx->region_table_entry_bits) - 1); |
149 | } | 149 | } |
150 | 150 | ||
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..89e5dff9b4cf 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
2431 | struct pool_c *pt = pool->ti->private; | 2431 | struct pool_c *pt = pool->ti->private; |
2432 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); | 2432 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); |
2433 | enum pool_mode old_mode = get_pool_mode(pool); | 2433 | enum pool_mode old_mode = get_pool_mode(pool); |
2434 | unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; | 2434 | unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ; |
2435 | 2435 | ||
2436 | /* | 2436 | /* |
2437 | * Never allow the pool to transition to PM_WRITE mode if user | 2437 | * Never allow the pool to transition to PM_WRITE mode if user |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..fba93237a780 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work) | |||
589 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); | 589 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); |
590 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); | 590 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); |
591 | if (!i) { | 591 | if (!i) { |
592 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); | 592 | unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster); |
593 | 593 | ||
594 | cluster >>= v->data_dev_block_bits; | 594 | cluster >>= v->data_dev_block_bits; |
595 | if (unlikely(!cluster)) | 595 | if (unlikely(!cluster)) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..8aaffa19b29a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; | |||
114 | 114 | ||
115 | static int __dm_get_module_param_int(int *module_param, int min, int max) | 115 | static int __dm_get_module_param_int(int *module_param, int min, int max) |
116 | { | 116 | { |
117 | int param = ACCESS_ONCE(*module_param); | 117 | int param = READ_ONCE(*module_param); |
118 | int modified_param = 0; | 118 | int modified_param = 0; |
119 | bool modified = true; | 119 | bool modified = true; |
120 | 120 | ||
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max) | |||
136 | unsigned __dm_get_module_param(unsigned *module_param, | 136 | unsigned __dm_get_module_param(unsigned *module_param, |
137 | unsigned def, unsigned max) | 137 | unsigned def, unsigned max) |
138 | { | 138 | { |
139 | unsigned param = ACCESS_ONCE(*module_param); | 139 | unsigned param = READ_ONCE(*module_param); |
140 | unsigned modified_param = 0; | 140 | unsigned modified_param = 0; |
141 | 141 | ||
142 | if (!param) | 142 | if (!param) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..447ddcbc9566 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page) | |||
2651 | { | 2651 | { |
2652 | char *sep = ","; | 2652 | char *sep = ","; |
2653 | size_t len = 0; | 2653 | size_t len = 0; |
2654 | unsigned long flags = ACCESS_ONCE(rdev->flags); | 2654 | unsigned long flags = READ_ONCE(rdev->flags); |
2655 | 2655 | ||
2656 | if (test_bit(Faulty, &flags) || | 2656 | if (test_bit(Faulty, &flags) || |
2657 | (!test_bit(ExternalBbl, &flags) && | 2657 | (!test_bit(ExternalBbl, &flags) && |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..7d9a50eed9db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n | |||
6072 | */ | 6072 | */ |
6073 | rcu_read_lock(); | 6073 | rcu_read_lock(); |
6074 | for (i = 0; i < conf->raid_disks; i++) { | 6074 | for (i = 0; i < conf->raid_disks; i++) { |
6075 | struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); | 6075 | struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); |
6076 | 6076 | ||
6077 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) | 6077 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) |
6078 | still_degraded = 1; | 6078 | still_degraded = 1; |
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c index 637cc4686742..b665757ca89a 100644 --- a/drivers/misc/mic/scif/scif_rb.c +++ b/drivers/misc/mic/scif/scif_rb.c | |||
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb) | |||
138 | * the read barrier in scif_rb_count(..) | 138 | * the read barrier in scif_rb_count(..) |
139 | */ | 139 | */ |
140 | wmb(); | 140 | wmb(); |
141 | ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; | 141 | WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); |
142 | #ifdef CONFIG_INTEL_MIC_CARD | 142 | #ifdef CONFIG_INTEL_MIC_CARD |
143 | /* | 143 | /* |
144 | * X100 Si bug: For the case where a Core is performing an EXT_WR | 144 | * X100 Si bug: For the case where a Core is performing an EXT_WR |
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb) | |||
147 | * This way, if ordering is violated for the Interrupt Message, it will | 147 | * This way, if ordering is violated for the Interrupt Message, it will |
148 | * fall just behind the first Posted associated with the first EXT_WR. | 148 | * fall just behind the first Posted associated with the first EXT_WR. |
149 | */ | 149 | */ |
150 | ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; | 150 | WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); |
151 | #endif | 151 | #endif |
152 | } | 152 | } |
153 | 153 | ||
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) | |||
210 | * scif_rb_space(..) | 210 | * scif_rb_space(..) |
211 | */ | 211 | */ |
212 | mb(); | 212 | mb(); |
213 | ACCESS_ONCE(*rb->read_ptr) = new_offset; | 213 | WRITE_ONCE(*rb->read_ptr, new_offset); |
214 | #ifdef CONFIG_INTEL_MIC_CARD | 214 | #ifdef CONFIG_INTEL_MIC_CARD |
215 | /* | 215 | /* |
216 | * X100 Si Bug: For the case where a Core is performing an EXT_WR | 216 | * X100 Si Bug: For the case where a Core is performing an EXT_WR |
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) | |||
219 | * This way, if ordering is violated for the Interrupt Message, it will | 219 | * This way, if ordering is violated for the Interrupt Message, it will |
220 | * fall just behind the first Posted associated with the first EXT_WR. | 220 | * fall just behind the first Posted associated with the first EXT_WR. |
221 | */ | 221 | */ |
222 | ACCESS_ONCE(*rb->read_ptr) = new_offset; | 222 | WRITE_ONCE(*rb->read_ptr, new_offset); |
223 | #endif | 223 | #endif |
224 | } | 224 | } |
225 | 225 | ||
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c index e1ef8daedd5a..a036dbb4101e 100644 --- a/drivers/misc/mic/scif/scif_rma_list.c +++ b/drivers/misc/mic/scif/scif_rma_list.c | |||
@@ -277,7 +277,7 @@ retry: | |||
277 | * Need to restart list traversal if there has been | 277 | * Need to restart list traversal if there has been |
278 | * an asynchronous list entry deletion. | 278 | * an asynchronous list entry deletion. |
279 | */ | 279 | */ |
280 | if (ACCESS_ONCE(ep->rma_info.async_list_del)) | 280 | if (READ_ONCE(ep->rma_info.async_list_del)) |
281 | goto retry; | 281 | goto retry; |
282 | } | 282 | } |
283 | mutex_unlock(&ep->rma_info.rma_lock); | 283 | mutex_unlock(&ep->rma_info.rma_lock); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490..1ed9529e7bd1 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1378 | unsigned int count; | 1378 | unsigned int count; |
1379 | 1379 | ||
1380 | slaves = rcu_dereference(bond->slave_arr); | 1380 | slaves = rcu_dereference(bond->slave_arr); |
1381 | count = slaves ? ACCESS_ONCE(slaves->count) : 0; | 1381 | count = slaves ? READ_ONCE(slaves->count) : 0; |
1382 | if (likely(count)) | 1382 | if (likely(count)) |
1383 | tx_slave = slaves->arr[hash_index % | 1383 | tx_slave = slaves->arr[hash_index % |
1384 | count]; | 1384 | count]; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c99dc59d729b..af51b90cecbb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1167 | slave = bond_slave_get_rcu(skb->dev); | 1167 | slave = bond_slave_get_rcu(skb->dev); |
1168 | bond = slave->bond; | 1168 | bond = slave->bond; |
1169 | 1169 | ||
1170 | recv_probe = ACCESS_ONCE(bond->recv_probe); | 1170 | recv_probe = READ_ONCE(bond->recv_probe); |
1171 | if (recv_probe) { | 1171 | if (recv_probe) { |
1172 | ret = recv_probe(skb, bond, slave); | 1172 | ret = recv_probe(skb, bond, slave); |
1173 | if (ret == RX_HANDLER_CONSUMED) { | 1173 | if (ret == RX_HANDLER_CONSUMED) { |
@@ -3810,7 +3810,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
3810 | else | 3810 | else |
3811 | bond_xmit_slave_id(bond, skb, 0); | 3811 | bond_xmit_slave_id(bond, skb, 0); |
3812 | } else { | 3812 | } else { |
3813 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); | 3813 | int slave_cnt = READ_ONCE(bond->slave_cnt); |
3814 | 3814 | ||
3815 | if (likely(slave_cnt)) { | 3815 | if (likely(slave_cnt)) { |
3816 | slave_id = bond_rr_gen_slave_id(bond); | 3816 | slave_id = bond_rr_gen_slave_id(bond); |
@@ -3972,7 +3972,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3972 | unsigned int count; | 3972 | unsigned int count; |
3973 | 3973 | ||
3974 | slaves = rcu_dereference(bond->slave_arr); | 3974 | slaves = rcu_dereference(bond->slave_arr); |
3975 | count = slaves ? ACCESS_ONCE(slaves->count) : 0; | 3975 | count = slaves ? READ_ONCE(slaves->count) : 0; |
3976 | if (likely(count)) { | 3976 | if (likely(count)) { |
3977 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; | 3977 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; |
3978 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3978 | bond_dev_queue_xmit(bond, skb, slave->dev); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c..43f52a8fe708 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q, | |||
405 | */ | 405 | */ |
406 | static inline int reclaimable(const struct sge_txq *q) | 406 | static inline int reclaimable(const struct sge_txq *q) |
407 | { | 407 | { |
408 | int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); | 408 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
409 | hw_cidx -= q->cidx; | 409 | hw_cidx -= q->cidx; |
410 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; | 410 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; |
411 | } | 411 | } |
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb); | |||
1375 | */ | 1375 | */ |
1376 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | 1376 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) |
1377 | { | 1377 | { |
1378 | int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); | 1378 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
1379 | int reclaim = hw_cidx - q->cidx; | 1379 | int reclaim = hw_cidx - q->cidx; |
1380 | 1380 | ||
1381 | if (reclaim < 0) | 1381 | if (reclaim < 0) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..c6e859a27ee6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) | |||
605 | 605 | ||
606 | if (wrapped) | 606 | if (wrapped) |
607 | newacc += 65536; | 607 | newacc += 65536; |
608 | ACCESS_ONCE(*acc) = newacc; | 608 | WRITE_ONCE(*acc, newacc); |
609 | } | 609 | } |
610 | 610 | ||
611 | static void populate_erx_stats(struct be_adapter *adapter, | 611 | static void populate_erx_stats(struct be_adapter *adapter, |
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 0cec06bec63e..340e28211135 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c | |||
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) | |||
373 | unsigned int count; | 373 | unsigned int count; |
374 | 374 | ||
375 | smp_rmb(); | 375 | smp_rmb(); |
376 | count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); | 376 | count = tx_count(READ_ONCE(priv->tx_head), tx_tail); |
377 | if (count == 0) | 377 | if (count == 0) |
378 | goto out; | 378 | goto out; |
379 | 379 | ||
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
431 | dma_addr_t phys; | 431 | dma_addr_t phys; |
432 | 432 | ||
433 | smp_rmb(); | 433 | smp_rmb(); |
434 | count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); | 434 | count = tx_count(tx_head, READ_ONCE(priv->tx_tail)); |
435 | if (count == (TX_DESC_NUM - 1)) { | 435 | if (count == (TX_DESC_NUM - 1)) { |
436 | netif_stop_queue(ndev); | 436 | netif_stop_queue(ndev); |
437 | return NETDEV_TX_BUSY; | 437 | return NETDEV_TX_BUSY; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..2cb9539c931e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
264 | vsi->rx_buf_failed, vsi->rx_page_failed); | 264 | vsi->rx_buf_failed, vsi->rx_page_failed); |
265 | rcu_read_lock(); | 265 | rcu_read_lock(); |
266 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 266 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
267 | struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); | 267 | struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); |
268 | 268 | ||
269 | if (!rx_ring) | 269 | if (!rx_ring) |
270 | continue; | 270 | continue; |
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); | 320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); |
321 | } | 321 | } |
322 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 322 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
323 | struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 323 | struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); |
324 | 324 | ||
325 | if (!tx_ring) | 325 | if (!tx_ring) |
326 | continue; | 326 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..e9e04a485e0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, | |||
1570 | } | 1570 | } |
1571 | rcu_read_lock(); | 1571 | rcu_read_lock(); |
1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { | 1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { |
1573 | tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); | 1573 | tx_ring = READ_ONCE(vsi->tx_rings[j]); |
1574 | 1574 | ||
1575 | if (!tx_ring) | 1575 | if (!tx_ring) |
1576 | continue; | 1576 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..de1fcac7834d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, | |||
455 | u64 bytes, packets; | 455 | u64 bytes, packets; |
456 | unsigned int start; | 456 | unsigned int start; |
457 | 457 | ||
458 | tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 458 | tx_ring = READ_ONCE(vsi->tx_rings[i]); |
459 | if (!tx_ring) | 459 | if (!tx_ring) |
460 | continue; | 460 | continue; |
461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); | 461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); |
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |||
791 | rcu_read_lock(); | 791 | rcu_read_lock(); |
792 | for (q = 0; q < vsi->num_queue_pairs; q++) { | 792 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
793 | /* locate Tx ring */ | 793 | /* locate Tx ring */ |
794 | p = ACCESS_ONCE(vsi->tx_rings[q]); | 794 | p = READ_ONCE(vsi->tx_rings[q]); |
795 | 795 | ||
796 | do { | 796 | do { |
797 | start = u64_stats_fetch_begin_irq(&p->syncp); | 797 | start = u64_stats_fetch_begin_irq(&p->syncp); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
130 | } | 130 | } |
131 | 131 | ||
132 | smp_mb(); /* Force any pending update before accessing. */ | 132 | smp_mb(); /* Force any pending update before accessing. */ |
133 | adj = ACCESS_ONCE(pf->ptp_base_adj); | 133 | adj = READ_ONCE(pf->ptp_base_adj); |
134 | 134 | ||
135 | freq = adj; | 135 | freq = adj; |
136 | freq *= ppb; | 136 | freq *= ppb; |
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) | |||
499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); | 499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); |
500 | 500 | ||
501 | /* Update the base adjustement value. */ | 501 | /* Update the base adjustement value. */ |
502 | ACCESS_ONCE(pf->ptp_base_adj) = incval; | 502 | WRITE_ONCE(pf->ptp_base_adj, incval); |
503 | smp_mb(); /* Force the above update. */ | 503 | smp_mb(); /* Force the above update. */ |
504 | } | 504 | } |
505 | 505 | ||
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..31a3f09df9f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h | |||
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); | |||
375 | /* write operations, indexed using DWORDS */ | 375 | /* write operations, indexed using DWORDS */ |
376 | #define wr32(reg, val) \ | 376 | #define wr32(reg, val) \ |
377 | do { \ | 377 | do { \ |
378 | u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ | 378 | u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ |
379 | if (!E1000_REMOVED(hw_addr)) \ | 379 | if (!E1000_REMOVED(hw_addr)) \ |
380 | writel((val), &hw_addr[(reg)]); \ | 380 | writel((val), &hw_addr[(reg)]); \ |
381 | } while (0) | 381 | } while (0) |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fd4a46b03cc8..6bccc2be2b91 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) | 750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) |
751 | { | 751 | { |
752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); | 752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); |
753 | u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); | 753 | u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
754 | u32 value = 0; | 754 | u32 value = 0; |
755 | 755 | ||
756 | if (E1000_REMOVED(hw_addr)) | 756 | if (E1000_REMOVED(hw_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | |||
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) | |||
161 | 161 | ||
162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
163 | { | 163 | { |
164 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 164 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
165 | 165 | ||
166 | if (ixgbe_removed(reg_addr)) | 166 | if (ixgbe_removed(reg_addr)) |
167 | return; | 167 | return; |
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) | |||
180 | 180 | ||
181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) | 181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) |
182 | { | 182 | { |
183 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 183 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
184 | 184 | ||
185 | if (ixgbe_removed(reg_addr)) | 185 | if (ixgbe_removed(reg_addr)) |
186 | return; | 186 | return; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d76afd13868..2224e691ee07 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
380 | */ | 380 | */ |
381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) | 381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) |
382 | { | 382 | { |
383 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 383 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
384 | u32 value; | 384 | u32 value; |
385 | 385 | ||
386 | if (ixgbe_removed(reg_addr)) | 386 | if (ixgbe_removed(reg_addr)) |
@@ -8630,7 +8630,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8630 | 8630 | ||
8631 | rcu_read_lock(); | 8631 | rcu_read_lock(); |
8632 | for (i = 0; i < adapter->num_rx_queues; i++) { | 8632 | for (i = 0; i < adapter->num_rx_queues; i++) { |
8633 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | 8633 | struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); |
8634 | u64 bytes, packets; | 8634 | u64 bytes, packets; |
8635 | unsigned int start; | 8635 | unsigned int start; |
8636 | 8636 | ||
@@ -8646,12 +8646,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8646 | } | 8646 | } |
8647 | 8647 | ||
8648 | for (i = 0; i < adapter->num_tx_queues; i++) { | 8648 | for (i = 0; i < adapter->num_tx_queues; i++) { |
8649 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | 8649 | struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); |
8650 | 8650 | ||
8651 | ixgbe_get_ring_stats64(stats, ring); | 8651 | ixgbe_get_ring_stats64(stats, ring); |
8652 | } | 8652 | } |
8653 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 8653 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
8654 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); | 8654 | struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); |
8655 | 8655 | ||
8656 | ixgbe_get_ring_stats64(stats, ring); | 8656 | ixgbe_get_ring_stats64(stats, ring); |
8657 | } | 8657 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | |||
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) | |||
378 | } | 378 | } |
379 | 379 | ||
380 | smp_mb(); | 380 | smp_mb(); |
381 | incval = ACCESS_ONCE(adapter->base_incval); | 381 | incval = READ_ONCE(adapter->base_incval); |
382 | 382 | ||
383 | freq = incval; | 383 | freq = incval; |
384 | freq *= ppb; | 384 | freq *= ppb; |
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) | |||
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | /* update the base incval used to calculate frequency adjustment */ | 1161 | /* update the base incval used to calculate frequency adjustment */ |
1162 | ACCESS_ONCE(adapter->base_incval) = incval; | 1162 | WRITE_ONCE(adapter->base_incval, incval); |
1163 | smp_mb(); | 1163 | smp_mb(); |
1164 | 1164 | ||
1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ | 1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..cacb30682434 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
164 | 164 | ||
165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) | 165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) |
166 | { | 166 | { |
167 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 167 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
168 | u32 value; | 168 | u32 value; |
169 | 169 | ||
170 | if (IXGBE_REMOVED(reg_addr)) | 170 | if (IXGBE_REMOVED(reg_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h | |||
@@ -182,7 +182,7 @@ struct ixgbevf_info { | |||
182 | 182 | ||
183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
184 | { | 184 | { |
185 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 185 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
186 | 186 | ||
187 | if (IXGBE_REMOVED(reg_addr)) | 187 | if (IXGBE_REMOVED(reg_addr)) |
188 | return; | 188 | return; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..3541a7f9d12e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
414 | 414 | ||
415 | index = cons_index & size_mask; | 415 | index = cons_index & size_mask; |
416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; | 416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; |
417 | last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); | 417 | last_nr_txbb = READ_ONCE(ring->last_nr_txbb); |
418 | ring_cons = ACCESS_ONCE(ring->cons); | 418 | ring_cons = READ_ONCE(ring->cons); |
419 | ring_index = ring_cons & size_mask; | 419 | ring_index = ring_cons & size_mask; |
420 | stamp_index = ring_index; | 420 | stamp_index = ring_index; |
421 | 421 | ||
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
479 | wmb(); | 479 | wmb(); |
480 | 480 | ||
481 | /* we want to dirty this cache line once */ | 481 | /* we want to dirty this cache line once */ |
482 | ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; | 482 | WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); |
483 | ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; | 483 | WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); |
484 | 484 | ||
485 | if (cq->type == TX_XDP) | 485 | if (cq->type == TX_XDP) |
486 | return done < budget; | 486 | return done < budget; |
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
858 | goto tx_drop; | 858 | goto tx_drop; |
859 | 859 | ||
860 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 860 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
861 | ring_cons = ACCESS_ONCE(ring->cons); | 861 | ring_cons = READ_ONCE(ring->cons); |
862 | 862 | ||
863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, | 863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, |
864 | &inline_ok, &fragptr); | 864 | &inline_ok, &fragptr); |
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1066 | */ | 1066 | */ |
1067 | smp_rmb(); | 1067 | smp_rmb(); |
1068 | 1068 | ||
1069 | ring_cons = ACCESS_ONCE(ring->cons); | 1069 | ring_cons = READ_ONCE(ring->cons); |
1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { | 1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { |
1071 | netif_tx_wake_queue(ring->tx_queue); | 1071 | netif_tx_wake_queue(ring->tx_queue); |
1072 | ring->wake_queue++; | 1072 | ring->wake_queue++; |
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5dd5f61e1114 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c | |||
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2629 | ring = &vdev->vpaths[i].ring; | 2629 | ring = &vdev->vpaths[i].ring; |
2630 | 2630 | ||
2631 | /* Truncated to machine word size number of frames */ | 2631 | /* Truncated to machine word size number of frames */ |
2632 | rx_frms = ACCESS_ONCE(ring->stats.rx_frms); | 2632 | rx_frms = READ_ONCE(ring->stats.rx_frms); |
2633 | 2633 | ||
2634 | /* Did this vpath received any packets */ | 2634 | /* Did this vpath received any packets */ |
2635 | if (ring->stats.prev_rx_frms == rx_frms) { | 2635 | if (ring->stats.prev_rx_frms == rx_frms) { |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..a95a46bcd339 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2073 | netif_vdbg(efx, intr, efx->net_dev, | 2073 | netif_vdbg(efx, intr, efx->net_dev, |
2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | 2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
2075 | 2075 | ||
2076 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | 2076 | if (likely(READ_ONCE(efx->irq_soft_enabled))) { |
2077 | /* Note test interrupts */ | 2077 | /* Note test interrupts */ |
2078 | if (context->index == efx->irq_level) | 2078 | if (context->index == efx->irq_level) |
2079 | efx->last_irq_cpu = raw_smp_processor_id(); | 2079 | efx->last_irq_cpu = raw_smp_processor_id(); |
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | 2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
2089 | { | 2089 | { |
2090 | struct efx_nic *efx = dev_id; | 2090 | struct efx_nic *efx = dev_id; |
2091 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 2091 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
2092 | struct efx_channel *channel; | 2092 | struct efx_channel *channel; |
2093 | efx_dword_t reg; | 2093 | efx_dword_t reg; |
2094 | u32 queues; | 2094 | u32 queues; |
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |||
3291 | bool rx_cont; | 3291 | bool rx_cont; |
3292 | u16 flags = 0; | 3292 | u16 flags = 0; |
3293 | 3293 | ||
3294 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3294 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3295 | return 0; | 3295 | return 0; |
3296 | 3296 | ||
3297 | /* Basic packet information */ | 3297 | /* Basic packet information */ |
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
3428 | unsigned int tx_ev_q_label; | 3428 | unsigned int tx_ev_q_label; |
3429 | int tx_descs = 0; | 3429 | int tx_descs = 0; |
3430 | 3430 | ||
3431 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3431 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3432 | return 0; | 3432 | return 0; |
3433 | 3433 | ||
3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | 3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) | |||
5316 | int i; | 5316 | int i; |
5317 | 5317 | ||
5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | 5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
5319 | if (ACCESS_ONCE(table->entry[i].spec) & | 5319 | if (READ_ONCE(table->entry[i].spec) & |
5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { | 5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { |
5321 | rc = efx_ef10_filter_remove_internal(efx, | 5321 | rc = efx_ef10_filter_remove_internal(efx, |
5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); | 5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b9cb697b2818..016616a63880 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data) | |||
2809 | unsigned long pending; | 2809 | unsigned long pending; |
2810 | enum reset_type method; | 2810 | enum reset_type method; |
2811 | 2811 | ||
2812 | pending = ACCESS_ONCE(efx->reset_pending); | 2812 | pending = READ_ONCE(efx->reset_pending); |
2813 | method = fls(pending) - 1; | 2813 | method = fls(pending) - 1; |
2814 | 2814 | ||
2815 | if (method == RESET_TYPE_MC_BIST) | 2815 | if (method == RESET_TYPE_MC_BIST) |
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
2874 | /* If we're not READY then just leave the flags set as the cue | 2874 | /* If we're not READY then just leave the flags set as the cue |
2875 | * to abort probing or reschedule the reset later. | 2875 | * to abort probing or reschedule the reset later. |
2876 | */ | 2876 | */ |
2877 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2877 | if (READ_ONCE(efx->state) != STATE_READY) |
2878 | return; | 2878 | return; |
2879 | 2879 | ||
2880 | /* efx_process_channel() will no longer read events once a | 2880 | /* efx_process_channel() will no longer read events once a |
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 29614da91cbf..7263275fde4a 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c | |||
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data) | |||
2545 | unsigned long pending; | 2545 | unsigned long pending; |
2546 | enum reset_type method; | 2546 | enum reset_type method; |
2547 | 2547 | ||
2548 | pending = ACCESS_ONCE(efx->reset_pending); | 2548 | pending = READ_ONCE(efx->reset_pending); |
2549 | method = fls(pending) - 1; | 2549 | method = fls(pending) - 1; |
2550 | 2550 | ||
2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || | 2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || |
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) | |||
2605 | /* If we're not READY then just leave the flags set as the cue | 2605 | /* If we're not READY then just leave the flags set as the cue |
2606 | * to abort probing or reschedule the reset later. | 2606 | * to abort probing or reschedule the reset later. |
2607 | */ | 2607 | */ |
2608 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2608 | if (READ_ONCE(efx->state) != STATE_READY) |
2609 | return; | 2609 | return; |
2610 | 2610 | ||
2611 | queue_work(reset_workqueue, &efx->reset_work); | 2611 | queue_work(reset_workqueue, &efx->reset_work); |
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 93c713c1f627..cd8bb472d758 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c | |||
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
454 | 454 | ||
455 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 455 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
456 | return IRQ_HANDLED; | 456 | return IRQ_HANDLED; |
457 | 457 | ||
458 | /* Check to see if we have a serious error condition */ | 458 | /* Check to see if we have a serious error condition */ |
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx) | |||
1372 | ef4_oword_t reg; | 1372 | ef4_oword_t reg; |
1373 | int link_speed, isolate; | 1373 | int link_speed, isolate; |
1374 | 1374 | ||
1375 | isolate = !!ACCESS_ONCE(efx->reset_pending); | 1375 | isolate = !!READ_ONCE(efx->reset_pending); |
1376 | 1376 | ||
1377 | switch (link_state->speed) { | 1377 | switch (link_state->speed) { |
1378 | case 10000: link_speed = 3; break; | 1378 | case 10000: link_speed = 3; break; |
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 05916c710d8c..494884f6af4a 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c | |||
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) | |||
834 | struct ef4_nic *efx = channel->efx; | 834 | struct ef4_nic *efx = channel->efx; |
835 | int tx_packets = 0; | 835 | int tx_packets = 0; |
836 | 836 | ||
837 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 837 | if (unlikely(READ_ONCE(efx->reset_pending))) |
838 | return 0; | 838 | return 0; |
839 | 839 | ||
840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) | |||
990 | struct ef4_rx_queue *rx_queue; | 990 | struct ef4_rx_queue *rx_queue; |
991 | struct ef4_nic *efx = channel->efx; | 991 | struct ef4_nic *efx = channel->efx; |
992 | 992 | ||
993 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 993 | if (unlikely(READ_ONCE(efx->reset_pending))) |
994 | return; | 994 | return; |
995 | 995 | ||
996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) | |||
1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) | 1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) |
1505 | { | 1505 | { |
1506 | struct ef4_nic *efx = dev_id; | 1506 | struct ef4_nic *efx = dev_id; |
1507 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1507 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1508 | ef4_oword_t *int_ker = efx->irq_status.addr; | 1508 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1509 | irqreturn_t result = IRQ_NONE; | 1509 | irqreturn_t result = IRQ_NONE; |
1510 | struct ef4_channel *channel; | 1510 | struct ef4_channel *channel; |
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id) | |||
1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
1598 | 1598 | ||
1599 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1599 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1600 | return IRQ_HANDLED; | 1600 | return IRQ_HANDLED; |
1601 | 1601 | ||
1602 | /* Handle non-event-queue sources */ | 1602 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index a4c4592f6023..54ca457cdb15 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h | |||
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_ | |||
83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, | 83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, |
84 | unsigned int write_count) | 84 | unsigned int write_count) |
85 | { | 85 | { |
86 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 86 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
87 | 87 | ||
88 | if (empty_read_count == 0) | 88 | if (empty_read_count == 0) |
89 | return false; | 89 | return false; |
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx); | |||
464 | 464 | ||
465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) | 465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) |
466 | { | 466 | { |
467 | return ACCESS_ONCE(channel->event_test_cpu); | 467 | return READ_ONCE(channel->event_test_cpu); |
468 | } | 468 | } |
469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) | 469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) |
470 | { | 470 | { |
471 | return ACCESS_ONCE(efx->last_irq_cpu); | 471 | return READ_ONCE(efx->last_irq_cpu); |
472 | } | 472 | } |
473 | 473 | ||
474 | /* Global Resources */ | 474 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..6486814e97dc 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c | |||
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1) | |||
134 | */ | 134 | */ |
135 | netif_tx_stop_queue(txq1->core_txq); | 135 | netif_tx_stop_queue(txq1->core_txq); |
136 | smp_mb(); | 136 | smp_mb(); |
137 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 137 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
138 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 138 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
139 | 139 | ||
140 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 140 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
141 | txq2->insert_count - txq2->old_read_count); | 141 | txq2->insert_count - txq2->old_read_count); |
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) | |||
524 | 524 | ||
525 | /* Check whether the hardware queue is now empty */ | 525 | /* Check whether the hardware queue is now empty */ |
526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
527 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 527 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
528 | if (tx_queue->read_count == tx_queue->old_write_count) { | 528 | if (tx_queue->read_count == tx_queue->old_write_count) { |
529 | smp_mb(); | 529 | smp_mb(); |
530 | tx_queue->empty_read_count = | 530 | tx_queue->empty_read_count = |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index ba45150f53c7..86454d25a405 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
827 | struct efx_nic *efx = channel->efx; | 827 | struct efx_nic *efx = channel->efx; |
828 | int tx_packets = 0; | 828 | int tx_packets = 0; |
829 | 829 | ||
830 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 830 | if (unlikely(READ_ONCE(efx->reset_pending))) |
831 | return 0; | 831 | return 0; |
832 | 832 | ||
833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | |||
979 | struct efx_rx_queue *rx_queue; | 979 | struct efx_rx_queue *rx_queue; |
980 | struct efx_nic *efx = channel->efx; | 980 | struct efx_nic *efx = channel->efx; |
981 | 981 | ||
982 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 982 | if (unlikely(READ_ONCE(efx->reset_pending))) |
983 | return; | 983 | return; |
984 | 984 | ||
985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) | |||
1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | 1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) |
1521 | { | 1521 | { |
1522 | struct efx_nic *efx = dev_id; | 1522 | struct efx_nic *efx = dev_id; |
1523 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1523 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1524 | efx_oword_t *int_ker = efx->irq_status.addr; | 1524 | efx_oword_t *int_ker = efx->irq_status.addr; |
1525 | irqreturn_t result = IRQ_NONE; | 1525 | irqreturn_t result = IRQ_NONE; |
1526 | struct efx_channel *channel; | 1526 | struct efx_channel *channel; |
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | |||
1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1614 | 1614 | ||
1615 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1615 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1616 | return IRQ_HANDLED; | 1616 | return IRQ_HANDLED; |
1617 | 1617 | ||
1618 | /* Handle non-event-queue sources */ | 1618 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4d7fb8af880d..7b51b6371724 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) | |||
81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, | 81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, |
82 | unsigned int write_count) | 82 | unsigned int write_count) |
83 | { | 83 | { |
84 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 84 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
85 | 85 | ||
86 | if (empty_read_count == 0) | 86 | if (empty_read_count == 0) |
87 | return false; | 87 | return false; |
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); | |||
617 | 617 | ||
618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) | 618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) |
619 | { | 619 | { |
620 | return ACCESS_ONCE(channel->event_test_cpu); | 620 | return READ_ONCE(channel->event_test_cpu); |
621 | } | 621 | } |
622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) | 622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) |
623 | { | 623 | { |
624 | return ACCESS_ONCE(efx->last_irq_cpu); | 624 | return READ_ONCE(efx->last_irq_cpu); |
625 | } | 625 | } |
626 | 626 | ||
627 | /* Global Resources */ | 627 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..56c2db398def 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
658 | 658 | ||
659 | /* Write host time for specified period or until MC is done */ | 659 | /* Write host time for specified period or until MC is done */ |
660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && | 660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && |
661 | ACCESS_ONCE(*mc_running)) { | 661 | READ_ONCE(*mc_running)) { |
662 | struct timespec64 update_time; | 662 | struct timespec64 update_time; |
663 | unsigned int host_time; | 663 | unsigned int host_time; |
664 | 664 | ||
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
668 | do { | 668 | do { |
669 | pps_get_ts(&now); | 669 | pps_get_ts(&now); |
670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && | 670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && |
671 | ACCESS_ONCE(*mc_running)); | 671 | READ_ONCE(*mc_running)); |
672 | 672 | ||
673 | /* Synchronise NIC with single word of time only */ | 673 | /* Synchronise NIC with single word of time only */ |
674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | | 674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
832 | ptp->start.dma_addr); | 832 | ptp->start.dma_addr); |
833 | 833 | ||
834 | /* Clear flag that signals MC ready */ | 834 | /* Clear flag that signals MC ready */ |
835 | ACCESS_ONCE(*start) = 0; | 835 | WRITE_ONCE(*start, 0); |
836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, | 836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, |
837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); | 837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); |
838 | EFX_WARN_ON_ONCE_PARANOID(rc); | 838 | EFX_WARN_ON_ONCE_PARANOID(rc); |
839 | 839 | ||
840 | /* Wait for start from MCDI (or timeout) */ | 840 | /* Wait for start from MCDI (or timeout) */ |
841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); | 841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); |
842 | while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { | 842 | while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { |
843 | udelay(20); /* Usually start MCDI execution quickly */ | 843 | udelay(20); /* Usually start MCDI execution quickly */ |
844 | loops++; | 844 | loops++; |
845 | } | 845 | } |
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
849 | if (!time_before(jiffies, timeout)) | 849 | if (!time_before(jiffies, timeout)) |
850 | ++ptp->sync_timeouts; | 850 | ++ptp->sync_timeouts; |
851 | 851 | ||
852 | if (ACCESS_ONCE(*start)) | 852 | if (READ_ONCE(*start)) |
853 | efx_ptp_send_times(efx, &last_time); | 853 | efx_ptp_send_times(efx, &last_time); |
854 | 854 | ||
855 | /* Collect results */ | 855 | /* Collect results */ |
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..efb66ea21f27 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c | |||
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) | |||
136 | */ | 136 | */ |
137 | netif_tx_stop_queue(txq1->core_txq); | 137 | netif_tx_stop_queue(txq1->core_txq); |
138 | smp_mb(); | 138 | smp_mb(); |
139 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 139 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
140 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 140 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
141 | 141 | ||
142 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 142 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
143 | txq2->insert_count - txq2->old_read_count); | 143 | txq2->insert_count - txq2->old_read_count); |
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
752 | 752 | ||
753 | /* Check whether the hardware queue is now empty */ | 753 | /* Check whether the hardware queue is now empty */ |
754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
755 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 755 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
756 | if (tx_queue->read_count == tx_queue->old_write_count) { | 756 | if (tx_queue->read_count == tx_queue->old_write_count) { |
757 | smp_mb(); | 757 | smp_mb(); |
758 | tx_queue->empty_read_count = | 758 | tx_queue->empty_read_count = |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..8ab0fb6892d5 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np, | |||
6245 | 6245 | ||
6246 | pkts = dropped = errors = bytes = 0; | 6246 | pkts = dropped = errors = bytes = 0; |
6247 | 6247 | ||
6248 | rx_rings = ACCESS_ONCE(np->rx_rings); | 6248 | rx_rings = READ_ONCE(np->rx_rings); |
6249 | if (!rx_rings) | 6249 | if (!rx_rings) |
6250 | goto no_rings; | 6250 | goto no_rings; |
6251 | 6251 | ||
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np, | |||
6276 | 6276 | ||
6277 | pkts = errors = bytes = 0; | 6277 | pkts = errors = bytes = 0; |
6278 | 6278 | ||
6279 | tx_rings = ACCESS_ONCE(np->tx_rings); | 6279 | tx_rings = READ_ONCE(np->tx_rings); |
6280 | if (!tx_rings) | 6280 | if (!tx_rings) |
6281 | goto no_rings; | 6281 | goto no_rings; |
6282 | 6282 | ||
diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 21b71ae947fd..b55b29b90b88 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c | |||
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap, | |||
257 | * and validate that the result isn't NULL - in case we are | 257 | * and validate that the result isn't NULL - in case we are |
258 | * racing against queue removal. | 258 | * racing against queue removal. |
259 | */ | 259 | */ |
260 | int numvtaps = ACCESS_ONCE(tap->numvtaps); | 260 | int numvtaps = READ_ONCE(tap->numvtaps); |
261 | __u32 rxq; | 261 | __u32 rxq; |
262 | 262 | ||
263 | if (!numvtaps) | 263 | if (!numvtaps) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e21bf90b819f..27cd50c5bc9e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
469 | u32 numqueues = 0; | 469 | u32 numqueues = 0; |
470 | 470 | ||
471 | rcu_read_lock(); | 471 | rcu_read_lock(); |
472 | numqueues = ACCESS_ONCE(tun->numqueues); | 472 | numqueues = READ_ONCE(tun->numqueues); |
473 | 473 | ||
474 | txq = __skb_get_hash_symmetric(skb); | 474 | txq = __skb_get_hash_symmetric(skb); |
475 | if (txq) { | 475 | if (txq) { |
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
864 | 864 | ||
865 | rcu_read_lock(); | 865 | rcu_read_lock(); |
866 | tfile = rcu_dereference(tun->tfiles[txq]); | 866 | tfile = rcu_dereference(tun->tfiles[txq]); |
867 | numqueues = ACCESS_ONCE(tun->numqueues); | 867 | numqueues = READ_ONCE(tun->numqueues); |
868 | 868 | ||
869 | /* Drop packet if interface is not attached */ | 869 | /* Drop packet if interface is not attached */ |
870 | if (txq >= numqueues) | 870 | if (txq >= numqueues) |
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index bd8d4392d68b..80f75139495f 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c | |||
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, | |||
500 | 500 | ||
501 | tx_status = &desc->ud.ds_tx5212.tx_stat; | 501 | tx_status = &desc->ud.ds_tx5212.tx_stat; |
502 | 502 | ||
503 | txstat1 = ACCESS_ONCE(tx_status->tx_status_1); | 503 | txstat1 = READ_ONCE(tx_status->tx_status_1); |
504 | 504 | ||
505 | /* No frame has been send or error */ | 505 | /* No frame has been send or error */ |
506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) | 506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) |
507 | return -EINPROGRESS; | 507 | return -EINPROGRESS; |
508 | 508 | ||
509 | txstat0 = ACCESS_ONCE(tx_status->tx_status_0); | 509 | txstat0 = READ_ONCE(tx_status->tx_status_0); |
510 | 510 | ||
511 | /* | 511 | /* |
512 | * Get descriptor status | 512 | * Get descriptor status |
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
700 | u32 rxstat0, rxstat1; | 700 | u32 rxstat0, rxstat1; |
701 | 701 | ||
702 | rx_status = &desc->ud.ds_rx.rx_stat; | 702 | rx_status = &desc->ud.ds_rx.rx_stat; |
703 | rxstat1 = ACCESS_ONCE(rx_status->rx_status_1); | 703 | rxstat1 = READ_ONCE(rx_status->rx_status_1); |
704 | 704 | ||
705 | /* No frame received / not ready */ | 705 | /* No frame received / not ready */ |
706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) | 706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) |
707 | return -EINPROGRESS; | 707 | return -EINPROGRESS; |
708 | 708 | ||
709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); | 709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); |
710 | rxstat0 = ACCESS_ONCE(rx_status->rx_status_0); | 710 | rxstat0 = READ_ONCE(rx_status->rx_status_0); |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * Frame receive status | 713 | * Frame receive status |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..785a0f33b7e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work) | |||
3628 | 3628 | ||
3629 | bus->dpc_running = true; | 3629 | bus->dpc_running = true; |
3630 | wmb(); | 3630 | wmb(); |
3631 | while (ACCESS_ONCE(bus->dpc_triggered)) { | 3631 | while (READ_ONCE(bus->dpc_triggered)) { |
3632 | bus->dpc_triggered = false; | 3632 | bus->dpc_triggered = false; |
3633 | brcmf_sdio_dpc(bus); | 3633 | brcmf_sdio_dpc(bus); |
3634 | bus->idlecount = 0; | 3634 | bus->idlecount = 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..0f45f34e39d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) | |||
1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
1119 | { | 1119 | { |
1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1121 | bool calibrating = ACCESS_ONCE(mvm->calibrating); | 1121 | bool calibrating = READ_ONCE(mvm->calibrating); |
1122 | 1122 | ||
1123 | if (state) | 1123 | if (state) |
1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..6e9d3289b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
652 | return -1; | 652 | return -1; |
653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && | 653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && |
654 | is_multicast_ether_addr(hdr->addr1)) { | 654 | is_multicast_ether_addr(hdr->addr1)) { |
655 | u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); | 655 | u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); |
656 | 656 | ||
657 | if (ap_sta_id != IWL_MVM_INVALID_STA) | 657 | if (ap_sta_id != IWL_MVM_INVALID_STA) |
658 | sta_id = ap_sta_id; | 658 | sta_id = ap_sta_id; |
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + | 700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + |
701 | tcp_hdrlen(skb); | 701 | tcp_hdrlen(skb); |
702 | 702 | ||
703 | dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); | 703 | dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); |
704 | 704 | ||
705 | if (!sta->max_amsdu_len || | 705 | if (!sta->max_amsdu_len || |
706 | !ieee80211_is_data_qos(hdr->frame_control) || | 706 | !ieee80211_is_data_qos(hdr->frame_control) || |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b658..f25ce3a1ea50 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c | |||
@@ -1247,7 +1247,7 @@ restart: | |||
1247 | spin_lock(&rxq->lock); | 1247 | spin_lock(&rxq->lock); |
1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | 1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1249 | * buffer that the driver may process (last buffer filled by ucode). */ | 1249 | * buffer that the driver may process (last buffer filled by ucode). */ |
1250 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 1250 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
1251 | i = rxq->read; | 1251 | i = rxq->read; |
1252 | 1252 | ||
1253 | /* W/A 9000 device step A0 wrap-around bug */ | 1253 | /* W/A 9000 device step A0 wrap-around bug */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..9ad3f4fe5894 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) | |||
2076 | 2076 | ||
2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); | 2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); |
2078 | txq = trans_pcie->txq[txq_idx]; | 2078 | txq = trans_pcie->txq[txq_idx]; |
2079 | wr_ptr = ACCESS_ONCE(txq->write_ptr); | 2079 | wr_ptr = READ_ONCE(txq->write_ptr); |
2080 | 2080 | ||
2081 | while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && | 2081 | while (txq->read_ptr != READ_ONCE(txq->write_ptr) && |
2082 | !time_after(jiffies, | 2082 | !time_after(jiffies, |
2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { | 2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { |
2084 | u8 write_ptr = ACCESS_ONCE(txq->write_ptr); | 2084 | u8 write_ptr = READ_ONCE(txq->write_ptr); |
2085 | 2085 | ||
2086 | if (WARN_ONCE(wr_ptr != write_ptr, | 2086 | if (WARN_ONCE(wr_ptr != write_ptr, |
2087 | "WR pointer moved while flushing %d -> %d\n", | 2087 | "WR pointer moved while flushing %d -> %d\n", |
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, | |||
2553 | 2553 | ||
2554 | spin_lock(&rxq->lock); | 2554 | spin_lock(&rxq->lock); |
2555 | 2555 | ||
2556 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 2556 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
2557 | 2557 | ||
2558 | for (i = rxq->read, j = 0; | 2558 | for (i = rxq->read, j = 0; |
2559 | i != r && j < allocated_rb_nums; | 2559 | i != r && j < allocated_rb_nums; |
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data | |||
2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ | 2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ |
2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; | 2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; |
2816 | /* RBs */ | 2816 | /* RBs */ |
2817 | num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) | 2817 | num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) |
2818 | & 0x0FFF; | 2818 | & 0x0FFF; |
2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; | 2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; |
2820 | len += num_rbs * (sizeof(*data) + | 2820 | len += num_rbs * (sizeof(*data) + |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d2b3d6177a55 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, | |||
1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); | 1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); |
1381 | 1381 | ||
1382 | /* wmediumd mode check */ | 1382 | /* wmediumd mode check */ |
1383 | _portid = ACCESS_ONCE(data->wmediumd); | 1383 | _portid = READ_ONCE(data->wmediumd); |
1384 | 1384 | ||
1385 | if (_portid) | 1385 | if (_portid) |
1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); | 1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); |
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, | |||
1477 | struct ieee80211_channel *chan) | 1477 | struct ieee80211_channel *chan) |
1478 | { | 1478 | { |
1479 | struct mac80211_hwsim_data *data = hw->priv; | 1479 | struct mac80211_hwsim_data *data = hw->priv; |
1480 | u32 _pid = ACCESS_ONCE(data->wmediumd); | 1480 | u32 _pid = READ_ONCE(data->wmediumd); |
1481 | 1481 | ||
1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { | 1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { |
1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); | 1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..f946bf889015 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work) | |||
996 | if (logout_started) { | 996 | if (logout_started) { |
997 | bool traced = false; | 997 | bool traced = false; |
998 | 998 | ||
999 | while (!ACCESS_ONCE(sess->logout_completed)) { | 999 | while (!READ_ONCE(sess->logout_completed)) { |
1000 | if (!traced) { | 1000 | if (!traced) { |
1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, | 1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, |
1002 | "%s: waiting for sess %p logout\n", | 1002 | "%s: waiting for sess %p logout\n", |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..9469695f5871 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
985 | mb = udev->mb_addr; | 985 | mb = udev->mb_addr; |
986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
987 | 987 | ||
988 | while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { | 988 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
989 | 989 | ||
990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
991 | struct tcmu_cmd *cmd; | 991 | struct tcmu_cmd *cmd; |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3e865dbf878c..fbaa2a90d25d 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -483,7 +483,7 @@ static ssize_t wdm_read | |||
483 | if (rv < 0) | 483 | if (rv < 0) |
484 | return -ERESTARTSYS; | 484 | return -ERESTARTSYS; |
485 | 485 | ||
486 | cntr = ACCESS_ONCE(desc->length); | 486 | cntr = READ_ONCE(desc->length); |
487 | if (cntr == 0) { | 487 | if (cntr == 0) { |
488 | desc->read = 0; | 488 | desc->read = 0; |
489 | retry: | 489 | retry: |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..4ae667d8c238 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount) | |||
150 | { | 150 | { |
151 | u64 lim; | 151 | u64 lim; |
152 | 152 | ||
153 | lim = ACCESS_ONCE(usbfs_memory_mb); | 153 | lim = READ_ONCE(usbfs_memory_mb); |
154 | lim <<= 20; | 154 | lim <<= 20; |
155 | 155 | ||
156 | atomic64_add(amount, &usbfs_memory_usage); | 156 | atomic64_add(amount, &usbfs_memory_usage); |
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d930bfda4010..58d59c5f8592 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c | |||
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr, | |||
973 | char *string; | 973 | char *string; |
974 | 974 | ||
975 | intf = to_usb_interface(dev); | 975 | intf = to_usb_interface(dev); |
976 | string = ACCESS_ONCE(intf->cur_altsetting->string); | 976 | string = READ_ONCE(intf->cur_altsetting->string); |
977 | if (!string) | 977 | if (!string) |
978 | return 0; | 978 | return 0; |
979 | return sprintf(buf, "%s\n", string); | 979 | return sprintf(buf, "%s\n", string); |
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |||
989 | 989 | ||
990 | intf = to_usb_interface(dev); | 990 | intf = to_usb_interface(dev); |
991 | udev = interface_to_usbdev(intf); | 991 | udev = interface_to_usbdev(intf); |
992 | alt = ACCESS_ONCE(intf->cur_altsetting); | 992 | alt = READ_ONCE(intf->cur_altsetting); |
993 | 993 | ||
994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" | 994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" |
995 | "ic%02Xisc%02Xip%02Xin%02X\n", | 995 | "ic%02Xisc%02Xip%02Xin%02X\n", |
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 1f9941145746..0b59fa50aa30 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c | |||
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep) | |||
1261 | if (!req->last_desc) | 1261 | if (!req->last_desc) |
1262 | return 0; | 1262 | return 0; |
1263 | 1263 | ||
1264 | if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) | 1264 | if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) |
1265 | return 0; /* Not put in hardware buffers yet */ | 1265 | return 0; /* Not put in hardware buffers yet */ |
1266 | 1266 | ||
1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) | 1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) |
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep) | |||
1290 | if (!req->curr_desc) | 1290 | if (!req->curr_desc) |
1291 | return 0; | 1291 | return 0; |
1292 | 1292 | ||
1293 | ctrl = ACCESS_ONCE(req->curr_desc->ctrl); | 1293 | ctrl = READ_ONCE(req->curr_desc->ctrl); |
1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) | 1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) |
1295 | return 0; /* Not received yet */ | 1295 | return 0; /* Not received yet */ |
1296 | 1296 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 44924824fa41..c86f89babd57 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | /* find the last TD processed by the controller. */ | 787 | /* find the last TD processed by the controller. */ |
788 | head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK; | 788 | head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK; |
789 | td_start = td; | 789 | td_start = td; |
790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); | 790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); |
791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { | 791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { |
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 91b22b2ea3aa..09a2a259941b 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h | |||
@@ -186,7 +186,7 @@ struct uhci_qh { | |||
186 | * We need a special accessor for the element pointer because it is | 186 | * We need a special accessor for the element pointer because it is |
187 | * subject to asynchronous updates by the controller. | 187 | * subject to asynchronous updates by the controller. |
188 | */ | 188 | */ |
189 | #define qh_element(qh) ACCESS_ONCE((qh)->element) | 189 | #define qh_element(qh) READ_ONCE((qh)->element) |
190 | 190 | ||
191 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ | 191 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ |
192 | cpu_to_hc32((uhci), (qh)->dma_handle)) | 192 | cpu_to_hc32((uhci), (qh)->dma_handle)) |
@@ -274,7 +274,7 @@ struct uhci_td { | |||
274 | * subject to asynchronous updates by the controller. | 274 | * subject to asynchronous updates by the controller. |
275 | */ | 275 | */ |
276 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ | 276 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ |
277 | ACCESS_ONCE((td)->status)) | 277 | READ_ONCE((td)->status)) |
278 | 278 | ||
279 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) | 279 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) |
280 | 280 | ||
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index f5a86f651f38..2bc3705a99bd 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data) | |||
665 | { | 665 | { |
666 | struct vfio_group *group = data; | 666 | struct vfio_group *group = data; |
667 | struct vfio_device *device; | 667 | struct vfio_device *device; |
668 | struct device_driver *drv = ACCESS_ONCE(dev->driver); | 668 | struct device_driver *drv = READ_ONCE(dev->driver); |
669 | struct vfio_unbound_dev *unbound; | 669 | struct vfio_unbound_dev *unbound; |
670 | int ret = -EINVAL; | 670 | int ret = -EINVAL; |
671 | 671 | ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..35e929f132e8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
929 | continue; | 929 | continue; |
930 | } | 930 | } |
931 | 931 | ||
932 | tpg = ACCESS_ONCE(vs_tpg[*target]); | 932 | tpg = READ_ONCE(vs_tpg[*target]); |
933 | if (unlikely(!tpg)) { | 933 | if (unlikely(!tpg)) { |
934 | /* Target does not exist, fail the request */ | 934 | /* Target does not exist, fail the request */ |
935 | vhost_scsi_send_bad_target(vs, vq, head, out); | 935 | vhost_scsi_send_bad_target(vs, vq, head, out); |
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | |||
576 | * actually has a cancel function, hence the cmpxchg() | 576 | * actually has a cancel function, hence the cmpxchg() |
577 | */ | 577 | */ |
578 | 578 | ||
579 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | 579 | cancel = READ_ONCE(kiocb->ki_cancel); |
580 | do { | 580 | do { |
581 | if (!cancel || cancel == KIOCB_CANCELLED) | 581 | if (!cancel || cancel == KIOCB_CANCELLED) |
582 | return -EINVAL; | 582 | return -EINVAL; |
diff --git a/fs/buffer.c b/fs/buffer.c index 170df856bdb9..32ce01f0f95f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1692,7 +1692,8 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode * | |||
1692 | BUG_ON(!PageLocked(page)); | 1692 | BUG_ON(!PageLocked(page)); |
1693 | 1693 | ||
1694 | if (!page_has_buffers(page)) | 1694 | if (!page_has_buffers(page)) |
1695 | create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); | 1695 | create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits), |
1696 | b_state); | ||
1696 | return page_buffers(page); | 1697 | return page_buffers(page); |
1697 | } | 1698 | } |
1698 | 1699 | ||
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 8e704d12a1cf..0083bd4fcaa5 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -373,7 +373,7 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) | |||
373 | struct fscrypt_info *prev; | 373 | struct fscrypt_info *prev; |
374 | 374 | ||
375 | if (ci == NULL) | 375 | if (ci == NULL) |
376 | ci = ACCESS_ONCE(inode->i_crypt_info); | 376 | ci = READ_ONCE(inode->i_crypt_info); |
377 | if (ci == NULL) | 377 | if (ci == NULL) |
378 | return; | 378 | return; |
379 | 379 | ||
diff --git a/fs/direct-io.c b/fs/direct-io.c index b53e66d9abd7..98fe1325da9d 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1152 | get_block_t get_block, dio_iodone_t end_io, | 1152 | get_block_t get_block, dio_iodone_t end_io, |
1153 | dio_submit_t submit_io, int flags) | 1153 | dio_submit_t submit_io, int flags) |
1154 | { | 1154 | { |
1155 | unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); | 1155 | unsigned i_blkbits = READ_ONCE(inode->i_blkbits); |
1156 | unsigned blkbits = i_blkbits; | 1156 | unsigned blkbits = i_blkbits; |
1157 | unsigned blocksize_mask = (1 << blkbits) - 1; | 1157 | unsigned blocksize_mask = (1 << blkbits) - 1; |
1158 | ssize_t retval = -EINVAL; | 1158 | ssize_t retval = -EINVAL; |
@@ -1911,7 +1911,7 @@ void set_dumpable(struct mm_struct *mm, int value) | |||
1911 | return; | 1911 | return; |
1912 | 1912 | ||
1913 | do { | 1913 | do { |
1914 | old = ACCESS_ONCE(mm->flags); | 1914 | old = READ_ONCE(mm->flags); |
1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; | 1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; |
1916 | } while (cmpxchg(&mm->flags, old, new) != old); | 1916 | } while (cmpxchg(&mm->flags, old, new) != old); |
1917 | } | 1917 | } |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 448a1119f0be..57bf2964bb83 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -724,7 +724,7 @@ static void send_sigio_to_task(struct task_struct *p, | |||
724 | * F_SETSIG can change ->signum lockless in parallel, make | 724 | * F_SETSIG can change ->signum lockless in parallel, make |
725 | * sure we read it once and use the same value throughout. | 725 | * sure we read it once and use the same value throughout. |
726 | */ | 726 | */ |
727 | int signum = ACCESS_ONCE(fown->signum); | 727 | int signum = READ_ONCE(fown->signum); |
728 | 728 | ||
729 | if (!sigio_perm(p, fown, signum)) | 729 | if (!sigio_perm(p, fown, signum)) |
730 | return; | 730 | return; |
diff --git a/fs/fs_pin.c b/fs/fs_pin.c index e747b3d720ee..2d07f292b625 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c | |||
@@ -78,7 +78,7 @@ void mnt_pin_kill(struct mount *m) | |||
78 | while (1) { | 78 | while (1) { |
79 | struct hlist_node *p; | 79 | struct hlist_node *p; |
80 | rcu_read_lock(); | 80 | rcu_read_lock(); |
81 | p = ACCESS_ONCE(m->mnt_pins.first); | 81 | p = READ_ONCE(m->mnt_pins.first); |
82 | if (!p) { | 82 | if (!p) { |
83 | rcu_read_unlock(); | 83 | rcu_read_unlock(); |
84 | break; | 84 | break; |
@@ -92,7 +92,7 @@ void group_pin_kill(struct hlist_head *p) | |||
92 | while (1) { | 92 | while (1) { |
93 | struct hlist_node *q; | 93 | struct hlist_node *q; |
94 | rcu_read_lock(); | 94 | rcu_read_lock(); |
95 | q = ACCESS_ONCE(p->first); | 95 | q = READ_ONCE(p->first); |
96 | if (!q) { | 96 | if (!q) { |
97 | rcu_read_unlock(); | 97 | rcu_read_unlock(); |
98 | break; | 98 | break; |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 13c65dd2d37d..a42d89371748 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file) | |||
33 | * Lockless access is OK, because file->private data is set | 33 | * Lockless access is OK, because file->private data is set |
34 | * once during mount and is valid until the file is released. | 34 | * once during mount and is valid until the file is released. |
35 | */ | 35 | */ |
36 | return ACCESS_ONCE(file->private_data); | 36 | return READ_ONCE(file->private_data); |
37 | } | 37 | } |
38 | 38 | ||
39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, | 39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, |
diff --git a/fs/inode.c b/fs/inode.c index d1e35b53bb23..fd401028a309 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags, | |||
2090 | 2090 | ||
2091 | WARN_ON_ONCE(flags & ~mask); | 2091 | WARN_ON_ONCE(flags & ~mask); |
2092 | do { | 2092 | do { |
2093 | old_flags = ACCESS_ONCE(inode->i_flags); | 2093 | old_flags = READ_ONCE(inode->i_flags); |
2094 | new_flags = (old_flags & ~mask) | flags; | 2094 | new_flags = (old_flags & ~mask) | flags; |
2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, | 2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, |
2096 | new_flags) != old_flags)); | 2096 | new_flags) != old_flags)); |
diff --git a/fs/namei.c b/fs/namei.c index c75ea03ca147..40a0f34bf990 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1209,7 +1209,7 @@ static int follow_managed(struct path *path, struct nameidata *nd) | |||
1209 | /* Given that we're not holding a lock here, we retain the value in a | 1209 | /* Given that we're not holding a lock here, we retain the value in a |
1210 | * local variable for each dentry as we look at it so that we don't see | 1210 | * local variable for each dentry as we look at it so that we don't see |
1211 | * the components of that value change under us */ | 1211 | * the components of that value change under us */ |
1212 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1212 | while (managed = READ_ONCE(path->dentry->d_flags), |
1213 | managed &= DCACHE_MANAGED_DENTRY, | 1213 | managed &= DCACHE_MANAGED_DENTRY, |
1214 | unlikely(managed != 0)) { | 1214 | unlikely(managed != 0)) { |
1215 | /* Allow the filesystem to manage the transit without i_mutex | 1215 | /* Allow the filesystem to manage the transit without i_mutex |
@@ -1394,7 +1394,7 @@ int follow_down(struct path *path) | |||
1394 | unsigned managed; | 1394 | unsigned managed; |
1395 | int ret; | 1395 | int ret; |
1396 | 1396 | ||
1397 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1397 | while (managed = READ_ONCE(path->dentry->d_flags), |
1398 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { | 1398 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { |
1399 | /* Allow the filesystem to manage the transit without i_mutex | 1399 | /* Allow the filesystem to manage the transit without i_mutex |
1400 | * being held. | 1400 | * being held. |
diff --git a/fs/namespace.c b/fs/namespace.c index d18deb4c410b..e158ec6b527b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m) | |||
353 | * incremented count after it has set MNT_WRITE_HOLD. | 353 | * incremented count after it has set MNT_WRITE_HOLD. |
354 | */ | 354 | */ |
355 | smp_mb(); | 355 | smp_mb(); |
356 | while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) | 356 | while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) |
357 | cpu_relax(); | 357 | cpu_relax(); |
358 | /* | 358 | /* |
359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will | 359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5ceaeb1f6fb6..f439f1c45008 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1081 | int error; | 1081 | int error; |
1082 | 1082 | ||
1083 | if (flags & LOOKUP_RCU) { | 1083 | if (flags & LOOKUP_RCU) { |
1084 | parent = ACCESS_ONCE(dentry->d_parent); | 1084 | parent = READ_ONCE(dentry->d_parent); |
1085 | dir = d_inode_rcu(parent); | 1085 | dir = d_inode_rcu(parent); |
1086 | if (!dir) | 1086 | if (!dir) |
1087 | return -ECHILD; | 1087 | return -ECHILD; |
@@ -1168,7 +1168,7 @@ out_set_verifier: | |||
1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); | 1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
1169 | out_valid: | 1169 | out_valid: |
1170 | if (flags & LOOKUP_RCU) { | 1170 | if (flags & LOOKUP_RCU) { |
1171 | if (parent != ACCESS_ONCE(dentry->d_parent)) | 1171 | if (parent != READ_ONCE(dentry->d_parent)) |
1172 | return -ECHILD; | 1172 | return -ECHILD; |
1173 | } else | 1173 | } else |
1174 | dput(parent); | 1174 | dput(parent); |
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1582 | struct inode *dir; | 1582 | struct inode *dir; |
1583 | 1583 | ||
1584 | if (flags & LOOKUP_RCU) { | 1584 | if (flags & LOOKUP_RCU) { |
1585 | parent = ACCESS_ONCE(dentry->d_parent); | 1585 | parent = READ_ONCE(dentry->d_parent); |
1586 | dir = d_inode_rcu(parent); | 1586 | dir = d_inode_rcu(parent); |
1587 | if (!dir) | 1587 | if (!dir) |
1588 | return -ECHILD; | 1588 | return -ECHILD; |
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1596 | ret = -ECHILD; | 1596 | ret = -ECHILD; |
1597 | if (!(flags & LOOKUP_RCU)) | 1597 | if (!(flags & LOOKUP_RCU)) |
1598 | dput(parent); | 1598 | dput(parent); |
1599 | else if (parent != ACCESS_ONCE(dentry->d_parent)) | 1599 | else if (parent != READ_ONCE(dentry->d_parent)) |
1600 | return -ECHILD; | 1600 | return -ECHILD; |
1601 | goto out; | 1601 | goto out; |
1602 | } | 1602 | } |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 77a8eacbe032..375e8bf0dd24 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -453,7 +453,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
453 | cutime = sig->cutime; | 453 | cutime = sig->cutime; |
454 | cstime = sig->cstime; | 454 | cstime = sig->cstime; |
455 | cgtime = sig->cgtime; | 455 | cgtime = sig->cgtime; |
456 | rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); | 456 | rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); |
457 | 457 | ||
458 | /* add up live thread stats at the group level */ | 458 | /* add up live thread stats at the group level */ |
459 | if (whole) { | 459 | if (whole) { |
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 99dff222fe67..03afd5150916 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c | |||
@@ -27,7 +27,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) | |||
27 | 27 | ||
28 | poll_wait(file, &p->ns->poll, wait); | 28 | poll_wait(file, &p->ns->poll, wait); |
29 | 29 | ||
30 | event = ACCESS_ONCE(ns->event); | 30 | event = READ_ONCE(ns->event); |
31 | if (m->poll_event != event) { | 31 | if (m->poll_event != event) { |
32 | m->poll_event = event; | 32 | m->poll_event = event; |
33 | res |= POLLERR | POLLPRI; | 33 | res |= POLLERR | POLLPRI; |
diff --git a/fs/splice.c b/fs/splice.c index f3084cce0ea6..39e2dc01ac12 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe); | |||
253 | */ | 253 | */ |
254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) | 254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) |
255 | { | 255 | { |
256 | unsigned int buffers = ACCESS_ONCE(pipe->buffers); | 256 | unsigned int buffers = READ_ONCE(pipe->buffers); |
257 | 257 | ||
258 | spd->nr_pages_max = buffers; | 258 | spd->nr_pages_max = buffers; |
259 | if (buffers <= PIPE_DEF_BUFFERS) | 259 | if (buffers <= PIPE_DEF_BUFFERS) |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1c713fd5b3e6..f46d133c0949 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
381 | * in __get_user_pages if userfaultfd_release waits on the | 381 | * in __get_user_pages if userfaultfd_release waits on the |
382 | * caller of handle_userfault to release the mmap_sem. | 382 | * caller of handle_userfault to release the mmap_sem. |
383 | */ | 383 | */ |
384 | if (unlikely(ACCESS_ONCE(ctx->released))) { | 384 | if (unlikely(READ_ONCE(ctx->released))) { |
385 | /* | 385 | /* |
386 | * Don't return VM_FAULT_SIGBUS in this case, so a non | 386 | * Don't return VM_FAULT_SIGBUS in this case, so a non |
387 | * cooperative manager can close the uffd after the | 387 | * cooperative manager can close the uffd after the |
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
477 | vmf->flags, reason); | 477 | vmf->flags, reason); |
478 | up_read(&mm->mmap_sem); | 478 | up_read(&mm->mmap_sem); |
479 | 479 | ||
480 | if (likely(must_wait && !ACCESS_ONCE(ctx->released) && | 480 | if (likely(must_wait && !READ_ONCE(ctx->released) && |
481 | (return_to_userland ? !signal_pending(current) : | 481 | (return_to_userland ? !signal_pending(current) : |
482 | !fatal_signal_pending(current)))) { | 482 | !fatal_signal_pending(current)))) { |
483 | wake_up_poll(&ctx->fd_wqh, POLLIN); | 483 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
586 | set_current_state(TASK_KILLABLE); | 586 | set_current_state(TASK_KILLABLE); |
587 | if (ewq->msg.event == 0) | 587 | if (ewq->msg.event == 0) |
588 | break; | 588 | break; |
589 | if (ACCESS_ONCE(ctx->released) || | 589 | if (READ_ONCE(ctx->released) || |
590 | fatal_signal_pending(current)) { | 590 | fatal_signal_pending(current)) { |
591 | /* | 591 | /* |
592 | * &ewq->wq may be queued in fork_event, but | 592 | * &ewq->wq may be queued in fork_event, but |
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
833 | struct userfaultfd_wake_range range = { .len = 0, }; | 833 | struct userfaultfd_wake_range range = { .len = 0, }; |
834 | unsigned long new_flags; | 834 | unsigned long new_flags; |
835 | 835 | ||
836 | ACCESS_ONCE(ctx->released) = true; | 836 | WRITE_ONCE(ctx->released, true); |
837 | 837 | ||
838 | if (!mmget_not_zero(mm)) | 838 | if (!mmget_not_zero(mm)) |
839 | goto wakeup; | 839 | goto wakeup; |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 51bf7b827387..129975970d99 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -592,9 +592,9 @@ xlog_valid_lsn( | |||
592 | * a transiently forward state. Instead, we can see the LSN in a | 592 | * a transiently forward state. Instead, we can see the LSN in a |
593 | * transiently behind state if we happen to race with a cycle wrap. | 593 | * transiently behind state if we happen to race with a cycle wrap. |
594 | */ | 594 | */ |
595 | cur_cycle = ACCESS_ONCE(log->l_curr_cycle); | 595 | cur_cycle = READ_ONCE(log->l_curr_cycle); |
596 | smp_rmb(); | 596 | smp_rmb(); |
597 | cur_block = ACCESS_ONCE(log->l_curr_block); | 597 | cur_block = READ_ONCE(log->l_curr_block); |
598 | 598 | ||
599 | if ((CYCLE_LSN(lsn) > cur_cycle) || | 599 | if ((CYCLE_LSN(lsn) > cur_cycle) || |
600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { | 600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 8fbe259b197c..0a7ce668f8e0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -236,7 +236,7 @@ static inline unsigned long __ffs64(u64 word) | |||
236 | typeof(*ptr) old, new; \ | 236 | typeof(*ptr) old, new; \ |
237 | \ | 237 | \ |
238 | do { \ | 238 | do { \ |
239 | old = ACCESS_ONCE(*ptr); \ | 239 | old = READ_ONCE(*ptr); \ |
240 | new = (old & ~mask) | bits; \ | 240 | new = (old & ~mask) | bits; \ |
241 | } while (cmpxchg(ptr, old, new) != old); \ | 241 | } while (cmpxchg(ptr, old, new) != old); \ |
242 | \ | 242 | \ |
@@ -251,7 +251,7 @@ static inline unsigned long __ffs64(u64 word) | |||
251 | typeof(*ptr) old, new; \ | 251 | typeof(*ptr) old, new; \ |
252 | \ | 252 | \ |
253 | do { \ | 253 | do { \ |
254 | old = ACCESS_ONCE(*ptr); \ | 254 | old = READ_ONCE(*ptr); \ |
255 | new = old & ~clear; \ | 255 | new = old & ~clear; \ |
256 | } while (!(old & test) && \ | 256 | } while (!(old & test) && \ |
257 | cmpxchg(ptr, old, new) != old); \ | 257 | cmpxchg(ptr, old, new) != old); \ |
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index a4be70398ce1..36dd4ffb5715 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h | |||
@@ -88,7 +88,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) | |||
88 | /* Returns how many objects can be queued, < 0 indicates over limit. */ | 88 | /* Returns how many objects can be queued, < 0 indicates over limit. */ |
89 | static inline int dql_avail(const struct dql *dql) | 89 | static inline int dql_avail(const struct dql *dql) |
90 | { | 90 | { |
91 | return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); | 91 | return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* Record number of completed objects and recalculate the limit. */ | 94 | /* Record number of completed objects and recalculate the limit. */ |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 14bc21c2ee7f..785a00ca4628 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -221,7 +221,7 @@ extern struct page *huge_zero_page; | |||
221 | 221 | ||
222 | static inline bool is_huge_zero_page(struct page *page) | 222 | static inline bool is_huge_zero_page(struct page *page) |
223 | { | 223 | { |
224 | return ACCESS_ONCE(huge_zero_page) == page; | 224 | return READ_ONCE(huge_zero_page) == page; |
225 | } | 225 | } |
226 | 226 | ||
227 | static inline bool is_huge_zero_pmd(pmd_t pmd) | 227 | static inline bool is_huge_zero_pmd(pmd_t pmd) |
diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 30294603526f..d95cae09dea0 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h | |||
@@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team, | |||
247 | 247 | ||
248 | static inline int team_num_to_port_index(struct team *team, unsigned int num) | 248 | static inline int team_num_to_port_index(struct team *team, unsigned int num) |
249 | { | 249 | { |
250 | int en_port_count = ACCESS_ONCE(team->en_port_count); | 250 | int en_port_count = READ_ONCE(team->en_port_count); |
251 | 251 | ||
252 | if (unlikely(!en_port_count)) | 252 | if (unlikely(!en_port_count)) |
253 | return 0; | 253 | return 0; |
diff --git a/include/linux/llist.h b/include/linux/llist.h index 1957635e6d5f..85abc2915e8d 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h | |||
@@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list) | |||
198 | */ | 198 | */ |
199 | static inline bool llist_empty(const struct llist_head *head) | 199 | static inline bool llist_empty(const struct llist_head *head) |
200 | { | 200 | { |
201 | return ACCESS_ONCE(head->first) == NULL; | 201 | return READ_ONCE(head->first) == NULL; |
202 | } | 202 | } |
203 | 203 | ||
204 | static inline struct llist_node *llist_next(struct llist_node *node) | 204 | static inline struct llist_node *llist_next(struct llist_node *node) |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2efb08a60e63..f0fc4700b6ff 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) | |||
105 | 105 | ||
106 | static inline void pm_runtime_mark_last_busy(struct device *dev) | 106 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
107 | { | 107 | { |
108 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | 108 | WRITE_ONCE(dev->power.last_busy, jiffies); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) | 111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4f4f786255ef..3fadb6f9982b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -983,12 +983,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) | |||
983 | 983 | ||
984 | static inline int sysctl_sync_period(struct netns_ipvs *ipvs) | 984 | static inline int sysctl_sync_period(struct netns_ipvs *ipvs) |
985 | { | 985 | { |
986 | return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); | 986 | return READ_ONCE(ipvs->sysctl_sync_threshold[1]); |
987 | } | 987 | } |
988 | 988 | ||
989 | static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) | 989 | static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) |
990 | { | 990 | { |
991 | return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); | 991 | return READ_ONCE(ipvs->sysctl_sync_refresh_period); |
992 | } | 992 | } |
993 | 993 | ||
994 | static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) | 994 | static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) |
@@ -1013,7 +1013,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) | |||
1013 | 1013 | ||
1014 | static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) | 1014 | static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) |
1015 | { | 1015 | { |
1016 | return ACCESS_ONCE(ipvs->sysctl_sync_ports); | 1016 | return READ_ONCE(ipvs->sysctl_sync_ports); |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) | 1019 | static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) |
diff --git a/kernel/acct.c b/kernel/acct.c index 5e72af29ab73..21eedd0dd81a 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -146,7 +146,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns) | |||
146 | again: | 146 | again: |
147 | smp_rmb(); | 147 | smp_rmb(); |
148 | rcu_read_lock(); | 148 | rcu_read_lock(); |
149 | res = to_acct(ACCESS_ONCE(ns->bacct)); | 149 | res = to_acct(READ_ONCE(ns->bacct)); |
150 | if (!res) { | 150 | if (!res) { |
151 | rcu_read_unlock(); | 151 | rcu_read_unlock(); |
152 | return NULL; | 152 | return NULL; |
@@ -158,7 +158,7 @@ again: | |||
158 | } | 158 | } |
159 | rcu_read_unlock(); | 159 | rcu_read_unlock(); |
160 | mutex_lock(&res->lock); | 160 | mutex_lock(&res->lock); |
161 | if (res != to_acct(ACCESS_ONCE(ns->bacct))) { | 161 | if (res != to_acct(READ_ONCE(ns->bacct))) { |
162 | mutex_unlock(&res->lock); | 162 | mutex_unlock(&res->lock); |
163 | acct_put(res); | 163 | acct_put(res); |
164 | goto again; | 164 | goto again; |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 824a583079a1..8fd2f2d1358a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -1200,7 +1200,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting) | |||
1200 | 1200 | ||
1201 | again: | 1201 | again: |
1202 | rcu_read_lock(); | 1202 | rcu_read_lock(); |
1203 | ctx = ACCESS_ONCE(event->ctx); | 1203 | ctx = READ_ONCE(event->ctx); |
1204 | if (!atomic_inc_not_zero(&ctx->refcount)) { | 1204 | if (!atomic_inc_not_zero(&ctx->refcount)) { |
1205 | rcu_read_unlock(); | 1205 | rcu_read_unlock(); |
1206 | goto again; | 1206 | goto again; |
@@ -5302,8 +5302,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
5302 | if (!rb) | 5302 | if (!rb) |
5303 | goto aux_unlock; | 5303 | goto aux_unlock; |
5304 | 5304 | ||
5305 | aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); | 5305 | aux_offset = READ_ONCE(rb->user_page->aux_offset); |
5306 | aux_size = ACCESS_ONCE(rb->user_page->aux_size); | 5306 | aux_size = READ_ONCE(rb->user_page->aux_size); |
5307 | 5307 | ||
5308 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) | 5308 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) |
5309 | goto aux_unlock; | 5309 | goto aux_unlock; |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index f684d8e5fa2b..f3e37971c842 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -381,7 +381,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, | |||
381 | * (B) <-> (C) ordering is still observed by the pmu driver. | 381 | * (B) <-> (C) ordering is still observed by the pmu driver. |
382 | */ | 382 | */ |
383 | if (!rb->aux_overwrite) { | 383 | if (!rb->aux_overwrite) { |
384 | aux_tail = ACCESS_ONCE(rb->user_page->aux_tail); | 384 | aux_tail = READ_ONCE(rb->user_page->aux_tail); |
385 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; | 385 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; |
386 | if (aux_head - aux_tail < perf_aux_size(rb)) | 386 | if (aux_head - aux_tail < perf_aux_size(rb)) |
387 | handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); | 387 | handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); |
diff --git a/kernel/exit.c b/kernel/exit.c index f6cad39f35df..6b4298a41167 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1339,7 +1339,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
1339 | * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition | 1339 | * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition |
1340 | * can't confuse the checks below. | 1340 | * can't confuse the checks below. |
1341 | */ | 1341 | */ |
1342 | int exit_state = ACCESS_ONCE(p->exit_state); | 1342 | int exit_state = READ_ONCE(p->exit_state); |
1343 | int ret; | 1343 | int ret; |
1344 | 1344 | ||
1345 | if (unlikely(exit_state == EXIT_DEAD)) | 1345 | if (unlikely(exit_state == EXIT_DEAD)) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 81279c6602ff..845f3805c73d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2724,7 +2724,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2724 | * if it happened, we have to fail the write. | 2724 | * if it happened, we have to fail the write. |
2725 | */ | 2725 | */ |
2726 | barrier(); | 2726 | barrier(); |
2727 | if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { | 2727 | if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { |
2728 | local_dec(&cpu_buffer->committing); | 2728 | local_dec(&cpu_buffer->committing); |
2729 | local_dec(&cpu_buffer->commits); | 2729 | local_dec(&cpu_buffer->commits); |
2730 | return NULL; | 2730 | return NULL; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 652c682707cd..9050c8b3ccde 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -1459,7 +1459,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr, | |||
1459 | 1459 | ||
1460 | static inline void *event_file_data(struct file *filp) | 1460 | static inline void *event_file_data(struct file *filp) |
1461 | { | 1461 | { |
1462 | return ACCESS_ONCE(file_inode(filp)->i_private); | 1462 | return READ_ONCE(file_inode(filp)->i_private); |
1463 | } | 1463 | } |
1464 | 1464 | ||
1465 | extern struct mutex event_mutex; | 1465 | extern struct mutex event_mutex; |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 49cb41412eec..780262210c9a 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -77,7 +77,7 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
77 | { | 77 | { |
78 | unsigned long this_size, flags; unsigned long *p, *top, *start; | 78 | unsigned long this_size, flags; unsigned long *p, *top, *start; |
79 | static int tracer_frame; | 79 | static int tracer_frame; |
80 | int frame_size = ACCESS_ONCE(tracer_frame); | 80 | int frame_size = READ_ONCE(tracer_frame); |
81 | int i, x; | 81 | int i, x; |
82 | 82 | ||
83 | this_size = ((unsigned long)stack) & (THREAD_SIZE-1); | 83 | this_size = ((unsigned long)stack) & (THREAD_SIZE-1); |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index c490f1e4313b..d32b45662fb6 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -894,7 +894,7 @@ static bool new_idmap_permitted(const struct file *file, | |||
894 | int proc_setgroups_show(struct seq_file *seq, void *v) | 894 | int proc_setgroups_show(struct seq_file *seq, void *v) |
895 | { | 895 | { |
896 | struct user_namespace *ns = seq->private; | 896 | struct user_namespace *ns = seq->private; |
897 | unsigned long userns_flags = ACCESS_ONCE(ns->flags); | 897 | unsigned long userns_flags = READ_ONCE(ns->flags); |
898 | 898 | ||
899 | seq_printf(seq, "%s\n", | 899 | seq_printf(seq, "%s\n", |
900 | (userns_flags & USERNS_SETGROUPS_ALLOWED) ? | 900 | (userns_flags & USERNS_SETGROUPS_ALLOWED) ? |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 155c55d8db5f..fe7953aead82 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -39,7 +39,7 @@ begin_node: | |||
39 | /* Descend through a shortcut */ | 39 | /* Descend through a shortcut */ |
40 | shortcut = assoc_array_ptr_to_shortcut(cursor); | 40 | shortcut = assoc_array_ptr_to_shortcut(cursor); |
41 | smp_read_barrier_depends(); | 41 | smp_read_barrier_depends(); |
42 | cursor = ACCESS_ONCE(shortcut->next_node); | 42 | cursor = READ_ONCE(shortcut->next_node); |
43 | } | 43 | } |
44 | 44 | ||
45 | node = assoc_array_ptr_to_node(cursor); | 45 | node = assoc_array_ptr_to_node(cursor); |
@@ -55,7 +55,7 @@ begin_node: | |||
55 | */ | 55 | */ |
56 | has_meta = 0; | 56 | has_meta = 0; |
57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
58 | ptr = ACCESS_ONCE(node->slots[slot]); | 58 | ptr = READ_ONCE(node->slots[slot]); |
59 | has_meta |= (unsigned long)ptr; | 59 | has_meta |= (unsigned long)ptr; |
60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
61 | /* We need a barrier between the read of the pointer | 61 | /* We need a barrier between the read of the pointer |
@@ -89,7 +89,7 @@ continue_node: | |||
89 | smp_read_barrier_depends(); | 89 | smp_read_barrier_depends(); |
90 | 90 | ||
91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
92 | ptr = ACCESS_ONCE(node->slots[slot]); | 92 | ptr = READ_ONCE(node->slots[slot]); |
93 | if (assoc_array_ptr_is_meta(ptr)) { | 93 | if (assoc_array_ptr_is_meta(ptr)) { |
94 | cursor = ptr; | 94 | cursor = ptr; |
95 | goto begin_node; | 95 | goto begin_node; |
@@ -98,7 +98,7 @@ continue_node: | |||
98 | 98 | ||
99 | finished_node: | 99 | finished_node: |
100 | /* Move up to the parent (may need to skip back over a shortcut) */ | 100 | /* Move up to the parent (may need to skip back over a shortcut) */ |
101 | parent = ACCESS_ONCE(node->back_pointer); | 101 | parent = READ_ONCE(node->back_pointer); |
102 | slot = node->parent_slot; | 102 | slot = node->parent_slot; |
103 | if (parent == stop) | 103 | if (parent == stop) |
104 | return 0; | 104 | return 0; |
@@ -107,7 +107,7 @@ finished_node: | |||
107 | shortcut = assoc_array_ptr_to_shortcut(parent); | 107 | shortcut = assoc_array_ptr_to_shortcut(parent); |
108 | smp_read_barrier_depends(); | 108 | smp_read_barrier_depends(); |
109 | cursor = parent; | 109 | cursor = parent; |
110 | parent = ACCESS_ONCE(shortcut->back_pointer); | 110 | parent = READ_ONCE(shortcut->back_pointer); |
111 | slot = shortcut->parent_slot; | 111 | slot = shortcut->parent_slot; |
112 | if (parent == stop) | 112 | if (parent == stop) |
113 | return 0; | 113 | return 0; |
@@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array, | |||
147 | void *iterator_data), | 147 | void *iterator_data), |
148 | void *iterator_data) | 148 | void *iterator_data) |
149 | { | 149 | { |
150 | struct assoc_array_ptr *root = ACCESS_ONCE(array->root); | 150 | struct assoc_array_ptr *root = READ_ONCE(array->root); |
151 | 151 | ||
152 | if (!root) | 152 | if (!root) |
153 | return 0; | 153 | return 0; |
@@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array, | |||
194 | 194 | ||
195 | pr_devel("-->%s()\n", __func__); | 195 | pr_devel("-->%s()\n", __func__); |
196 | 196 | ||
197 | cursor = ACCESS_ONCE(array->root); | 197 | cursor = READ_ONCE(array->root); |
198 | if (!cursor) | 198 | if (!cursor) |
199 | return assoc_array_walk_tree_empty; | 199 | return assoc_array_walk_tree_empty; |
200 | 200 | ||
@@ -220,7 +220,7 @@ consider_node: | |||
220 | 220 | ||
221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); | 221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); |
222 | slot &= ASSOC_ARRAY_FAN_MASK; | 222 | slot &= ASSOC_ARRAY_FAN_MASK; |
223 | ptr = ACCESS_ONCE(node->slots[slot]); | 223 | ptr = READ_ONCE(node->slots[slot]); |
224 | 224 | ||
225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", | 225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", |
226 | slot, level, (unsigned long)ptr & 3); | 226 | slot, level, (unsigned long)ptr & 3); |
@@ -294,7 +294,7 @@ follow_shortcut: | |||
294 | } while (sc_level < shortcut->skip_to_level); | 294 | } while (sc_level < shortcut->skip_to_level); |
295 | 295 | ||
296 | /* The shortcut matches the leaf's index to this point. */ | 296 | /* The shortcut matches the leaf's index to this point. */ |
297 | cursor = ACCESS_ONCE(shortcut->next_node); | 297 | cursor = READ_ONCE(shortcut->next_node); |
298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { | 298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { |
299 | level = sc_level; | 299 | level = sc_level; |
300 | goto jumped; | 300 | goto jumped; |
@@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array, | |||
337 | * the terminal node. | 337 | * the terminal node. |
338 | */ | 338 | */ |
339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
340 | ptr = ACCESS_ONCE(node->slots[slot]); | 340 | ptr = READ_ONCE(node->slots[slot]); |
341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
342 | /* We need a barrier between the read of the pointer | 342 | /* We need a barrier between the read of the pointer |
343 | * and dereferencing the pointer - but only if we are | 343 | * and dereferencing the pointer - but only if we are |
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index f346715e2255..81770a55cb16 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c | |||
@@ -20,7 +20,7 @@ void dql_completed(struct dql *dql, unsigned int count) | |||
20 | unsigned int ovlimit, completed, num_queued; | 20 | unsigned int ovlimit, completed, num_queued; |
21 | bool all_prev_completed; | 21 | bool all_prev_completed; |
22 | 22 | ||
23 | num_queued = ACCESS_ONCE(dql->num_queued); | 23 | num_queued = READ_ONCE(dql->num_queued); |
24 | 24 | ||
25 | /* Can't complete more than what's in queue */ | 25 | /* Can't complete more than what's in queue */ |
26 | BUG_ON(count > num_queued - dql->num_completed); | 26 | BUG_ON(count > num_queued - dql->num_completed); |
diff --git a/lib/llist.c b/lib/llist.c index ae5872b1df0c..7062e931a7bb 100644 --- a/lib/llist.c +++ b/lib/llist.c | |||
@@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | |||
41 | struct llist_node *first; | 41 | struct llist_node *first; |
42 | 42 | ||
43 | do { | 43 | do { |
44 | new_last->next = first = ACCESS_ONCE(head->first); | 44 | new_last->next = first = READ_ONCE(head->first); |
45 | } while (cmpxchg(&head->first, first, new_first) != first); | 45 | } while (cmpxchg(&head->first, first, new_first) != first); |
46 | 46 | ||
47 | return !first; | 47 | return !first; |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 86c3385b9eb3..1746bae94d41 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -620,8 +620,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp | |||
620 | 620 | ||
621 | rcu_read_lock(); | 621 | rcu_read_lock(); |
622 | for (i = 0; i < depth; i++, d = p) { | 622 | for (i = 0; i < depth; i++, d = p) { |
623 | p = ACCESS_ONCE(d->d_parent); | 623 | p = READ_ONCE(d->d_parent); |
624 | array[i] = ACCESS_ONCE(d->d_name.name); | 624 | array[i] = READ_ONCE(d->d_name.name); |
625 | if (p == d) { | 625 | if (p == d) { |
626 | if (i) | 626 | if (i) |
627 | array[i] = ""; | 627 | array[i] = ""; |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 269b5df58543..c3bf907a03ee 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -2715,7 +2715,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink, | |||
2715 | struct shrink_control *sc) | 2715 | struct shrink_control *sc) |
2716 | { | 2716 | { |
2717 | struct pglist_data *pgdata = NODE_DATA(sc->nid); | 2717 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
2718 | return ACCESS_ONCE(pgdata->split_queue_len); | 2718 | return READ_ONCE(pgdata->split_queue_len); |
2719 | } | 2719 | } |
2720 | 2720 | ||
2721 | static unsigned long deferred_split_scan(struct shrinker *shrink, | 2721 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
diff --git a/net/core/dev.c b/net/core/dev.c index 11596a302a26..61559ca3980b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3725,7 +3725,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | |||
3725 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | 3725 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
3726 | if (flow_table && flow_id <= flow_table->mask) { | 3726 | if (flow_table && flow_id <= flow_table->mask) { |
3727 | rflow = &flow_table->flows[flow_id]; | 3727 | rflow = &flow_table->flows[flow_id]; |
3728 | cpu = ACCESS_ONCE(rflow->cpu); | 3728 | cpu = READ_ONCE(rflow->cpu); |
3729 | if (rflow->filter == filter_id && cpu < nr_cpu_ids && | 3729 | if (rflow->filter == filter_id && cpu < nr_cpu_ids && |
3730 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - | 3730 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - |
3731 | rflow->last_qtail) < | 3731 | rflow->last_qtail) < |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 6e1e10ff433a..3b2034f6d49d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -3377,7 +3377,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) | |||
3377 | 3377 | ||
3378 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) | 3378 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) |
3379 | { | 3379 | { |
3380 | unsigned int burst = ACCESS_ONCE(pkt_dev->burst); | 3380 | unsigned int burst = READ_ONCE(pkt_dev->burst); |
3381 | struct net_device *odev = pkt_dev->odev; | 3381 | struct net_device *odev = pkt_dev->odev; |
3382 | struct netdev_queue *txq; | 3382 | struct netdev_queue *txq; |
3383 | struct sk_buff *skb; | 3383 | struct sk_buff *skb; |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index af74d0433453..f9597ba26599 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -164,7 +164,7 @@ static void inet_frag_worker(struct work_struct *work) | |||
164 | 164 | ||
165 | local_bh_disable(); | 165 | local_bh_disable(); |
166 | 166 | ||
167 | for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) { | 167 | for (i = READ_ONCE(f->next_bucket); budget; --budget) { |
168 | evicted += inet_evict_bucket(f, &f->hash[i]); | 168 | evicted += inet_evict_bucket(f, &f->hash[i]); |
169 | i = (i + 1) & (INETFRAGS_HASHSZ - 1); | 169 | i = (i + 1) & (INETFRAGS_HASHSZ - 1); |
170 | if (evicted > INETFRAGS_EVICT_MAX) | 170 | if (evicted > INETFRAGS_EVICT_MAX) |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3d9f1c2f81c5..c0864562083b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -495,7 +495,7 @@ u32 ip_idents_reserve(u32 hash, int segs) | |||
495 | { | 495 | { |
496 | u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; | 496 | u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; |
497 | atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; | 497 | atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; |
498 | u32 old = ACCESS_ONCE(*p_tstamp); | 498 | u32 old = READ_ONCE(*p_tstamp); |
499 | u32 now = (u32)jiffies; | 499 | u32 now = (u32)jiffies; |
500 | u32 new, delta = 0; | 500 | u32 new, delta = 0; |
501 | 501 | ||
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0bc9e46a5369..48531da1aba6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1908,7 +1908,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
1908 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) | 1908 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) |
1909 | goto send_now; | 1909 | goto send_now; |
1910 | 1910 | ||
1911 | win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); | 1911 | win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor); |
1912 | if (win_divisor) { | 1912 | if (win_divisor) { |
1913 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); | 1913 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
1914 | 1914 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ebfbccae62fd..02ec9a349303 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1853,7 +1853,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1853 | */ | 1853 | */ |
1854 | 1854 | ||
1855 | /* if we're overly short, let UDP handle it */ | 1855 | /* if we're overly short, let UDP handle it */ |
1856 | encap_rcv = ACCESS_ONCE(up->encap_rcv); | 1856 | encap_rcv = READ_ONCE(up->encap_rcv); |
1857 | if (encap_rcv) { | 1857 | if (encap_rcv) { |
1858 | int ret; | 1858 | int ret; |
1859 | 1859 | ||
@@ -2298,7 +2298,7 @@ void udp_destroy_sock(struct sock *sk) | |||
2298 | unlock_sock_fast(sk, slow); | 2298 | unlock_sock_fast(sk, slow); |
2299 | if (static_key_false(&udp_encap_needed) && up->encap_type) { | 2299 | if (static_key_false(&udp_encap_needed) && up->encap_type) { |
2300 | void (*encap_destroy)(struct sock *sk); | 2300 | void (*encap_destroy)(struct sock *sk); |
2301 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | 2301 | encap_destroy = READ_ONCE(up->encap_destroy); |
2302 | if (encap_destroy) | 2302 | if (encap_destroy) |
2303 | encap_destroy(sk); | 2303 | encap_destroy(sk); |
2304 | } | 2304 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a1c24443cd9e..dab946554157 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -490,7 +490,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
490 | if (!t) | 490 | if (!t) |
491 | goto out; | 491 | goto out; |
492 | 492 | ||
493 | tproto = ACCESS_ONCE(t->parms.proto); | 493 | tproto = READ_ONCE(t->parms.proto); |
494 | if (tproto != ipproto && tproto != 0) | 494 | if (tproto != ipproto && tproto != 0) |
495 | goto out; | 495 | goto out; |
496 | 496 | ||
@@ -899,7 +899,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto, | |||
899 | t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); | 899 | t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); |
900 | 900 | ||
901 | if (t) { | 901 | if (t) { |
902 | u8 tproto = ACCESS_ONCE(t->parms.proto); | 902 | u8 tproto = READ_ONCE(t->parms.proto); |
903 | 903 | ||
904 | if (tproto != ipproto && tproto != 0) | 904 | if (tproto != ipproto && tproto != 0) |
905 | goto drop; | 905 | goto drop; |
@@ -1233,7 +1233,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1233 | 1233 | ||
1234 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1234 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
1235 | 1235 | ||
1236 | tproto = ACCESS_ONCE(t->parms.proto); | 1236 | tproto = READ_ONCE(t->parms.proto); |
1237 | if (tproto != IPPROTO_IPIP && tproto != 0) | 1237 | if (tproto != IPPROTO_IPIP && tproto != 0) |
1238 | return -1; | 1238 | return -1; |
1239 | 1239 | ||
@@ -1303,7 +1303,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1303 | u8 tproto; | 1303 | u8 tproto; |
1304 | int err; | 1304 | int err; |
1305 | 1305 | ||
1306 | tproto = ACCESS_ONCE(t->parms.proto); | 1306 | tproto = READ_ONCE(t->parms.proto); |
1307 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || | 1307 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || |
1308 | ip6_tnl_addr_conflict(t, ipv6h)) | 1308 | ip6_tnl_addr_conflict(t, ipv6h)) |
1309 | return -1; | 1309 | return -1; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 40d7234c27b9..3f30fa313bf2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -606,7 +606,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
606 | */ | 606 | */ |
607 | 607 | ||
608 | /* if we're overly short, let UDP handle it */ | 608 | /* if we're overly short, let UDP handle it */ |
609 | encap_rcv = ACCESS_ONCE(up->encap_rcv); | 609 | encap_rcv = READ_ONCE(up->encap_rcv); |
610 | if (encap_rcv) { | 610 | if (encap_rcv) { |
611 | int ret; | 611 | int ret; |
612 | 612 | ||
@@ -1432,7 +1432,7 @@ void udpv6_destroy_sock(struct sock *sk) | |||
1432 | 1432 | ||
1433 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { | 1433 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { |
1434 | void (*encap_destroy)(struct sock *sk); | 1434 | void (*encap_destroy)(struct sock *sk); |
1435 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | 1435 | encap_destroy = READ_ONCE(up->encap_destroy); |
1436 | if (encap_destroy) | 1436 | if (encap_destroy) |
1437 | encap_destroy(sk); | 1437 | encap_destroy(sk); |
1438 | } | 1438 | } |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index dd3e83328ad5..82cb93f66b9b 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -193,7 +193,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
193 | */ | 193 | */ |
194 | rcv = rcu_dereference(sap->rcv_func); | 194 | rcv = rcu_dereference(sap->rcv_func); |
195 | dest = llc_pdu_type(skb); | 195 | dest = llc_pdu_type(skb); |
196 | sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; | 196 | sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL; |
197 | if (unlikely(!sap_handler)) { | 197 | if (unlikely(!sap_handler)) { |
198 | if (rcv) | 198 | if (rcv) |
199 | rcv(skb, dev, pt, orig_dev); | 199 | rcv(skb, dev, pt, orig_dev); |
@@ -214,7 +214,7 @@ drop: | |||
214 | kfree_skb(skb); | 214 | kfree_skb(skb); |
215 | goto out; | 215 | goto out; |
216 | handle_station: | 216 | handle_station: |
217 | sta_handler = ACCESS_ONCE(llc_station_handler); | 217 | sta_handler = READ_ONCE(llc_station_handler); |
218 | if (!sta_handler) | 218 | if (!sta_handler) |
219 | goto drop; | 219 | goto drop; |
220 | sta_handler(skb); | 220 | sta_handler(skb); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 69615016d5bf..214d2ba02877 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -2008,7 +2008,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, | |||
2008 | 2008 | ||
2009 | static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) | 2009 | static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) |
2010 | { | 2010 | { |
2011 | u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate); | 2011 | u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); |
2012 | 2012 | ||
2013 | if (rate == STA_STATS_RATE_INVALID) | 2013 | if (rate == STA_STATS_RATE_INVALID) |
2014 | return -EINVAL; | 2014 | return -EINVAL; |
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c index d177dd066504..4d748975117d 100644 --- a/net/netlabel/netlabel_calipso.c +++ b/net/netlabel/netlabel_calipso.c | |||
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(netlbl_calipso_ops_register); | |||
393 | 393 | ||
394 | static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void) | 394 | static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void) |
395 | { | 395 | { |
396 | return ACCESS_ONCE(calipso_ops); | 396 | return READ_ONCE(calipso_ops); |
397 | } | 397 | } |
398 | 398 | ||
399 | /** | 399 | /** |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d396cb61a280..eb866647a27a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -14201,7 +14201,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, | |||
14201 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 14201 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
14202 | struct sk_buff *msg; | 14202 | struct sk_buff *msg; |
14203 | void *hdr; | 14203 | void *hdr; |
14204 | u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); | 14204 | u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid); |
14205 | 14205 | ||
14206 | if (!nlportid) | 14206 | if (!nlportid) |
14207 | return false; | 14207 | return false; |
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c index 23ccddb20de1..4210e5c6262e 100644 --- a/sound/firewire/amdtp-am824.c +++ b/sound/firewire/amdtp-am824.c | |||
@@ -247,7 +247,7 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port, | |||
247 | struct amdtp_am824 *p = s->protocol; | 247 | struct amdtp_am824 *p = s->protocol; |
248 | 248 | ||
249 | if (port < p->midi_ports) | 249 | if (port < p->midi_ports) |
250 | ACCESS_ONCE(p->midi[port]) = midi; | 250 | WRITE_ONCE(p->midi[port], midi); |
251 | } | 251 | } |
252 | EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger); | 252 | EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger); |
253 | 253 | ||
@@ -336,7 +336,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe | |||
336 | unsigned int data_blocks, unsigned int *syt) | 336 | unsigned int data_blocks, unsigned int *syt) |
337 | { | 337 | { |
338 | struct amdtp_am824 *p = s->protocol; | 338 | struct amdtp_am824 *p = s->protocol; |
339 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 339 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
340 | unsigned int pcm_frames; | 340 | unsigned int pcm_frames; |
341 | 341 | ||
342 | if (pcm) { | 342 | if (pcm) { |
@@ -357,7 +357,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe | |||
357 | unsigned int data_blocks, unsigned int *syt) | 357 | unsigned int data_blocks, unsigned int *syt) |
358 | { | 358 | { |
359 | struct amdtp_am824 *p = s->protocol; | 359 | struct amdtp_am824 *p = s->protocol; |
360 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 360 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
361 | unsigned int pcm_frames; | 361 | unsigned int pcm_frames; |
362 | 362 | ||
363 | if (pcm) { | 363 | if (pcm) { |
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 3fc581a5ad62..4a1dc145327b 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c | |||
@@ -376,7 +376,7 @@ static void update_pcm_pointers(struct amdtp_stream *s, | |||
376 | ptr = s->pcm_buffer_pointer + frames; | 376 | ptr = s->pcm_buffer_pointer + frames; |
377 | if (ptr >= pcm->runtime->buffer_size) | 377 | if (ptr >= pcm->runtime->buffer_size) |
378 | ptr -= pcm->runtime->buffer_size; | 378 | ptr -= pcm->runtime->buffer_size; |
379 | ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; | 379 | WRITE_ONCE(s->pcm_buffer_pointer, ptr); |
380 | 380 | ||
381 | s->pcm_period_pointer += frames; | 381 | s->pcm_period_pointer += frames; |
382 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { | 382 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { |
@@ -388,7 +388,7 @@ static void update_pcm_pointers(struct amdtp_stream *s, | |||
388 | static void pcm_period_tasklet(unsigned long data) | 388 | static void pcm_period_tasklet(unsigned long data) |
389 | { | 389 | { |
390 | struct amdtp_stream *s = (void *)data; | 390 | struct amdtp_stream *s = (void *)data; |
391 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 391 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
392 | 392 | ||
393 | if (pcm) | 393 | if (pcm) |
394 | snd_pcm_period_elapsed(pcm); | 394 | snd_pcm_period_elapsed(pcm); |
@@ -453,7 +453,7 @@ static int handle_out_packet(struct amdtp_stream *s, | |||
453 | s->data_block_counter = | 453 | s->data_block_counter = |
454 | (s->data_block_counter + data_blocks) & 0xff; | 454 | (s->data_block_counter + data_blocks) & 0xff; |
455 | 455 | ||
456 | buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | | 456 | buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | |
457 | (s->data_block_quadlets << CIP_DBS_SHIFT) | | 457 | (s->data_block_quadlets << CIP_DBS_SHIFT) | |
458 | ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | | 458 | ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | |
459 | s->data_block_counter); | 459 | s->data_block_counter); |
@@ -472,7 +472,7 @@ static int handle_out_packet(struct amdtp_stream *s, | |||
472 | if (queue_out_packet(s, payload_length) < 0) | 472 | if (queue_out_packet(s, payload_length) < 0) |
473 | return -EIO; | 473 | return -EIO; |
474 | 474 | ||
475 | pcm = ACCESS_ONCE(s->pcm); | 475 | pcm = READ_ONCE(s->pcm); |
476 | if (pcm && pcm_frames > 0) | 476 | if (pcm && pcm_frames > 0) |
477 | update_pcm_pointers(s, pcm, pcm_frames); | 477 | update_pcm_pointers(s, pcm, pcm_frames); |
478 | 478 | ||
@@ -504,7 +504,7 @@ static int handle_out_packet_without_header(struct amdtp_stream *s, | |||
504 | if (queue_out_packet(s, payload_length) < 0) | 504 | if (queue_out_packet(s, payload_length) < 0) |
505 | return -EIO; | 505 | return -EIO; |
506 | 506 | ||
507 | pcm = ACCESS_ONCE(s->pcm); | 507 | pcm = READ_ONCE(s->pcm); |
508 | if (pcm && pcm_frames > 0) | 508 | if (pcm && pcm_frames > 0) |
509 | update_pcm_pointers(s, pcm, pcm_frames); | 509 | update_pcm_pointers(s, pcm, pcm_frames); |
510 | 510 | ||
@@ -621,7 +621,7 @@ end: | |||
621 | if (queue_in_packet(s) < 0) | 621 | if (queue_in_packet(s) < 0) |
622 | return -EIO; | 622 | return -EIO; |
623 | 623 | ||
624 | pcm = ACCESS_ONCE(s->pcm); | 624 | pcm = READ_ONCE(s->pcm); |
625 | if (pcm && pcm_frames > 0) | 625 | if (pcm && pcm_frames > 0) |
626 | update_pcm_pointers(s, pcm, pcm_frames); | 626 | update_pcm_pointers(s, pcm, pcm_frames); |
627 | 627 | ||
@@ -649,7 +649,7 @@ static int handle_in_packet_without_header(struct amdtp_stream *s, | |||
649 | if (queue_in_packet(s) < 0) | 649 | if (queue_in_packet(s) < 0) |
650 | return -EIO; | 650 | return -EIO; |
651 | 651 | ||
652 | pcm = ACCESS_ONCE(s->pcm); | 652 | pcm = READ_ONCE(s->pcm); |
653 | if (pcm && pcm_frames > 0) | 653 | if (pcm && pcm_frames > 0) |
654 | update_pcm_pointers(s, pcm, pcm_frames); | 654 | update_pcm_pointers(s, pcm, pcm_frames); |
655 | 655 | ||
@@ -947,7 +947,7 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) | |||
947 | if (!in_interrupt() && amdtp_stream_running(s)) | 947 | if (!in_interrupt() && amdtp_stream_running(s)) |
948 | fw_iso_context_flush_completions(s->context); | 948 | fw_iso_context_flush_completions(s->context); |
949 | 949 | ||
950 | return ACCESS_ONCE(s->pcm_buffer_pointer); | 950 | return READ_ONCE(s->pcm_buffer_pointer); |
951 | } | 951 | } |
952 | EXPORT_SYMBOL(amdtp_stream_pcm_pointer); | 952 | EXPORT_SYMBOL(amdtp_stream_pcm_pointer); |
953 | 953 | ||
@@ -977,9 +977,8 @@ EXPORT_SYMBOL(amdtp_stream_pcm_ack); | |||
977 | void amdtp_stream_update(struct amdtp_stream *s) | 977 | void amdtp_stream_update(struct amdtp_stream *s) |
978 | { | 978 | { |
979 | /* Precomputing. */ | 979 | /* Precomputing. */ |
980 | ACCESS_ONCE(s->source_node_id_field) = | 980 | WRITE_ONCE(s->source_node_id_field, |
981 | (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & | 981 | (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); |
982 | CIP_SID_MASK; | ||
983 | } | 982 | } |
984 | EXPORT_SYMBOL(amdtp_stream_update); | 983 | EXPORT_SYMBOL(amdtp_stream_update); |
985 | 984 | ||
@@ -1022,7 +1021,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s) | |||
1022 | { | 1021 | { |
1023 | struct snd_pcm_substream *pcm; | 1022 | struct snd_pcm_substream *pcm; |
1024 | 1023 | ||
1025 | pcm = ACCESS_ONCE(s->pcm); | 1024 | pcm = READ_ONCE(s->pcm); |
1026 | if (pcm) | 1025 | if (pcm) |
1027 | snd_pcm_stop_xrun(pcm); | 1026 | snd_pcm_stop_xrun(pcm); |
1028 | } | 1027 | } |
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h index ed6eafd10992..f9abd8b07ce6 100644 --- a/sound/firewire/amdtp-stream.h +++ b/sound/firewire/amdtp-stream.h | |||
@@ -220,7 +220,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s) | |||
220 | static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s, | 220 | static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s, |
221 | struct snd_pcm_substream *pcm) | 221 | struct snd_pcm_substream *pcm) |
222 | { | 222 | { |
223 | ACCESS_ONCE(s->pcm) = pcm; | 223 | WRITE_ONCE(s->pcm, pcm); |
224 | } | 224 | } |
225 | 225 | ||
226 | static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) | 226 | static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) |
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c index 1453c34ce99f..4a884a335248 100644 --- a/sound/firewire/digi00x/amdtp-dot.c +++ b/sound/firewire/digi00x/amdtp-dot.c | |||
@@ -327,7 +327,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port, | |||
327 | struct amdtp_dot *p = s->protocol; | 327 | struct amdtp_dot *p = s->protocol; |
328 | 328 | ||
329 | if (port < MAX_MIDI_PORTS) | 329 | if (port < MAX_MIDI_PORTS) |
330 | ACCESS_ONCE(p->midi[port]) = midi; | 330 | WRITE_ONCE(p->midi[port], midi); |
331 | } | 331 | } |
332 | 332 | ||
333 | static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | 333 | static unsigned int process_tx_data_blocks(struct amdtp_stream *s, |
@@ -338,7 +338,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
338 | struct snd_pcm_substream *pcm; | 338 | struct snd_pcm_substream *pcm; |
339 | unsigned int pcm_frames; | 339 | unsigned int pcm_frames; |
340 | 340 | ||
341 | pcm = ACCESS_ONCE(s->pcm); | 341 | pcm = READ_ONCE(s->pcm); |
342 | if (pcm) { | 342 | if (pcm) { |
343 | read_pcm_s32(s, pcm, buffer, data_blocks); | 343 | read_pcm_s32(s, pcm, buffer, data_blocks); |
344 | pcm_frames = data_blocks; | 344 | pcm_frames = data_blocks; |
@@ -359,7 +359,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
359 | struct snd_pcm_substream *pcm; | 359 | struct snd_pcm_substream *pcm; |
360 | unsigned int pcm_frames; | 360 | unsigned int pcm_frames; |
361 | 361 | ||
362 | pcm = ACCESS_ONCE(s->pcm); | 362 | pcm = READ_ONCE(s->pcm); |
363 | if (pcm) { | 363 | if (pcm) { |
364 | write_pcm_s32(s, pcm, buffer, data_blocks); | 364 | write_pcm_s32(s, pcm, buffer, data_blocks); |
365 | pcm_frames = data_blocks; | 365 | pcm_frames = data_blocks; |
diff --git a/sound/firewire/fireface/amdtp-ff.c b/sound/firewire/fireface/amdtp-ff.c index 780da9deb2f0..77c7598b61ab 100644 --- a/sound/firewire/fireface/amdtp-ff.c +++ b/sound/firewire/fireface/amdtp-ff.c | |||
@@ -108,7 +108,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
108 | unsigned int data_blocks, | 108 | unsigned int data_blocks, |
109 | unsigned int *syt) | 109 | unsigned int *syt) |
110 | { | 110 | { |
111 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 111 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
112 | unsigned int pcm_frames; | 112 | unsigned int pcm_frames; |
113 | 113 | ||
114 | if (pcm) { | 114 | if (pcm) { |
@@ -127,7 +127,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
127 | unsigned int data_blocks, | 127 | unsigned int data_blocks, |
128 | unsigned int *syt) | 128 | unsigned int *syt) |
129 | { | 129 | { |
130 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 130 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
131 | unsigned int pcm_frames; | 131 | unsigned int pcm_frames; |
132 | 132 | ||
133 | if (pcm) { | 133 | if (pcm) { |
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c index 949ee56b4e0e..6a49611ee462 100644 --- a/sound/firewire/fireface/ff-midi.c +++ b/sound/firewire/fireface/ff-midi.c | |||
@@ -22,7 +22,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream) | |||
22 | ff->running_status[substream->number] = 0; | 22 | ff->running_status[substream->number] = 0; |
23 | ff->rx_midi_error[substream->number] = false; | 23 | ff->rx_midi_error[substream->number] = false; |
24 | 24 | ||
25 | ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream; | 25 | WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream); |
26 | 26 | ||
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
@@ -38,7 +38,7 @@ static int midi_playback_close(struct snd_rawmidi_substream *substream) | |||
38 | struct snd_ff *ff = substream->rmidi->private_data; | 38 | struct snd_ff *ff = substream->rmidi->private_data; |
39 | 39 | ||
40 | cancel_work_sync(&ff->rx_midi_work[substream->number]); | 40 | cancel_work_sync(&ff->rx_midi_work[substream->number]); |
41 | ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL; | 41 | WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL); |
42 | 42 | ||
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
@@ -52,10 +52,10 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *substream, | |||
52 | spin_lock_irqsave(&ff->lock, flags); | 52 | spin_lock_irqsave(&ff->lock, flags); |
53 | 53 | ||
54 | if (up) | 54 | if (up) |
55 | ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = | 55 | WRITE_ONCE(ff->tx_midi_substreams[substream->number], |
56 | substream; | 56 | substream); |
57 | else | 57 | else |
58 | ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL; | 58 | WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL); |
59 | 59 | ||
60 | spin_unlock_irqrestore(&ff->lock, flags); | 60 | spin_unlock_irqrestore(&ff->lock, flags); |
61 | } | 61 | } |
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c index dd6c8e839647..332b29f8ed75 100644 --- a/sound/firewire/fireface/ff-transaction.c +++ b/sound/firewire/fireface/ff-transaction.c | |||
@@ -12,7 +12,7 @@ static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port, | |||
12 | int rcode) | 12 | int rcode) |
13 | { | 13 | { |
14 | struct snd_rawmidi_substream *substream = | 14 | struct snd_rawmidi_substream *substream = |
15 | ACCESS_ONCE(ff->rx_midi_substreams[port]); | 15 | READ_ONCE(ff->rx_midi_substreams[port]); |
16 | 16 | ||
17 | if (rcode_is_permanent_error(rcode)) { | 17 | if (rcode_is_permanent_error(rcode)) { |
18 | ff->rx_midi_error[port] = true; | 18 | ff->rx_midi_error[port] = true; |
@@ -60,7 +60,7 @@ static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port, | |||
60 | static void transmit_midi_msg(struct snd_ff *ff, unsigned int port) | 60 | static void transmit_midi_msg(struct snd_ff *ff, unsigned int port) |
61 | { | 61 | { |
62 | struct snd_rawmidi_substream *substream = | 62 | struct snd_rawmidi_substream *substream = |
63 | ACCESS_ONCE(ff->rx_midi_substreams[port]); | 63 | READ_ONCE(ff->rx_midi_substreams[port]); |
64 | u8 *buf = (u8 *)ff->msg_buf[port]; | 64 | u8 *buf = (u8 *)ff->msg_buf[port]; |
65 | int i, len; | 65 | int i, len; |
66 | 66 | ||
@@ -159,7 +159,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request, | |||
159 | */ | 159 | */ |
160 | index = (quad >> 8) & 0xff; | 160 | index = (quad >> 8) & 0xff; |
161 | if (index > 0) { | 161 | if (index > 0) { |
162 | substream = ACCESS_ONCE(ff->tx_midi_substreams[0]); | 162 | substream = READ_ONCE(ff->tx_midi_substreams[0]); |
163 | if (substream != NULL) { | 163 | if (substream != NULL) { |
164 | byte = quad & 0xff; | 164 | byte = quad & 0xff; |
165 | snd_rawmidi_receive(substream, &byte, 1); | 165 | snd_rawmidi_receive(substream, &byte, 1); |
@@ -169,7 +169,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request, | |||
169 | /* Message in second port. */ | 169 | /* Message in second port. */ |
170 | index = (quad >> 24) & 0xff; | 170 | index = (quad >> 24) & 0xff; |
171 | if (index > 0) { | 171 | if (index > 0) { |
172 | substream = ACCESS_ONCE(ff->tx_midi_substreams[1]); | 172 | substream = READ_ONCE(ff->tx_midi_substreams[1]); |
173 | if (substream != NULL) { | 173 | if (substream != NULL) { |
174 | byte = (quad >> 16) & 0xff; | 174 | byte = (quad >> 16) & 0xff; |
175 | snd_rawmidi_receive(substream, &byte, 1); | 175 | snd_rawmidi_receive(substream, &byte, 1); |
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c index 5826aa8362f1..46092fa3ff9b 100644 --- a/sound/firewire/isight.c +++ b/sound/firewire/isight.c | |||
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count) | |||
96 | ptr += count; | 96 | ptr += count; |
97 | if (ptr >= runtime->buffer_size) | 97 | if (ptr >= runtime->buffer_size) |
98 | ptr -= runtime->buffer_size; | 98 | ptr -= runtime->buffer_size; |
99 | ACCESS_ONCE(isight->buffer_pointer) = ptr; | 99 | WRITE_ONCE(isight->buffer_pointer, ptr); |
100 | 100 | ||
101 | isight->period_counter += count; | 101 | isight->period_counter += count; |
102 | if (isight->period_counter >= runtime->period_size) { | 102 | if (isight->period_counter >= runtime->period_size) { |
@@ -111,7 +111,7 @@ static void isight_samples(struct isight *isight, | |||
111 | struct snd_pcm_runtime *runtime; | 111 | struct snd_pcm_runtime *runtime; |
112 | unsigned int count1; | 112 | unsigned int count1; |
113 | 113 | ||
114 | if (!ACCESS_ONCE(isight->pcm_running)) | 114 | if (!READ_ONCE(isight->pcm_running)) |
115 | return; | 115 | return; |
116 | 116 | ||
117 | runtime = isight->pcm->runtime; | 117 | runtime = isight->pcm->runtime; |
@@ -131,7 +131,7 @@ static void isight_samples(struct isight *isight, | |||
131 | 131 | ||
132 | static void isight_pcm_abort(struct isight *isight) | 132 | static void isight_pcm_abort(struct isight *isight) |
133 | { | 133 | { |
134 | if (ACCESS_ONCE(isight->pcm_active)) | 134 | if (READ_ONCE(isight->pcm_active)) |
135 | snd_pcm_stop_xrun(isight->pcm); | 135 | snd_pcm_stop_xrun(isight->pcm); |
136 | } | 136 | } |
137 | 137 | ||
@@ -141,7 +141,7 @@ static void isight_dropped_samples(struct isight *isight, unsigned int total) | |||
141 | u32 dropped; | 141 | u32 dropped; |
142 | unsigned int count1; | 142 | unsigned int count1; |
143 | 143 | ||
144 | if (!ACCESS_ONCE(isight->pcm_running)) | 144 | if (!READ_ONCE(isight->pcm_running)) |
145 | return; | 145 | return; |
146 | 146 | ||
147 | runtime = isight->pcm->runtime; | 147 | runtime = isight->pcm->runtime; |
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream, | |||
293 | if (err < 0) | 293 | if (err < 0) |
294 | return err; | 294 | return err; |
295 | 295 | ||
296 | ACCESS_ONCE(isight->pcm_active) = true; | 296 | WRITE_ONCE(isight->pcm_active, true); |
297 | 297 | ||
298 | return 0; | 298 | return 0; |
299 | } | 299 | } |
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream) | |||
331 | { | 331 | { |
332 | struct isight *isight = substream->private_data; | 332 | struct isight *isight = substream->private_data; |
333 | 333 | ||
334 | ACCESS_ONCE(isight->pcm_active) = false; | 334 | WRITE_ONCE(isight->pcm_active, false); |
335 | 335 | ||
336 | mutex_lock(&isight->mutex); | 336 | mutex_lock(&isight->mutex); |
337 | isight_stop_streaming(isight); | 337 | isight_stop_streaming(isight); |
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd) | |||
424 | 424 | ||
425 | switch (cmd) { | 425 | switch (cmd) { |
426 | case SNDRV_PCM_TRIGGER_START: | 426 | case SNDRV_PCM_TRIGGER_START: |
427 | ACCESS_ONCE(isight->pcm_running) = true; | 427 | WRITE_ONCE(isight->pcm_running, true); |
428 | break; | 428 | break; |
429 | case SNDRV_PCM_TRIGGER_STOP: | 429 | case SNDRV_PCM_TRIGGER_STOP: |
430 | ACCESS_ONCE(isight->pcm_running) = false; | 430 | WRITE_ONCE(isight->pcm_running, false); |
431 | break; | 431 | break; |
432 | default: | 432 | default: |
433 | return -EINVAL; | 433 | return -EINVAL; |
@@ -439,7 +439,7 @@ static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream) | |||
439 | { | 439 | { |
440 | struct isight *isight = substream->private_data; | 440 | struct isight *isight = substream->private_data; |
441 | 441 | ||
442 | return ACCESS_ONCE(isight->buffer_pointer); | 442 | return READ_ONCE(isight->buffer_pointer); |
443 | } | 443 | } |
444 | 444 | ||
445 | static int isight_create_pcm(struct isight *isight) | 445 | static int isight_create_pcm(struct isight *isight) |
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c index 96f0091144bb..f0555a24d90e 100644 --- a/sound/firewire/motu/amdtp-motu.c +++ b/sound/firewire/motu/amdtp-motu.c | |||
@@ -310,7 +310,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
310 | if (p->midi_ports) | 310 | if (p->midi_ports) |
311 | read_midi_messages(s, buffer, data_blocks); | 311 | read_midi_messages(s, buffer, data_blocks); |
312 | 312 | ||
313 | pcm = ACCESS_ONCE(s->pcm); | 313 | pcm = READ_ONCE(s->pcm); |
314 | if (data_blocks > 0 && pcm) | 314 | if (data_blocks > 0 && pcm) |
315 | read_pcm_s32(s, pcm->runtime, buffer, data_blocks); | 315 | read_pcm_s32(s, pcm->runtime, buffer, data_blocks); |
316 | 316 | ||
@@ -374,7 +374,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
374 | if (p->midi_ports) | 374 | if (p->midi_ports) |
375 | write_midi_messages(s, buffer, data_blocks); | 375 | write_midi_messages(s, buffer, data_blocks); |
376 | 376 | ||
377 | pcm = ACCESS_ONCE(s->pcm); | 377 | pcm = READ_ONCE(s->pcm); |
378 | if (pcm) | 378 | if (pcm) |
379 | write_pcm_s32(s, pcm->runtime, buffer, data_blocks); | 379 | write_pcm_s32(s, pcm->runtime, buffer, data_blocks); |
380 | else | 380 | else |
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c index 02d595665898..f33497cdc706 100644 --- a/sound/firewire/oxfw/oxfw-scs1x.c +++ b/sound/firewire/oxfw/oxfw-scs1x.c | |||
@@ -112,7 +112,7 @@ static void handle_hss(struct fw_card *card, struct fw_request *request, | |||
112 | } | 112 | } |
113 | 113 | ||
114 | if (length >= 1) { | 114 | if (length >= 1) { |
115 | stream = ACCESS_ONCE(scs->input); | 115 | stream = READ_ONCE(scs->input); |
116 | if (stream) | 116 | if (stream) |
117 | midi_input_packet(scs, stream, data, length); | 117 | midi_input_packet(scs, stream, data, length); |
118 | } | 118 | } |
@@ -183,7 +183,7 @@ static void scs_output_work(struct work_struct *work) | |||
183 | if (scs->transaction_running) | 183 | if (scs->transaction_running) |
184 | return; | 184 | return; |
185 | 185 | ||
186 | stream = ACCESS_ONCE(scs->output); | 186 | stream = READ_ONCE(scs->output); |
187 | if (!stream || scs->error) { | 187 | if (!stream || scs->error) { |
188 | scs->output_idle = true; | 188 | scs->output_idle = true; |
189 | wake_up(&scs->idle_wait); | 189 | wake_up(&scs->idle_wait); |
@@ -291,9 +291,9 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *stream, int up) | |||
291 | 291 | ||
292 | if (up) { | 292 | if (up) { |
293 | scs->input_escape_count = 0; | 293 | scs->input_escape_count = 0; |
294 | ACCESS_ONCE(scs->input) = stream; | 294 | WRITE_ONCE(scs->input, stream); |
295 | } else { | 295 | } else { |
296 | ACCESS_ONCE(scs->input) = NULL; | 296 | WRITE_ONCE(scs->input, NULL); |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
@@ -319,10 +319,10 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *stream, int up) | |||
319 | scs->transaction_bytes = 0; | 319 | scs->transaction_bytes = 0; |
320 | scs->error = false; | 320 | scs->error = false; |
321 | 321 | ||
322 | ACCESS_ONCE(scs->output) = stream; | 322 | WRITE_ONCE(scs->output, stream); |
323 | schedule_work(&scs->work); | 323 | schedule_work(&scs->work); |
324 | } else { | 324 | } else { |
325 | ACCESS_ONCE(scs->output) = NULL; | 325 | WRITE_ONCE(scs->output, NULL); |
326 | } | 326 | } |
327 | } | 327 | } |
328 | static void midi_playback_drain(struct snd_rawmidi_substream *stream) | 328 | static void midi_playback_drain(struct snd_rawmidi_substream *stream) |
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c index 6aff1fc1c72d..ab482423c165 100644 --- a/sound/firewire/tascam/amdtp-tascam.c +++ b/sound/firewire/tascam/amdtp-tascam.c | |||
@@ -124,7 +124,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
124 | { | 124 | { |
125 | struct snd_pcm_substream *pcm; | 125 | struct snd_pcm_substream *pcm; |
126 | 126 | ||
127 | pcm = ACCESS_ONCE(s->pcm); | 127 | pcm = READ_ONCE(s->pcm); |
128 | if (data_blocks > 0 && pcm) | 128 | if (data_blocks > 0 && pcm) |
129 | read_pcm_s32(s, pcm, buffer, data_blocks); | 129 | read_pcm_s32(s, pcm, buffer, data_blocks); |
130 | 130 | ||
@@ -143,7 +143,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
143 | /* This field is not used. */ | 143 | /* This field is not used. */ |
144 | *syt = 0x0000; | 144 | *syt = 0x0000; |
145 | 145 | ||
146 | pcm = ACCESS_ONCE(s->pcm); | 146 | pcm = READ_ONCE(s->pcm); |
147 | if (pcm) | 147 | if (pcm) |
148 | write_pcm_s32(s, pcm, buffer, data_blocks); | 148 | write_pcm_s32(s, pcm, buffer, data_blocks); |
149 | else | 149 | else |
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c index 8967c52f5032..2ad692dd4b13 100644 --- a/sound/firewire/tascam/tascam-transaction.c +++ b/sound/firewire/tascam/tascam-transaction.c | |||
@@ -148,7 +148,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode, | |||
148 | void *callback_data) | 148 | void *callback_data) |
149 | { | 149 | { |
150 | struct snd_fw_async_midi_port *port = callback_data; | 150 | struct snd_fw_async_midi_port *port = callback_data; |
151 | struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream); | 151 | struct snd_rawmidi_substream *substream = READ_ONCE(port->substream); |
152 | 152 | ||
153 | /* This port is closed. */ | 153 | /* This port is closed. */ |
154 | if (substream == NULL) | 154 | if (substream == NULL) |
@@ -173,7 +173,7 @@ static void midi_port_work(struct work_struct *work) | |||
173 | { | 173 | { |
174 | struct snd_fw_async_midi_port *port = | 174 | struct snd_fw_async_midi_port *port = |
175 | container_of(work, struct snd_fw_async_midi_port, work); | 175 | container_of(work, struct snd_fw_async_midi_port, work); |
176 | struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream); | 176 | struct snd_rawmidi_substream *substream = READ_ONCE(port->substream); |
177 | int generation; | 177 | int generation; |
178 | 178 | ||
179 | /* Under transacting or error state. */ | 179 | /* Under transacting or error state. */ |
@@ -282,7 +282,7 @@ static void handle_midi_tx(struct fw_card *card, struct fw_request *request, | |||
282 | bytes = 3; | 282 | bytes = 3; |
283 | } | 283 | } |
284 | 284 | ||
285 | substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]); | 285 | substream = READ_ONCE(tscm->tx_midi_substreams[port]); |
286 | if (substream != NULL) | 286 | if (substream != NULL) |
287 | snd_rawmidi_receive(substream, b + 1, bytes); | 287 | snd_rawmidi_receive(substream, b + 1, bytes); |
288 | } | 288 | } |
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c index 8382ffa3bcaf..2472144b329e 100644 --- a/sound/soc/xtensa/xtfpga-i2s.c +++ b/sound/soc/xtensa/xtfpga-i2s.c | |||
@@ -165,7 +165,7 @@ static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s) | |||
165 | tx_substream = rcu_dereference(i2s->tx_substream); | 165 | tx_substream = rcu_dereference(i2s->tx_substream); |
166 | tx_active = tx_substream && snd_pcm_running(tx_substream); | 166 | tx_active = tx_substream && snd_pcm_running(tx_substream); |
167 | if (tx_active) { | 167 | if (tx_active) { |
168 | unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr); | 168 | unsigned tx_ptr = READ_ONCE(i2s->tx_ptr); |
169 | unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime, | 169 | unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime, |
170 | tx_ptr); | 170 | tx_ptr); |
171 | 171 | ||
@@ -437,7 +437,7 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
437 | case SNDRV_PCM_TRIGGER_START: | 437 | case SNDRV_PCM_TRIGGER_START: |
438 | case SNDRV_PCM_TRIGGER_RESUME: | 438 | case SNDRV_PCM_TRIGGER_RESUME: |
439 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 439 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
440 | ACCESS_ONCE(i2s->tx_ptr) = 0; | 440 | WRITE_ONCE(i2s->tx_ptr, 0); |
441 | rcu_assign_pointer(i2s->tx_substream, substream); | 441 | rcu_assign_pointer(i2s->tx_substream, substream); |
442 | xtfpga_pcm_refill_fifo(i2s); | 442 | xtfpga_pcm_refill_fifo(i2s); |
443 | break; | 443 | break; |
@@ -459,7 +459,7 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream) | |||
459 | { | 459 | { |
460 | struct snd_pcm_runtime *runtime = substream->runtime; | 460 | struct snd_pcm_runtime *runtime = substream->runtime; |
461 | struct xtfpga_i2s *i2s = runtime->private_data; | 461 | struct xtfpga_i2s *i2s = runtime->private_data; |
462 | snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr); | 462 | snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr); |
463 | 463 | ||
464 | return pos < runtime->buffer_size ? pos : 0; | 464 | return pos < runtime->buffer_size ? pos : 0; |
465 | } | 465 | } |
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c index 7371e5b06035..fc579f330601 100644 --- a/sound/usb/bcd2000/bcd2000.c +++ b/sound/usb/bcd2000/bcd2000.c | |||
@@ -108,7 +108,7 @@ static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k, | |||
108 | unsigned int payload_length, tocopy; | 108 | unsigned int payload_length, tocopy; |
109 | struct snd_rawmidi_substream *midi_receive_substream; | 109 | struct snd_rawmidi_substream *midi_receive_substream; |
110 | 110 | ||
111 | midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream); | 111 | midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream); |
112 | if (!midi_receive_substream) | 112 | if (!midi_receive_substream) |
113 | return; | 113 | return; |
114 | 114 | ||
@@ -139,7 +139,7 @@ static void bcd2000_midi_send(struct bcd2000 *bcd2k) | |||
139 | 139 | ||
140 | BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); | 140 | BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); |
141 | 141 | ||
142 | midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream); | 142 | midi_out_substream = READ_ONCE(bcd2k->midi_out_substream); |
143 | if (!midi_out_substream) | 143 | if (!midi_out_substream) |
144 | return; | 144 | return; |
145 | 145 | ||
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h index 328eeceec709..96e2d06cb031 100644 --- a/tools/arch/x86/include/asm/atomic.h +++ b/tools/arch/x86/include/asm/atomic.h | |||
@@ -24,7 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | static inline int atomic_read(const atomic_t *v) | 25 | static inline int atomic_read(const atomic_t *v) |
26 | { | 26 | { |
27 | return ACCESS_ONCE((v)->counter); | 27 | return READ_ONCE((v)->counter); |
28 | } | 28 | } |
29 | 29 | ||
30 | /** | 30 | /** |
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h index 5e9738f97bf3..97427e700e3b 100644 --- a/tools/include/asm-generic/atomic-gcc.h +++ b/tools/include/asm-generic/atomic-gcc.h | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | static inline int atomic_read(const atomic_t *v) | 22 | static inline int atomic_read(const atomic_t *v) |
23 | { | 23 | { |
24 | return ACCESS_ONCE((v)->counter); | 24 | return READ_ONCE((v)->counter); |
25 | } | 25 | } |
26 | 26 | ||
27 | /** | 27 | /** |
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index 33b5e6cdf38c..d19e11b68de7 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h | |||
@@ -378,7 +378,7 @@ struct addr_filters { | |||
378 | static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) | 378 | static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) |
379 | { | 379 | { |
380 | struct perf_event_mmap_page *pc = mm->userpg; | 380 | struct perf_event_mmap_page *pc = mm->userpg; |
381 | u64 head = ACCESS_ONCE(pc->aux_head); | 381 | u64 head = READ_ONCE(pc->aux_head); |
382 | 382 | ||
383 | /* Ensure all reads are done after we read the head */ | 383 | /* Ensure all reads are done after we read the head */ |
384 | rmb(); | 384 | rmb(); |
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm) | |||
389 | { | 389 | { |
390 | struct perf_event_mmap_page *pc = mm->userpg; | 390 | struct perf_event_mmap_page *pc = mm->userpg; |
391 | #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) | 391 | #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) |
392 | u64 head = ACCESS_ONCE(pc->aux_head); | 392 | u64 head = READ_ONCE(pc->aux_head); |
393 | #else | 393 | #else |
394 | u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); | 394 | u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); |
395 | #endif | 395 | #endif |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 47b5e7dbcb18..aae9645c7122 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -113,7 +113,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session, | |||
113 | 113 | ||
114 | extern volatile int session_done; | 114 | extern volatile int session_done; |
115 | 115 | ||
116 | #define session_done() ACCESS_ONCE(session_done) | 116 | #define session_done() READ_ONCE(session_done) |
117 | 117 | ||
118 | int perf_session__deliver_synth_event(struct perf_session *session, | 118 | int perf_session__deliver_synth_event(struct perf_session *session, |
119 | union perf_event *event, | 119 | union perf_event *event, |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9deb5a245b83..ce507ae1d4f5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -2302,7 +2302,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) | |||
2302 | continue; | 2302 | continue; |
2303 | } else if (pass && i > last_boosted_vcpu) | 2303 | } else if (pass && i > last_boosted_vcpu) |
2304 | break; | 2304 | break; |
2305 | if (!ACCESS_ONCE(vcpu->preempted)) | 2305 | if (!READ_ONCE(vcpu->preempted)) |
2306 | continue; | 2306 | continue; |
2307 | if (vcpu == me) | 2307 | if (vcpu == me) |
2308 | continue; | 2308 | continue; |