author		Linus Torvalds <torvalds@linux-foundation.org>		2017-11-13 15:38:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2017-11-13 15:38:26 -0500
commit		8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree		a4ba543649219cbb28d91aab65b785d763f5d069
parent		6098850e7e6978f95a958f79a645a653228d0002 (diff)
parent		450cbdd0125cfa5d7bbf9e2a6b6961cc48d29730 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
"The main changes in this cycle are:
- Another attempt at enabling cross-release lockdep dependency
tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
with better performance and fewer false positives. (Byungchul Park)
- Introduce lockdep_assert_irqs_enabled()/disabled() and convert
open-coded equivalents to lockdep variants. (Frederic Weisbecker)
- Add down_read_killable() and use it in the VFS's iterate_dir()
method. (Kirill Tkhai)
- Convert remaining uses of ACCESS_ONCE() to
READ_ONCE()/WRITE_ONCE(). Most of the conversion was Coccinelle
driven. (Mark Rutland, Paul E. McKenney)
- Get rid of lockless_dereference(), by strengthening Alpha atomics,
strengthening READ_ONCE() with smp_read_barrier_depends() and thus
being able to convert users of lockless_dereference() to
READ_ONCE(). (Will Deacon)
- Various micro-optimizations:
- better PV qspinlocks (Waiman Long),
- better x86 barriers (Michael S. Tsirkin)
- better x86 refcounts (Kees Cook)
- ... plus other fixes and enhancements. (Borislav Petkov, Juergen
Gross, Miguel Bernal Marin)"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
rcu: Use lockdep to assert IRQs are disabled/enabled
netpoll: Use lockdep to assert IRQs are disabled/enabled
timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
irq_work: Use lockdep to assert IRQs are disabled/enabled
irq/timings: Use lockdep to assert IRQs are disabled/enabled
perf/core: Use lockdep to assert IRQs are disabled/enabled
x86: Use lockdep to assert IRQs are disabled/enabled
smp/core: Use lockdep to assert IRQs are disabled/enabled
timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
timers/nohz: Use lockdep to assert IRQs are disabled/enabled
workqueue: Use lockdep to assert IRQs are disabled/enabled
irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
locking/pvqspinlock: Implement hybrid PV queued/unfair locks
locking/rwlocks: Fix comments
x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
...
307 files changed, 1252 insertions, 1672 deletions
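The bulk of those 307 files is the mechanical ACCESS_ONCE() -> READ_ONCE()/WRITE_ONCE() conversion described above. A minimal userspace sketch of the before/after pattern follows; the macro definitions are simplified stand-ins for the kernel's (the in-tree versions live in include/linux/compiler.h and handle more cases), and the variables are purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros, for illustration only. */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned int seq_count;
static bool abort_flag;

int main(void)
{
	/* Old style:   seq = ACCESS_ONCE(seq_count);
	 *              ACCESS_ONCE(abort_flag) = true;
	 * New style, as used throughout the patches below: */
	unsigned int seq = READ_ONCE(seq_count);

	WRITE_ONCE(abort_flag, true);
	printf("seq=%u abort=%d\n", seq, abort_flag);
	return 0;
}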
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 4af4dd46cbd9..116e798b61e6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -709,6 +709,9 @@ | |||
709 | It will be ignored when crashkernel=X,high is not used | 709 | It will be ignored when crashkernel=X,high is not used |
710 | or memory reserved is below 4G. | 710 | or memory reserved is below 4G. |
711 | 711 | ||
712 | crossrelease_fullstack | ||
713 | [KNL] Allow to record full stack trace in cross-release | ||
714 | |||
712 | cryptomgr.notests | 715 | cryptomgr.notests |
713 | [KNL] Disable crypto self-tests | 716 | [KNL] Disable crypto self-tests |
714 | 717 | ||
diff --git a/Documentation/filesystems/path-lookup.md b/Documentation/filesystems/path-lookup.md
index 1b39e084a2b2..1933ef734e63 100644
--- a/Documentation/filesystems/path-lookup.md
+++ b/Documentation/filesystems/path-lookup.md
@@ -826,9 +826,9 @@ If the filesystem may need to revalidate dcache entries, then | |||
826 | *is* passed the dentry but does not have access to the `inode` or the | 826 | *is* passed the dentry but does not have access to the `inode` or the |
827 | `seq` number from the `nameidata`, so it needs to be extra careful | 827 | `seq` number from the `nameidata`, so it needs to be extra careful |
828 | when accessing fields in the dentry. This "extra care" typically | 828 | when accessing fields in the dentry. This "extra care" typically |
829 | involves using `ACCESS_ONCE()` or the newer [`READ_ONCE()`] to access | 829 | involves using [`READ_ONCE()`] to access fields, and verifying the |
830 | fields, and verifying the result is not NULL before using it. This | 830 | result is not NULL before using it. This pattern can be seen in |
831 | pattern can be see in `nfs_lookup_revalidate()`. | 831 | `nfs_lookup_revalidate()`. |
832 | 832 | ||
833 | A pair of patterns | 833 | A pair of patterns |
834 | ------------------ | 834 | ------------------ |
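To make the pattern the document describes concrete, here is a small self-contained sketch: read the field once with READ_ONCE(), check it for NULL, and only then use it. The struct definitions and the helper name are toy stand-ins rather than the actual nfs_lookup_revalidate() code, and READ_ONCE() is a simplified userspace approximation:

#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Toy stand-ins for the kernel structures discussed above. */
struct inode  { unsigned long i_ino; };
struct dentry { struct inode *d_inode; };

/*
 * Fetch the pointer once, verify it is not NULL, then use it.  A second,
 * independent read of d_inode could observe a different value during a
 * concurrent rename/unlink, which is what the single READ_ONCE() avoids.
 */
static unsigned long dentry_ino_or_zero(const struct dentry *dentry)
{
	struct inode *inode = READ_ONCE(dentry->d_inode);

	if (!inode)
		return 0;
	return inode->i_ino;
}

int main(void)
{
	struct inode inode = { .i_ino = 42 };
	struct dentry dentry = { .d_inode = &inode };

	printf("%lu\n", dentry_ino_or_zero(&dentry));
	return 0;
}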
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 519940ec767f..479ecec80593 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1880,18 +1880,6 @@ There are some more advanced barrier functions: | |||
1880 | See Documentation/atomic_{t,bitops}.txt for more information. | 1880 | See Documentation/atomic_{t,bitops}.txt for more information. |
1881 | 1881 | ||
1882 | 1882 | ||
1883 | (*) lockless_dereference(); | ||
1884 | |||
1885 | This can be thought of as a pointer-fetch wrapper around the | ||
1886 | smp_read_barrier_depends() data-dependency barrier. | ||
1887 | |||
1888 | This is also similar to rcu_dereference(), but in cases where | ||
1889 | object lifetime is handled by some mechanism other than RCU, for | ||
1890 | example, when the objects removed only when the system goes down. | ||
1891 | In addition, lockless_dereference() is used in some data structures | ||
1892 | that can be used both with and without RCU. | ||
1893 | |||
1894 | |||
1895 | (*) dma_wmb(); | 1883 | (*) dma_wmb(); |
1896 | (*) dma_rmb(); | 1884 | (*) dma_rmb(); |
1897 | 1885 | ||
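The conversion this removal enables is straightforward on the consumer side. A hedged userspace sketch (the macros are simplified stand-ins; in the kernel, smp_read_barrier_depends() is a no-op everywhere except Alpha):

#include <stdio.h>

#define READ_ONCE(x)                 (*(const volatile __typeof__(x) *)&(x))
#define smp_read_barrier_depends()   do { } while (0)  /* no-op outside Alpha */

static int payload = 1;
static int *shared_ptr = &payload;

int main(void)
{
	int *p;

	/* What lockless_dereference(shared_ptr) used to expand to: */
	p = READ_ONCE(shared_ptr);
	smp_read_barrier_depends();
	printf("%d\n", *p);

	/*
	 * After this series, READ_ONCE() itself provides the dependency
	 * ordering, so callers simply write:
	 *
	 *	p = READ_ONCE(shared_ptr);
	 */
	return 0;
}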
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index a7a813258013..ec3b46e27b7a 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1858,18 +1858,6 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효과 | |||
1858 | 참고하세요. | 1858 | 참고하세요. |
1859 | 1859 | ||
1860 | 1860 | ||
1861 | (*) lockless_dereference(); | ||
1862 | |||
1863 | 이 함수는 smp_read_barrier_depends() 데이터 의존성 배리어를 사용하는 | ||
1864 | 포인터 읽어오기 래퍼(wrapper) 함수로 생각될 수 있습니다. | ||
1865 | |||
1866 | 객체의 라이프타임이 RCU 외의 메커니즘으로 관리된다는 점을 제외하면 | ||
1867 | rcu_dereference() 와도 유사한데, 예를 들면 객체가 시스템이 꺼질 때에만 | ||
1868 | 제거되는 경우 등입니다. 또한, lockless_dereference() 은 RCU 와 함께 | ||
1869 | 사용될 수도, RCU 없이 사용될 수도 있는 일부 데이터 구조에 사용되고 | ||
1870 | 있습니다. | ||
1871 | |||
1872 | |||
1873 | (*) dma_wmb(); | 1861 | (*) dma_wmb(); |
1874 | (*) dma_rmb(); | 1862 | (*) dma_rmb(); |
1875 | 1863 | ||
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@ | |||
14 | * than regular operations. | 14 | * than regular operations. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | /* | ||
18 | * To ensure dependency ordering is preserved for the _relaxed and | ||
19 | * _release atomics, an smp_read_barrier_depends() is unconditionally | ||
20 | * inserted into the _relaxed variants, which are used to build the | ||
21 | * barriered versions. To avoid redundant back-to-back fences, we can | ||
22 | * define the _acquire and _fence versions explicitly. | ||
23 | */ | ||
24 | #define __atomic_op_acquire(op, args...) op##_relaxed(args) | ||
25 | #define __atomic_op_fence __atomic_op_release | ||
17 | 26 | ||
18 | #define ATOMIC_INIT(i) { (i) } | 27 | #define ATOMIC_INIT(i) { (i) } |
19 | #define ATOMIC64_INIT(i) { (i) } | 28 | #define ATOMIC64_INIT(i) { (i) } |
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ | |||
61 | ".previous" \ | 70 | ".previous" \ |
62 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 71 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
63 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 72 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
73 | smp_read_barrier_depends(); \ | ||
64 | return result; \ | 74 | return result; \ |
65 | } | 75 | } |
66 | 76 | ||
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ | |||
78 | ".previous" \ | 88 | ".previous" \ |
79 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 89 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
80 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 90 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
91 | smp_read_barrier_depends(); \ | ||
81 | return result; \ | 92 | return result; \ |
82 | } | 93 | } |
83 | 94 | ||
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ | |||
112 | ".previous" \ | 123 | ".previous" \ |
113 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 124 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
114 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 125 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
126 | smp_read_barrier_depends(); \ | ||
115 | return result; \ | 127 | return result; \ |
116 | } | 128 | } |
117 | 129 | ||
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ | |||
129 | ".previous" \ | 141 | ".previous" \ |
130 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 142 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
131 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 143 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
144 | smp_read_barrier_depends(); \ | ||
132 | return result; \ | 145 | return result; \ |
133 | } | 146 | } |
134 | 147 | ||
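The comment added at the top of the file is the key to the two one-line overrides: because every _relaxed operation above now ends in smp_read_barrier_depends(), the acquire form can simply be the relaxed form. Below is a hedged, compilable sketch of how such an override is consumed; the generic-layer wiring is simplified from what include/linux/atomic.h actually does:

#include <stdio.h>

/* Stand-in for the Alpha atomic_add_return_relaxed() above, which
 * already ends with smp_read_barrier_depends(). */
static int atomic_add_return_relaxed(int i, int *v) { return *v += i; }

/* The override added by this patch: acquire is just the relaxed op. */
#define __atomic_op_acquire(op, args...)	op##_relaxed(args)

/* Simplified version of how the generic layer builds the acquire variant. */
#define atomic_add_return_acquire(...) \
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)

int main(void)
{
	int v = 1;

	printf("%d\n", atomic_add_return_acquire(2, &v));	/* prints 3 */
	return 0;
}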
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@ | |||
22 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 22 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
23 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 23 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
24 | 24 | ||
25 | static inline void __down_read(struct rw_semaphore *sem) | 25 | static inline int ___down_read(struct rw_semaphore *sem) |
26 | { | 26 | { |
27 | long oldcount; | 27 | long oldcount; |
28 | #ifndef CONFIG_SMP | 28 | #ifndef CONFIG_SMP |
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
42 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) | 42 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) |
43 | :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); | 43 | :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); |
44 | #endif | 44 | #endif |
45 | if (unlikely(oldcount < 0)) | 45 | return (oldcount < 0); |
46 | } | ||
47 | |||
48 | static inline void __down_read(struct rw_semaphore *sem) | ||
49 | { | ||
50 | if (unlikely(___down_read(sem))) | ||
46 | rwsem_down_read_failed(sem); | 51 | rwsem_down_read_failed(sem); |
47 | } | 52 | } |
48 | 53 | ||
54 | static inline int __down_read_killable(struct rw_semaphore *sem) | ||
55 | { | ||
56 | if (unlikely(___down_read(sem))) | ||
57 | if (IS_ERR(rwsem_down_read_failed_killable(sem))) | ||
58 | return -EINTR; | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
49 | /* | 63 | /* |
50 | * trylock for reading -- returns 1 if successful, 0 if contention | 64 | * trylock for reading -- returns 1 if successful, 0 if contention |
51 | */ | 65 | */ |
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem) | |||
95 | 109 | ||
96 | static inline int __down_write_killable(struct rw_semaphore *sem) | 110 | static inline int __down_write_killable(struct rw_semaphore *sem) |
97 | { | 111 | { |
98 | if (unlikely(___down_write(sem))) | 112 | if (unlikely(___down_write(sem))) { |
99 | if (IS_ERR(rwsem_down_write_failed_killable(sem))) | 113 | if (IS_ERR(rwsem_down_write_failed_killable(sem))) |
100 | return -EINTR; | 114 | return -EINTR; |
115 | } | ||
101 | 116 | ||
102 | return 0; | 117 | return 0; |
103 | } | 118 | } |
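__down_read_killable() is the building block for the new down_read_killable() API that this series then uses in the VFS's iterate_dir(). A hedged sketch of the caller-side pattern (kernel-style code; the function name and the critical section are illustrative, not the literal fs/readdir.c change):

#include <linux/rwsem.h>

int read_something_locked(struct rw_semaphore *sem)
{
	int res = down_read_killable(sem);

	if (res)	/* -EINTR: a fatal signal arrived while we slept */
		return res;

	/* ... read-side critical section ... */

	up_read(sem);
	return 0;
}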
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@ | |||
14 | * We make no fairness assumptions. They have a cost. | 14 | * We make no fairness assumptions. They have a cost. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
18 | #define arch_spin_is_locked(x) ((x)->lock != 0) | 17 | #define arch_spin_is_locked(x) ((x)->lock != 0) |
19 | 18 | ||
20 | static inline int arch_spin_value_unlocked(arch_spinlock_t lock) | 19 | static inline int arch_spin_value_unlocked(arch_spinlock_t lock) |
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
55 | 54 | ||
56 | /***********************************************************/ | 55 | /***********************************************************/ |
57 | 56 | ||
58 | static inline int arch_read_can_lock(arch_rwlock_t *lock) | ||
59 | { | ||
60 | return (lock->lock & 1) == 0; | ||
61 | } | ||
62 | |||
63 | static inline int arch_write_can_lock(arch_rwlock_t *lock) | ||
64 | { | ||
65 | return lock->lock == 0; | ||
66 | } | ||
67 | |||
68 | static inline void arch_read_lock(arch_rwlock_t *lock) | 57 | static inline void arch_read_lock(arch_rwlock_t *lock) |
69 | { | 58 | { |
70 | long regx; | 59 | long regx; |
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock) | |||
171 | lock->lock = 0; | 160 | lock->lock = 0; |
172 | } | 161 | } |
173 | 162 | ||
174 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
175 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
176 | |||
177 | #endif /* _ALPHA_SPINLOCK_H */ | 163 | #endif /* _ALPHA_SPINLOCK_H */ |
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 47efc8451b70..2ba04a7db621 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -14,7 +14,6 @@ | |||
14 | #include <asm/barrier.h> | 14 | #include <asm/barrier.h> |
15 | 15 | ||
16 | #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__) | 16 | #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__) |
17 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
18 | 17 | ||
19 | #ifdef CONFIG_ARC_HAS_LLSC | 18 | #ifdef CONFIG_ARC_HAS_LLSC |
20 | 19 | ||
@@ -410,14 +409,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
410 | 409 | ||
411 | #endif | 410 | #endif |
412 | 411 | ||
413 | #define arch_read_can_lock(x) ((x)->counter > 0) | ||
414 | #define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) | ||
415 | |||
416 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
417 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
418 | |||
419 | #define arch_spin_relax(lock) cpu_relax() | ||
420 | #define arch_read_relax(lock) cpu_relax() | ||
421 | #define arch_write_relax(lock) cpu_relax() | ||
422 | |||
423 | #endif /* __ASM_SPINLOCK_H */ | 412 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 6df9d94a9537..efe8b4200a67 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -250,7 +250,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg) | |||
250 | * and read back old value | 250 | * and read back old value |
251 | */ | 251 | */ |
252 | do { | 252 | do { |
253 | new = old = ACCESS_ONCE(*ipi_data_ptr); | 253 | new = old = READ_ONCE(*ipi_data_ptr); |
254 | new |= 1U << msg; | 254 | new |= 1U << msg; |
255 | } while (cmpxchg(ipi_data_ptr, old, new) != old); | 255 | } while (cmpxchg(ipi_data_ptr, old, new) != old); |
256 | 256 | ||
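The loop above is the usual lock-free read-modify-write retry pattern: snapshot the word once, compute the new value, and retry if another CPU changed it in the meantime. A self-contained userspace sketch of the same shape (READ_ONCE() and cmpxchg() are simplified stand-ins built on a GCC builtin, not the kernel implementations):

#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define cmpxchg(ptr, o, n)	__sync_val_compare_and_swap(ptr, o, n)

/* Same shape as ipi_send_msg_one() above: set one bit atomically. */
static void set_msg_bit(unsigned long *word, unsigned int msg)
{
	unsigned long old, new;

	do {
		new = old = READ_ONCE(*word);
		new |= 1UL << msg;
	} while (cmpxchg(word, old, new) != old);
}

int main(void)
{
	unsigned long ipi_data = 0;

	set_msg_bit(&ipi_data, 3);
	printf("%#lx\n", ipi_data);	/* prints 0x8 */
	return 0;
}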
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index e9c9a117bd25..c7cdbb43ae7c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
126 | /* | 126 | /* |
127 | * kprobe-based event tracer support | 127 | * kprobe-based event tracer support |
128 | */ | 128 | */ |
129 | #include <linux/stddef.h> | 129 | #include <linux/compiler.h> |
130 | #include <linux/types.h> | ||
131 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) | 130 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) |
132 | 131 | ||
133 | extern int regs_query_register_offset(const char *name); | 132 | extern int regs_query_register_offset(const char *name); |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 25cb465c8538..099c78fcf62d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -53,8 +53,6 @@ static inline void dsb_sev(void) | |||
53 | * memory. | 53 | * memory. |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
57 | |||
58 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 56 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
59 | { | 57 | { |
60 | unsigned long tmp; | 58 | unsigned long tmp; |
@@ -74,7 +72,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
74 | 72 | ||
75 | while (lockval.tickets.next != lockval.tickets.owner) { | 73 | while (lockval.tickets.next != lockval.tickets.owner) { |
76 | wfe(); | 74 | wfe(); |
77 | lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); | 75 | lockval.tickets.owner = READ_ONCE(lock->tickets.owner); |
78 | } | 76 | } |
79 | 77 | ||
80 | smp_mb(); | 78 | smp_mb(); |
@@ -194,9 +192,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
194 | dsb_sev(); | 192 | dsb_sev(); |
195 | } | 193 | } |
196 | 194 | ||
197 | /* write_can_lock - would write_trylock() succeed? */ | ||
198 | #define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0) | ||
199 | |||
200 | /* | 195 | /* |
201 | * Read locks are a bit more hairy: | 196 | * Read locks are a bit more hairy: |
202 | * - Exclusively load the lock value. | 197 | * - Exclusively load the lock value. |
@@ -274,14 +269,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
274 | } | 269 | } |
275 | } | 270 | } |
276 | 271 | ||
277 | /* read_can_lock - would read_trylock() succeed? */ | ||
278 | #define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000) | ||
279 | |||
280 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
281 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
282 | |||
283 | #define arch_spin_relax(lock) cpu_relax() | ||
284 | #define arch_read_relax(lock) cpu_relax() | ||
285 | #define arch_write_relax(lock) cpu_relax() | ||
286 | |||
287 | #endif /* __ASM_SPINLOCK_H */ | 272 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 76e4c83cd5c8..3f24addd7972 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, | |||
179 | bool entered_lp2 = false; | 179 | bool entered_lp2 = false; |
180 | 180 | ||
181 | if (tegra_pending_sgi()) | 181 | if (tegra_pending_sgi()) |
182 | ACCESS_ONCE(abort_flag) = true; | 182 | WRITE_ONCE(abort_flag, true); |
183 | 183 | ||
184 | cpuidle_coupled_parallel_barrier(dev, &abort_barrier); | 184 | cpuidle_coupled_parallel_barrier(dev, &abort_barrier); |
185 | 185 | ||
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 79214d5ff097..a9dd619c6c29 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata) | |||
35 | { | 35 | { |
36 | u32 seq; | 36 | u32 seq; |
37 | repeat: | 37 | repeat: |
38 | seq = ACCESS_ONCE(vdata->seq_count); | 38 | seq = READ_ONCE(vdata->seq_count); |
39 | if (seq & 1) { | 39 | if (seq & 1) { |
40 | cpu_relax(); | 40 | cpu_relax(); |
41 | goto repeat; | 41 | goto repeat; |
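This is the reader half of a sequence counter: spin until the count is even (no writer mid-update), re-reading it with READ_ONCE() each iteration. A self-contained sketch of the same pattern (READ_ONCE() and cpu_relax() are simplified userspace stand-ins, and the smp_rmb() that pairs with the writer side is omitted):

#include <stdio.h>

#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))
#define cpu_relax()	do { } while (0)

struct vdso_data { unsigned int seq_count; };

static unsigned int vdso_read_begin(const struct vdso_data *vdata)
{
	unsigned int seq;

	/* Odd count means a writer is mid-update: wait and re-read. */
	while ((seq = READ_ONCE(vdata->seq_count)) & 1)
		cpu_relax();

	return seq;
}

int main(void)
{
	struct vdso_data data = { .seq_count = 2 };

	printf("%u\n", vdso_read_begin(&data));
	return 0;
}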
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0df64a6a56d4..df02ad932020 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,7 +22,24 @@ config ARM64 | |||
22 | select ARCH_HAS_STRICT_MODULE_RWX | 22 | select ARCH_HAS_STRICT_MODULE_RWX |
23 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST | 23 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
24 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA | 24 | select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA |
25 | select ARCH_INLINE_READ_LOCK if !PREEMPT | ||
26 | select ARCH_INLINE_READ_LOCK_BH if !PREEMPT | ||
27 | select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT | ||
28 | select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT | ||
29 | select ARCH_INLINE_READ_UNLOCK if !PREEMPT | ||
30 | select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT | ||
31 | select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT | ||
32 | select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT | ||
33 | select ARCH_INLINE_WRITE_LOCK if !PREEMPT | ||
34 | select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT | ||
35 | select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT | ||
36 | select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT | ||
37 | select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT | ||
38 | select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT | ||
39 | select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT | ||
40 | select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT | ||
25 | select ARCH_USE_CMPXCHG_LOCKREF | 41 | select ARCH_USE_CMPXCHG_LOCKREF |
42 | select ARCH_USE_QUEUED_RWLOCKS | ||
26 | select ARCH_SUPPORTS_MEMORY_FAILURE | 43 | select ARCH_SUPPORTS_MEMORY_FAILURE |
27 | select ARCH_SUPPORTS_ATOMIC_RMW | 44 | select ARCH_SUPPORTS_ATOMIC_RMW |
28 | select ARCH_SUPPORTS_NUMA_BALANCING | 45 | select ARCH_SUPPORTS_NUMA_BALANCING |
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 2326e39d5892..e63d0a8312de 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h | |||
16 | generic-y += mm-arch-hooks.h | 16 | generic-y += mm-arch-hooks.h |
17 | generic-y += msi.h | 17 | generic-y += msi.h |
18 | generic-y += preempt.h | 18 | generic-y += preempt.h |
19 | generic-y += qrwlock.h | ||
19 | generic-y += rwsem.h | 20 | generic-y += rwsem.h |
20 | generic-y += segment.h | 21 | generic-y += segment.h |
21 | generic-y += serial.h | 22 | generic-y += serial.h |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 95ad7102b63c..fdb827c7832f 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -27,8 +27,6 @@ | |||
27 | * instructions. | 27 | * instructions. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
31 | |||
32 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 30 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
33 | { | 31 | { |
34 | unsigned int tmp; | 32 | unsigned int tmp; |
@@ -139,176 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock) | |||
139 | } | 137 | } |
140 | #define arch_spin_is_contended arch_spin_is_contended | 138 | #define arch_spin_is_contended arch_spin_is_contended |
141 | 139 | ||
142 | /* | 140 | #include <asm/qrwlock.h> |
143 | * Write lock implementation. | ||
144 | * | ||
145 | * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is | ||
146 | * exclusively held. | ||
147 | * | ||
148 | * The memory barriers are implicit with the load-acquire and store-release | ||
149 | * instructions. | ||
150 | */ | ||
151 | |||
152 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
153 | { | ||
154 | unsigned int tmp; | ||
155 | |||
156 | asm volatile(ARM64_LSE_ATOMIC_INSN( | ||
157 | /* LL/SC */ | ||
158 | " sevl\n" | ||
159 | "1: wfe\n" | ||
160 | "2: ldaxr %w0, %1\n" | ||
161 | " cbnz %w0, 1b\n" | ||
162 | " stxr %w0, %w2, %1\n" | ||
163 | " cbnz %w0, 2b\n" | ||
164 | __nops(1), | ||
165 | /* LSE atomics */ | ||
166 | "1: mov %w0, wzr\n" | ||
167 | "2: casa %w0, %w2, %1\n" | ||
168 | " cbz %w0, 3f\n" | ||
169 | " ldxr %w0, %1\n" | ||
170 | " cbz %w0, 2b\n" | ||
171 | " wfe\n" | ||
172 | " b 1b\n" | ||
173 | "3:") | ||
174 | : "=&r" (tmp), "+Q" (rw->lock) | ||
175 | : "r" (0x80000000) | ||
176 | : "memory"); | ||
177 | } | ||
178 | |||
179 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
180 | { | ||
181 | unsigned int tmp; | ||
182 | |||
183 | asm volatile(ARM64_LSE_ATOMIC_INSN( | ||
184 | /* LL/SC */ | ||
185 | "1: ldaxr %w0, %1\n" | ||
186 | " cbnz %w0, 2f\n" | ||
187 | " stxr %w0, %w2, %1\n" | ||
188 | " cbnz %w0, 1b\n" | ||
189 | "2:", | ||
190 | /* LSE atomics */ | ||
191 | " mov %w0, wzr\n" | ||
192 | " casa %w0, %w2, %1\n" | ||
193 | __nops(2)) | ||
194 | : "=&r" (tmp), "+Q" (rw->lock) | ||
195 | : "r" (0x80000000) | ||
196 | : "memory"); | ||
197 | |||
198 | return !tmp; | ||
199 | } | ||
200 | |||
201 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
202 | { | ||
203 | asm volatile(ARM64_LSE_ATOMIC_INSN( | ||
204 | " stlr wzr, %0", | ||
205 | " swpl wzr, wzr, %0") | ||
206 | : "=Q" (rw->lock) :: "memory"); | ||
207 | } | ||
208 | |||
209 | /* write_can_lock - would write_trylock() succeed? */ | ||
210 | #define arch_write_can_lock(x) ((x)->lock == 0) | ||
211 | |||
212 | /* | ||
213 | * Read lock implementation. | ||
214 | * | ||
215 | * It exclusively loads the lock value, increments it and stores the new value | ||
216 | * back if positive and the CPU still exclusively owns the location. If the | ||
217 | * value is negative, the lock is already held. | ||
218 | * | ||
219 | * During unlocking there may be multiple active read locks but no write lock. | ||
220 | * | ||
221 | * The memory barriers are implicit with the load-acquire and store-release | ||
222 | * instructions. | ||
223 | * | ||
224 | * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC | ||
225 | * and LSE implementations may exhibit different behaviour (although this | ||
226 | * will have no effect on lockdep). | ||
227 | */ | ||
228 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
229 | { | ||
230 | unsigned int tmp, tmp2; | ||
231 | |||
232 | asm volatile( | ||
233 | " sevl\n" | ||
234 | ARM64_LSE_ATOMIC_INSN( | ||
235 | /* LL/SC */ | ||
236 | "1: wfe\n" | ||
237 | "2: ldaxr %w0, %2\n" | ||
238 | " add %w0, %w0, #1\n" | ||
239 | " tbnz %w0, #31, 1b\n" | ||
240 | " stxr %w1, %w0, %2\n" | ||
241 | " cbnz %w1, 2b\n" | ||
242 | __nops(1), | ||
243 | /* LSE atomics */ | ||
244 | "1: wfe\n" | ||
245 | "2: ldxr %w0, %2\n" | ||
246 | " adds %w1, %w0, #1\n" | ||
247 | " tbnz %w1, #31, 1b\n" | ||
248 | " casa %w0, %w1, %2\n" | ||
249 | " sbc %w0, %w1, %w0\n" | ||
250 | " cbnz %w0, 2b") | ||
251 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) | ||
252 | : | ||
253 | : "cc", "memory"); | ||
254 | } | ||
255 | |||
256 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
257 | { | ||
258 | unsigned int tmp, tmp2; | ||
259 | |||
260 | asm volatile(ARM64_LSE_ATOMIC_INSN( | ||
261 | /* LL/SC */ | ||
262 | "1: ldxr %w0, %2\n" | ||
263 | " sub %w0, %w0, #1\n" | ||
264 | " stlxr %w1, %w0, %2\n" | ||
265 | " cbnz %w1, 1b", | ||
266 | /* LSE atomics */ | ||
267 | " movn %w0, #0\n" | ||
268 | " staddl %w0, %2\n" | ||
269 | __nops(2)) | ||
270 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) | ||
271 | : | ||
272 | : "memory"); | ||
273 | } | ||
274 | |||
275 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
276 | { | ||
277 | unsigned int tmp, tmp2; | ||
278 | |||
279 | asm volatile(ARM64_LSE_ATOMIC_INSN( | ||
280 | /* LL/SC */ | ||
281 | " mov %w1, #1\n" | ||
282 | "1: ldaxr %w0, %2\n" | ||
283 | " add %w0, %w0, #1\n" | ||
284 | " tbnz %w0, #31, 2f\n" | ||
285 | " stxr %w1, %w0, %2\n" | ||
286 | " cbnz %w1, 1b\n" | ||
287 | "2:", | ||
288 | /* LSE atomics */ | ||
289 | " ldr %w0, %2\n" | ||
290 | " adds %w1, %w0, #1\n" | ||
291 | " tbnz %w1, #31, 1f\n" | ||
292 | " casa %w0, %w1, %2\n" | ||
293 | " sbc %w1, %w1, %w0\n" | ||
294 | __nops(1) | ||
295 | "1:") | ||
296 | : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) | ||
297 | : | ||
298 | : "cc", "memory"); | ||
299 | |||
300 | return !tmp2; | ||
301 | } | ||
302 | |||
303 | /* read_can_lock - would read_trylock() succeed? */ | ||
304 | #define arch_read_can_lock(x) ((x)->lock < 0x80000000) | ||
305 | |||
306 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
307 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
308 | |||
309 | #define arch_spin_relax(lock) cpu_relax() | ||
310 | #define arch_read_relax(lock) cpu_relax() | ||
311 | #define arch_write_relax(lock) cpu_relax() | ||
312 | 141 | ||
313 | /* See include/linux/spinlock.h */ | 142 | /* See include/linux/spinlock.h */ |
314 | #define smp_mb__after_spinlock() smp_mb() | 143 | #define smp_mb__after_spinlock() smp_mb() |
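All of the hand-rolled LL/SC and LSE rwlock assembly above is replaced by a single include of the generic queued rwlock. Roughly, the read-lock fast path arm64 now inherits looks like the sketch below, paraphrased from asm-generic/qrwlock.h of this era and simplified; the in-tree slow-path interface differs in detail:

static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	/* Optimistically add a reader bias ... */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;			/* ... no writer: lock acquired */

	/* A writer is pending or active: queue up fairly behind it. */
	queued_read_lock_slowpath(lock);
}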
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a35e3f..6b856012c51b 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@ typedef struct { | |||
36 | 36 | ||
37 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } | 37 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } |
38 | 38 | ||
39 | typedef struct { | 39 | #include <asm-generic/qrwlock_types.h> |
40 | volatile unsigned int lock; | ||
41 | } arch_rwlock_t; | ||
42 | |||
43 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
44 | 40 | ||
45 | #endif | 41 | #endif |
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index f6431439d15d..839d1441af3a 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -36,8 +36,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
36 | __raw_spin_lock_asm(&lock->lock); | 36 | __raw_spin_lock_asm(&lock->lock); |
37 | } | 37 | } |
38 | 38 | ||
39 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
40 | |||
41 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
42 | { | 40 | { |
43 | return __raw_spin_trylock_asm(&lock->lock); | 41 | return __raw_spin_trylock_asm(&lock->lock); |
@@ -48,23 +46,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
48 | __raw_spin_unlock_asm(&lock->lock); | 46 | __raw_spin_unlock_asm(&lock->lock); |
49 | } | 47 | } |
50 | 48 | ||
51 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
52 | { | ||
53 | return __raw_uncached_fetch_asm(&rw->lock) > 0; | ||
54 | } | ||
55 | |||
56 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
57 | { | ||
58 | return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; | ||
59 | } | ||
60 | |||
61 | static inline void arch_read_lock(arch_rwlock_t *rw) | 49 | static inline void arch_read_lock(arch_rwlock_t *rw) |
62 | { | 50 | { |
63 | __raw_read_lock_asm(&rw->lock); | 51 | __raw_read_lock_asm(&rw->lock); |
64 | } | 52 | } |
65 | 53 | ||
66 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
67 | |||
68 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 54 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
69 | { | 55 | { |
70 | return __raw_read_trylock_asm(&rw->lock); | 56 | return __raw_read_trylock_asm(&rw->lock); |
@@ -80,8 +66,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
80 | __raw_write_lock_asm(&rw->lock); | 66 | __raw_write_lock_asm(&rw->lock); |
81 | } | 67 | } |
82 | 68 | ||
83 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
84 | |||
85 | static inline int arch_write_trylock(arch_rwlock_t *rw) | 69 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
86 | { | 70 | { |
87 | return __raw_write_trylock_asm(&rw->lock); | 71 | return __raw_write_trylock_asm(&rw->lock); |
@@ -92,10 +76,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
92 | __raw_write_unlock_asm(&rw->lock); | 76 | __raw_write_unlock_asm(&rw->lock); |
93 | } | 77 | } |
94 | 78 | ||
95 | #define arch_spin_relax(lock) cpu_relax() | ||
96 | #define arch_read_relax(lock) cpu_relax() | ||
97 | #define arch_write_relax(lock) cpu_relax() | ||
98 | |||
99 | #endif | 79 | #endif |
100 | 80 | ||
101 | #endif /* !__BFIN_SPINLOCK_H */ | 81 | #endif /* !__BFIN_SPINLOCK_H */ |
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index 53a8d5885887..48020863f53a 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) | |||
86 | return temp; | 86 | return temp; |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int arch_read_can_lock(arch_rwlock_t *rwlock) | ||
90 | { | ||
91 | return rwlock->lock == 0; | ||
92 | } | ||
93 | |||
94 | static inline int arch_write_can_lock(arch_rwlock_t *rwlock) | ||
95 | { | ||
96 | return rwlock->lock == 0; | ||
97 | } | ||
98 | |||
99 | /* Stuffs a -1 in the lock value? */ | 89 | /* Stuffs a -1 in the lock value? */ |
100 | static inline void arch_write_lock(arch_rwlock_t *lock) | 90 | static inline void arch_write_lock(arch_rwlock_t *lock) |
101 | { | 91 | { |
@@ -177,11 +167,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) | |||
177 | /* | 167 | /* |
178 | * SMP spinlocks are intended to allow only a single CPU at the lock | 168 | * SMP spinlocks are intended to allow only a single CPU at the lock |
179 | */ | 169 | */ |
180 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
181 | |||
182 | #define arch_spin_is_locked(x) ((x)->lock != 0) | 170 | #define arch_spin_is_locked(x) ((x)->lock != 0) |
183 | 171 | ||
184 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
185 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
186 | |||
187 | #endif | 172 | #endif |
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 7d6fceb3d567..917910607e0e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -38,15 +38,31 @@ | |||
38 | /* | 38 | /* |
39 | * lock for reading | 39 | * lock for reading |
40 | */ | 40 | */ |
41 | static inline void | 41 | static inline int |
42 | __down_read (struct rw_semaphore *sem) | 42 | ___down_read (struct rw_semaphore *sem) |
43 | { | 43 | { |
44 | long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1); | 44 | long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1); |
45 | 45 | ||
46 | if (result < 0) | 46 | return (result < 0); |
47 | } | ||
48 | |||
49 | static inline void | ||
50 | __down_read (struct rw_semaphore *sem) | ||
51 | { | ||
52 | if (___down_read(sem)) | ||
47 | rwsem_down_read_failed(sem); | 53 | rwsem_down_read_failed(sem); |
48 | } | 54 | } |
49 | 55 | ||
56 | static inline int | ||
57 | __down_read_killable (struct rw_semaphore *sem) | ||
58 | { | ||
59 | if (___down_read(sem)) | ||
60 | if (IS_ERR(rwsem_down_read_failed_killable(sem))) | ||
61 | return -EINTR; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
50 | /* | 66 | /* |
51 | * lock for writing | 67 | * lock for writing |
52 | */ | 68 | */ |
@@ -73,9 +89,10 @@ __down_write (struct rw_semaphore *sem) | |||
73 | static inline int | 89 | static inline int |
74 | __down_write_killable (struct rw_semaphore *sem) | 90 | __down_write_killable (struct rw_semaphore *sem) |
75 | { | 91 | { |
76 | if (___down_write(sem)) | 92 | if (___down_write(sem)) { |
77 | if (IS_ERR(rwsem_down_write_failed_killable(sem))) | 93 | if (IS_ERR(rwsem_down_write_failed_killable(sem))) |
78 | return -EINTR; | 94 | return -EINTR; |
95 | } | ||
79 | 96 | ||
80 | return 0; | 97 | return 0; |
81 | } | 98 | } |
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index aa057abd948e..afd0b3121b4c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) | |||
62 | 62 | ||
63 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) | 63 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
64 | { | 64 | { |
65 | int tmp = ACCESS_ONCE(lock->lock); | 65 | int tmp = READ_ONCE(lock->lock); |
66 | 66 | ||
67 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) | 67 | if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) |
68 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; | 68 | return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; |
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) | |||
74 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; | 74 | unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; |
75 | 75 | ||
76 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); | 76 | asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); |
77 | ACCESS_ONCE(*p) = (tmp + 2) & ~1; | 77 | WRITE_ONCE(*p, (tmp + 2) & ~1); |
78 | } | 78 | } |
79 | 79 | ||
80 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) | 80 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
81 | { | 81 | { |
82 | long tmp = ACCESS_ONCE(lock->lock); | 82 | long tmp = READ_ONCE(lock->lock); |
83 | 83 | ||
84 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); | 84 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) | 87 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
88 | { | 88 | { |
89 | long tmp = ACCESS_ONCE(lock->lock); | 89 | long tmp = READ_ONCE(lock->lock); |
90 | 90 | ||
91 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; | 91 | return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; |
92 | } | 92 | } |
@@ -127,9 +127,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, | |||
127 | { | 127 | { |
128 | arch_spin_lock(lock); | 128 | arch_spin_lock(lock); |
129 | } | 129 | } |
130 | 130 | #define arch_spin_lock_flags arch_spin_lock_flags | |
131 | #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) | ||
132 | #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) | ||
133 | 131 | ||
134 | #ifdef ASM_SUPPORTED | 132 | #ifdef ASM_SUPPORTED |
135 | 133 | ||
@@ -157,6 +155,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) | |||
157 | : "p6", "p7", "r2", "memory"); | 155 | : "p6", "p7", "r2", "memory"); |
158 | } | 156 | } |
159 | 157 | ||
158 | #define arch_read_lock_flags arch_read_lock_flags | ||
160 | #define arch_read_lock(lock) arch_read_lock_flags(lock, 0) | 159 | #define arch_read_lock(lock) arch_read_lock_flags(lock, 0) |
161 | 160 | ||
162 | #else /* !ASM_SUPPORTED */ | 161 | #else /* !ASM_SUPPORTED */ |
@@ -209,6 +208,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) | |||
209 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); | 208 | : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); |
210 | } | 209 | } |
211 | 210 | ||
211 | #define arch_write_lock_flags arch_write_lock_flags | ||
212 | #define arch_write_lock(rw) arch_write_lock_flags(rw, 0) | 212 | #define arch_write_lock(rw) arch_write_lock_flags(rw, 0) |
213 | 213 | ||
214 | #define arch_write_trylock(rw) \ | 214 | #define arch_write_trylock(rw) \ |
@@ -232,8 +232,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x) | |||
232 | 232 | ||
233 | #else /* !ASM_SUPPORTED */ | 233 | #else /* !ASM_SUPPORTED */ |
234 | 234 | ||
235 | #define arch_write_lock_flags(l, flags) arch_write_lock(l) | ||
236 | |||
237 | #define arch_write_lock(l) \ | 235 | #define arch_write_lock(l) \ |
238 | ({ \ | 236 | ({ \ |
239 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ | 237 | __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ |
@@ -273,8 +271,4 @@ static inline int arch_read_trylock(arch_rwlock_t *x) | |||
273 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; | 271 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; |
274 | } | 272 | } |
275 | 273 | ||
276 | #define arch_spin_relax(lock) cpu_relax() | ||
277 | #define arch_read_relax(lock) cpu_relax() | ||
278 | #define arch_write_relax(lock) cpu_relax() | ||
279 | |||
280 | #endif /* _ASM_IA64_SPINLOCK_H */ | 274 | #endif /* _ASM_IA64_SPINLOCK_H */ |
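The self-referential '#define arch_spin_lock_flags arch_spin_lock_flags' (and the read/write variants) is how an architecture opts in to providing its own *_lock_flags() implementations; the boilerplate one-liners deleted from every other architecture in this series are replaced by fallbacks in the generic spinlock header. A hedged sketch of that fallback, paraphrased rather than quoted from include/linux/spinlock.h:

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif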
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 604af84427ff..0189f410f8f5 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -29,7 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 31 | #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
32 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
33 | 32 | ||
34 | /** | 33 | /** |
35 | * arch_spin_trylock - Try spin lock and return a result | 34 | * arch_spin_trylock - Try spin lock and return a result |
@@ -138,18 +137,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
138 | * semaphore.h for details. -ben | 137 | * semaphore.h for details. -ben |
139 | */ | 138 | */ |
140 | 139 | ||
141 | /** | ||
142 | * read_can_lock - would read_trylock() succeed? | ||
143 | * @lock: the rwlock in question. | ||
144 | */ | ||
145 | #define arch_read_can_lock(x) ((int)(x)->lock > 0) | ||
146 | |||
147 | /** | ||
148 | * write_can_lock - would write_trylock() succeed? | ||
149 | * @lock: the rwlock in question. | ||
150 | */ | ||
151 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
152 | |||
153 | static inline void arch_read_lock(arch_rwlock_t *rw) | 140 | static inline void arch_read_lock(arch_rwlock_t *rw) |
154 | { | 141 | { |
155 | unsigned long tmp0, tmp1; | 142 | unsigned long tmp0, tmp1; |
@@ -318,11 +305,4 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) | |||
318 | return 0; | 305 | return 0; |
319 | } | 306 | } |
320 | 307 | ||
321 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
322 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
323 | |||
324 | #define arch_spin_relax(lock) cpu_relax() | ||
325 | #define arch_read_relax(lock) cpu_relax() | ||
326 | #define arch_write_relax(lock) cpu_relax() | ||
327 | |||
328 | #endif /* _ASM_M32R_SPINLOCK_H */ | 308 | #endif /* _ASM_M32R_SPINLOCK_H */ |
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index 349938c35f2d..4497c232d9c1 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -16,13 +16,4 @@ | |||
16 | * locked. | 16 | * locked. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
20 | |||
21 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
22 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
23 | |||
24 | #define arch_spin_relax(lock) cpu_relax() | ||
25 | #define arch_read_relax(lock) cpu_relax() | ||
26 | #define arch_write_relax(lock) cpu_relax() | ||
27 | |||
28 | #endif /* __ASM_SPINLOCK_H */ | 19 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
index 029935560b7f..dfd780eab350 100644
--- a/arch/metag/include/asm/spinlock_lnkget.h
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -137,21 +137,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
137 | : "memory"); | 137 | : "memory"); |
138 | } | 138 | } |
139 | 139 | ||
140 | /* write_can_lock - would write_trylock() succeed? */ | ||
141 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
142 | { | ||
143 | int ret; | ||
144 | |||
145 | asm volatile ("LNKGETD %0, [%1]\n" | ||
146 | "CMP %0, #0\n" | ||
147 | "MOV %0, #1\n" | ||
148 | "XORNZ %0, %0, %0\n" | ||
149 | : "=&d" (ret) | ||
150 | : "da" (&rw->lock) | ||
151 | : "cc"); | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | /* | 140 | /* |
156 | * Read locks are a bit more hairy: | 141 | * Read locks are a bit more hairy: |
157 | * - Exclusively load the lock value. | 142 | * - Exclusively load the lock value. |
@@ -225,26 +210,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
225 | return tmp; | 210 | return tmp; |
226 | } | 211 | } |
227 | 212 | ||
228 | /* read_can_lock - would read_trylock() succeed? */ | ||
229 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
230 | { | ||
231 | int tmp; | ||
232 | |||
233 | asm volatile ("LNKGETD %0, [%1]\n" | ||
234 | "CMP %0, %2\n" | ||
235 | "MOV %0, #1\n" | ||
236 | "XORZ %0, %0, %0\n" | ||
237 | : "=&d" (tmp) | ||
238 | : "da" (&rw->lock), "bd" (0x80000000) | ||
239 | : "cc"); | ||
240 | return tmp; | ||
241 | } | ||
242 | |||
243 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
244 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
245 | |||
246 | #define arch_spin_relax(lock) cpu_relax() | ||
247 | #define arch_read_relax(lock) cpu_relax() | ||
248 | #define arch_write_relax(lock) cpu_relax() | ||
249 | |||
250 | #endif /* __ASM_SPINLOCK_LNKGET_H */ | 213 | #endif /* __ASM_SPINLOCK_LNKGET_H */ |
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
index 12de9862d190..c0bd81bbe18c 100644
--- a/arch/metag/include/asm/spinlock_lock1.h
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -105,16 +105,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
105 | rw->lock = 0; | 105 | rw->lock = 0; |
106 | } | 106 | } |
107 | 107 | ||
108 | /* write_can_lock - would write_trylock() succeed? */ | ||
109 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
110 | { | ||
111 | unsigned int ret; | ||
112 | |||
113 | barrier(); | ||
114 | ret = rw->lock; | ||
115 | return (ret == 0); | ||
116 | } | ||
117 | |||
118 | /* | 108 | /* |
119 | * Read locks are a bit more hairy: | 109 | * Read locks are a bit more hairy: |
120 | * - Exclusively load the lock value. | 110 | * - Exclusively load the lock value. |
@@ -172,14 +162,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
172 | return (ret < 0x80000000); | 162 | return (ret < 0x80000000); |
173 | } | 163 | } |
174 | 164 | ||
175 | /* read_can_lock - would read_trylock() succeed? */ | ||
176 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
177 | { | ||
178 | unsigned int ret; | ||
179 | |||
180 | barrier(); | ||
181 | ret = rw->lock; | ||
182 | return (ret < 0x80000000); | ||
183 | } | ||
184 | |||
185 | #endif /* __ASM_SPINLOCK_LOCK1_H */ | 165 | #endif /* __ASM_SPINLOCK_LOCK1_H */ |
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a7d21da16b6a..ee81297d9117 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -13,11 +13,4 @@ | |||
13 | #include <asm/qrwlock.h> | 13 | #include <asm/qrwlock.h> |
14 | #include <asm/qspinlock.h> | 14 | #include <asm/qspinlock.h> |
15 | 15 | ||
16 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
17 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
18 | |||
19 | #define arch_spin_relax(lock) cpu_relax() | ||
20 | #define arch_read_relax(lock) cpu_relax() | ||
21 | #define arch_write_relax(lock) cpu_relax() | ||
22 | |||
23 | #endif /* _ASM_SPINLOCK_H */ | 16 | #endif /* _ASM_SPINLOCK_H */ |
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index b7cd6cf77b83..91bf0c2c265c 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data) | |||
99 | u32 seq; | 99 | u32 seq; |
100 | 100 | ||
101 | while (true) { | 101 | while (true) { |
102 | seq = ACCESS_ONCE(data->seq_count); | 102 | seq = READ_ONCE(data->seq_count); |
103 | if (likely(!(seq & 1))) { | 103 | if (likely(!(seq & 1))) { |
104 | /* Paired with smp_wmb() in vdso_data_write_*(). */ | 104 | /* Paired with smp_wmb() in vdso_data_write_*(). */ |
105 | smp_rmb(); | 105 | smp_rmb(); |
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 9dd624c2fe56..421e06dfee72 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state) | |||
166 | nc_core_ready_count = nc_addr; | 166 | nc_core_ready_count = nc_addr; |
167 | 167 | ||
168 | /* Ensure ready_count is zero-initialised before the assembly runs */ | 168 | /* Ensure ready_count is zero-initialised before the assembly runs */ |
169 | ACCESS_ONCE(*nc_core_ready_count) = 0; | 169 | WRITE_ONCE(*nc_core_ready_count, 0); |
170 | coupled_barrier(&per_cpu(pm_barrier, core), online); | 170 | coupled_barrier(&per_cpu(pm_barrier, core), online); |
171 | 171 | ||
172 | /* Run the generated entry code */ | 172 | /* Run the generated entry code */ |
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index fe413b41df6c..879cd0df53ba 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -84,6 +84,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, | |||
84 | : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL) | 84 | : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL) |
85 | : "memory", "cc"); | 85 | : "memory", "cc"); |
86 | } | 86 | } |
87 | #define arch_spin_lock_flags arch_spin_lock_flags | ||
87 | 88 | ||
88 | #ifdef __KERNEL__ | 89 | #ifdef __KERNEL__ |
89 | 90 | ||
@@ -98,18 +99,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, | |||
98 | * read-locks. | 99 | * read-locks. |
99 | */ | 100 | */ |
100 | 101 | ||
101 | /** | ||
102 | * read_can_lock - would read_trylock() succeed? | ||
103 | * @lock: the rwlock in question. | ||
104 | */ | ||
105 | #define arch_read_can_lock(x) ((int)(x)->lock > 0) | ||
106 | |||
107 | /** | ||
108 | * write_can_lock - would write_trylock() succeed? | ||
109 | * @lock: the rwlock in question. | ||
110 | */ | ||
111 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
112 | |||
113 | /* | 102 | /* |
114 | * On mn10300, we implement read-write locks as a 32-bit counter | 103 | * On mn10300, we implement read-write locks as a 32-bit counter |
115 | * with the high bit (sign) being the "contended" bit. | 104 | * with the high bit (sign) being the "contended" bit. |
@@ -183,9 +172,6 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) | |||
183 | return 0; | 172 | return 0; |
184 | } | 173 | } |
185 | 174 | ||
186 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
187 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
188 | |||
189 | #define _raw_spin_relax(lock) cpu_relax() | 175 | #define _raw_spin_relax(lock) cpu_relax() |
190 | #define _raw_read_relax(lock) cpu_relax() | 176 | #define _raw_read_relax(lock) cpu_relax() |
191 | #define _raw_write_relax(lock) cpu_relax() | 177 | #define _raw_write_relax(lock) cpu_relax() |
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 7ecf69879e2d..d7ef1232a82a 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port) | |||
543 | 543 | ||
544 | try_again: | 544 | try_again: |
545 | /* pull chars out of the hat */ | 545 | /* pull chars out of the hat */ |
546 | ix = ACCESS_ONCE(port->rx_outp); | 546 | ix = READ_ONCE(port->rx_outp); |
547 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) { | 547 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) { |
548 | if (push && !tport->low_latency) | 548 | if (push && !tport->low_latency) |
549 | tty_flip_buffer_push(tport); | 549 | tty_flip_buffer_push(tport); |
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port) | |||
1724 | if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) { | 1724 | if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) { |
1725 | do { | 1725 | do { |
1726 | /* pull chars out of the hat */ | 1726 | /* pull chars out of the hat */ |
1727 | ix = ACCESS_ONCE(port->rx_outp); | 1727 | ix = READ_ONCE(port->rx_outp); |
1728 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) | 1728 | if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) |
1729 | return NO_POLL_CHAR; | 1729 | return NO_POLL_CHAR; |
1730 | 1730 | ||
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index bc54addd589f..88bae6676c9b 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -261,7 +261,7 @@ atomic64_set(atomic64_t *v, s64 i) | |||
261 | static __inline__ s64 | 261 | static __inline__ s64 |
262 | atomic64_read(const atomic64_t *v) | 262 | atomic64_read(const atomic64_t *v) |
263 | { | 263 | { |
264 | return ACCESS_ONCE((v)->counter); | 264 | return READ_ONCE((v)->counter); |
265 | } | 265 | } |
266 | 266 | ||
267 | #define atomic64_inc(v) (atomic64_add( 1,(v))) | 267 | #define atomic64_inc(v) (atomic64_add( 1,(v))) |
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index af03359e6ac5..6f84b6acc86e 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -32,6 +32,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x, | |||
32 | cpu_relax(); | 32 | cpu_relax(); |
33 | mb(); | 33 | mb(); |
34 | } | 34 | } |
35 | #define arch_spin_lock_flags arch_spin_lock_flags | ||
35 | 36 | ||
36 | static inline void arch_spin_unlock(arch_spinlock_t *x) | 37 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
37 | { | 38 | { |
@@ -169,25 +170,4 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw) | |||
169 | return result; | 170 | return result; |
170 | } | 171 | } |
171 | 172 | ||
172 | /* | ||
173 | * read_can_lock - would read_trylock() succeed? | ||
174 | * @lock: the rwlock in question. | ||
175 | */ | ||
176 | static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) | ||
177 | { | ||
178 | return rw->counter >= 0; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * write_can_lock - would write_trylock() succeed? | ||
183 | * @lock: the rwlock in question. | ||
184 | */ | ||
185 | static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) | ||
186 | { | ||
187 | return !rw->counter; | ||
188 | } | ||
189 | |||
190 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
191 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
192 | |||
193 | #endif /* __ASM_SPINLOCK_H */ | 173 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index edbe571bcc54..b9ebc3085fb7 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -161,6 +161,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | |||
161 | local_irq_restore(flags_dis); | 161 | local_irq_restore(flags_dis); |
162 | } | 162 | } |
163 | } | 163 | } |
164 | #define arch_spin_lock_flags arch_spin_lock_flags | ||
164 | 165 | ||
165 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 166 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
166 | { | 167 | { |
@@ -181,9 +182,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
181 | * read-locks. | 182 | * read-locks. |
182 | */ | 183 | */ |
183 | 184 | ||
184 | #define arch_read_can_lock(rw) ((rw)->lock >= 0) | ||
185 | #define arch_write_can_lock(rw) (!(rw)->lock) | ||
186 | |||
187 | #ifdef CONFIG_PPC64 | 185 | #ifdef CONFIG_PPC64 |
188 | #define __DO_SIGN_EXTEND "extsw %0,%0\n" | 186 | #define __DO_SIGN_EXTEND "extsw %0,%0\n" |
189 | #define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ | 187 | #define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ |
@@ -302,9 +300,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
302 | rw->lock = 0; | 300 | rw->lock = 0; |
303 | } | 301 | } |
304 | 302 | ||
305 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
306 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
307 | |||
308 | #define arch_spin_relax(lock) __spin_yield(lock) | 303 | #define arch_spin_relax(lock) __spin_yield(lock) |
309 | #define arch_read_relax(lock) __rw_yield(lock) | 304 | #define arch_read_relax(lock) __rw_yield(lock) |
310 | #define arch_write_relax(lock) __rw_yield(lock) | 305 | #define arch_write_relax(lock) __rw_yield(lock) |
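The spinlock hunks here and in the other arch headers follow one pattern: the dummy arch_*_can_lock()/arch_*_lock_flags() fallbacks disappear, and an architecture that genuinely implements arch_spin_lock_flags() now advertises it by defining the macro to its own name. A rough sketch of that opt-in convention; the generic fallback shown is an assumption about the common header, not quoted from this diff:

/* arch side: a real IRQ-aware implementation opts in. */
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	/* ... re-enable IRQs while spinning, as the powerpc code above does ... */
}
#define arch_spin_lock_flags arch_spin_lock_flags

/* generic side (assumed form): fall back to arch_spin_lock() otherwise. */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif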
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1643e9e53655..3f1c4fcbe0aa 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -78,7 +78,7 @@ static unsigned long lock_rtas(void) | |||
78 | 78 | ||
79 | local_irq_save(flags); | 79 | local_irq_save(flags); |
80 | preempt_disable(); | 80 | preempt_disable(); |
81 | arch_spin_lock_flags(&rtas.lock, flags); | 81 | arch_spin_lock(&rtas.lock); |
82 | return flags; | 82 | return flags; |
83 | } | 83 | } |
84 | 84 | ||
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 7a9cde0cfbd1..acd3206dfae3 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c | |||
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count) | |||
43 | if (!opal_memcons) | 43 | if (!opal_memcons) |
44 | return -ENODEV; | 44 | return -ENODEV; |
45 | 45 | ||
46 | out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos)); | 46 | out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos)); |
47 | 47 | ||
48 | /* Now we've read out_pos, put a barrier in before reading the new | 48 | /* Now we've read out_pos, put a barrier in before reading the new |
49 | * data it points to in conbuf. */ | 49 | * data it points to in conbuf. */ |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 31f95a636aed..0a29588aa00b 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -38,6 +38,7 @@ bool arch_vcpu_is_preempted(int cpu); | |||
38 | */ | 38 | */ |
39 | 39 | ||
40 | void arch_spin_relax(arch_spinlock_t *lock); | 40 | void arch_spin_relax(arch_spinlock_t *lock); |
41 | #define arch_spin_relax arch_spin_relax | ||
41 | 42 | ||
42 | void arch_spin_lock_wait(arch_spinlock_t *); | 43 | void arch_spin_lock_wait(arch_spinlock_t *); |
43 | int arch_spin_trylock_retry(arch_spinlock_t *); | 44 | int arch_spin_trylock_retry(arch_spinlock_t *); |
@@ -76,6 +77,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp, | |||
76 | if (!arch_spin_trylock_once(lp)) | 77 | if (!arch_spin_trylock_once(lp)) |
77 | arch_spin_lock_wait(lp); | 78 | arch_spin_lock_wait(lp); |
78 | } | 79 | } |
80 | #define arch_spin_lock_flags arch_spin_lock_flags | ||
79 | 81 | ||
80 | static inline int arch_spin_trylock(arch_spinlock_t *lp) | 82 | static inline int arch_spin_trylock(arch_spinlock_t *lp) |
81 | { | 83 | { |
@@ -105,20 +107,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) | |||
105 | * read-locks. | 107 | * read-locks. |
106 | */ | 108 | */ |
107 | 109 | ||
108 | /** | ||
109 | * read_can_lock - would read_trylock() succeed? | ||
110 | * @lock: the rwlock in question. | ||
111 | */ | ||
112 | #define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0) | ||
113 | |||
114 | /** | ||
115 | * write_can_lock - would write_trylock() succeed? | ||
116 | * @lock: the rwlock in question. | ||
117 | */ | ||
118 | #define arch_write_can_lock(x) ((x)->cnts == 0) | ||
119 | |||
120 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
121 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
122 | #define arch_read_relax(rw) barrier() | 110 | #define arch_read_relax(rw) barrier() |
123 | #define arch_write_relax(rw) barrier() | 111 | #define arch_write_relax(rw) barrier() |
124 | 112 | ||
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 2a781cb6515c..84c0faeaf7ea 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
@@ -215,7 +215,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp) | |||
215 | lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ | 215 | lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ |
216 | 216 | ||
217 | /* Pass the virtual CPU to the lock holder if it is not running */ | 217 | /* Pass the virtual CPU to the lock holder if it is not running */ |
218 | owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL); | 218 | owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL); |
219 | if (owner && arch_vcpu_is_preempted(owner - 1)) | 219 | if (owner && arch_vcpu_is_preempted(owner - 1)) |
220 | smp_yield_cpu(owner - 1); | 220 | smp_yield_cpu(owner - 1); |
221 | 221 | ||
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h index 5ed7dbbd94ff..270ee4d3e25b 100644 --- a/arch/sh/include/asm/spinlock-cas.h +++ b/arch/sh/include/asm/spinlock-cas.h | |||
@@ -27,7 +27,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new | |||
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define arch_spin_is_locked(x) ((x)->lock <= 0) | 29 | #define arch_spin_is_locked(x) ((x)->lock <= 0) |
30 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
31 | 30 | ||
32 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 31 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
33 | { | 32 | { |
@@ -53,18 +52,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
53 | * read-locks. | 52 | * read-locks. |
54 | */ | 53 | */ |
55 | 54 | ||
56 | /** | ||
57 | * read_can_lock - would read_trylock() succeed? | ||
58 | * @lock: the rwlock in question. | ||
59 | */ | ||
60 | #define arch_read_can_lock(x) ((x)->lock > 0) | ||
61 | |||
62 | /** | ||
63 | * write_can_lock - would write_trylock() succeed? | ||
64 | * @lock: the rwlock in question. | ||
65 | */ | ||
66 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
67 | |||
68 | static inline void arch_read_lock(arch_rwlock_t *rw) | 55 | static inline void arch_read_lock(arch_rwlock_t *rw) |
69 | { | 56 | { |
70 | unsigned old; | 57 | unsigned old; |
@@ -102,11 +89,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
102 | return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS; | 89 | return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS; |
103 | } | 90 | } |
104 | 91 | ||
105 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
106 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
107 | |||
108 | #define arch_spin_relax(lock) cpu_relax() | ||
109 | #define arch_read_relax(lock) cpu_relax() | ||
110 | #define arch_write_relax(lock) cpu_relax() | ||
111 | |||
112 | #endif /* __ASM_SH_SPINLOCK_CAS_H */ | 92 | #endif /* __ASM_SH_SPINLOCK_CAS_H */ |
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h index f77263aae760..715595de286a 100644 --- a/arch/sh/include/asm/spinlock-llsc.h +++ b/arch/sh/include/asm/spinlock-llsc.h | |||
@@ -19,7 +19,6 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #define arch_spin_is_locked(x) ((x)->lock <= 0) | 21 | #define arch_spin_is_locked(x) ((x)->lock <= 0) |
22 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
23 | 22 | ||
24 | /* | 23 | /* |
25 | * Simple spin lock operations. There are two variants, one clears IRQ's | 24 | * Simple spin lock operations. There are two variants, one clears IRQ's |
@@ -89,18 +88,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
89 | * read-locks. | 88 | * read-locks. |
90 | */ | 89 | */ |
91 | 90 | ||
92 | /** | ||
93 | * read_can_lock - would read_trylock() succeed? | ||
94 | * @lock: the rwlock in question. | ||
95 | */ | ||
96 | #define arch_read_can_lock(x) ((x)->lock > 0) | ||
97 | |||
98 | /** | ||
99 | * write_can_lock - would write_trylock() succeed? | ||
100 | * @lock: the rwlock in question. | ||
101 | */ | ||
102 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | ||
103 | |||
104 | static inline void arch_read_lock(arch_rwlock_t *rw) | 91 | static inline void arch_read_lock(arch_rwlock_t *rw) |
105 | { | 92 | { |
106 | unsigned long tmp; | 93 | unsigned long tmp; |
@@ -209,11 +196,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
209 | return (oldval > (RW_LOCK_BIAS - 1)); | 196 | return (oldval > (RW_LOCK_BIAS - 1)); |
210 | } | 197 | } |
211 | 198 | ||
212 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
213 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
214 | |||
215 | #define arch_spin_relax(lock) cpu_relax() | ||
216 | #define arch_read_relax(lock) cpu_relax() | ||
217 | #define arch_write_relax(lock) cpu_relax() | ||
218 | |||
219 | #endif /* __ASM_SH_SPINLOCK_LLSC_H */ | 199 | #endif /* __ASM_SH_SPINLOCK_LLSC_H */ |
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 0c3b3b4a9963..d13ce517f4b9 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h | |||
@@ -32,7 +32,7 @@ void atomic_set(atomic_t *, int); | |||
32 | 32 | ||
33 | #define atomic_set_release(v, i) atomic_set((v), (i)) | 33 | #define atomic_set_release(v, i) atomic_set((v), (i)) |
34 | 34 | ||
35 | #define atomic_read(v) ACCESS_ONCE((v)->counter) | 35 | #define atomic_read(v) READ_ONCE((v)->counter) |
36 | 36 | ||
37 | #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) | 37 | #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) |
38 | #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) | 38 | #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) |
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index 6a339a78f4f4..71dd82b43cc5 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #if defined(__sparc__) && defined(__arch64__) | 7 | #if defined(__sparc__) && defined(__arch64__) |
8 | #ifndef __ASSEMBLY__ | 8 | #ifndef __ASSEMBLY__ |
9 | 9 | ||
10 | #include <linux/compiler.h> | ||
10 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
11 | #include <asm/switch_to.h> | 12 | #include <asm/switch_to.h> |
12 | 13 | ||
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 26f00ac2b470..bc5aa6f61676 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -183,17 +183,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw) | |||
183 | res; \ | 183 | res; \ |
184 | }) | 184 | }) |
185 | 185 | ||
186 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
187 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) | ||
188 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) | ||
189 | |||
190 | #define arch_spin_relax(lock) cpu_relax() | ||
191 | #define arch_read_relax(lock) cpu_relax() | ||
192 | #define arch_write_relax(lock) cpu_relax() | ||
193 | |||
194 | #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) | ||
195 | #define arch_write_can_lock(rw) (!(rw)->lock) | ||
196 | |||
197 | #endif /* !(__ASSEMBLY__) */ | 186 | #endif /* !(__ASSEMBLY__) */ |
198 | 187 | ||
199 | #endif /* __SPARC_SPINLOCK_H */ | 188 | #endif /* __SPARC_SPINLOCK_H */ |
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 4822a7e94a30..7fc82a233f49 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -14,13 +14,6 @@ | |||
14 | #include <asm/qrwlock.h> | 14 | #include <asm/qrwlock.h> |
15 | #include <asm/qspinlock.h> | 15 | #include <asm/qspinlock.h> |
16 | 16 | ||
17 | #define arch_read_lock_flags(p, f) arch_read_lock(p) | ||
18 | #define arch_write_lock_flags(p, f) arch_write_lock(p) | ||
19 | |||
20 | #define arch_spin_relax(lock) cpu_relax() | ||
21 | #define arch_read_relax(lock) cpu_relax() | ||
22 | #define arch_write_relax(lock) cpu_relax() | ||
23 | |||
24 | #endif /* !(__ASSEMBLY__) */ | 17 | #endif /* !(__ASSEMBLY__) */ |
25 | 18 | ||
26 | #endif /* !(__SPARC64_SPINLOCK_H) */ | 19 | #endif /* !(__SPARC64_SPINLOCK_H) */ |
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c index baa60357f8ba..b7ba577d82ca 100644 --- a/arch/tile/gxio/dma_queue.c +++ b/arch/tile/gxio/dma_queue.c | |||
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue, | |||
163 | int64_t completion_slot, int update) | 163 | int64_t completion_slot, int update) |
164 | { | 164 | { |
165 | if (update) { | 165 | if (update) { |
166 | if (ACCESS_ONCE(dma_queue->hw_complete_count) > | 166 | if (READ_ONCE(dma_queue->hw_complete_count) > |
167 | completion_slot) | 167 | completion_slot) |
168 | return 1; | 168 | return 1; |
169 | 169 | ||
170 | __gxio_dma_queue_update_credits(dma_queue); | 170 | __gxio_dma_queue_update_credits(dma_queue); |
171 | } | 171 | } |
172 | 172 | ||
173 | return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot; | 173 | return READ_ONCE(dma_queue->hw_complete_count) > completion_slot; |
174 | } | 174 | } |
175 | 175 | ||
176 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); | 176 | EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete); |
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h index cba8ba9b8da6..fb5313d77315 100644 --- a/arch/tile/include/asm/spinlock_32.h +++ b/arch/tile/include/asm/spinlock_32.h | |||
@@ -51,9 +51,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock) | |||
51 | 51 | ||
52 | void arch_spin_lock(arch_spinlock_t *lock); | 52 | void arch_spin_lock(arch_spinlock_t *lock); |
53 | 53 | ||
54 | /* We cannot take an interrupt after getting a ticket, so don't enable them. */ | ||
55 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
56 | |||
57 | int arch_spin_trylock(arch_spinlock_t *lock); | 54 | int arch_spin_trylock(arch_spinlock_t *lock); |
58 | 55 | ||
59 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 56 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
@@ -80,22 +77,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
80 | #define _RD_COUNT_WIDTH 8 | 77 | #define _RD_COUNT_WIDTH 8 |
81 | 78 | ||
82 | /** | 79 | /** |
83 | * arch_read_can_lock() - would read_trylock() succeed? | ||
84 | */ | ||
85 | static inline int arch_read_can_lock(arch_rwlock_t *rwlock) | ||
86 | { | ||
87 | return (rwlock->lock << _RD_COUNT_WIDTH) == 0; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * arch_write_can_lock() - would write_trylock() succeed? | ||
92 | */ | ||
93 | static inline int arch_write_can_lock(arch_rwlock_t *rwlock) | ||
94 | { | ||
95 | return rwlock->lock == 0; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * arch_read_lock() - acquire a read lock. | 80 | * arch_read_lock() - acquire a read lock. |
100 | */ | 81 | */ |
101 | void arch_read_lock(arch_rwlock_t *rwlock); | 82 | void arch_read_lock(arch_rwlock_t *rwlock); |
@@ -125,7 +106,4 @@ void arch_read_unlock(arch_rwlock_t *rwlock); | |||
125 | */ | 106 | */ |
126 | void arch_write_unlock(arch_rwlock_t *rwlock); | 107 | void arch_write_unlock(arch_rwlock_t *rwlock); |
127 | 108 | ||
128 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
129 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
130 | |||
131 | #endif /* _ASM_TILE_SPINLOCK_32_H */ | 109 | #endif /* _ASM_TILE_SPINLOCK_32_H */ |
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h index 9a2c2d605752..5b616ef642a8 100644 --- a/arch/tile/include/asm/spinlock_64.h +++ b/arch/tile/include/asm/spinlock_64.h | |||
@@ -75,9 +75,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
75 | /* Try to get the lock, and return whether we succeeded. */ | 75 | /* Try to get the lock, and return whether we succeeded. */ |
76 | int arch_spin_trylock(arch_spinlock_t *lock); | 76 | int arch_spin_trylock(arch_spinlock_t *lock); |
77 | 77 | ||
78 | /* We cannot take an interrupt after getting a ticket, so don't enable them. */ | ||
79 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
80 | |||
81 | /* | 78 | /* |
82 | * Read-write spinlocks, allowing multiple readers | 79 | * Read-write spinlocks, allowing multiple readers |
83 | * but only one writer. | 80 | * but only one writer. |
@@ -93,24 +90,6 @@ static inline int arch_write_val_locked(int val) | |||
93 | return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */ | 90 | return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */ |
94 | } | 91 | } |
95 | 92 | ||
96 | /** | ||
97 | * read_can_lock - would read_trylock() succeed? | ||
98 | * @lock: the rwlock in question. | ||
99 | */ | ||
100 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
101 | { | ||
102 | return !arch_write_val_locked(rw->lock); | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * write_can_lock - would write_trylock() succeed? | ||
107 | * @lock: the rwlock in question. | ||
108 | */ | ||
109 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
110 | { | ||
111 | return rw->lock == 0; | ||
112 | } | ||
113 | |||
114 | extern void __read_lock_failed(arch_rwlock_t *rw); | 93 | extern void __read_lock_failed(arch_rwlock_t *rw); |
115 | 94 | ||
116 | static inline void arch_read_lock(arch_rwlock_t *rw) | 95 | static inline void arch_read_lock(arch_rwlock_t *rw) |
@@ -156,7 +135,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
156 | return 0; | 135 | return 0; |
157 | } | 136 | } |
158 | 137 | ||
159 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
160 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
161 | |||
162 | #endif /* _ASM_TILE_SPINLOCK_64_H */ | 138 | #endif /* _ASM_TILE_SPINLOCK_64_H */ |
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h index b9e45e37649e..c8fd47edba30 100644 --- a/arch/tile/include/gxio/dma_queue.h +++ b/arch/tile/include/gxio/dma_queue.h | |||
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue, | |||
121 | * if the result is LESS than "hw_complete_count". | 121 | * if the result is LESS than "hw_complete_count". |
122 | */ | 122 | */ |
123 | uint64_t complete; | 123 | uint64_t complete; |
124 | complete = ACCESS_ONCE(dma_queue->hw_complete_count); | 124 | complete = READ_ONCE(dma_queue->hw_complete_count); |
125 | slot |= (complete & 0xffffffffff000000); | 125 | slot |= (complete & 0xffffffffff000000); |
126 | if (slot < complete) | 126 | if (slot < complete) |
127 | slot += 0x1000000; | 127 | slot += 0x1000000; |
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c index e1a078e6828e..d516d61751c2 100644 --- a/arch/tile/kernel/ptrace.c +++ b/arch/tile/kernel/ptrace.c | |||
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
255 | 255 | ||
256 | int do_syscall_trace_enter(struct pt_regs *regs) | 256 | int do_syscall_trace_enter(struct pt_regs *regs) |
257 | { | 257 | { |
258 | u32 work = ACCESS_ONCE(current_thread_info()->flags); | 258 | u32 work = READ_ONCE(current_thread_info()->flags); |
259 | 259 | ||
260 | if ((work & _TIF_SYSCALL_TRACE) && | 260 | if ((work & _TIF_SYSCALL_TRACE) && |
261 | tracehook_report_syscall_entry(regs)) { | 261 | tracehook_report_syscall_entry(regs)) { |
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index 390572daa40d..b3f5865a92c9 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h | |||
@@ -41,7 +41,7 @@ | |||
41 | typedef int (*initcall_t)(void); | 41 | typedef int (*initcall_t)(void); |
42 | typedef void (*exitcall_t)(void); | 42 | typedef void (*exitcall_t)(void); |
43 | 43 | ||
44 | #include <linux/compiler.h> | 44 | #include <linux/compiler_types.h> |
45 | 45 | ||
46 | /* These are for everybody (although not all archs will actually | 46 | /* These are for everybody (although not all archs will actually |
47 | discard it in modules) */ | 47 | discard it in modules) */ |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fdb23313dd5..9bceea6a5852 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -56,7 +56,7 @@ config X86 | |||
56 | select ARCH_HAS_KCOV if X86_64 | 56 | select ARCH_HAS_KCOV if X86_64 |
57 | select ARCH_HAS_PMEM_API if X86_64 | 57 | select ARCH_HAS_PMEM_API if X86_64 |
58 | # Causing hangs/crashes, see the commit that added this change for details. | 58 | # Causing hangs/crashes, see the commit that added this change for details. |
59 | select ARCH_HAS_REFCOUNT if BROKEN | 59 | select ARCH_HAS_REFCOUNT |
60 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 | 60 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 |
61 | select ARCH_HAS_SET_MEMORY | 61 | select ARCH_HAS_SET_MEMORY |
62 | select ARCH_HAS_SG_CHAIN | 62 | select ARCH_HAS_SG_CHAIN |
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 03505ffbe1b6..d7d3cc24baf4 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c | |||
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs) | |||
75 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) | 75 | if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) |
76 | BUG_ON(regs != task_pt_regs(current)); | 76 | BUG_ON(regs != task_pt_regs(current)); |
77 | 77 | ||
78 | work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; | 78 | work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY; |
79 | 79 | ||
80 | if (unlikely(work & _TIF_SYSCALL_EMU)) | 80 | if (unlikely(work & _TIF_SYSCALL_EMU)) |
81 | emulated = true; | 81 | emulated = true; |
@@ -186,9 +186,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) | |||
186 | 186 | ||
187 | addr_limit_user_check(); | 187 | addr_limit_user_check(); |
188 | 188 | ||
189 | if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled())) | 189 | lockdep_assert_irqs_disabled(); |
190 | local_irq_disable(); | ||
191 | |||
192 | lockdep_sys_exit(); | 190 | lockdep_sys_exit(); |
193 | 191 | ||
194 | cached_flags = READ_ONCE(ti->flags); | 192 | cached_flags = READ_ONCE(ti->flags); |
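prepare_exit_to_usermode() above is one of many call sites converted to the new lockdep IRQ-state assertions. A small usage sketch with hypothetical functions, assuming <linux/lockdep.h>; the assertions are active only under CONFIG_PROVE_LOCKING and compile away otherwise:

#include <linux/lockdep.h>

static void example_needs_irqs_off(void)
{
	lockdep_assert_irqs_disabled();	/* WARNs once if IRQs are enabled */
	/* ... touch per-CPU state that must not be interrupted ... */
}

static void example_may_sleep(void)
{
	lockdep_assert_irqs_enabled();	/* WARNs once if IRQs are disabled */
	/* ... blocking work ... */
}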
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c index fa8dbfcf7ed3..11b13c4b43d5 100644 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ b/arch/x86/entry/vdso/vclock_gettime.c | |||
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *) | |||
318 | notrace time_t __vdso_time(time_t *t) | 318 | notrace time_t __vdso_time(time_t *t) |
319 | { | 319 | { |
320 | /* This is atomic on x86 so we don't need any locks. */ | 320 | /* This is atomic on x86 so we don't need any locks. */ |
321 | time_t result = ACCESS_ONCE(gtod->wall_time_sec); | 321 | time_t result = READ_ONCE(gtod->wall_time_sec); |
322 | 322 | ||
323 | if (t) | 323 | if (t) |
324 | *t = result; | 324 | *t = result; |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 80534d3c2480..140d33288e78 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event) | |||
2118 | event->destroy(event); | 2118 | event->destroy(event); |
2119 | } | 2119 | } |
2120 | 2120 | ||
2121 | if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) | 2121 | if (READ_ONCE(x86_pmu.attr_rdpmc)) |
2122 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; | 2122 | event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; |
2123 | 2123 | ||
2124 | return err; | 2124 | return err; |
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment) | |||
2371 | struct ldt_struct *ldt; | 2371 | struct ldt_struct *ldt; |
2372 | 2372 | ||
2373 | /* IRQs are off, so this synchronizes with smp_store_release */ | 2373 | /* IRQs are off, so this synchronizes with smp_store_release */ |
2374 | ldt = lockless_dereference(current->active_mm->context.ldt); | 2374 | ldt = READ_ONCE(current->active_mm->context.ldt); |
2375 | if (!ldt || idx >= ldt->nr_entries) | 2375 | if (!ldt || idx >= ldt->nr_entries) |
2376 | return 0; | 2376 | return 0; |
2377 | 2377 | ||
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 01727dbc294a..7fb336210e1b 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -12,11 +12,11 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #ifdef CONFIG_X86_32 | 14 | #ifdef CONFIG_X86_32 |
15 | #define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \ | 15 | #define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \ |
16 | X86_FEATURE_XMM2) ::: "memory", "cc") | 16 | X86_FEATURE_XMM2) ::: "memory", "cc") |
17 | #define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \ | 17 | #define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \ |
18 | X86_FEATURE_XMM2) ::: "memory", "cc") | 18 | X86_FEATURE_XMM2) ::: "memory", "cc") |
19 | #define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \ | 19 | #define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \ |
20 | X86_FEATURE_XMM2) ::: "memory", "cc") | 20 | X86_FEATURE_XMM2) ::: "memory", "cc") |
21 | #else | 21 | #else |
22 | #define mb() asm volatile("mfence":::"memory") | 22 | #define mb() asm volatile("mfence":::"memory") |
@@ -31,7 +31,11 @@ | |||
31 | #endif | 31 | #endif |
32 | #define dma_wmb() barrier() | 32 | #define dma_wmb() barrier() |
33 | 33 | ||
34 | #define __smp_mb() mb() | 34 | #ifdef CONFIG_X86_32 |
35 | #define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc") | ||
36 | #else | ||
37 | #define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc") | ||
38 | #endif | ||
35 | #define __smp_rmb() dma_rmb() | 39 | #define __smp_rmb() dma_rmb() |
36 | #define __smp_wmb() barrier() | 40 | #define __smp_wmb() barrier() |
37 | #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) | 41 | #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) |
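The barrier.h hunk above trades MFENCE for a locked ADD to a dead stack slot: on x86 any LOCK-prefixed read-modify-write acts as a full barrier and is typically cheaper than MFENCE, and addressing -4(%esp)/-4(%rsp) keeps the operation off the word at the top of the stack. A userspace-style sketch of the same idea (illustrative only, not kernel code):

static inline void full_mb(void)
{
#if defined(__x86_64__)
	__asm__ __volatile__("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
#elif defined(__i386__)
	__asm__ __volatile__("lock; addl $0,-4(%%esp)" ::: "memory", "cc");
#else
	__sync_synchronize();		/* generic compiler fallback */
#endif
}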
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6699fc441644..6d16d15d09a0 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm) | |||
73 | #ifdef CONFIG_MODIFY_LDT_SYSCALL | 73 | #ifdef CONFIG_MODIFY_LDT_SYSCALL |
74 | struct ldt_struct *ldt; | 74 | struct ldt_struct *ldt; |
75 | 75 | ||
76 | /* lockless_dereference synchronizes with smp_store_release */ | 76 | /* READ_ONCE synchronizes with smp_store_release */ |
77 | ldt = lockless_dereference(mm->context.ldt); | 77 | ldt = READ_ONCE(mm->context.ldt); |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Any change to mm->context.ldt is followed by an IPI to all | 80 | * Any change to mm->context.ldt is followed by an IPI to all |
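The LDT hunks are part of retiring lockless_dereference(): with Alpha handled inside READ_ONCE() (which now implies smp_read_barrier_depends()), a plain READ_ONCE() is enough on the consumer side. A generic publish/consume sketch with illustrative names, assuming the usual kernel barrier headers:

struct cfg {
	int value;
};

static struct cfg *live_cfg;

static void cfg_publish(struct cfg *new)
{
	/* pairs with the READ_ONCE() below, like install_ldt()/load_mm_ldt() */
	smp_store_release(&live_cfg, new);
}

static int cfg_read_value(void)
{
	struct cfg *c = READ_ONCE(live_cfg);	/* was lockless_dereference() */

	return c ? c->value : 0;
}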
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 9982dd96f093..5e16b5d40d32 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #ifndef _ASM_X86_QSPINLOCK_H | 2 | #ifndef _ASM_X86_QSPINLOCK_H |
3 | #define _ASM_X86_QSPINLOCK_H | 3 | #define _ASM_X86_QSPINLOCK_H |
4 | 4 | ||
5 | #include <linux/jump_label.h> | ||
5 | #include <asm/cpufeature.h> | 6 | #include <asm/cpufeature.h> |
6 | #include <asm-generic/qspinlock_types.h> | 7 | #include <asm-generic/qspinlock_types.h> |
7 | #include <asm/paravirt.h> | 8 | #include <asm/paravirt.h> |
@@ -47,10 +48,14 @@ static inline void queued_spin_unlock(struct qspinlock *lock) | |||
47 | #endif | 48 | #endif |
48 | 49 | ||
49 | #ifdef CONFIG_PARAVIRT | 50 | #ifdef CONFIG_PARAVIRT |
51 | DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key); | ||
52 | |||
53 | void native_pv_lock_init(void) __init; | ||
54 | |||
50 | #define virt_spin_lock virt_spin_lock | 55 | #define virt_spin_lock virt_spin_lock |
51 | static inline bool virt_spin_lock(struct qspinlock *lock) | 56 | static inline bool virt_spin_lock(struct qspinlock *lock) |
52 | { | 57 | { |
53 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) | 58 | if (!static_branch_likely(&virt_spin_lock_key)) |
54 | return false; | 59 | return false; |
55 | 60 | ||
56 | /* | 61 | /* |
@@ -66,6 +71,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock) | |||
66 | 71 | ||
67 | return true; | 72 | return true; |
68 | } | 73 | } |
74 | #else | ||
75 | static inline void native_pv_lock_init(void) | ||
76 | { | ||
77 | } | ||
69 | #endif /* CONFIG_PARAVIRT */ | 78 | #endif /* CONFIG_PARAVIRT */ |
70 | 79 | ||
71 | #include <asm-generic/qspinlock.h> | 80 | #include <asm-generic/qspinlock.h> |
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index ff871210b9f2..4e44250e7d0d 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h | |||
@@ -15,7 +15,7 @@ | |||
15 | * back to the regular execution flow in .text. | 15 | * back to the regular execution flow in .text. |
16 | */ | 16 | */ |
17 | #define _REFCOUNT_EXCEPTION \ | 17 | #define _REFCOUNT_EXCEPTION \ |
18 | ".pushsection .text.unlikely\n" \ | 18 | ".pushsection .text..refcount\n" \ |
19 | "111:\tlea %[counter], %%" _ASM_CX "\n" \ | 19 | "111:\tlea %[counter], %%" _ASM_CX "\n" \ |
20 | "112:\t" ASM_UD0 "\n" \ | 20 | "112:\t" ASM_UD0 "\n" \ |
21 | ASM_UNREACHABLE \ | 21 | ASM_UNREACHABLE \ |
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h index 4d38d85a16ad..4c25cf6caefa 100644 --- a/arch/x86/include/asm/rwsem.h +++ b/arch/x86/include/asm/rwsem.h | |||
@@ -61,18 +61,33 @@ | |||
61 | /* | 61 | /* |
62 | * lock for reading | 62 | * lock for reading |
63 | */ | 63 | */ |
64 | #define ____down_read(sem, slow_path) \ | ||
65 | ({ \ | ||
66 | struct rw_semaphore* ret; \ | ||
67 | asm volatile("# beginning down_read\n\t" \ | ||
68 | LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \ | ||
69 | /* adds 0x00000001 */ \ | ||
70 | " jns 1f\n" \ | ||
71 | " call " slow_path "\n" \ | ||
72 | "1:\n\t" \ | ||
73 | "# ending down_read\n\t" \ | ||
74 | : "+m" (sem->count), "=a" (ret), \ | ||
75 | ASM_CALL_CONSTRAINT \ | ||
76 | : [sem] "a" (sem) \ | ||
77 | : "memory", "cc"); \ | ||
78 | ret; \ | ||
79 | }) | ||
80 | |||
64 | static inline void __down_read(struct rw_semaphore *sem) | 81 | static inline void __down_read(struct rw_semaphore *sem) |
65 | { | 82 | { |
66 | asm volatile("# beginning down_read\n\t" | 83 | ____down_read(sem, "call_rwsem_down_read_failed"); |
67 | LOCK_PREFIX _ASM_INC "(%1)\n\t" | 84 | } |
68 | /* adds 0x00000001 */ | 85 | |
69 | " jns 1f\n" | 86 | static inline int __down_read_killable(struct rw_semaphore *sem) |
70 | " call call_rwsem_down_read_failed\n" | 87 | { |
71 | "1:\n\t" | 88 | if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable"))) |
72 | "# ending down_read\n\t" | 89 | return -EINTR; |
73 | : "+m" (sem->count) | 90 | return 0; |
74 | : "a" (sem) | ||
75 | : "memory", "cc"); | ||
76 | } | 91 | } |
77 | 92 | ||
78 | /* | 93 | /* |
@@ -82,17 +97,18 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) | |||
82 | { | 97 | { |
83 | long result, tmp; | 98 | long result, tmp; |
84 | asm volatile("# beginning __down_read_trylock\n\t" | 99 | asm volatile("# beginning __down_read_trylock\n\t" |
85 | " mov %0,%1\n\t" | 100 | " mov %[count],%[result]\n\t" |
86 | "1:\n\t" | 101 | "1:\n\t" |
87 | " mov %1,%2\n\t" | 102 | " mov %[result],%[tmp]\n\t" |
88 | " add %3,%2\n\t" | 103 | " add %[inc],%[tmp]\n\t" |
89 | " jle 2f\n\t" | 104 | " jle 2f\n\t" |
90 | LOCK_PREFIX " cmpxchg %2,%0\n\t" | 105 | LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t" |
91 | " jnz 1b\n\t" | 106 | " jnz 1b\n\t" |
92 | "2:\n\t" | 107 | "2:\n\t" |
93 | "# ending __down_read_trylock\n\t" | 108 | "# ending __down_read_trylock\n\t" |
94 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) | 109 | : [count] "+m" (sem->count), [result] "=&a" (result), |
95 | : "i" (RWSEM_ACTIVE_READ_BIAS) | 110 | [tmp] "=&r" (tmp) |
111 | : [inc] "i" (RWSEM_ACTIVE_READ_BIAS) | ||
96 | : "memory", "cc"); | 112 | : "memory", "cc"); |
97 | return result >= 0; | 113 | return result >= 0; |
98 | } | 114 | } |
@@ -106,7 +122,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) | |||
106 | struct rw_semaphore* ret; \ | 122 | struct rw_semaphore* ret; \ |
107 | \ | 123 | \ |
108 | asm volatile("# beginning down_write\n\t" \ | 124 | asm volatile("# beginning down_write\n\t" \ |
109 | LOCK_PREFIX " xadd %1,(%4)\n\t" \ | 125 | LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \ |
110 | /* adds 0xffff0001, returns the old value */ \ | 126 | /* adds 0xffff0001, returns the old value */ \ |
111 | " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \ | 127 | " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \ |
112 | /* was the active mask 0 before? */\ | 128 | /* was the active mask 0 before? */\ |
@@ -114,9 +130,9 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem) | |||
114 | " call " slow_path "\n" \ | 130 | " call " slow_path "\n" \ |
115 | "1:\n" \ | 131 | "1:\n" \ |
116 | "# ending down_write" \ | 132 | "# ending down_write" \ |
117 | : "+m" (sem->count), "=d" (tmp), \ | 133 | : "+m" (sem->count), [tmp] "=d" (tmp), \ |
118 | "=a" (ret), ASM_CALL_CONSTRAINT \ | 134 | "=a" (ret), ASM_CALL_CONSTRAINT \ |
119 | : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \ | 135 | : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \ |
120 | : "memory", "cc"); \ | 136 | : "memory", "cc"); \ |
121 | ret; \ | 137 | ret; \ |
122 | }) | 138 | }) |
@@ -142,21 +158,21 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem) | |||
142 | bool result; | 158 | bool result; |
143 | long tmp0, tmp1; | 159 | long tmp0, tmp1; |
144 | asm volatile("# beginning __down_write_trylock\n\t" | 160 | asm volatile("# beginning __down_write_trylock\n\t" |
145 | " mov %0,%1\n\t" | 161 | " mov %[count],%[tmp0]\n\t" |
146 | "1:\n\t" | 162 | "1:\n\t" |
147 | " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" | 163 | " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" |
148 | /* was the active mask 0 before? */ | 164 | /* was the active mask 0 before? */ |
149 | " jnz 2f\n\t" | 165 | " jnz 2f\n\t" |
150 | " mov %1,%2\n\t" | 166 | " mov %[tmp0],%[tmp1]\n\t" |
151 | " add %4,%2\n\t" | 167 | " add %[inc],%[tmp1]\n\t" |
152 | LOCK_PREFIX " cmpxchg %2,%0\n\t" | 168 | LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t" |
153 | " jnz 1b\n\t" | 169 | " jnz 1b\n\t" |
154 | "2:\n\t" | 170 | "2:\n\t" |
155 | CC_SET(e) | 171 | CC_SET(e) |
156 | "# ending __down_write_trylock\n\t" | 172 | "# ending __down_write_trylock\n\t" |
157 | : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1), | 173 | : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0), |
158 | CC_OUT(e) (result) | 174 | [tmp1] "=&r" (tmp1), CC_OUT(e) (result) |
159 | : "er" (RWSEM_ACTIVE_WRITE_BIAS) | 175 | : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS) |
160 | : "memory"); | 176 | : "memory"); |
161 | return result; | 177 | return result; |
162 | } | 178 | } |
@@ -168,14 +184,14 @@ static inline void __up_read(struct rw_semaphore *sem) | |||
168 | { | 184 | { |
169 | long tmp; | 185 | long tmp; |
170 | asm volatile("# beginning __up_read\n\t" | 186 | asm volatile("# beginning __up_read\n\t" |
171 | LOCK_PREFIX " xadd %1,(%2)\n\t" | 187 | LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" |
172 | /* subtracts 1, returns the old value */ | 188 | /* subtracts 1, returns the old value */ |
173 | " jns 1f\n\t" | 189 | " jns 1f\n\t" |
174 | " call call_rwsem_wake\n" /* expects old value in %edx */ | 190 | " call call_rwsem_wake\n" /* expects old value in %edx */ |
175 | "1:\n" | 191 | "1:\n" |
176 | "# ending __up_read\n" | 192 | "# ending __up_read\n" |
177 | : "+m" (sem->count), "=d" (tmp) | 193 | : "+m" (sem->count), [tmp] "=d" (tmp) |
178 | : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS) | 194 | : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS) |
179 | : "memory", "cc"); | 195 | : "memory", "cc"); |
180 | } | 196 | } |
181 | 197 | ||
@@ -186,14 +202,14 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
186 | { | 202 | { |
187 | long tmp; | 203 | long tmp; |
188 | asm volatile("# beginning __up_write\n\t" | 204 | asm volatile("# beginning __up_write\n\t" |
189 | LOCK_PREFIX " xadd %1,(%2)\n\t" | 205 | LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" |
190 | /* subtracts 0xffff0001, returns the old value */ | 206 | /* subtracts 0xffff0001, returns the old value */ |
191 | " jns 1f\n\t" | 207 | " jns 1f\n\t" |
192 | " call call_rwsem_wake\n" /* expects old value in %edx */ | 208 | " call call_rwsem_wake\n" /* expects old value in %edx */ |
193 | "1:\n\t" | 209 | "1:\n\t" |
194 | "# ending __up_write\n" | 210 | "# ending __up_write\n" |
195 | : "+m" (sem->count), "=d" (tmp) | 211 | : "+m" (sem->count), [tmp] "=d" (tmp) |
196 | : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) | 212 | : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS) |
197 | : "memory", "cc"); | 213 | : "memory", "cc"); |
198 | } | 214 | } |
199 | 215 | ||
@@ -203,7 +219,7 @@ static inline void __up_write(struct rw_semaphore *sem) | |||
203 | static inline void __downgrade_write(struct rw_semaphore *sem) | 219 | static inline void __downgrade_write(struct rw_semaphore *sem) |
204 | { | 220 | { |
205 | asm volatile("# beginning __downgrade_write\n\t" | 221 | asm volatile("# beginning __downgrade_write\n\t" |
206 | LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" | 222 | LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t" |
207 | /* | 223 | /* |
208 | * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) | 224 | * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) |
209 | * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) | 225 | * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) |
@@ -213,7 +229,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
213 | "1:\n\t" | 229 | "1:\n\t" |
214 | "# ending __downgrade_write\n" | 230 | "# ending __downgrade_write\n" |
215 | : "+m" (sem->count) | 231 | : "+m" (sem->count) |
216 | : "a" (sem), "er" (-RWSEM_WAITING_BIAS) | 232 | : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS) |
217 | : "memory", "cc"); | 233 | : "memory", "cc"); |
218 | } | 234 | } |
219 | 235 | ||
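Besides adding __down_read_killable(), the rwsem.h changes above convert the inline assembly from positional %0/%1 operands to named [sem]/[tmp]/[inc] operands, which keeps the constraint lists readable as operands are added. A standalone sketch of the named-operand style (illustrative only, not taken from this file):

static inline long fetch_add_long(long *p, long v)
{
	long old = v;

	__asm__ __volatile__("lock; xadd %[old],%[mem]"
			     : [old] "+r" (old), [mem] "+m" (*p)
			     : : "memory", "cc");
	return old;		/* previous value of *p; *p has been incremented by v */
}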
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index b34625796eb2..5b6bc7016c22 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -42,11 +42,4 @@ | |||
42 | 42 | ||
43 | #include <asm/qrwlock.h> | 43 | #include <asm/qrwlock.h> |
44 | 44 | ||
45 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
46 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
47 | |||
48 | #define arch_spin_relax(lock) cpu_relax() | ||
49 | #define arch_read_relax(lock) cpu_relax() | ||
50 | #define arch_write_relax(lock) cpu_relax() | ||
51 | |||
52 | #endif /* _ASM_X86_SPINLOCK_H */ | 45 | #endif /* _ASM_X86_SPINLOCK_H */ |
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 52250681f68c..fb856c9f0449 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -49,7 +49,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s) | |||
49 | unsigned ret; | 49 | unsigned ret; |
50 | 50 | ||
51 | repeat: | 51 | repeat: |
52 | ret = ACCESS_ONCE(s->seq); | 52 | ret = READ_ONCE(s->seq); |
53 | if (unlikely(ret & 1)) { | 53 | if (unlikely(ret & 1)) { |
54 | cpu_relax(); | 54 | cpu_relax(); |
55 | goto repeat; | 55 | goto repeat; |
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 9c4e7ba6870c..7d7715dde901 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c | |||
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu) | |||
155 | page = cpu/ESPFIX_STACKS_PER_PAGE; | 155 | page = cpu/ESPFIX_STACKS_PER_PAGE; |
156 | 156 | ||
157 | /* Did another CPU already set this up? */ | 157 | /* Did another CPU already set this up? */ |
158 | stack_page = ACCESS_ONCE(espfix_pages[page]); | 158 | stack_page = READ_ONCE(espfix_pages[page]); |
159 | if (likely(stack_page)) | 159 | if (likely(stack_page)) |
160 | goto done; | 160 | goto done; |
161 | 161 | ||
162 | mutex_lock(&espfix_init_mutex); | 162 | mutex_lock(&espfix_init_mutex); |
163 | 163 | ||
164 | /* Did we race on the lock? */ | 164 | /* Did we race on the lock? */ |
165 | stack_page = ACCESS_ONCE(espfix_pages[page]); | 165 | stack_page = READ_ONCE(espfix_pages[page]); |
166 | if (stack_page) | 166 | if (stack_page) |
167 | goto unlock_done; | 167 | goto unlock_done; |
168 | 168 | ||
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu) | |||
200 | set_pte(&pte_p[n*PTE_STRIDE], pte); | 200 | set_pte(&pte_p[n*PTE_STRIDE], pte); |
201 | 201 | ||
202 | /* Job is done for this CPU and any CPU which shares this page */ | 202 | /* Job is done for this CPU and any CPU which shares this page */ |
203 | ACCESS_ONCE(espfix_pages[page]) = stack_page; | 203 | WRITE_ONCE(espfix_pages[page], stack_page); |
204 | 204 | ||
205 | unlock_done: | 205 | unlock_done: |
206 | mutex_unlock(&espfix_init_mutex); | 206 | mutex_unlock(&espfix_init_mutex); |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 4d17bacf4030..a2fcf037bd80 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -102,7 +102,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt) | |||
102 | static void install_ldt(struct mm_struct *current_mm, | 102 | static void install_ldt(struct mm_struct *current_mm, |
103 | struct ldt_struct *ldt) | 103 | struct ldt_struct *ldt) |
104 | { | 104 | { |
105 | /* Synchronizes with lockless_dereference in load_mm_ldt. */ | 105 | /* Synchronizes with READ_ONCE in load_mm_ldt. */ |
106 | smp_store_release(¤t_mm->context.ldt, ldt); | 106 | smp_store_release(¤t_mm->context.ldt, ldt); |
107 | 107 | ||
108 | /* Activate the LDT for all CPUs using current_mm. */ | 108 | /* Activate the LDT for all CPUs using current_mm. */ |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 35aafc95e4b8..18bc9b51ac9b 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w) | |||
105 | { | 105 | { |
106 | struct nmiaction *a = container_of(w, struct nmiaction, irq_work); | 106 | struct nmiaction *a = container_of(w, struct nmiaction, irq_work); |
107 | int remainder_ns, decimal_msecs; | 107 | int remainder_ns, decimal_msecs; |
108 | u64 whole_msecs = ACCESS_ONCE(a->max_duration); | 108 | u64 whole_msecs = READ_ONCE(a->max_duration); |
109 | 109 | ||
110 | remainder_ns = do_div(whole_msecs, (1000 * 1000)); | 110 | remainder_ns = do_div(whole_msecs, (1000 * 1000)); |
111 | decimal_msecs = remainder_ns / 1000; | 111 | decimal_msecs = remainder_ns / 1000; |
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 19a3e8f961c7..041096bdef86 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -115,8 +115,18 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, | |||
115 | return 5; | 115 | return 5; |
116 | } | 116 | } |
117 | 117 | ||
118 | /* Neat trick to map patch type back to the call within the | 118 | DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); |
119 | * corresponding structure. */ | 119 | |
120 | void __init native_pv_lock_init(void) | ||
121 | { | ||
122 | if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) | ||
123 | static_branch_disable(&virt_spin_lock_key); | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Neat trick to map patch type back to the call within the | ||
128 | * corresponding structure. | ||
129 | */ | ||
120 | static void *get_call_destination(u8 type) | 130 | static void *get_call_destination(u8 type) |
121 | { | 131 | { |
122 | struct paravirt_patch_template tmpl = { | 132 | struct paravirt_patch_template tmpl = { |
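Together with the qspinlock.h hunk earlier, this replaces the per-lock static_cpu_has(X86_FEATURE_HYPERVISOR) test with a static key that native_pv_lock_init() switches off once on bare metal. A minimal sketch of that pattern; the key name and the predicate below are made up:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_TRUE(example_slowpath_key);

static bool example_use_slowpath(void)
{
	/* patched to a straight-line "false" once the key is disabled */
	return static_branch_likely(&example_slowpath_key);
}

static void __init example_lock_init(void)
{
	if (running_on_bare_metal())		/* hypothetical predicate */
		static_branch_disable(&example_slowpath_key);
}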
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 65a0ccdc3050..7ab6db86b573 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -77,6 +77,7 @@ | |||
77 | #include <asm/i8259.h> | 77 | #include <asm/i8259.h> |
78 | #include <asm/realmode.h> | 78 | #include <asm/realmode.h> |
79 | #include <asm/misc.h> | 79 | #include <asm/misc.h> |
80 | #include <asm/qspinlock.h> | ||
80 | 81 | ||
81 | /* Number of siblings per CPU package */ | 82 | /* Number of siblings per CPU package */ |
82 | int smp_num_siblings = 1; | 83 | int smp_num_siblings = 1; |
@@ -1095,7 +1096,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
1095 | unsigned long flags; | 1096 | unsigned long flags; |
1096 | int err, ret = 0; | 1097 | int err, ret = 0; |
1097 | 1098 | ||
1098 | WARN_ON(irqs_disabled()); | 1099 | lockdep_assert_irqs_enabled(); |
1099 | 1100 | ||
1100 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); | 1101 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
1101 | 1102 | ||
@@ -1358,6 +1359,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1358 | pr_info("CPU0: "); | 1359 | pr_info("CPU0: "); |
1359 | print_cpu_info(&cpu_data(0)); | 1360 | print_cpu_info(&cpu_data(0)); |
1360 | 1361 | ||
1362 | native_pv_lock_init(); | ||
1363 | |||
1361 | uv_system_init(); | 1364 | uv_system_init(); |
1362 | 1365 | ||
1363 | set_mtrr_aps_delayed_init(); | 1366 | set_mtrr_aps_delayed_init(); |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7a69cf053711..a119b361b8b7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | |||
443 | 443 | ||
444 | static u64 __get_spte_lockless(u64 *sptep) | 444 | static u64 __get_spte_lockless(u64 *sptep) |
445 | { | 445 | { |
446 | return ACCESS_ONCE(*sptep); | 446 | return READ_ONCE(*sptep); |
447 | } | 447 | } |
448 | #else | 448 | #else |
449 | union split_spte { | 449 | union split_spte { |
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
4819 | * If we don't have indirect shadow pages, it means no page is | 4819 | * If we don't have indirect shadow pages, it means no page is |
4820 | * write-protected, so we can exit simply. | 4820 | * write-protected, so we can exit simply. |
4821 | */ | 4821 | */ |
4822 | if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) | 4822 | if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
4823 | return; | 4823 | return; |
4824 | 4824 | ||
4825 | remote_flush = local_flush = false; | 4825 | remote_flush = local_flush = false; |
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index ea67dc876316..01c1371f39f8 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c | |||
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
157 | return false; | 157 | return false; |
158 | 158 | ||
159 | index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); | 159 | index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); |
160 | return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); | 160 | return !!READ_ONCE(slot->arch.gfn_track[mode][index]); |
161 | } | 161 | } |
162 | 162 | ||
163 | void kvm_page_track_cleanup(struct kvm *kvm) | 163 | void kvm_page_track_cleanup(struct kvm *kvm) |
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S index bf2c6074efd2..dc2ab6ea6768 100644 --- a/arch/x86/lib/rwsem.S +++ b/arch/x86/lib/rwsem.S | |||
@@ -98,6 +98,18 @@ ENTRY(call_rwsem_down_read_failed) | |||
98 | ret | 98 | ret |
99 | ENDPROC(call_rwsem_down_read_failed) | 99 | ENDPROC(call_rwsem_down_read_failed) |
100 | 100 | ||
101 | ENTRY(call_rwsem_down_read_failed_killable) | ||
102 | FRAME_BEGIN | ||
103 | save_common_regs | ||
104 | __ASM_SIZE(push,) %__ASM_REG(dx) | ||
105 | movq %rax,%rdi | ||
106 | call rwsem_down_read_failed_killable | ||
107 | __ASM_SIZE(pop,) %__ASM_REG(dx) | ||
108 | restore_common_regs | ||
109 | FRAME_END | ||
110 | ret | ||
111 | ENDPROC(call_rwsem_down_read_failed_killable) | ||
112 | |||
101 | ENTRY(call_rwsem_down_write_failed) | 113 | ENTRY(call_rwsem_down_write_failed) |
102 | FRAME_BEGIN | 114 | FRAME_BEGIN |
103 | save_common_regs | 115 | save_common_regs |
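rwsem.S grows the assembly stub backing the new killable down_read, whose caller-facing form is down_read_killable(). A caller-side sketch (hypothetical function, assuming <linux/rwsem.h>):

#include <linux/rwsem.h>

static int read_locked_work(struct rw_semaphore *sem)
{
	if (down_read_killable(sem))
		return -EINTR;		/* a fatal signal interrupted the wait */

	/* ... read-side critical section ... */

	up_read(sem);
	return 0;
}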
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index c3521e2be396..3321b446b66c 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -67,12 +67,17 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup, | |||
67 | * wrapped around) will be set. Additionally, seeing the refcount | 67 | * wrapped around) will be set. Additionally, seeing the refcount |
68 | * reach 0 will set ZF (Zero Flag: result was zero). In each of | 68 | * reach 0 will set ZF (Zero Flag: result was zero). In each of |
69 | * these cases we want a report, since it's a boundary condition. | 69 | * these cases we want a report, since it's a boundary condition. |
70 | * | 70 | * The SF case is not reported since it indicates post-boundary |
71 | * manipulations below zero or above INT_MAX. And if none of the | ||
72 | * flags are set, something has gone very wrong, so report it. | ||
71 | */ | 73 | */ |
72 | if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { | 74 | if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { |
73 | bool zero = regs->flags & X86_EFLAGS_ZF; | 75 | bool zero = regs->flags & X86_EFLAGS_ZF; |
74 | 76 | ||
75 | refcount_error_report(regs, zero ? "hit zero" : "overflow"); | 77 | refcount_error_report(regs, zero ? "hit zero" : "overflow"); |
78 | } else if ((regs->flags & X86_EFLAGS_SF) == 0) { | ||
79 | /* Report if none of OF, ZF, nor SF are set. */ | ||
80 | refcount_error_report(regs, "unexpected saturation"); | ||
76 | } | 81 | } |
77 | 82 | ||
78 | return true; | 83 | return true; |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 6083ba462f35..13b4f19b9131 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn) | |||
547 | if (p2m_top_mfn && pfn < MAX_P2M_PFN) { | 547 | if (p2m_top_mfn && pfn < MAX_P2M_PFN) { |
548 | topidx = p2m_top_index(pfn); | 548 | topidx = p2m_top_index(pfn); |
549 | top_mfn_p = &p2m_top_mfn[topidx]; | 549 | top_mfn_p = &p2m_top_mfn[topidx]; |
550 | mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]); | 550 | mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]); |
551 | 551 | ||
552 | BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); | 552 | BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); |
553 | 553 | ||
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 08324c64005d..02f3445a2b5f 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | 12 | ||
13 | #include <asm/paravirt.h> | 13 | #include <asm/paravirt.h> |
14 | #include <asm/qspinlock.h> | ||
14 | 15 | ||
15 | #include <xen/interface/xen.h> | 16 | #include <xen/interface/xen.h> |
16 | #include <xen/events.h> | 17 | #include <xen/events.h> |
@@ -81,8 +82,11 @@ void xen_init_lock_cpu(int cpu) | |||
81 | int irq; | 82 | int irq; |
82 | char *name; | 83 | char *name; |
83 | 84 | ||
84 | if (!xen_pvspin) | 85 | if (!xen_pvspin) { |
86 | if (cpu == 0) | ||
87 | static_branch_disable(&virt_spin_lock_key); | ||
85 | return; | 88 | return; |
89 | } | ||
86 | 90 | ||
87 | WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", | 91 | WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", |
88 | cpu, per_cpu(lock_kicker_irq, cpu)); | 92 | cpu, per_cpu(lock_kicker_irq, cpu)); |
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 3bb49681ee24..c6e1290dcbb7 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h | |||
@@ -33,8 +33,6 @@ | |||
33 | 33 | ||
34 | #define arch_spin_is_locked(x) ((x)->slock != 0) | 34 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
35 | 35 | ||
36 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
37 | |||
38 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 36 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
39 | { | 37 | { |
40 | unsigned long tmp; | 38 | unsigned long tmp; |
@@ -97,8 +95,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
97 | * 0x80000000 one writer owns the rwlock, no other writers, no readers | 95 | * 0x80000000 one writer owns the rwlock, no other writers, no readers |
98 | */ | 96 | */ |
99 | 97 | ||
100 | #define arch_write_can_lock(x) ((x)->lock == 0) | ||
101 | |||
102 | static inline void arch_write_lock(arch_rwlock_t *rw) | 98 | static inline void arch_write_lock(arch_rwlock_t *rw) |
103 | { | 99 | { |
104 | unsigned long tmp; | 100 | unsigned long tmp; |
@@ -200,7 +196,4 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
200 | : "memory"); | 196 | : "memory"); |
201 | } | 197 | } |
202 | 198 | ||
203 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
204 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
205 | |||
206 | #endif /* _XTENSA_SPINLOCK_H */ | 199 | #endif /* _XTENSA_SPINLOCK_H */ |
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c index 4dc0c1b43f4b..2f7eb66c23ec 100644 --- a/arch/xtensa/platforms/xtfpga/lcd.c +++ b/arch/xtensa/platforms/xtfpga/lcd.c | |||
@@ -34,23 +34,23 @@ | |||
34 | static void lcd_put_byte(u8 *addr, u8 data) | 34 | static void lcd_put_byte(u8 *addr, u8 data) |
35 | { | 35 | { |
36 | #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS | 36 | #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
37 | ACCESS_ONCE(*addr) = data; | 37 | WRITE_ONCE(*addr, data); |
38 | #else | 38 | #else |
39 | ACCESS_ONCE(*addr) = data & 0xf0; | 39 | WRITE_ONCE(*addr, data & 0xf0); |
40 | ACCESS_ONCE(*addr) = (data << 4) & 0xf0; | 40 | WRITE_ONCE(*addr, (data << 4) & 0xf0); |
41 | #endif | 41 | #endif |
42 | } | 42 | } |
43 | 43 | ||
44 | static int __init lcd_init(void) | 44 | static int __init lcd_init(void) |
45 | { | 45 | { |
46 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 46 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
47 | mdelay(5); | 47 | mdelay(5); |
48 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 48 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
49 | udelay(200); | 49 | udelay(200); |
50 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT; | 50 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); |
51 | udelay(50); | 51 | udelay(50); |
52 | #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS | 52 | #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS |
53 | ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT; | 53 | WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); |
54 | udelay(50); | 54 | udelay(50); |
55 | lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); | 55 | lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); |
56 | udelay(50); | 56 | udelay(50); |
diff --git a/block/bio.c b/block/bio.c index 101c2a9b5481..cc60213e56d8 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -917,17 +917,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) | |||
917 | } | 917 | } |
918 | EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); | 918 | EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); |
919 | 919 | ||
920 | struct submit_bio_ret { | ||
921 | struct completion event; | ||
922 | int error; | ||
923 | }; | ||
924 | |||
925 | static void submit_bio_wait_endio(struct bio *bio) | 920 | static void submit_bio_wait_endio(struct bio *bio) |
926 | { | 921 | { |
927 | struct submit_bio_ret *ret = bio->bi_private; | 922 | complete(bio->bi_private); |
928 | |||
929 | ret->error = blk_status_to_errno(bio->bi_status); | ||
930 | complete(&ret->event); | ||
931 | } | 923 | } |
932 | 924 | ||
933 | /** | 925 | /** |
@@ -943,16 +935,15 @@ static void submit_bio_wait_endio(struct bio *bio) | |||
943 | */ | 935 | */ |
944 | int submit_bio_wait(struct bio *bio) | 936 | int submit_bio_wait(struct bio *bio) |
945 | { | 937 | { |
946 | struct submit_bio_ret ret; | 938 | DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map); |
947 | 939 | ||
948 | init_completion(&ret.event); | 940 | bio->bi_private = &done; |
949 | bio->bi_private = &ret; | ||
950 | bio->bi_end_io = submit_bio_wait_endio; | 941 | bio->bi_end_io = submit_bio_wait_endio; |
951 | bio->bi_opf |= REQ_SYNC; | 942 | bio->bi_opf |= REQ_SYNC; |
952 | submit_bio(bio); | 943 | submit_bio(bio); |
953 | wait_for_completion_io(&ret.event); | 944 | wait_for_completion_io(&done); |
954 | 945 | ||
955 | return ret.error; | 946 | return blk_status_to_errno(bio->bi_status); |
956 | } | 947 | } |
957 | EXPORT_SYMBOL(submit_bio_wait); | 948 | EXPORT_SYMBOL(submit_bio_wait); |
958 | 949 | ||
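Editorial note: the submit_bio_wait() rewrite above drops the on-stack submit_bio_ret wrapper — the completion itself travels through bio->bi_private, the error is read back from bio->bi_status after the wait, and DECLARE_COMPLETION_ONSTACK_MAP ties the completion to the gendisk's lockdep map so completions can be classified per disk. Callers are unaffected; a hedged sketch of typical usage (setup details elided and illustrative only):

    struct bio *bio = bio_alloc(GFP_KERNEL, 1);
    int err;

    /* ... point bio at the target disk/sector and attach pages ... */
    err = submit_bio_wait(bio);       /* sleeps until the end_io fires */
    if (err)
            pr_err("synchronous bio failed: %d\n", err);
    bio_put(bio);
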
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 6a9a0f03a67b..d822530e6aea 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat) | |||
261 | 261 | ||
262 | static u64 rwb_sync_issue_lat(struct rq_wb *rwb) | 262 | static u64 rwb_sync_issue_lat(struct rq_wb *rwb) |
263 | { | 263 | { |
264 | u64 now, issue = ACCESS_ONCE(rwb->sync_issue); | 264 | u64 now, issue = READ_ONCE(rwb->sync_issue); |
265 | 265 | ||
266 | if (!issue || !rwb->sync_cookie) | 266 | if (!issue || !rwb->sync_cookie) |
267 | return 0; | 267 | return 0; |
diff --git a/block/genhd.c b/block/genhd.c index dd305c65ffb0..630c0da6cfcf 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1354,13 +1354,7 @@ dev_t blk_lookup_devt(const char *name, int partno) | |||
1354 | } | 1354 | } |
1355 | EXPORT_SYMBOL(blk_lookup_devt); | 1355 | EXPORT_SYMBOL(blk_lookup_devt); |
1356 | 1356 | ||
1357 | struct gendisk *alloc_disk(int minors) | 1357 | struct gendisk *__alloc_disk_node(int minors, int node_id) |
1358 | { | ||
1359 | return alloc_disk_node(minors, NUMA_NO_NODE); | ||
1360 | } | ||
1361 | EXPORT_SYMBOL(alloc_disk); | ||
1362 | |||
1363 | struct gendisk *alloc_disk_node(int minors, int node_id) | ||
1364 | { | 1358 | { |
1365 | struct gendisk *disk; | 1359 | struct gendisk *disk; |
1366 | struct disk_part_tbl *ptbl; | 1360 | struct disk_part_tbl *ptbl; |
@@ -1411,7 +1405,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id) | |||
1411 | } | 1405 | } |
1412 | return disk; | 1406 | return disk; |
1413 | } | 1407 | } |
1414 | EXPORT_SYMBOL(alloc_disk_node); | 1408 | EXPORT_SYMBOL(__alloc_disk_node); |
1415 | 1409 | ||
1416 | struct kobject *get_disk(struct gendisk *disk) | 1410 | struct kobject *get_disk(struct gendisk *disk) |
1417 | { | 1411 | { |
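Editorial note: with the out-of-line alloc_disk() and alloc_disk_node() gone from genhd.c, the exported entry point is __alloc_disk_node(); presumably the old names survive as header-side wrappers so each call site can carry its own lockdep class for the per-gendisk completion map used above. A sketch under that assumption (illustrative, not copied from the header):

    /* Assumed shape of the wrappers: a macro, so every call site gets
     * its own static lockdep class for the disk's completion map. */
    #define alloc_disk_node(minors, node_id)                              \
    ({                                                                    \
            static struct lock_class_key __key;                           \
            struct gendisk *__disk = __alloc_disk_node(minors, node_id);  \
            if (__disk)                                                   \
                    lockdep_init_map(&__disk->lockdep_map,                \
                                     "gendisk_completion", &__key, 0);    \
            __disk;                                                       \
    })
    #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
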
diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..4b8ba2a75a4d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev) | |||
668 | * so be careful about accessing it. dev->bus and dev->class should | 668 | * so be careful about accessing it. dev->bus and dev->class should |
669 | * never change once they are set, so they don't need special care. | 669 | * never change once they are set, so they don't need special care. |
670 | */ | 670 | */ |
671 | drv = ACCESS_ONCE(dev->driver); | 671 | drv = READ_ONCE(dev->driver); |
672 | return drv ? drv->name : | 672 | return drv ? drv->name : |
673 | (dev->bus ? dev->bus->name : | 673 | (dev->bus ? dev->bus->name : |
674 | (dev->class ? dev->class->name : "")); | 674 | (dev->class ? dev->class->name : "")); |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..41d7c2b99f69 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) | |||
134 | if (!dev->power.use_autosuspend) | 134 | if (!dev->power.use_autosuspend) |
135 | goto out; | 135 | goto out; |
136 | 136 | ||
137 | autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); | 137 | autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); |
138 | if (autosuspend_delay < 0) | 138 | if (autosuspend_delay < 0) |
139 | goto out; | 139 | goto out; |
140 | 140 | ||
141 | last_busy = ACCESS_ONCE(dev->power.last_busy); | 141 | last_busy = READ_ONCE(dev->power.last_busy); |
142 | elapsed = jiffies - last_busy; | 142 | elapsed = jiffies - last_busy; |
143 | if (elapsed < 0) | 143 | if (elapsed < 0) |
144 | goto out; /* jiffies has wrapped around. */ | 144 | goto out; /* jiffies has wrapped around. */ |
diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..6c7ccac2679e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) | |||
641 | return; | 641 | return; |
642 | 642 | ||
643 | retry: | 643 | retry: |
644 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 644 | entropy_count = orig = READ_ONCE(r->entropy_count); |
645 | if (nfrac < 0) { | 645 | if (nfrac < 0) { |
646 | /* Debit */ | 646 | /* Debit */ |
647 | entropy_count += nfrac; | 647 | entropy_count += nfrac; |
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
1265 | 1265 | ||
1266 | /* Can we pull enough? */ | 1266 | /* Can we pull enough? */ |
1267 | retry: | 1267 | retry: |
1268 | entropy_count = orig = ACCESS_ONCE(r->entropy_count); | 1268 | entropy_count = orig = READ_ONCE(r->entropy_count); |
1269 | ibytes = nbytes; | 1269 | ibytes = nbytes; |
1270 | /* never pull more than available */ | 1270 | /* never pull more than available */ |
1271 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); | 1271 | have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); |
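Editorial note: the random.c hunks show the snapshot-and-retry idiom — entropy_count is read once with READ_ONCE(), a new value is computed from that snapshot, and the update is published with cmpxchg(), retrying if another CPU changed the counter in between. Reduced sketch (the recompute helper is hypothetical):

    int orig, new;

    retry:
            orig = READ_ONCE(r->entropy_count);
            new  = compute_new_count(orig, nfrac);   /* hypothetical helper */
            if (cmpxchg(&r->entropy_count, orig, new) != orig)
                    goto retry;
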
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 39e489a96ad7..60da2537bef9 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c | |||
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) | |||
71 | if (readl_relaxed(timer->control) & timer->match_mask) { | 71 | if (readl_relaxed(timer->control) & timer->match_mask) { |
72 | writel_relaxed(timer->match_mask, timer->control); | 72 | writel_relaxed(timer->match_mask, timer->control); |
73 | 73 | ||
74 | event_handler = ACCESS_ONCE(timer->evt.event_handler); | 74 | event_handler = READ_ONCE(timer->evt.event_handler); |
75 | if (event_handler) | 75 | if (event_handler) |
76 | event_handler(&timer->evt); | 76 | event_handler(&timer->evt); |
77 | return IRQ_HANDLED; | 77 | return IRQ_HANDLED; |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d258953ff488..f4f258075b89 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
172 | 172 | ||
173 | while (rd_reg32(&jrp->rregs->outring_used)) { | 173 | while (rd_reg32(&jrp->rregs->outring_used)) { |
174 | 174 | ||
175 | head = ACCESS_ONCE(jrp->head); | 175 | head = READ_ONCE(jrp->head); |
176 | 176 | ||
177 | spin_lock(&jrp->outlock); | 177 | spin_lock(&jrp->outlock); |
178 | 178 | ||
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
341 | spin_lock_bh(&jrp->inplock); | 341 | spin_lock_bh(&jrp->inplock); |
342 | 342 | ||
343 | head = jrp->head; | 343 | head = jrp->head; |
344 | tail = ACCESS_ONCE(jrp->tail); | 344 | tail = READ_ONCE(jrp->tail); |
345 | 345 | ||
346 | if (!rd_reg32(&jrp->rregs->inpring_avail) || | 346 | if (!rd_reg32(&jrp->rregs->inpring_avail) || |
347 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { | 347 | CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { |
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 874ddf5e9087..0f20f5ec9617 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, | |||
193 | ktime_t start = wmem->start, now = ktime_get(); | 193 | ktime_t start = wmem->start, now = ktime_get(); |
194 | ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); | 194 | ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); |
195 | 195 | ||
196 | while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { | 196 | while (!(READ_ONCE(csb->flags) & CSB_V)) { |
197 | cpu_relax(); | 197 | cpu_relax(); |
198 | now = ktime_get(); | 198 | now = ktime_get(); |
199 | if (ktime_after(now, timeout)) | 199 | if (ktime_after(now, timeout)) |
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 346c4987b284..11d6419788c2 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c | |||
@@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file, | |||
175 | /* | 175 | /* |
176 | * To trigger the error, we need to read the data back | 176 | * To trigger the error, we need to read the data back |
177 | * (the data was written with errors above). | 177 | * (the data was written with errors above). |
178 | * The ACCESS_ONCE macros and printk are used to prevent the | 178 | * The READ_ONCE macros and printk are used to prevent the |
179 | * the compiler optimizing these reads out. | 179 | * the compiler optimizing these reads out. |
180 | */ | 180 | */ |
181 | reg = ACCESS_ONCE(ptemp[0]); | 181 | reg = READ_ONCE(ptemp[0]); |
182 | read_reg = ACCESS_ONCE(ptemp[1]); | 182 | read_reg = READ_ONCE(ptemp[1]); |
183 | /* Force Read */ | 183 | /* Force Read */ |
184 | rmb(); | 184 | rmb(); |
185 | 185 | ||
@@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file, | |||
618 | for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) { | 618 | for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) { |
619 | /* Read data so we're in the correct state */ | 619 | /* Read data so we're in the correct state */ |
620 | rmb(); | 620 | rmb(); |
621 | if (ACCESS_ONCE(ptemp[i])) | 621 | if (READ_ONCE(ptemp[i])) |
622 | result = -1; | 622 | result = -1; |
623 | /* Toggle Error bit (it is latched), leave ECC enabled */ | 623 | /* Toggle Error bit (it is latched), leave ECC enabled */ |
624 | writel(error_mask, (drvdata->base + priv->set_err_ofst)); | 624 | writel(error_mask, (drvdata->base + priv->set_err_ofst)); |
@@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file, | |||
635 | 635 | ||
636 | /* Read out written data. ECC error caused here */ | 636 | /* Read out written data. ECC error caused here */ |
637 | for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++) | 637 | for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++) |
638 | if (ACCESS_ONCE(ptemp[i]) != i) | 638 | if (READ_ONCE(ptemp[i]) != i) |
639 | edac_printk(KERN_ERR, EDAC_DEVICE, | 639 | edac_printk(KERN_ERR, EDAC_DEVICE, |
640 | "Read doesn't match written data\n"); | 640 | "Read doesn't match written data\n"); |
641 | 641 | ||
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267dc25..ccf52368a073 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c | |||
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
734 | __le16 res_count, next_res_count; | 734 | __le16 res_count, next_res_count; |
735 | 735 | ||
736 | i = ar_first_buffer_index(ctx); | 736 | i = ar_first_buffer_index(ctx); |
737 | res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); | 737 | res_count = READ_ONCE(ctx->descriptors[i].res_count); |
738 | 738 | ||
739 | /* A buffer that is not yet completely filled must be the last one. */ | 739 | /* A buffer that is not yet completely filled must be the last one. */ |
740 | while (i != last && res_count == 0) { | 740 | while (i != last && res_count == 0) { |
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
742 | /* Peek at the next descriptor. */ | 742 | /* Peek at the next descriptor. */ |
743 | next_i = ar_next_buffer_index(i); | 743 | next_i = ar_next_buffer_index(i); |
744 | rmb(); /* read descriptors in order */ | 744 | rmb(); /* read descriptors in order */ |
745 | next_res_count = ACCESS_ONCE( | 745 | next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); |
746 | ctx->descriptors[next_i].res_count); | ||
747 | /* | 746 | /* |
748 | * If the next descriptor is still empty, we must stop at this | 747 | * If the next descriptor is still empty, we must stop at this |
749 | * descriptor. | 748 | * descriptor. |
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, | |||
759 | if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { | 758 | if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { |
760 | next_i = ar_next_buffer_index(next_i); | 759 | next_i = ar_next_buffer_index(next_i); |
761 | rmb(); | 760 | rmb(); |
762 | next_res_count = ACCESS_ONCE( | 761 | next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); |
763 | ctx->descriptors[next_i].res_count); | ||
764 | if (next_res_count != cpu_to_le16(PAGE_SIZE)) | 762 | if (next_res_count != cpu_to_le16(PAGE_SIZE)) |
765 | goto next_buffer_is_active; | 763 | goto next_buffer_is_active; |
766 | } | 764 | } |
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context, | |||
2812 | u32 buffer_dma; | 2810 | u32 buffer_dma; |
2813 | 2811 | ||
2814 | req_count = le16_to_cpu(last->req_count); | 2812 | req_count = le16_to_cpu(last->req_count); |
2815 | res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); | 2813 | res_count = le16_to_cpu(READ_ONCE(last->res_count)); |
2816 | completed = req_count - res_count; | 2814 | completed = req_count - res_count; |
2817 | buffer_dma = le32_to_cpu(last->data_address); | 2815 | buffer_dma = le32_to_cpu(last->data_address); |
2818 | 2816 | ||
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c index a01461d63f68..00de793e6423 100644 --- a/drivers/firmware/tegra/ivc.c +++ b/drivers/firmware/tegra/ivc.c | |||
@@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, | |||
99 | { | 99 | { |
100 | /* | 100 | /* |
101 | * This function performs multiple checks on the same values with | 101 | * This function performs multiple checks on the same values with |
102 | * security implications, so create snapshots with ACCESS_ONCE() to | 102 | * security implications, so create snapshots with READ_ONCE() to |
103 | * ensure that these checks use the same values. | 103 | * ensure that these checks use the same values. |
104 | */ | 104 | */ |
105 | u32 tx = ACCESS_ONCE(header->tx.count); | 105 | u32 tx = READ_ONCE(header->tx.count); |
106 | u32 rx = ACCESS_ONCE(header->rx.count); | 106 | u32 rx = READ_ONCE(header->rx.count); |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * Perform an over-full check to prevent denial of service attacks | 109 | * Perform an over-full check to prevent denial of service attacks |
@@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, | |||
124 | static inline bool tegra_ivc_full(struct tegra_ivc *ivc, | 124 | static inline bool tegra_ivc_full(struct tegra_ivc *ivc, |
125 | struct tegra_ivc_header *header) | 125 | struct tegra_ivc_header *header) |
126 | { | 126 | { |
127 | u32 tx = ACCESS_ONCE(header->tx.count); | 127 | u32 tx = READ_ONCE(header->tx.count); |
128 | u32 rx = ACCESS_ONCE(header->rx.count); | 128 | u32 rx = READ_ONCE(header->rx.count); |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * Invalid cases where the counters indicate that the queue is over | 131 | * Invalid cases where the counters indicate that the queue is over |
@@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc, | |||
137 | static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, | 137 | static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, |
138 | struct tegra_ivc_header *header) | 138 | struct tegra_ivc_header *header) |
139 | { | 139 | { |
140 | u32 tx = ACCESS_ONCE(header->tx.count); | 140 | u32 tx = READ_ONCE(header->tx.count); |
141 | u32 rx = ACCESS_ONCE(header->rx.count); | 141 | u32 rx = READ_ONCE(header->rx.count); |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * This function isn't expected to be used in scenarios where an | 144 | * This function isn't expected to be used in scenarios where an |
@@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, | |||
151 | 151 | ||
152 | static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) | 152 | static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) |
153 | { | 153 | { |
154 | ACCESS_ONCE(ivc->tx.channel->tx.count) = | 154 | WRITE_ONCE(ivc->tx.channel->tx.count, |
155 | ACCESS_ONCE(ivc->tx.channel->tx.count) + 1; | 155 | READ_ONCE(ivc->tx.channel->tx.count) + 1); |
156 | 156 | ||
157 | if (ivc->tx.position == ivc->num_frames - 1) | 157 | if (ivc->tx.position == ivc->num_frames - 1) |
158 | ivc->tx.position = 0; | 158 | ivc->tx.position = 0; |
@@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) | |||
162 | 162 | ||
163 | static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) | 163 | static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) |
164 | { | 164 | { |
165 | ACCESS_ONCE(ivc->rx.channel->rx.count) = | 165 | WRITE_ONCE(ivc->rx.channel->rx.count, |
166 | ACCESS_ONCE(ivc->rx.channel->rx.count) + 1; | 166 | READ_ONCE(ivc->rx.channel->rx.count) + 1); |
167 | 167 | ||
168 | if (ivc->rx.position == ivc->num_frames - 1) | 168 | if (ivc->rx.position == ivc->num_frames - 1) |
169 | ivc->rx.position = 0; | 169 | ivc->rx.position = 0; |
@@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc) | |||
428 | 428 | ||
429 | /* Copy the receiver's state out of shared memory. */ | 429 | /* Copy the receiver's state out of shared memory. */ |
430 | tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); | 430 | tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); |
431 | state = ACCESS_ONCE(ivc->rx.channel->tx.state); | 431 | state = READ_ONCE(ivc->rx.channel->tx.state); |
432 | 432 | ||
433 | if (state == TEGRA_IVC_STATE_SYNC) { | 433 | if (state == TEGRA_IVC_STATE_SYNC) { |
434 | offset = offsetof(struct tegra_ivc_header, tx.count); | 434 | offset = offsetof(struct tegra_ivc_header, tx.count); |
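Editorial note: tegra_ivc_advance_tx()/_rx() above illustrate the write side of the conversion — a read-modify-write of a counter shared with the other end of the channel becomes an explicit READ_ONCE()/WRITE_ONCE() pair, while the increment itself stays plain because only one side ever writes each counter. The same idiom in isolation (field names taken from the hunk, surrounding context assumed):

    /* Single-writer counter in shared memory: mark the load and the
     * store so the compiler cannot tear, fuse or re-order them. */
    WRITE_ONCE(ivc->tx.channel->tx.count,
               READ_ONCE(ivc->tx.channel->tx.count) + 1);
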
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 333bad749067..303b5e099a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) | |||
260 | */ | 260 | */ |
261 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) | 261 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) |
262 | { | 262 | { |
263 | uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); | 263 | uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); |
264 | struct dma_fence *fence, **ptr; | 264 | struct dma_fence *fence, **ptr; |
265 | int r; | 265 | int r; |
266 | 266 | ||
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) | |||
300 | amdgpu_fence_process(ring); | 300 | amdgpu_fence_process(ring); |
301 | emitted = 0x100000000ull; | 301 | emitted = 0x100000000ull; |
302 | emitted -= atomic_read(&ring->fence_drv.last_seq); | 302 | emitted -= atomic_read(&ring->fence_drv.last_seq); |
303 | emitted += ACCESS_ONCE(ring->fence_drv.sync_seq); | 303 | emitted += READ_ONCE(ring->fence_drv.sync_seq); |
304 | return lower_32_bits(emitted); | 304 | return lower_32_bits(emitted); |
305 | } | 305 | } |
306 | 306 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..6149a47fe63d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) | |||
788 | seq_printf(m, "\t0x%08x: %12ld byte %s", | 788 | seq_printf(m, "\t0x%08x: %12ld byte %s", |
789 | id, amdgpu_bo_size(bo), placement); | 789 | id, amdgpu_bo_size(bo), placement); |
790 | 790 | ||
791 | offset = ACCESS_ONCE(bo->tbo.mem.start); | 791 | offset = READ_ONCE(bo->tbo.mem.start); |
792 | if (offset != AMDGPU_BO_INVALID_OFFSET) | 792 | if (offset != AMDGPU_BO_INVALID_OFFSET) |
793 | seq_printf(m, " @ 0x%010Lx", offset); | 793 | seq_printf(m, " @ 0x%010Lx", offset); |
794 | 794 | ||
795 | pin_count = ACCESS_ONCE(bo->pin_count); | 795 | pin_count = READ_ONCE(bo->pin_count); |
796 | if (pin_count) | 796 | if (pin_count) |
797 | seq_printf(m, " pin count %d", pin_count); | 797 | seq_printf(m, " pin count %d", pin_count); |
798 | seq_printf(m, "\n"); | 798 | seq_printf(m, "\n"); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..a25f6c72f219 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) | |||
187 | if (kfifo_is_empty(&entity->job_queue)) | 187 | if (kfifo_is_empty(&entity->job_queue)) |
188 | return false; | 188 | return false; |
189 | 189 | ||
190 | if (ACCESS_ONCE(entity->dependency)) | 190 | if (READ_ONCE(entity->dependency)) |
191 | return false; | 191 | return false; |
192 | 192 | ||
193 | return true; | 193 | return true; |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..cf3deb283da5 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
451 | else | 451 | else |
452 | r = 0; | 452 | r = 0; |
453 | 453 | ||
454 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); | 454 | cur_placement = READ_ONCE(robj->tbo.mem.mem_type); |
455 | args->domain = radeon_mem_type_to_domain(cur_placement); | 455 | args->domain = radeon_mem_type_to_domain(cur_placement); |
456 | drm_gem_object_put_unlocked(gobj); | 456 | drm_gem_object_put_unlocked(gobj); |
457 | return r; | 457 | return r; |
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
481 | r = ret; | 481 | r = ret; |
482 | 482 | ||
483 | /* Flush HDP cache via MMIO if necessary */ | 483 | /* Flush HDP cache via MMIO if necessary */ |
484 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); | 484 | cur_placement = READ_ONCE(robj->tbo.mem.mem_type); |
485 | if (rdev->asic->mmio_hdp_flush && | 485 | if (rdev->asic->mmio_hdp_flush && |
486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) | 486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
487 | robj->rdev->asic->mmio_hdp_flush(rdev); | 487 | robj->rdev->asic->mmio_hdp_flush(rdev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index a552e4ea5440..6ac094ee8983 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, | |||
904 | if (unlikely(drm_is_render_client(file_priv))) | 904 | if (unlikely(drm_is_render_client(file_priv))) |
905 | require_exist = true; | 905 | require_exist = true; |
906 | 906 | ||
907 | if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { | 907 | if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { |
908 | DRM_ERROR("Locked master refused legacy " | 908 | DRM_ERROR("Locked master refused legacy " |
909 | "surface reference.\n"); | 909 | "surface reference.\n"); |
910 | return -EACCES; | 910 | return -EACCES; |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..97bea2e1aa6a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
380 | if (sc->flags & SCF_FROZEN) { | 380 | if (sc->flags & SCF_FROZEN) { |
381 | wait_event_interruptible_timeout( | 381 | wait_event_interruptible_timeout( |
382 | dd->event_queue, | 382 | dd->event_queue, |
383 | !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), | 383 | !(READ_ONCE(dd->flags) & HFI1_FROZEN), |
384 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); | 384 | msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); |
385 | if (dd->flags & HFI1_FROZEN) | 385 | if (dd->flags & HFI1_FROZEN) |
386 | return -ENOLCK; | 386 | return -ENOLCK; |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 7108a4b5e94c..75e740780285 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -1423,14 +1423,14 @@ retry: | |||
1423 | goto done; | 1423 | goto done; |
1424 | } | 1424 | } |
1425 | /* copy from receiver cache line and recalculate */ | 1425 | /* copy from receiver cache line and recalculate */ |
1426 | sc->alloc_free = ACCESS_ONCE(sc->free); | 1426 | sc->alloc_free = READ_ONCE(sc->free); |
1427 | avail = | 1427 | avail = |
1428 | (unsigned long)sc->credits - | 1428 | (unsigned long)sc->credits - |
1429 | (sc->fill - sc->alloc_free); | 1429 | (sc->fill - sc->alloc_free); |
1430 | if (blocks > avail) { | 1430 | if (blocks > avail) { |
1431 | /* still no room, actively update */ | 1431 | /* still no room, actively update */ |
1432 | sc_release_update(sc); | 1432 | sc_release_update(sc); |
1433 | sc->alloc_free = ACCESS_ONCE(sc->free); | 1433 | sc->alloc_free = READ_ONCE(sc->free); |
1434 | trycount++; | 1434 | trycount++; |
1435 | goto retry; | 1435 | goto retry; |
1436 | } | 1436 | } |
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc) | |||
1667 | 1667 | ||
1668 | /* call sent buffer callbacks */ | 1668 | /* call sent buffer callbacks */ |
1669 | code = -1; /* code not yet set */ | 1669 | code = -1; /* code not yet set */ |
1670 | head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */ | 1670 | head = READ_ONCE(sc->sr_head); /* snapshot the head */ |
1671 | tail = sc->sr_tail; | 1671 | tail = sc->sr_tail; |
1672 | while (head != tail) { | 1672 | while (head != tail) { |
1673 | pbuf = &sc->sr[tail].pbuf; | 1673 | pbuf = &sc->sr[tail].pbuf; |
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b3291f0fde9a..a7fc664f0d4e 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c | |||
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp) | |||
363 | 363 | ||
364 | again: | 364 | again: |
365 | smp_read_barrier_depends(); /* see post_one_send() */ | 365 | smp_read_barrier_depends(); /* see post_one_send() */ |
366 | if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) | 366 | if (sqp->s_last == READ_ONCE(sqp->s_head)) |
367 | goto clr_busy; | 367 | goto clr_busy; |
368 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); | 368 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); |
369 | 369 | ||
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 6781bcdb10b3..08346d25441c 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
@@ -1725,7 +1725,7 @@ retry: | |||
1725 | 1725 | ||
1726 | swhead = sde->descq_head & sde->sdma_mask; | 1726 | swhead = sde->descq_head & sde->sdma_mask; |
1727 | /* this code is really bad for cache line trading */ | 1727 | /* this code is really bad for cache line trading */ |
1728 | swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 1728 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
1729 | cnt = sde->descq_cnt; | 1729 | cnt = sde->descq_cnt; |
1730 | 1730 | ||
1731 | if (swhead < swtail) | 1731 | if (swhead < swtail) |
@@ -1872,7 +1872,7 @@ retry: | |||
1872 | if ((status & sde->idle_mask) && !idle_check_done) { | 1872 | if ((status & sde->idle_mask) && !idle_check_done) { |
1873 | u16 swtail; | 1873 | u16 swtail; |
1874 | 1874 | ||
1875 | swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 1875 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
1876 | if (swtail != hwhead) { | 1876 | if (swtail != hwhead) { |
1877 | hwhead = (u16)read_sde_csr(sde, SD(HEAD)); | 1877 | hwhead = (u16)read_sde_csr(sde, SD(HEAD)); |
1878 | idle_check_done = 1; | 1878 | idle_check_done = 1; |
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) | |||
2222 | u16 len; | 2222 | u16 len; |
2223 | 2223 | ||
2224 | head = sde->descq_head & sde->sdma_mask; | 2224 | head = sde->descq_head & sde->sdma_mask; |
2225 | tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; | 2225 | tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
2226 | seq_printf(s, SDE_FMT, sde->this_idx, | 2226 | seq_printf(s, SDE_FMT, sde->this_idx, |
2227 | sde->cpu, | 2227 | sde->cpu, |
2228 | sdma_state_name(sde->state.current_state), | 2228 | sdma_state_name(sde->state.current_state), |
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde) | |||
3305 | return -EINVAL; | 3305 | return -EINVAL; |
3306 | } | 3306 | } |
3307 | while (1) { | 3307 | while (1) { |
3308 | nr = ffz(ACCESS_ONCE(sde->ahg_bits)); | 3308 | nr = ffz(READ_ONCE(sde->ahg_bits)); |
3309 | if (nr > 31) { | 3309 | if (nr > 31) { |
3310 | trace_hfi1_ahg_allocate(sde, -ENOSPC); | 3310 | trace_hfi1_ahg_allocate(sde, -ENOSPC); |
3311 | return -ENOSPC; | 3311 | return -ENOSPC; |
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 107011d8613b..374c59784950 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h | |||
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde) | |||
445 | { | 445 | { |
446 | return sde->descq_cnt - | 446 | return sde->descq_cnt - |
447 | (sde->descq_tail - | 447 | (sde->descq_tail - |
448 | ACCESS_ONCE(sde->descq_head)) - 1; | 448 | READ_ONCE(sde->descq_head)) - 1; |
449 | } | 449 | } |
450 | 450 | ||
451 | static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) | 451 | static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) |
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 0b646173ca22..9a31c585427f 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c | |||
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
80 | goto bail; | 80 | goto bail; |
81 | /* We are in the error state, flush the work request. */ | 81 | /* We are in the error state, flush the work request. */ |
82 | smp_read_barrier_depends(); /* see post_one_send() */ | 82 | smp_read_barrier_depends(); /* see post_one_send() */ |
83 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 83 | if (qp->s_last == READ_ONCE(qp->s_head)) |
84 | goto bail; | 84 | goto bail; |
85 | /* If DMAs are in progress, we can't flush immediately. */ | 85 | /* If DMAs are in progress, we can't flush immediately. */ |
86 | if (iowait_sdma_pending(&priv->s_iowait)) { | 86 | if (iowait_sdma_pending(&priv->s_iowait)) { |
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
121 | goto bail; | 121 | goto bail; |
122 | /* Check if send work queue is empty. */ | 122 | /* Check if send work queue is empty. */ |
123 | smp_read_barrier_depends(); /* see post_one_send() */ | 123 | smp_read_barrier_depends(); /* see post_one_send() */ |
124 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { | 124 | if (qp->s_cur == READ_ONCE(qp->s_head)) { |
125 | clear_ahg(qp); | 125 | clear_ahg(qp); |
126 | goto bail; | 126 | goto bail; |
127 | } | 127 | } |
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 2ba74fdd6f15..7fec6b984e3e 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c | |||
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
487 | goto bail; | 487 | goto bail; |
488 | /* We are in the error state, flush the work request. */ | 488 | /* We are in the error state, flush the work request. */ |
489 | smp_read_barrier_depends(); /* see post_one_send */ | 489 | smp_read_barrier_depends(); /* see post_one_send */ |
490 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 490 | if (qp->s_last == READ_ONCE(qp->s_head)) |
491 | goto bail; | 491 | goto bail; |
492 | /* If DMAs are in progress, we can't flush immediately. */ | 492 | /* If DMAs are in progress, we can't flush immediately. */ |
493 | if (iowait_sdma_pending(&priv->s_iowait)) { | 493 | if (iowait_sdma_pending(&priv->s_iowait)) { |
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) | |||
501 | 501 | ||
502 | /* see post_one_send() */ | 502 | /* see post_one_send() */ |
503 | smp_read_barrier_depends(); | 503 | smp_read_barrier_depends(); |
504 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 504 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
505 | goto bail; | 505 | goto bail; |
506 | 506 | ||
507 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); | 507 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c0c0e0445cbf..8ec6e8a8d6f7 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, | |||
276 | /* Wait until all requests have been freed. */ | 276 | /* Wait until all requests have been freed. */ |
277 | wait_event_interruptible( | 277 | wait_event_interruptible( |
278 | pq->wait, | 278 | pq->wait, |
279 | (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); | 279 | (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); |
280 | kfree(pq->reqs); | 280 | kfree(pq->reqs); |
281 | kfree(pq->req_in_use); | 281 | kfree(pq->req_in_use); |
282 | kmem_cache_destroy(pq->txreq_cache); | 282 | kmem_cache_destroy(pq->txreq_cache); |
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, | |||
591 | if (ret != -EBUSY) { | 591 | if (ret != -EBUSY) { |
592 | req->status = ret; | 592 | req->status = ret; |
593 | WRITE_ONCE(req->has_error, 1); | 593 | WRITE_ONCE(req->has_error, 1); |
594 | if (ACCESS_ONCE(req->seqcomp) == | 594 | if (READ_ONCE(req->seqcomp) == |
595 | req->seqsubmitted - 1) | 595 | req->seqsubmitted - 1) |
596 | goto free_req; | 596 | goto free_req; |
597 | return ret; | 597 | return ret; |
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) | |||
825 | */ | 825 | */ |
826 | if (req->data_len) { | 826 | if (req->data_len) { |
827 | iovec = &req->iovs[req->iov_idx]; | 827 | iovec = &req->iovs[req->iov_idx]; |
828 | if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { | 828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { |
829 | if (++req->iov_idx == req->data_iovs) { | 829 | if (++req->iov_idx == req->data_iovs) { |
830 | ret = -EFAULT; | 830 | ret = -EFAULT; |
831 | goto free_txreq; | 831 | goto free_txreq; |
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) | |||
1390 | } else { | 1390 | } else { |
1391 | if (status != SDMA_TXREQ_S_OK) | 1391 | if (status != SDMA_TXREQ_S_OK) |
1392 | req->status = status; | 1392 | req->status = status; |
1393 | if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && | 1393 | if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && |
1394 | (READ_ONCE(req->done) || | 1394 | (READ_ONCE(req->done) || |
1395 | READ_ONCE(req->has_error))) { | 1395 | READ_ONCE(req->has_error))) { |
1396 | user_sdma_free_request(req, false); | 1396 | user_sdma_free_request(req, false); |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 53efbb0b40c4..9a37e844d4c8 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c | |||
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) | |||
368 | 368 | ||
369 | again: | 369 | again: |
370 | smp_read_barrier_depends(); /* see post_one_send() */ | 370 | smp_read_barrier_depends(); /* see post_one_send() */ |
371 | if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) | 371 | if (sqp->s_last == READ_ONCE(sqp->s_head)) |
372 | goto clr_busy; | 372 | goto clr_busy; |
373 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); | 373 | wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); |
374 | 374 | ||
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 498e2202e72c..bddcc37ace44 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) | |||
61 | goto bail; | 61 | goto bail; |
62 | /* We are in the error state, flush the work request. */ | 62 | /* We are in the error state, flush the work request. */ |
63 | smp_read_barrier_depends(); /* see post_one_send() */ | 63 | smp_read_barrier_depends(); /* see post_one_send() */ |
64 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 64 | if (qp->s_last == READ_ONCE(qp->s_head)) |
65 | goto bail; | 65 | goto bail; |
66 | /* If DMAs are in progress, we can't flush immediately. */ | 66 | /* If DMAs are in progress, we can't flush immediately. */ |
67 | if (atomic_read(&priv->s_dma_busy)) { | 67 | if (atomic_read(&priv->s_dma_busy)) { |
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) | |||
91 | goto bail; | 91 | goto bail; |
92 | /* Check if send work queue is empty. */ | 92 | /* Check if send work queue is empty. */ |
93 | smp_read_barrier_depends(); /* see post_one_send() */ | 93 | smp_read_barrier_depends(); /* see post_one_send() */ |
94 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 94 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
95 | goto bail; | 95 | goto bail; |
96 | /* | 96 | /* |
97 | * Start a new request. | 97 | * Start a new request. |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index be4907453ac4..15962ed193ce 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) | |||
253 | goto bail; | 253 | goto bail; |
254 | /* We are in the error state, flush the work request. */ | 254 | /* We are in the error state, flush the work request. */ |
255 | smp_read_barrier_depends(); /* see post_one_send */ | 255 | smp_read_barrier_depends(); /* see post_one_send */ |
256 | if (qp->s_last == ACCESS_ONCE(qp->s_head)) | 256 | if (qp->s_last == READ_ONCE(qp->s_head)) |
257 | goto bail; | 257 | goto bail; |
258 | /* If DMAs are in progress, we can't flush immediately. */ | 258 | /* If DMAs are in progress, we can't flush immediately. */ |
259 | if (atomic_read(&priv->s_dma_busy)) { | 259 | if (atomic_read(&priv->s_dma_busy)) { |
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) | |||
267 | 267 | ||
268 | /* see post_one_send() */ | 268 | /* see post_one_send() */ |
269 | smp_read_barrier_depends(); | 269 | smp_read_barrier_depends(); |
270 | if (qp->s_cur == ACCESS_ONCE(qp->s_head)) | 270 | if (qp->s_cur == READ_ONCE(qp->s_head)) |
271 | goto bail; | 271 | goto bail; |
272 | 272 | ||
273 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); | 273 | wqe = rvt_get_swqe_ptr(qp, qp->s_cur); |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 22df09ae809e..b670cb9d2006 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) | |||
1073 | rdi->driver_f.notify_error_qp(qp); | 1073 | rdi->driver_f.notify_error_qp(qp); |
1074 | 1074 | ||
1075 | /* Schedule the sending tasklet to drain the send work queue. */ | 1075 | /* Schedule the sending tasklet to drain the send work queue. */ |
1076 | if (ACCESS_ONCE(qp->s_last) != qp->s_head) | 1076 | if (READ_ONCE(qp->s_last) != qp->s_head) |
1077 | rdi->driver_f.schedule_send(qp); | 1077 | rdi->driver_f.schedule_send(qp); |
1078 | 1078 | ||
1079 | rvt_clear_mr_refs(qp, 0); | 1079 | rvt_clear_mr_refs(qp, 0); |
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail( | |||
1686 | if (likely(qp->s_avail)) | 1686 | if (likely(qp->s_avail)) |
1687 | return 0; | 1687 | return 0; |
1688 | smp_read_barrier_depends(); /* see rc.c */ | 1688 | smp_read_barrier_depends(); /* see rc.c */ |
1689 | slast = ACCESS_ONCE(qp->s_last); | 1689 | slast = READ_ONCE(qp->s_last); |
1690 | if (qp->s_head >= slast) | 1690 | if (qp->s_head >= slast) |
1691 | avail = qp->s_size - (qp->s_head - slast); | 1691 | avail = qp->s_size - (qp->s_head - slast); |
1692 | else | 1692 | else |
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1917 | * ahead and kick the send engine into gear. Otherwise we will always | 1917 | * ahead and kick the send engine into gear. Otherwise we will always |
1918 | * just schedule the send to happen later. | 1918 | * just schedule the send to happen later. |
1919 | */ | 1919 | */ |
1920 | call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next; | 1920 | call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; |
1921 | 1921 | ||
1922 | for (; wr; wr = wr->next) { | 1922 | for (; wr; wr = wr->next) { |
1923 | err = rvt_post_one_wr(qp, wr, &call_send); | 1923 | err = rvt_post_one_wr(qp, wr, &call_send); |
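Editorial note: the hfi1/qib/rdmavt hunks all touch the same single-producer, single-consumer send queue — the poster advances s_head, and the send engine snapshots it with READ_ONCE() (still paired with smp_read_barrier_depends() here) before walking entries up to that point. A condensed consumer-side sketch; the processing call is hypothetical:

    smp_read_barrier_depends();                  /* pairs with post_one_send() */
    while (qp->s_last != READ_ONCE(qp->s_head)) {
            struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

            process_wqe(qp, wqe);                /* hypothetical helper */
            if (++qp->s_last >= qp->s_size)
                    qp->s_last = 0;
    }
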
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c index 2e8f801932be..a1db1e5040dc 100644 --- a/drivers/input/misc/regulator-haptic.c +++ b/drivers/input/misc/regulator-haptic.c | |||
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev) | |||
233 | 233 | ||
234 | haptic->suspended = false; | 234 | haptic->suspended = false; |
235 | 235 | ||
236 | magnitude = ACCESS_ONCE(haptic->magnitude); | 236 | magnitude = READ_ONCE(haptic->magnitude); |
237 | if (magnitude) | 237 | if (magnitude) |
238 | regulator_haptic_set_voltage(haptic, magnitude); | 238 | regulator_haptic_set_voltage(haptic, magnitude); |
239 | 239 | ||
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..33bb074d6941 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void) | |||
347 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); | 347 | BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); |
348 | BUG_ON(dm_bufio_client_count < 0); | 348 | BUG_ON(dm_bufio_client_count < 0); |
349 | 349 | ||
350 | dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size); | 350 | dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * Use default if set to 0 and report the actual cache size used. | 353 | * Use default if set to 0 and report the actual cache size used. |
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, | |||
960 | { | 960 | { |
961 | unsigned long buffers; | 961 | unsigned long buffers; |
962 | 962 | ||
963 | if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { | 963 | if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { |
964 | if (mutex_trylock(&dm_bufio_clients_lock)) { | 964 | if (mutex_trylock(&dm_bufio_clients_lock)) { |
965 | __cache_size_refresh(); | 965 | __cache_size_refresh(); |
966 | mutex_unlock(&dm_bufio_clients_lock); | 966 | mutex_unlock(&dm_bufio_clients_lock); |
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) | |||
1600 | 1600 | ||
1601 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) | 1601 | static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
1602 | { | 1602 | { |
1603 | unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); | 1603 | unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); |
1604 | return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); | 1604 | return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); |
1605 | } | 1605 | } |
1606 | 1606 | ||
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |||
1647 | { | 1647 | { |
1648 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); | 1648 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); |
1649 | 1649 | ||
1650 | return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); | 1650 | return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); |
1651 | } | 1651 | } |
1652 | 1652 | ||
1653 | /* | 1653 | /* |
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); | |||
1818 | 1818 | ||
1819 | static unsigned get_max_age_hz(void) | 1819 | static unsigned get_max_age_hz(void) |
1820 | { | 1820 | { |
1821 | unsigned max_age = ACCESS_ONCE(dm_bufio_max_age); | 1821 | unsigned max_age = READ_ONCE(dm_bufio_max_age); |
1822 | 1822 | ||
1823 | if (max_age > UINT_MAX / HZ) | 1823 | if (max_age > UINT_MAX / HZ) |
1824 | max_age = UINT_MAX / HZ; | 1824 | max_age = UINT_MAX / HZ; |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c50..8c5756e1df94 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * This file is released under the GPL. | 6 | * This file is released under the GPL. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/compiler.h> | ||
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/device-mapper.h> | 11 | #include <linux/device-mapper.h> |
11 | #include <linux/dm-io.h> | 12 | #include <linux/dm-io.h> |
@@ -80,13 +81,13 @@ struct journal_entry { | |||
80 | #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) | 81 | #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) |
81 | 82 | ||
82 | #if BITS_PER_LONG == 64 | 83 | #if BITS_PER_LONG == 64 |
83 | #define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0) | 84 | #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) |
84 | #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) | 85 | #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) |
85 | #elif defined(CONFIG_LBDAF) | 86 | #elif defined(CONFIG_LBDAF) |
86 | #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0) | 87 | #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) |
87 | #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) | 88 | #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) |
88 | #else | 89 | #else |
89 | #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0) | 90 | #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0) |
90 | #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) | 91 | #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) |
91 | #endif | 92 | #endif |
92 | #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) | 93 | #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) |
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in | |||
320 | 321 | ||
321 | static int dm_integrity_failed(struct dm_integrity_c *ic) | 322 | static int dm_integrity_failed(struct dm_integrity_c *ic) |
322 | { | 323 | { |
323 | return ACCESS_ONCE(ic->failed); | 324 | return READ_ONCE(ic->failed); |
324 | } | 325 | } |
325 | 326 | ||
326 | static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, | 327 | static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, |
@@ -1545,7 +1546,7 @@ retry_kmap: | |||
1545 | smp_mb(); | 1546 | smp_mb(); |
1546 | if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) | 1547 | if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) |
1547 | wake_up(&ic->copy_to_journal_wait); | 1548 | wake_up(&ic->copy_to_journal_wait); |
1548 | if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { | 1549 | if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { |
1549 | queue_work(ic->commit_wq, &ic->commit_work); | 1550 | queue_work(ic->commit_wq, &ic->commit_work); |
1550 | } else { | 1551 | } else { |
1551 | schedule_autocommit(ic); | 1552 | schedule_autocommit(ic); |
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w) | |||
1798 | ic->n_committed_sections += commit_sections; | 1799 | ic->n_committed_sections += commit_sections; |
1799 | spin_unlock_irq(&ic->endio_wait.lock); | 1800 | spin_unlock_irq(&ic->endio_wait.lock); |
1800 | 1801 | ||
1801 | if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) | 1802 | if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) |
1802 | queue_work(ic->writer_wq, &ic->writer_work); | 1803 | queue_work(ic->writer_wq, &ic->writer_work); |
1803 | 1804 | ||
1804 | release_flush_bios: | 1805 | release_flush_bios: |
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w) | |||
1980 | unsigned prev_free_sectors; | 1981 | unsigned prev_free_sectors; |
1981 | 1982 | ||
1982 | /* the following test is not needed, but it tests the replay code */ | 1983 | /* the following test is not needed, but it tests the replay code */ |
1983 | if (ACCESS_ONCE(ic->suspending)) | 1984 | if (READ_ONCE(ic->suspending)) |
1984 | return; | 1985 | return; |
1985 | 1986 | ||
1986 | spin_lock_irq(&ic->endio_wait.lock); | 1987 | spin_lock_irq(&ic->endio_wait.lock); |
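Editorial note: journal_entry_set_sector() above keeps its smp_wmb() but publishes the final word with WRITE_ONCE(), so a reader that observes the marked store also sees the earlier plain stores. The publish side of that pattern, unpacked from the macro (field names from the hunk):

    /* Publish an entry: plain store first, then the word that marks
     * the entry valid, separated by a write barrier. */
    je->u.s.sector_lo = cpu_to_le32(sector);
    smp_wmb();
    WRITE_ONCE(je->u.s.sector_hi, cpu_to_le32(sector >> 32));
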
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index cf2c67e35eaf..eb45cc3df31d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t) | |||
107 | try_again: | 107 | try_again: |
108 | spin_lock_irq(&throttle_spinlock); | 108 | spin_lock_irq(&throttle_spinlock); |
109 | 109 | ||
110 | throttle = ACCESS_ONCE(t->throttle); | 110 | throttle = READ_ONCE(t->throttle); |
111 | 111 | ||
112 | if (likely(throttle >= 100)) | 112 | if (likely(throttle >= 100)) |
113 | goto skip_limit; | 113 | goto skip_limit; |
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t) | |||
157 | 157 | ||
158 | t->num_io_jobs--; | 158 | t->num_io_jobs--; |
159 | 159 | ||
160 | if (likely(ACCESS_ONCE(t->throttle) >= 100)) | 160 | if (likely(READ_ONCE(t->throttle) >= 100)) |
161 | goto skip_limit; | 161 | goto skip_limit; |
162 | 162 | ||
163 | if (!t->num_io_jobs) { | 163 | if (!t->num_io_jobs) { |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018..3f88c9d32f7e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, | |||
366 | 366 | ||
367 | pgpath = path_to_pgpath(path); | 367 | pgpath = path_to_pgpath(path); |
368 | 368 | ||
369 | if (unlikely(lockless_dereference(m->current_pg) != pg)) { | 369 | if (unlikely(READ_ONCE(m->current_pg) != pg)) { |
370 | /* Only update current_pgpath if pg changed */ | 370 | /* Only update current_pgpath if pg changed */ |
371 | spin_lock_irqsave(&m->lock, flags); | 371 | spin_lock_irqsave(&m->lock, flags); |
372 | m->current_pgpath = pgpath; | 372 | m->current_pgpath = pgpath; |
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) | |||
390 | } | 390 | } |
391 | 391 | ||
392 | /* Were we instructed to switch PG? */ | 392 | /* Were we instructed to switch PG? */ |
393 | if (lockless_dereference(m->next_pg)) { | 393 | if (READ_ONCE(m->next_pg)) { |
394 | spin_lock_irqsave(&m->lock, flags); | 394 | spin_lock_irqsave(&m->lock, flags); |
395 | pg = m->next_pg; | 395 | pg = m->next_pg; |
396 | if (!pg) { | 396 | if (!pg) { |
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) | |||
406 | 406 | ||
407 | /* Don't change PG until it has no remaining paths */ | 407 | /* Don't change PG until it has no remaining paths */ |
408 | check_current_pg: | 408 | check_current_pg: |
409 | pg = lockless_dereference(m->current_pg); | 409 | pg = READ_ONCE(m->current_pg); |
410 | if (pg) { | 410 | if (pg) { |
411 | pgpath = choose_path_in_pg(m, pg, nr_bytes); | 411 | pgpath = choose_path_in_pg(m, pg, nr_bytes); |
412 | if (!IS_ERR_OR_NULL(pgpath)) | 412 | if (!IS_ERR_OR_NULL(pgpath)) |
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | |||
473 | struct request *clone; | 473 | struct request *clone; |
474 | 474 | ||
475 | /* Do we need to select a new pgpath? */ | 475 | /* Do we need to select a new pgpath? */ |
476 | pgpath = lockless_dereference(m->current_pgpath); | 476 | pgpath = READ_ONCE(m->current_pgpath); |
477 | if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) | 477 | if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) |
478 | pgpath = choose_pgpath(m, nr_bytes); | 478 | pgpath = choose_pgpath(m, nr_bytes); |
479 | 479 | ||
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m | |||
535 | bool queue_io; | 535 | bool queue_io; |
536 | 536 | ||
537 | /* Do we need to select a new pgpath? */ | 537 | /* Do we need to select a new pgpath? */ |
538 | pgpath = lockless_dereference(m->current_pgpath); | 538 | pgpath = READ_ONCE(m->current_pgpath); |
539 | queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); | 539 | queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); |
540 | if (!pgpath || !queue_io) | 540 | if (!pgpath || !queue_io) |
541 | pgpath = choose_pgpath(m, nr_bytes); | 541 | pgpath = choose_pgpath(m, nr_bytes); |
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, | |||
1804 | struct pgpath *current_pgpath; | 1804 | struct pgpath *current_pgpath; |
1805 | int r; | 1805 | int r; |
1806 | 1806 | ||
1807 | current_pgpath = lockless_dereference(m->current_pgpath); | 1807 | current_pgpath = READ_ONCE(m->current_pgpath); |
1808 | if (!current_pgpath) | 1808 | if (!current_pgpath) |
1809 | current_pgpath = choose_pgpath(m, 0); | 1809 | current_pgpath = choose_pgpath(m, 0); |
1810 | 1810 | ||
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, | |||
1826 | } | 1826 | } |
1827 | 1827 | ||
1828 | if (r == -ENOTCONN) { | 1828 | if (r == -ENOTCONN) { |
1829 | if (!lockless_dereference(m->current_pg)) { | 1829 | if (!READ_ONCE(m->current_pg)) { |
1830 | /* Path status changed, redo selection */ | 1830 | /* Path status changed, redo selection */ |
1831 | (void) choose_pgpath(m, 0); | 1831 | (void) choose_pgpath(m, 0); |
1832 | } | 1832 | } |
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti) | |||
1895 | return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); | 1895 | return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); |
1896 | 1896 | ||
1897 | /* Guess which priority_group will be used at next mapping time */ | 1897 | /* Guess which priority_group will be used at next mapping time */ |
1898 | pg = lockless_dereference(m->current_pg); | 1898 | pg = READ_ONCE(m->current_pg); |
1899 | next_pg = lockless_dereference(m->next_pg); | 1899 | next_pg = READ_ONCE(m->next_pg); |
1900 | if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) | 1900 | if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) |
1901 | pg = next_pg; | 1901 | pg = next_pg; |
1902 | 1902 | ||
1903 | if (!pg) { | 1903 | if (!pg) { |
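Editorial note: the dm-mpath hunks retire lockless_dereference() — with READ_ONCE() now implying smp_read_barrier_depends() (and Alpha's atomics strengthened), a single marked load suffices for dependency-ordered pointer reads. Consumer-side sketch, mirroring the hunk:

    struct priority_group *pg;

    pg = READ_ONCE(m->current_pg);   /* was lockless_dereference(m->current_pg) */
    if (pg)
            pgpath = choose_path_in_pg(m, pg, nr_bytes);   /* dependent accesses
                                                              ordered after the load */
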
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index a7868503d135..29bc51084c82 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c | |||
@@ -432,7 +432,7 @@ do_sync_free: | |||
432 | synchronize_rcu_expedited(); | 432 | synchronize_rcu_expedited(); |
433 | dm_stat_free(&s->rcu_head); | 433 | dm_stat_free(&s->rcu_head); |
434 | } else { | 434 | } else { |
435 | ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; | 435 | WRITE_ONCE(dm_stat_need_rcu_barrier, 1); |
436 | call_rcu(&s->rcu_head, dm_stat_free); | 436 | call_rcu(&s->rcu_head, dm_stat_free); |
437 | } | 437 | } |
438 | return 0; | 438 | return 0; |
@@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, | |||
640 | */ | 640 | */ |
641 | last = raw_cpu_ptr(stats->last); | 641 | last = raw_cpu_ptr(stats->last); |
642 | stats_aux->merged = | 642 | stats_aux->merged = |
643 | (bi_sector == (ACCESS_ONCE(last->last_sector) && | 643 | (bi_sector == (READ_ONCE(last->last_sector) && |
644 | ((bi_rw == WRITE) == | 644 | ((bi_rw == WRITE) == |
645 | (ACCESS_ONCE(last->last_rw) == WRITE)) | 645 | (READ_ONCE(last->last_rw) == WRITE)) |
646 | )); | 646 | )); |
647 | ACCESS_ONCE(last->last_sector) = end_sector; | 647 | WRITE_ONCE(last->last_sector, end_sector); |
648 | ACCESS_ONCE(last->last_rw) = bi_rw; | 648 | WRITE_ONCE(last->last_rw, bi_rw); |
649 | } | 649 | } |
650 | 650 | ||
651 | rcu_read_lock(); | 651 | rcu_read_lock(); |
@@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared | |||
694 | 694 | ||
695 | for_each_possible_cpu(cpu) { | 695 | for_each_possible_cpu(cpu) { |
696 | p = &s->stat_percpu[cpu][x]; | 696 | p = &s->stat_percpu[cpu][x]; |
697 | shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); | 697 | shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); |
698 | shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); | 698 | shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); |
699 | shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]); | 699 | shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]); |
700 | shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]); | 700 | shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]); |
701 | shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]); | 701 | shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]); |
702 | shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]); | 702 | shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]); |
703 | shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]); | 703 | shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]); |
704 | shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]); | 704 | shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]); |
705 | shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]); | 705 | shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]); |
706 | shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]); | 706 | shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]); |
707 | shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total); | 707 | shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total); |
708 | shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue); | 708 | shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue); |
709 | if (s->n_histogram_entries) { | 709 | if (s->n_histogram_entries) { |
710 | unsigned i; | 710 | unsigned i; |
711 | for (i = 0; i < s->n_histogram_entries + 1; i++) | 711 | for (i = 0; i < s->n_histogram_entries + 1; i++) |
712 | shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]); | 712 | shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]); |
713 | } | 713 | } |
714 | } | 714 | } |
715 | } | 715 | } |
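The dm-stats hunks above show the basic shape of this whole conversion: every plain lockless load becomes READ_ONCE() and every "ACCESS_ONCE(x) = v" store becomes WRITE_ONCE(x, v). As a rough userspace illustration only (the macros below are simplified volatile-cast stand-ins, not the kernel's definitions, and struct last_io with its fields is a hypothetical stand-in for the per-CPU "last" bookkeeping):

    /* Minimal sketch of the ACCESS_ONCE -> READ_ONCE/WRITE_ONCE conversion.
     * For scalar types the kernel macros boil down to volatile accesses,
     * which is what these simplified versions model.
     */
    #include <stdio.h>

    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    struct last_io {              /* hypothetical, not dm-stats' real layout */
            unsigned long last_sector;
            int last_rw;
    };

    static struct last_io last;

    static void account(unsigned long sector, int rw)
    {
            /* old style:  ACCESS_ONCE(last.last_sector) = sector;  */
            WRITE_ONCE(last.last_sector, sector);
            WRITE_ONCE(last.last_rw, rw);
    }

    int main(void)
    {
            account(4096, 1);
            printf("%lu %d\n", READ_ONCE(last.last_sector),
                   READ_ONCE(last.last_rw));
            return 0;
    }

The point of the marked accesses is purely to stop the compiler from tearing, fusing, or re-reading the shared word; they add no ordering by themselves.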
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 4c8de1ff78ca..8d0ba879777e 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c | |||
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long | |||
144 | 144 | ||
145 | switch_get_position(sctx, region_nr, ®ion_index, &bit); | 145 | switch_get_position(sctx, region_nr, ®ion_index, &bit); |
146 | 146 | ||
147 | return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & | 147 | return (READ_ONCE(sctx->region_table[region_index]) >> bit) & |
148 | ((1 << sctx->region_table_entry_bits) - 1); | 148 | ((1 << sctx->region_table_entry_bits) - 1); |
149 | } | 149 | } |
150 | 150 | ||
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..89e5dff9b4cf 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
2431 | struct pool_c *pt = pool->ti->private; | 2431 | struct pool_c *pt = pool->ti->private; |
2432 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); | 2432 | bool needs_check = dm_pool_metadata_needs_check(pool->pmd); |
2433 | enum pool_mode old_mode = get_pool_mode(pool); | 2433 | enum pool_mode old_mode = get_pool_mode(pool); |
2434 | unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; | 2434 | unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ; |
2435 | 2435 | ||
2436 | /* | 2436 | /* |
2437 | * Never allow the pool to transition to PM_WRITE mode if user | 2437 | * Never allow the pool to transition to PM_WRITE mode if user |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..fba93237a780 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work) | |||
589 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); | 589 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); |
590 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); | 590 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); |
591 | if (!i) { | 591 | if (!i) { |
592 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); | 592 | unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster); |
593 | 593 | ||
594 | cluster >>= v->data_dev_block_bits; | 594 | cluster >>= v->data_dev_block_bits; |
595 | if (unlikely(!cluster)) | 595 | if (unlikely(!cluster)) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..8aaffa19b29a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; | |||
114 | 114 | ||
115 | static int __dm_get_module_param_int(int *module_param, int min, int max) | 115 | static int __dm_get_module_param_int(int *module_param, int min, int max) |
116 | { | 116 | { |
117 | int param = ACCESS_ONCE(*module_param); | 117 | int param = READ_ONCE(*module_param); |
118 | int modified_param = 0; | 118 | int modified_param = 0; |
119 | bool modified = true; | 119 | bool modified = true; |
120 | 120 | ||
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max) | |||
136 | unsigned __dm_get_module_param(unsigned *module_param, | 136 | unsigned __dm_get_module_param(unsigned *module_param, |
137 | unsigned def, unsigned max) | 137 | unsigned def, unsigned max) |
138 | { | 138 | { |
139 | unsigned param = ACCESS_ONCE(*module_param); | 139 | unsigned param = READ_ONCE(*module_param); |
140 | unsigned modified_param = 0; | 140 | unsigned modified_param = 0; |
141 | 141 | ||
142 | if (!param) | 142 | if (!param) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..447ddcbc9566 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page) | |||
2651 | { | 2651 | { |
2652 | char *sep = ","; | 2652 | char *sep = ","; |
2653 | size_t len = 0; | 2653 | size_t len = 0; |
2654 | unsigned long flags = ACCESS_ONCE(rdev->flags); | 2654 | unsigned long flags = READ_ONCE(rdev->flags); |
2655 | 2655 | ||
2656 | if (test_bit(Faulty, &flags) || | 2656 | if (test_bit(Faulty, &flags) || |
2657 | (!test_bit(ExternalBbl, &flags) && | 2657 | (!test_bit(ExternalBbl, &flags) && |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..7d9a50eed9db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n | |||
6072 | */ | 6072 | */ |
6073 | rcu_read_lock(); | 6073 | rcu_read_lock(); |
6074 | for (i = 0; i < conf->raid_disks; i++) { | 6074 | for (i = 0; i < conf->raid_disks; i++) { |
6075 | struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); | 6075 | struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); |
6076 | 6076 | ||
6077 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) | 6077 | if (rdev == NULL || test_bit(Faulty, &rdev->flags)) |
6078 | still_degraded = 1; | 6078 | still_degraded = 1; |
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index 2322af1b8742..53011629c9ad 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c | |||
@@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) | |||
66 | { | 66 | { |
67 | ssize_t free; | 67 | ssize_t free; |
68 | 68 | ||
69 | /* ACCESS_ONCE() to load read pointer on writer side | 69 | /* READ_ONCE() to load read pointer on writer side |
70 | * this pairs with smp_store_release() in dvb_ringbuffer_read(), | 70 | * this pairs with smp_store_release() in dvb_ringbuffer_read(), |
71 | * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), | 71 | * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), |
72 | * or dvb_ringbuffer_reset() | 72 | * or dvb_ringbuffer_reset() |
73 | */ | 73 | */ |
74 | free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; | 74 | free = READ_ONCE(rbuf->pread) - rbuf->pwrite; |
75 | if (free <= 0) | 75 | if (free <= 0) |
76 | free += rbuf->size; | 76 | free += rbuf->size; |
77 | return free-1; | 77 | return free-1; |
@@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si | |||
143 | todo -= split; | 143 | todo -= split; |
144 | /* smp_store_release() for read pointer update to ensure | 144 | /* smp_store_release() for read pointer update to ensure |
145 | * that buf is not overwritten until read is complete, | 145 | * that buf is not overwritten until read is complete, |
146 | * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() | 146 | * this pairs with READ_ONCE() in dvb_ringbuffer_free() |
147 | */ | 147 | */ |
148 | smp_store_release(&rbuf->pread, 0); | 148 | smp_store_release(&rbuf->pread, 0); |
149 | } | 149 | } |
@@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) | |||
168 | todo -= split; | 168 | todo -= split; |
169 | /* smp_store_release() for read pointer update to ensure | 169 | /* smp_store_release() for read pointer update to ensure |
170 | * that buf is not overwritten until read is complete, | 170 | * that buf is not overwritten until read is complete, |
171 | * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() | 171 | * this pairs with READ_ONCE() in dvb_ringbuffer_free() |
172 | */ | 172 | */ |
173 | smp_store_release(&rbuf->pread, 0); | 173 | smp_store_release(&rbuf->pread, 0); |
174 | } | 174 | } |
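The dvb_ringbuffer comments above describe a pairing worth spelling out: the consumer publishes its read index with smp_store_release() only after it has finished copying data out, and the producer samples that index with a single READ_ONCE() when computing free space. A minimal sketch of that pairing in plain C11 atomics (illustrative names and a fixed-size buffer, not the driver's code):

    #include <stdatomic.h>
    #include <stddef.h>

    #define RB_SIZE 1024

    struct ringbuf {
            unsigned char data[RB_SIZE];
            _Atomic size_t pread;   /* consumer-owned, read by producer */
            size_t pwrite;          /* producer-owned */
    };

    /* producer side: how much room is left (mirrors dvb_ringbuffer_free()) */
    static size_t rb_free(struct ringbuf *rb)
    {
            /* one relaxed load plays the READ_ONCE(rbuf->pread) role */
            ptrdiff_t free = (ptrdiff_t)atomic_load_explicit(&rb->pread,
                                                             memory_order_relaxed)
                             - (ptrdiff_t)rb->pwrite;
            if (free <= 0)
                    free += RB_SIZE;
            return (size_t)free - 1;
    }

    /* consumer side: copy len bytes out, then publish the new read index
     * with release semantics (the smp_store_release() role) */
    static void rb_consume(struct ringbuf *rb, unsigned char *dst, size_t len)
    {
            size_t pread = atomic_load_explicit(&rb->pread, memory_order_relaxed);
            size_t i;

            for (i = 0; i < len; i++)
                    dst[i] = rb->data[(pread + i) % RB_SIZE];

            atomic_store_explicit(&rb->pread, (pread + len) % RB_SIZE,
                                  memory_order_release);
    }

The release store is what guarantees the producer can never overwrite bytes the consumer is still reading; the producer's load only needs to be a single, untorn read.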
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c index 637cc4686742..b665757ca89a 100644 --- a/drivers/misc/mic/scif/scif_rb.c +++ b/drivers/misc/mic/scif/scif_rb.c | |||
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb) | |||
138 | * the read barrier in scif_rb_count(..) | 138 | * the read barrier in scif_rb_count(..) |
139 | */ | 139 | */ |
140 | wmb(); | 140 | wmb(); |
141 | ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; | 141 | WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); |
142 | #ifdef CONFIG_INTEL_MIC_CARD | 142 | #ifdef CONFIG_INTEL_MIC_CARD |
143 | /* | 143 | /* |
144 | * X100 Si bug: For the case where a Core is performing an EXT_WR | 144 | * X100 Si bug: For the case where a Core is performing an EXT_WR |
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb) | |||
147 | * This way, if ordering is violated for the Interrupt Message, it will | 147 | * This way, if ordering is violated for the Interrupt Message, it will |
148 | * fall just behind the first Posted associated with the first EXT_WR. | 148 | * fall just behind the first Posted associated with the first EXT_WR. |
149 | */ | 149 | */ |
150 | ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; | 150 | WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); |
151 | #endif | 151 | #endif |
152 | } | 152 | } |
153 | 153 | ||
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) | |||
210 | * scif_rb_space(..) | 210 | * scif_rb_space(..) |
211 | */ | 211 | */ |
212 | mb(); | 212 | mb(); |
213 | ACCESS_ONCE(*rb->read_ptr) = new_offset; | 213 | WRITE_ONCE(*rb->read_ptr, new_offset); |
214 | #ifdef CONFIG_INTEL_MIC_CARD | 214 | #ifdef CONFIG_INTEL_MIC_CARD |
215 | /* | 215 | /* |
216 | * X100 Si Bug: For the case where a Core is performing an EXT_WR | 216 | * X100 Si Bug: For the case where a Core is performing an EXT_WR |
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) | |||
219 | * This way, if ordering is violated for the Interrupt Message, it will | 219 | * This way, if ordering is violated for the Interrupt Message, it will |
220 | * fall just behind the first Posted associated with the first EXT_WR. | 220 | * fall just behind the first Posted associated with the first EXT_WR. |
221 | */ | 221 | */ |
222 | ACCESS_ONCE(*rb->read_ptr) = new_offset; | 222 | WRITE_ONCE(*rb->read_ptr, new_offset); |
223 | #endif | 223 | #endif |
224 | } | 224 | } |
225 | 225 | ||
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c index e1ef8daedd5a..a036dbb4101e 100644 --- a/drivers/misc/mic/scif/scif_rma_list.c +++ b/drivers/misc/mic/scif/scif_rma_list.c | |||
@@ -277,7 +277,7 @@ retry: | |||
277 | * Need to restart list traversal if there has been | 277 | * Need to restart list traversal if there has been |
278 | * an asynchronous list entry deletion. | 278 | * an asynchronous list entry deletion. |
279 | */ | 279 | */ |
280 | if (ACCESS_ONCE(ep->rma_info.async_list_del)) | 280 | if (READ_ONCE(ep->rma_info.async_list_del)) |
281 | goto retry; | 281 | goto retry; |
282 | } | 282 | } |
283 | mutex_unlock(&ep->rma_info.rma_lock); | 283 | mutex_unlock(&ep->rma_info.rma_lock); |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490..1ed9529e7bd1 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) | |||
1378 | unsigned int count; | 1378 | unsigned int count; |
1379 | 1379 | ||
1380 | slaves = rcu_dereference(bond->slave_arr); | 1380 | slaves = rcu_dereference(bond->slave_arr); |
1381 | count = slaves ? ACCESS_ONCE(slaves->count) : 0; | 1381 | count = slaves ? READ_ONCE(slaves->count) : 0; |
1382 | if (likely(count)) | 1382 | if (likely(count)) |
1383 | tx_slave = slaves->arr[hash_index % | 1383 | tx_slave = slaves->arr[hash_index % |
1384 | count]; | 1384 | count]; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2db581131b2..08a4f57cf409 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1167 | slave = bond_slave_get_rcu(skb->dev); | 1167 | slave = bond_slave_get_rcu(skb->dev); |
1168 | bond = slave->bond; | 1168 | bond = slave->bond; |
1169 | 1169 | ||
1170 | recv_probe = ACCESS_ONCE(bond->recv_probe); | 1170 | recv_probe = READ_ONCE(bond->recv_probe); |
1171 | if (recv_probe) { | 1171 | if (recv_probe) { |
1172 | ret = recv_probe(skb, bond, slave); | 1172 | ret = recv_probe(skb, bond, slave); |
1173 | if (ret == RX_HANDLER_CONSUMED) { | 1173 | if (ret == RX_HANDLER_CONSUMED) { |
@@ -3811,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev | |||
3811 | else | 3811 | else |
3812 | bond_xmit_slave_id(bond, skb, 0); | 3812 | bond_xmit_slave_id(bond, skb, 0); |
3813 | } else { | 3813 | } else { |
3814 | int slave_cnt = ACCESS_ONCE(bond->slave_cnt); | 3814 | int slave_cnt = READ_ONCE(bond->slave_cnt); |
3815 | 3815 | ||
3816 | if (likely(slave_cnt)) { | 3816 | if (likely(slave_cnt)) { |
3817 | slave_id = bond_rr_gen_slave_id(bond); | 3817 | slave_id = bond_rr_gen_slave_id(bond); |
@@ -3973,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3973 | unsigned int count; | 3973 | unsigned int count; |
3974 | 3974 | ||
3975 | slaves = rcu_dereference(bond->slave_arr); | 3975 | slaves = rcu_dereference(bond->slave_arr); |
3976 | count = slaves ? ACCESS_ONCE(slaves->count) : 0; | 3976 | count = slaves ? READ_ONCE(slaves->count) : 0; |
3977 | if (likely(count)) { | 3977 | if (likely(count)) { |
3978 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; | 3978 | slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; |
3979 | bond_dev_queue_xmit(bond, skb, slave->dev); | 3979 | bond_dev_queue_xmit(bond, skb, slave->dev); |
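In the bonding hunks the slave count is deliberately loaded exactly once: the array and its length can change under RCU, so the "is it non-zero" test and the modulo used to index the array must both see the same sampled value. A sketch of that idiom with hypothetical structures (not the bonding driver's types), using the same simplified READ_ONCE stand-in as above:

    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct slave_arr {
            unsigned int count;
            void *arr[16];
    };

    static void *pick_slave(struct slave_arr *slaves, unsigned int hash)
    {
            unsigned int count = slaves ? READ_ONCE(slaves->count) : 0;

            if (!count)
                    return NULL;                    /* no transmit path available */

            return slaves->arr[hash % count];       /* count sampled once, used twice */
    }

Re-reading ->count for the array index could yield an out-of-bounds access if the count shrank between the two reads; the single marked load rules that out.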
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c..43f52a8fe708 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q, | |||
405 | */ | 405 | */ |
406 | static inline int reclaimable(const struct sge_txq *q) | 406 | static inline int reclaimable(const struct sge_txq *q) |
407 | { | 407 | { |
408 | int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); | 408 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
409 | hw_cidx -= q->cidx; | 409 | hw_cidx -= q->cidx; |
410 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; | 410 | return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; |
411 | } | 411 | } |
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb); | |||
1375 | */ | 1375 | */ |
1376 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) | 1376 | static inline void reclaim_completed_tx_imm(struct sge_txq *q) |
1377 | { | 1377 | { |
1378 | int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); | 1378 | int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); |
1379 | int reclaim = hw_cidx - q->cidx; | 1379 | int reclaim = hw_cidx - q->cidx; |
1380 | 1380 | ||
1381 | if (reclaim < 0) | 1381 | if (reclaim < 0) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..c6e859a27ee6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) | |||
605 | 605 | ||
606 | if (wrapped) | 606 | if (wrapped) |
607 | newacc += 65536; | 607 | newacc += 65536; |
608 | ACCESS_ONCE(*acc) = newacc; | 608 | WRITE_ONCE(*acc, newacc); |
609 | } | 609 | } |
610 | 610 | ||
611 | static void populate_erx_stats(struct be_adapter *adapter, | 611 | static void populate_erx_stats(struct be_adapter *adapter, |
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 0cec06bec63e..340e28211135 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c | |||
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) | |||
373 | unsigned int count; | 373 | unsigned int count; |
374 | 374 | ||
375 | smp_rmb(); | 375 | smp_rmb(); |
376 | count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); | 376 | count = tx_count(READ_ONCE(priv->tx_head), tx_tail); |
377 | if (count == 0) | 377 | if (count == 0) |
378 | goto out; | 378 | goto out; |
379 | 379 | ||
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
431 | dma_addr_t phys; | 431 | dma_addr_t phys; |
432 | 432 | ||
433 | smp_rmb(); | 433 | smp_rmb(); |
434 | count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); | 434 | count = tx_count(tx_head, READ_ONCE(priv->tx_tail)); |
435 | if (count == (TX_DESC_NUM - 1)) { | 435 | if (count == (TX_DESC_NUM - 1)) { |
436 | netif_stop_queue(ndev); | 436 | netif_stop_queue(ndev); |
437 | return NETDEV_TX_BUSY; | 437 | return NETDEV_TX_BUSY; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..2cb9539c931e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
264 | vsi->rx_buf_failed, vsi->rx_page_failed); | 264 | vsi->rx_buf_failed, vsi->rx_page_failed); |
265 | rcu_read_lock(); | 265 | rcu_read_lock(); |
266 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 266 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
267 | struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); | 267 | struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); |
268 | 268 | ||
269 | if (!rx_ring) | 269 | if (!rx_ring) |
270 | continue; | 270 | continue; |
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) | |||
320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); | 320 | ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); |
321 | } | 321 | } |
322 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 322 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
323 | struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 323 | struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); |
324 | 324 | ||
325 | if (!tx_ring) | 325 | if (!tx_ring) |
326 | continue; | 326 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..e9e04a485e0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, | |||
1570 | } | 1570 | } |
1571 | rcu_read_lock(); | 1571 | rcu_read_lock(); |
1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { | 1572 | for (j = 0; j < vsi->num_queue_pairs; j++) { |
1573 | tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); | 1573 | tx_ring = READ_ONCE(vsi->tx_rings[j]); |
1574 | 1574 | ||
1575 | if (!tx_ring) | 1575 | if (!tx_ring) |
1576 | continue; | 1576 | continue; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..de1fcac7834d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, | |||
455 | u64 bytes, packets; | 455 | u64 bytes, packets; |
456 | unsigned int start; | 456 | unsigned int start; |
457 | 457 | ||
458 | tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); | 458 | tx_ring = READ_ONCE(vsi->tx_rings[i]); |
459 | if (!tx_ring) | 459 | if (!tx_ring) |
460 | continue; | 460 | continue; |
461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); | 461 | i40e_get_netdev_stats_struct_tx(tx_ring, stats); |
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) | |||
791 | rcu_read_lock(); | 791 | rcu_read_lock(); |
792 | for (q = 0; q < vsi->num_queue_pairs; q++) { | 792 | for (q = 0; q < vsi->num_queue_pairs; q++) { |
793 | /* locate Tx ring */ | 793 | /* locate Tx ring */ |
794 | p = ACCESS_ONCE(vsi->tx_rings[q]); | 794 | p = READ_ONCE(vsi->tx_rings[q]); |
795 | 795 | ||
796 | do { | 796 | do { |
797 | start = u64_stats_fetch_begin_irq(&p->syncp); | 797 | start = u64_stats_fetch_begin_irq(&p->syncp); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c | |||
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | |||
130 | } | 130 | } |
131 | 131 | ||
132 | smp_mb(); /* Force any pending update before accessing. */ | 132 | smp_mb(); /* Force any pending update before accessing. */ |
133 | adj = ACCESS_ONCE(pf->ptp_base_adj); | 133 | adj = READ_ONCE(pf->ptp_base_adj); |
134 | 134 | ||
135 | freq = adj; | 135 | freq = adj; |
136 | freq *= ppb; | 136 | freq *= ppb; |
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) | |||
499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); | 499 | wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); |
500 | 500 | ||
501 | /* Update the base adjustement value. */ | 501 | /* Update the base adjustement value. */ |
502 | ACCESS_ONCE(pf->ptp_base_adj) = incval; | 502 | WRITE_ONCE(pf->ptp_base_adj, incval); |
503 | smp_mb(); /* Force the above update. */ | 503 | smp_mb(); /* Force the above update. */ |
504 | } | 504 | } |
505 | 505 | ||
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..31a3f09df9f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h | |||
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); | |||
375 | /* write operations, indexed using DWORDS */ | 375 | /* write operations, indexed using DWORDS */ |
376 | #define wr32(reg, val) \ | 376 | #define wr32(reg, val) \ |
377 | do { \ | 377 | do { \ |
378 | u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ | 378 | u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ |
379 | if (!E1000_REMOVED(hw_addr)) \ | 379 | if (!E1000_REMOVED(hw_addr)) \ |
380 | writel((val), &hw_addr[(reg)]); \ | 380 | writel((val), &hw_addr[(reg)]); \ |
381 | } while (0) | 381 | } while (0) |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d63..18b6c25d4705 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) | |||
750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) | 750 | u32 igb_rd32(struct e1000_hw *hw, u32 reg) |
751 | { | 751 | { |
752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); | 752 | struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); |
753 | u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); | 753 | u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
754 | u32 value = 0; | 754 | u32 value = 0; |
755 | 755 | ||
756 | if (E1000_REMOVED(hw_addr)) | 756 | if (E1000_REMOVED(hw_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | |||
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) | |||
161 | 161 | ||
162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 162 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
163 | { | 163 | { |
164 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 164 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
165 | 165 | ||
166 | if (ixgbe_removed(reg_addr)) | 166 | if (ixgbe_removed(reg_addr)) |
167 | return; | 167 | return; |
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) | |||
180 | 180 | ||
181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) | 181 | static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) |
182 | { | 182 | { |
183 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 183 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
184 | 184 | ||
185 | if (ixgbe_removed(reg_addr)) | 185 | if (ixgbe_removed(reg_addr)) |
186 | return; | 186 | return; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d5f31e94358..935a2f15b0b0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
380 | */ | 380 | */ |
381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) | 381 | u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) |
382 | { | 382 | { |
383 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 383 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
384 | u32 value; | 384 | u32 value; |
385 | 385 | ||
386 | if (ixgbe_removed(reg_addr)) | 386 | if (ixgbe_removed(reg_addr)) |
@@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8624 | 8624 | ||
8625 | rcu_read_lock(); | 8625 | rcu_read_lock(); |
8626 | for (i = 0; i < adapter->num_rx_queues; i++) { | 8626 | for (i = 0; i < adapter->num_rx_queues; i++) { |
8627 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | 8627 | struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); |
8628 | u64 bytes, packets; | 8628 | u64 bytes, packets; |
8629 | unsigned int start; | 8629 | unsigned int start; |
8630 | 8630 | ||
@@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, | |||
8640 | } | 8640 | } |
8641 | 8641 | ||
8642 | for (i = 0; i < adapter->num_tx_queues; i++) { | 8642 | for (i = 0; i < adapter->num_tx_queues; i++) { |
8643 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | 8643 | struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); |
8644 | 8644 | ||
8645 | ixgbe_get_ring_stats64(stats, ring); | 8645 | ixgbe_get_ring_stats64(stats, ring); |
8646 | } | 8646 | } |
8647 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 8647 | for (i = 0; i < adapter->num_xdp_queues; i++) { |
8648 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); | 8648 | struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); |
8649 | 8649 | ||
8650 | ixgbe_get_ring_stats64(stats, ring); | 8650 | ixgbe_get_ring_stats64(stats, ring); |
8651 | } | 8651 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | |||
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) | |||
378 | } | 378 | } |
379 | 379 | ||
380 | smp_mb(); | 380 | smp_mb(); |
381 | incval = ACCESS_ONCE(adapter->base_incval); | 381 | incval = READ_ONCE(adapter->base_incval); |
382 | 382 | ||
383 | freq = incval; | 383 | freq = incval; |
384 | freq *= ppb; | 384 | freq *= ppb; |
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) | |||
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | /* update the base incval used to calculate frequency adjustment */ | 1161 | /* update the base incval used to calculate frequency adjustment */ |
1162 | ACCESS_ONCE(adapter->base_incval) = incval; | 1162 | WRITE_ONCE(adapter->base_incval, incval); |
1163 | smp_mb(); | 1163 | smp_mb(); |
1164 | 1164 | ||
1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ | 1165 | /* need lock to prevent incorrect read while modifying cyclecounter */ |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..cacb30682434 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) | |||
164 | 164 | ||
165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) | 165 | u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) |
166 | { | 166 | { |
167 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 167 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
168 | u32 value; | 168 | u32 value; |
169 | 169 | ||
170 | if (IXGBE_REMOVED(reg_addr)) | 170 | if (IXGBE_REMOVED(reg_addr)) |
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h | |||
@@ -182,7 +182,7 @@ struct ixgbevf_info { | |||
182 | 182 | ||
183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) | 183 | static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) |
184 | { | 184 | { |
185 | u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); | 185 | u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); |
186 | 186 | ||
187 | if (IXGBE_REMOVED(reg_addr)) | 187 | if (IXGBE_REMOVED(reg_addr)) |
188 | return; | 188 | return; |
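The igb/ixgbe/ixgbevf register helpers above all follow the same pattern: load hw->hw_addr once, test that snapshot for surprise removal, then use the same snapshot for the MMIO access. A rough illustration of why (hypothetical types and helpers, not the drivers' code; the removal check is a stand-in for E1000_REMOVED()/IXGBE_REMOVED()):

    #include <stdint.h>
    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct fake_hw {
            uint8_t *hw_addr;       /* cleared by the removal path */
    };

    static int hw_removed(const uint8_t *addr)
    {
            return addr == NULL;    /* stand-in for the drivers' removed check */
    }

    static void fake_write_reg(struct fake_hw *hw, uint32_t reg, uint32_t value)
    {
            uint8_t *hw_addr = READ_ONCE(hw->hw_addr);  /* one snapshot */

            if (hw_removed(hw_addr))
                    return;                             /* device gone, drop the write */

            /* in the real drivers this is writel(value, &hw_addr[reg]) */
            *(volatile uint32_t *)(hw_addr + reg) = value;
    }

Without the single marked load, the compiler would be free to re-read hw->hw_addr after the check, and a concurrent removal could hand the write a stale or cleared mapping.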
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..3541a7f9d12e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
414 | 414 | ||
415 | index = cons_index & size_mask; | 415 | index = cons_index & size_mask; |
416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; | 416 | cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; |
417 | last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); | 417 | last_nr_txbb = READ_ONCE(ring->last_nr_txbb); |
418 | ring_cons = ACCESS_ONCE(ring->cons); | 418 | ring_cons = READ_ONCE(ring->cons); |
419 | ring_index = ring_cons & size_mask; | 419 | ring_index = ring_cons & size_mask; |
420 | stamp_index = ring_index; | 420 | stamp_index = ring_index; |
421 | 421 | ||
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
479 | wmb(); | 479 | wmb(); |
480 | 480 | ||
481 | /* we want to dirty this cache line once */ | 481 | /* we want to dirty this cache line once */ |
482 | ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; | 482 | WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); |
483 | ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; | 483 | WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); |
484 | 484 | ||
485 | if (cq->type == TX_XDP) | 485 | if (cq->type == TX_XDP) |
486 | return done < budget; | 486 | return done < budget; |
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
858 | goto tx_drop; | 858 | goto tx_drop; |
859 | 859 | ||
860 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 860 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
861 | ring_cons = ACCESS_ONCE(ring->cons); | 861 | ring_cons = READ_ONCE(ring->cons); |
862 | 862 | ||
863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, | 863 | real_size = get_real_size(skb, shinfo, dev, &lso_header_size, |
864 | &inline_ok, &fragptr); | 864 | &inline_ok, &fragptr); |
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1066 | */ | 1066 | */ |
1067 | smp_rmb(); | 1067 | smp_rmb(); |
1068 | 1068 | ||
1069 | ring_cons = ACCESS_ONCE(ring->cons); | 1069 | ring_cons = READ_ONCE(ring->cons); |
1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { | 1070 | if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { |
1071 | netif_tx_wake_queue(ring->tx_queue); | 1071 | netif_tx_wake_queue(ring->tx_queue); |
1072 | ring->wake_queue++; | 1072 | ring->wake_queue++; |
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5dd5f61e1114 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c | |||
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data) | |||
2629 | ring = &vdev->vpaths[i].ring; | 2629 | ring = &vdev->vpaths[i].ring; |
2630 | 2630 | ||
2631 | /* Truncated to machine word size number of frames */ | 2631 | /* Truncated to machine word size number of frames */ |
2632 | rx_frms = ACCESS_ONCE(ring->stats.rx_frms); | 2632 | rx_frms = READ_ONCE(ring->stats.rx_frms); |
2633 | 2633 | ||
2634 | /* Did this vpath received any packets */ | 2634 | /* Did this vpath received any packets */ |
2635 | if (ring->stats.prev_rx_frms == rx_frms) { | 2635 | if (ring->stats.prev_rx_frms == rx_frms) { |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..a95a46bcd339 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2073 | netif_vdbg(efx, intr, efx->net_dev, | 2073 | netif_vdbg(efx, intr, efx->net_dev, |
2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); | 2074 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
2075 | 2075 | ||
2076 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { | 2076 | if (likely(READ_ONCE(efx->irq_soft_enabled))) { |
2077 | /* Note test interrupts */ | 2077 | /* Note test interrupts */ |
2078 | if (context->index == efx->irq_level) | 2078 | if (context->index == efx->irq_level) |
2079 | efx->last_irq_cpu = raw_smp_processor_id(); | 2079 | efx->last_irq_cpu = raw_smp_processor_id(); |
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) | |||
2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) | 2088 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
2089 | { | 2089 | { |
2090 | struct efx_nic *efx = dev_id; | 2090 | struct efx_nic *efx = dev_id; |
2091 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 2091 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
2092 | struct efx_channel *channel; | 2092 | struct efx_channel *channel; |
2093 | efx_dword_t reg; | 2093 | efx_dword_t reg; |
2094 | u32 queues; | 2094 | u32 queues; |
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, | |||
3291 | bool rx_cont; | 3291 | bool rx_cont; |
3292 | u16 flags = 0; | 3292 | u16 flags = 0; |
3293 | 3293 | ||
3294 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3294 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3295 | return 0; | 3295 | return 0; |
3296 | 3296 | ||
3297 | /* Basic packet information */ | 3297 | /* Basic packet information */ |
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
3428 | unsigned int tx_ev_q_label; | 3428 | unsigned int tx_ev_q_label; |
3429 | int tx_descs = 0; | 3429 | int tx_descs = 0; |
3430 | 3430 | ||
3431 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 3431 | if (unlikely(READ_ONCE(efx->reset_pending))) |
3432 | return 0; | 3432 | return 0; |
3433 | 3433 | ||
3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) | 3434 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) | |||
5316 | int i; | 5316 | int i; |
5317 | 5317 | ||
5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { | 5318 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
5319 | if (ACCESS_ONCE(table->entry[i].spec) & | 5319 | if (READ_ONCE(table->entry[i].spec) & |
5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { | 5320 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { |
5321 | rc = efx_ef10_filter_remove_internal(efx, | 5321 | rc = efx_ef10_filter_remove_internal(efx, |
5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); | 5322 | 1U << EFX_FILTER_PRI_AUTO, i, true); |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b9cb697b2818..016616a63880 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data) | |||
2809 | unsigned long pending; | 2809 | unsigned long pending; |
2810 | enum reset_type method; | 2810 | enum reset_type method; |
2811 | 2811 | ||
2812 | pending = ACCESS_ONCE(efx->reset_pending); | 2812 | pending = READ_ONCE(efx->reset_pending); |
2813 | method = fls(pending) - 1; | 2813 | method = fls(pending) - 1; |
2814 | 2814 | ||
2815 | if (method == RESET_TYPE_MC_BIST) | 2815 | if (method == RESET_TYPE_MC_BIST) |
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
2874 | /* If we're not READY then just leave the flags set as the cue | 2874 | /* If we're not READY then just leave the flags set as the cue |
2875 | * to abort probing or reschedule the reset later. | 2875 | * to abort probing or reschedule the reset later. |
2876 | */ | 2876 | */ |
2877 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2877 | if (READ_ONCE(efx->state) != STATE_READY) |
2878 | return; | 2878 | return; |
2879 | 2879 | ||
2880 | /* efx_process_channel() will no longer read events once a | 2880 | /* efx_process_channel() will no longer read events once a |
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 29614da91cbf..7263275fde4a 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c | |||
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data) | |||
2545 | unsigned long pending; | 2545 | unsigned long pending; |
2546 | enum reset_type method; | 2546 | enum reset_type method; |
2547 | 2547 | ||
2548 | pending = ACCESS_ONCE(efx->reset_pending); | 2548 | pending = READ_ONCE(efx->reset_pending); |
2549 | method = fls(pending) - 1; | 2549 | method = fls(pending) - 1; |
2550 | 2550 | ||
2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || | 2551 | if ((method == RESET_TYPE_RECOVER_OR_DISABLE || |
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) | |||
2605 | /* If we're not READY then just leave the flags set as the cue | 2605 | /* If we're not READY then just leave the flags set as the cue |
2606 | * to abort probing or reschedule the reset later. | 2606 | * to abort probing or reschedule the reset later. |
2607 | */ | 2607 | */ |
2608 | if (ACCESS_ONCE(efx->state) != STATE_READY) | 2608 | if (READ_ONCE(efx->state) != STATE_READY) |
2609 | return; | 2609 | return; |
2610 | 2610 | ||
2611 | queue_work(reset_workqueue, &efx->reset_work); | 2611 | queue_work(reset_workqueue, &efx->reset_work); |
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 93c713c1f627..cd8bb472d758 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c | |||
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 452 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 453 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
454 | 454 | ||
455 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 455 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
456 | return IRQ_HANDLED; | 456 | return IRQ_HANDLED; |
457 | 457 | ||
458 | /* Check to see if we have a serious error condition */ | 458 | /* Check to see if we have a serious error condition */ |
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx) | |||
1372 | ef4_oword_t reg; | 1372 | ef4_oword_t reg; |
1373 | int link_speed, isolate; | 1373 | int link_speed, isolate; |
1374 | 1374 | ||
1375 | isolate = !!ACCESS_ONCE(efx->reset_pending); | 1375 | isolate = !!READ_ONCE(efx->reset_pending); |
1376 | 1376 | ||
1377 | switch (link_state->speed) { | 1377 | switch (link_state->speed) { |
1378 | case 10000: link_speed = 3; break; | 1378 | case 10000: link_speed = 3; break; |
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 05916c710d8c..494884f6af4a 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c | |||
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) | |||
834 | struct ef4_nic *efx = channel->efx; | 834 | struct ef4_nic *efx = channel->efx; |
835 | int tx_packets = 0; | 835 | int tx_packets = 0; |
836 | 836 | ||
837 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 837 | if (unlikely(READ_ONCE(efx->reset_pending))) |
838 | return 0; | 838 | return 0; |
839 | 839 | ||
840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 840 | if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) | |||
990 | struct ef4_rx_queue *rx_queue; | 990 | struct ef4_rx_queue *rx_queue; |
991 | struct ef4_nic *efx = channel->efx; | 991 | struct ef4_nic *efx = channel->efx; |
992 | 992 | ||
993 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 993 | if (unlikely(READ_ONCE(efx->reset_pending))) |
994 | return; | 994 | return; |
995 | 995 | ||
996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 996 | rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) | |||
1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) | 1504 | irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) |
1505 | { | 1505 | { |
1506 | struct ef4_nic *efx = dev_id; | 1506 | struct ef4_nic *efx = dev_id; |
1507 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1507 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1508 | ef4_oword_t *int_ker = efx->irq_status.addr; | 1508 | ef4_oword_t *int_ker = efx->irq_status.addr; |
1509 | irqreturn_t result = IRQ_NONE; | 1509 | irqreturn_t result = IRQ_NONE; |
1510 | struct ef4_channel *channel; | 1510 | struct ef4_channel *channel; |
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id) | |||
1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", | 1596 | "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", |
1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); | 1597 | irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); |
1598 | 1598 | ||
1599 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1599 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1600 | return IRQ_HANDLED; | 1600 | return IRQ_HANDLED; |
1601 | 1601 | ||
1602 | /* Handle non-event-queue sources */ | 1602 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index a4c4592f6023..54ca457cdb15 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h | |||
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_ | |||
83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, | 83 | static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, |
84 | unsigned int write_count) | 84 | unsigned int write_count) |
85 | { | 85 | { |
86 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 86 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
87 | 87 | ||
88 | if (empty_read_count == 0) | 88 | if (empty_read_count == 0) |
89 | return false; | 89 | return false; |
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx); | |||
464 | 464 | ||
465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) | 465 | static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) |
466 | { | 466 | { |
467 | return ACCESS_ONCE(channel->event_test_cpu); | 467 | return READ_ONCE(channel->event_test_cpu); |
468 | } | 468 | } |
469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) | 469 | static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) |
470 | { | 470 | { |
471 | return ACCESS_ONCE(efx->last_irq_cpu); | 471 | return READ_ONCE(efx->last_irq_cpu); |
472 | } | 472 | } |
473 | 473 | ||
474 | /* Global Resources */ | 474 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..6486814e97dc 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c | |||
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1) | |||
134 | */ | 134 | */ |
135 | netif_tx_stop_queue(txq1->core_txq); | 135 | netif_tx_stop_queue(txq1->core_txq); |
136 | smp_mb(); | 136 | smp_mb(); |
137 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 137 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
138 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 138 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
139 | 139 | ||
140 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 140 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
141 | txq2->insert_count - txq2->old_read_count); | 141 | txq2->insert_count - txq2->old_read_count); |
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) | |||
524 | 524 | ||
525 | /* Check whether the hardware queue is now empty */ | 525 | /* Check whether the hardware queue is now empty */ |
526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 526 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
527 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 527 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
528 | if (tx_queue->read_count == tx_queue->old_write_count) { | 528 | if (tx_queue->read_count == tx_queue->old_write_count) { |
529 | smp_mb(); | 529 | smp_mb(); |
530 | tx_queue->empty_read_count = | 530 | tx_queue->empty_read_count = |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index ba45150f53c7..86454d25a405 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | |||
827 | struct efx_nic *efx = channel->efx; | 827 | struct efx_nic *efx = channel->efx; |
828 | int tx_packets = 0; | 828 | int tx_packets = 0; |
829 | 829 | ||
830 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 830 | if (unlikely(READ_ONCE(efx->reset_pending))) |
831 | return 0; | 831 | return 0; |
832 | 832 | ||
833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 833 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | |||
979 | struct efx_rx_queue *rx_queue; | 979 | struct efx_rx_queue *rx_queue; |
980 | struct efx_nic *efx = channel->efx; | 980 | struct efx_nic *efx = channel->efx; |
981 | 981 | ||
982 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) | 982 | if (unlikely(READ_ONCE(efx->reset_pending))) |
983 | return; | 983 | return; |
984 | 984 | ||
985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); | 985 | rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); |
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) | |||
1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) | 1520 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) |
1521 | { | 1521 | { |
1522 | struct efx_nic *efx = dev_id; | 1522 | struct efx_nic *efx = dev_id; |
1523 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); | 1523 | bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); |
1524 | efx_oword_t *int_ker = efx->irq_status.addr; | 1524 | efx_oword_t *int_ker = efx->irq_status.addr; |
1525 | irqreturn_t result = IRQ_NONE; | 1525 | irqreturn_t result = IRQ_NONE; |
1526 | struct efx_channel *channel; | 1526 | struct efx_channel *channel; |
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) | |||
1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1612 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1613 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1614 | 1614 | ||
1615 | if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) | 1615 | if (!likely(READ_ONCE(efx->irq_soft_enabled))) |
1616 | return IRQ_HANDLED; | 1616 | return IRQ_HANDLED; |
1617 | 1617 | ||
1618 | /* Handle non-event-queue sources */ | 1618 | /* Handle non-event-queue sources */ |
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4d7fb8af880d..7b51b6371724 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h | |||
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) | |||
81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, | 81 | static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, |
82 | unsigned int write_count) | 82 | unsigned int write_count) |
83 | { | 83 | { |
84 | unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 84 | unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); |
85 | 85 | ||
86 | if (empty_read_count == 0) | 86 | if (empty_read_count == 0) |
87 | return false; | 87 | return false; |
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); | |||
617 | 617 | ||
618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) | 618 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) |
619 | { | 619 | { |
620 | return ACCESS_ONCE(channel->event_test_cpu); | 620 | return READ_ONCE(channel->event_test_cpu); |
621 | } | 621 | } |
622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) | 622 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) |
623 | { | 623 | { |
624 | return ACCESS_ONCE(efx->last_irq_cpu); | 624 | return READ_ONCE(efx->last_irq_cpu); |
625 | } | 625 | } |
626 | 626 | ||
627 | /* Global Resources */ | 627 | /* Global Resources */ |
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..56c2db398def 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c | |||
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
658 | 658 | ||
659 | /* Write host time for specified period or until MC is done */ | 659 | /* Write host time for specified period or until MC is done */ |
660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && | 660 | while ((timespec64_compare(&now.ts_real, &limit) < 0) && |
661 | ACCESS_ONCE(*mc_running)) { | 661 | READ_ONCE(*mc_running)) { |
662 | struct timespec64 update_time; | 662 | struct timespec64 update_time; |
663 | unsigned int host_time; | 663 | unsigned int host_time; |
664 | 664 | ||
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, | |||
668 | do { | 668 | do { |
669 | pps_get_ts(&now); | 669 | pps_get_ts(&now); |
670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && | 670 | } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && |
671 | ACCESS_ONCE(*mc_running)); | 671 | READ_ONCE(*mc_running)); |
672 | 672 | ||
673 | /* Synchronise NIC with single word of time only */ | 673 | /* Synchronise NIC with single word of time only */ |
674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | | 674 | host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
832 | ptp->start.dma_addr); | 832 | ptp->start.dma_addr); |
833 | 833 | ||
834 | /* Clear flag that signals MC ready */ | 834 | /* Clear flag that signals MC ready */ |
835 | ACCESS_ONCE(*start) = 0; | 835 | WRITE_ONCE(*start, 0); |
836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, | 836 | rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, |
837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); | 837 | MC_CMD_PTP_IN_SYNCHRONIZE_LEN); |
838 | EFX_WARN_ON_ONCE_PARANOID(rc); | 838 | EFX_WARN_ON_ONCE_PARANOID(rc); |
839 | 839 | ||
840 | /* Wait for start from MCDI (or timeout) */ | 840 | /* Wait for start from MCDI (or timeout) */ |
841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); | 841 | timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); |
842 | while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { | 842 | while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { |
843 | udelay(20); /* Usually start MCDI execution quickly */ | 843 | udelay(20); /* Usually start MCDI execution quickly */ |
844 | loops++; | 844 | loops++; |
845 | } | 845 | } |
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) | |||
849 | if (!time_before(jiffies, timeout)) | 849 | if (!time_before(jiffies, timeout)) |
850 | ++ptp->sync_timeouts; | 850 | ++ptp->sync_timeouts; |
851 | 851 | ||
852 | if (ACCESS_ONCE(*start)) | 852 | if (READ_ONCE(*start)) |
853 | efx_ptp_send_times(efx, &last_time); | 853 | efx_ptp_send_times(efx, &last_time); |
854 | 854 | ||
855 | /* Collect results */ | 855 | /* Collect results */ |
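The sfc PTP hunks show the flag-polling flavor of the conversion: clear a shared word with WRITE_ONCE(), kick off an asynchronous agent that will eventually set it, then poll it with READ_ONCE() so the compiler cannot hoist the load out of the loop and spin on a stale value. A userspace sketch under made-up names (the "firmware" thread and timeout below are invented for the example, and the macros are the simplified stand-ins used earlier):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static unsigned int start_flag;

    static void *fake_firmware(void *arg)
    {
            (void)arg;
            usleep(1000);                   /* pretend the MC takes a moment */
            WRITE_ONCE(start_flag, 1);      /* signal the host */
            return NULL;
    }

    int main(void)
    {
            pthread_t fw;
            int loops = 0;

            WRITE_ONCE(start_flag, 0);      /* clear the ready flag */
            pthread_create(&fw, NULL, fake_firmware, NULL);

            while (!READ_ONCE(start_flag) && loops < 1000000) {
                    loops++;                /* bounded busy-wait */
                    usleep(10);
            }

            printf("started=%u after %d loops\n", READ_ONCE(start_flag), loops);
            pthread_join(fw, NULL);
            return 0;
    }

The bounded loop mirrors the driver's jiffies timeout; the marked load is the only thing keeping the wait loop honest once optimization is enabled.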
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..efb66ea21f27 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c | |||
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) | |||
136 | */ | 136 | */ |
137 | netif_tx_stop_queue(txq1->core_txq); | 137 | netif_tx_stop_queue(txq1->core_txq); |
138 | smp_mb(); | 138 | smp_mb(); |
139 | txq1->old_read_count = ACCESS_ONCE(txq1->read_count); | 139 | txq1->old_read_count = READ_ONCE(txq1->read_count); |
140 | txq2->old_read_count = ACCESS_ONCE(txq2->read_count); | 140 | txq2->old_read_count = READ_ONCE(txq2->read_count); |
141 | 141 | ||
142 | fill_level = max(txq1->insert_count - txq1->old_read_count, | 142 | fill_level = max(txq1->insert_count - txq1->old_read_count, |
143 | txq2->insert_count - txq2->old_read_count); | 143 | txq2->insert_count - txq2->old_read_count); |
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
752 | 752 | ||
753 | /* Check whether the hardware queue is now empty */ | 753 | /* Check whether the hardware queue is now empty */ |
754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { | 754 | if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { |
755 | tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); | 755 | tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); |
756 | if (tx_queue->read_count == tx_queue->old_write_count) { | 756 | if (tx_queue->read_count == tx_queue->old_write_count) { |
757 | smp_mb(); | 757 | smp_mb(); |
758 | tx_queue->empty_read_count = | 758 | tx_queue->empty_read_count = |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..8ab0fb6892d5 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np, | |||
6245 | 6245 | ||
6246 | pkts = dropped = errors = bytes = 0; | 6246 | pkts = dropped = errors = bytes = 0; |
6247 | 6247 | ||
6248 | rx_rings = ACCESS_ONCE(np->rx_rings); | 6248 | rx_rings = READ_ONCE(np->rx_rings); |
6249 | if (!rx_rings) | 6249 | if (!rx_rings) |
6250 | goto no_rings; | 6250 | goto no_rings; |
6251 | 6251 | ||
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np, | |||
6276 | 6276 | ||
6277 | pkts = errors = bytes = 0; | 6277 | pkts = errors = bytes = 0; |
6278 | 6278 | ||
6279 | tx_rings = ACCESS_ONCE(np->tx_rings); | 6279 | tx_rings = READ_ONCE(np->tx_rings); |
6280 | if (!tx_rings) | 6280 | if (!tx_rings) |
6281 | goto no_rings; | 6281 | goto no_rings; |
6282 | 6282 | ||
diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6c0c84c33e1f..b13890953ebb 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c | |||
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap, | |||
257 | * and validate that the result isn't NULL - in case we are | 257 | * and validate that the result isn't NULL - in case we are |
258 | * racing against queue removal. | 258 | * racing against queue removal. |
259 | */ | 259 | */ |
260 | int numvtaps = ACCESS_ONCE(tap->numvtaps); | 260 | int numvtaps = READ_ONCE(tap->numvtaps); |
261 | __u32 rxq; | 261 | __u32 rxq; |
262 | 262 | ||
263 | if (!numvtaps) | 263 | if (!numvtaps) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42bb820a56c9..c1685a6d7883 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
469 | u32 numqueues = 0; | 469 | u32 numqueues = 0; |
470 | 470 | ||
471 | rcu_read_lock(); | 471 | rcu_read_lock(); |
472 | numqueues = ACCESS_ONCE(tun->numqueues); | 472 | numqueues = READ_ONCE(tun->numqueues); |
473 | 473 | ||
474 | txq = __skb_get_hash_symmetric(skb); | 474 | txq = __skb_get_hash_symmetric(skb); |
475 | if (txq) { | 475 | if (txq) { |
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | |||
864 | 864 | ||
865 | rcu_read_lock(); | 865 | rcu_read_lock(); |
866 | tfile = rcu_dereference(tun->tfiles[txq]); | 866 | tfile = rcu_dereference(tun->tfiles[txq]); |
867 | numqueues = ACCESS_ONCE(tun->numqueues); | 867 | numqueues = READ_ONCE(tun->numqueues); |
868 | 868 | ||
869 | /* Drop packet if interface is not attached */ | 869 | /* Drop packet if interface is not attached */ |
870 | if (txq >= numqueues) | 870 | if (txq >= numqueues) |
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index bd8d4392d68b..80f75139495f 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c | |||
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, | |||
500 | 500 | ||
501 | tx_status = &desc->ud.ds_tx5212.tx_stat; | 501 | tx_status = &desc->ud.ds_tx5212.tx_stat; |
502 | 502 | ||
503 | txstat1 = ACCESS_ONCE(tx_status->tx_status_1); | 503 | txstat1 = READ_ONCE(tx_status->tx_status_1); |
504 | 504 | ||
505 | /* No frame has been send or error */ | 505 | /* No frame has been send or error */ |
506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) | 506 | if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) |
507 | return -EINPROGRESS; | 507 | return -EINPROGRESS; |
508 | 508 | ||
509 | txstat0 = ACCESS_ONCE(tx_status->tx_status_0); | 509 | txstat0 = READ_ONCE(tx_status->tx_status_0); |
510 | 510 | ||
511 | /* | 511 | /* |
512 | * Get descriptor status | 512 | * Get descriptor status |
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, | |||
700 | u32 rxstat0, rxstat1; | 700 | u32 rxstat0, rxstat1; |
701 | 701 | ||
702 | rx_status = &desc->ud.ds_rx.rx_stat; | 702 | rx_status = &desc->ud.ds_rx.rx_stat; |
703 | rxstat1 = ACCESS_ONCE(rx_status->rx_status_1); | 703 | rxstat1 = READ_ONCE(rx_status->rx_status_1); |
704 | 704 | ||
705 | /* No frame received / not ready */ | 705 | /* No frame received / not ready */ |
706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) | 706 | if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) |
707 | return -EINPROGRESS; | 707 | return -EINPROGRESS; |
708 | 708 | ||
709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); | 709 | memset(rs, 0, sizeof(struct ath5k_rx_status)); |
710 | rxstat0 = ACCESS_ONCE(rx_status->rx_status_0); | 710 | rxstat0 = READ_ONCE(rx_status->rx_status_0); |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * Frame receive status | 713 | * Frame receive status |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..785a0f33b7e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work) | |||
3628 | 3628 | ||
3629 | bus->dpc_running = true; | 3629 | bus->dpc_running = true; |
3630 | wmb(); | 3630 | wmb(); |
3631 | while (ACCESS_ONCE(bus->dpc_triggered)) { | 3631 | while (READ_ONCE(bus->dpc_triggered)) { |
3632 | bus->dpc_triggered = false; | 3632 | bus->dpc_triggered = false; |
3633 | brcmf_sdio_dpc(bus); | 3633 | brcmf_sdio_dpc(bus); |
3634 | bus->idlecount = 0; | 3634 | bus->idlecount = 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..0f45f34e39d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) | |||
1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) | 1118 | static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) |
1119 | { | 1119 | { |
1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1120 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1121 | bool calibrating = ACCESS_ONCE(mvm->calibrating); | 1121 | bool calibrating = READ_ONCE(mvm->calibrating); |
1122 | 1122 | ||
1123 | if (state) | 1123 | if (state) |
1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); | 1124 | set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..6e9d3289b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
652 | return -1; | 652 | return -1; |
653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && | 653 | } else if (info.control.vif->type == NL80211_IFTYPE_STATION && |
654 | is_multicast_ether_addr(hdr->addr1)) { | 654 | is_multicast_ether_addr(hdr->addr1)) { |
655 | u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); | 655 | u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); |
656 | 656 | ||
657 | if (ap_sta_id != IWL_MVM_INVALID_STA) | 657 | if (ap_sta_id != IWL_MVM_INVALID_STA) |
658 | sta_id = ap_sta_id; | 658 | sta_id = ap_sta_id; |
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + | 700 | snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + |
701 | tcp_hdrlen(skb); | 701 | tcp_hdrlen(skb); |
702 | 702 | ||
703 | dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); | 703 | dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); |
704 | 704 | ||
705 | if (!sta->max_amsdu_len || | 705 | if (!sta->max_amsdu_len || |
706 | !ieee80211_is_data_qos(hdr->frame_control) || | 706 | !ieee80211_is_data_qos(hdr->frame_control) || |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b658..f25ce3a1ea50 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c | |||
@@ -1247,7 +1247,7 @@ restart: | |||
1247 | spin_lock(&rxq->lock); | 1247 | spin_lock(&rxq->lock); |
1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | 1248 | /* uCode's read index (stored in shared DRAM) indicates the last Rx |
1249 | * buffer that the driver may process (last buffer filled by ucode). */ | 1249 | * buffer that the driver may process (last buffer filled by ucode). */ |
1250 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 1250 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
1251 | i = rxq->read; | 1251 | i = rxq->read; |
1252 | 1252 | ||
1253 | /* W/A 9000 device step A0 wrap-around bug */ | 1253 | /* W/A 9000 device step A0 wrap-around bug */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..9ad3f4fe5894 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) | |||
2076 | 2076 | ||
2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); | 2077 | IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); |
2078 | txq = trans_pcie->txq[txq_idx]; | 2078 | txq = trans_pcie->txq[txq_idx]; |
2079 | wr_ptr = ACCESS_ONCE(txq->write_ptr); | 2079 | wr_ptr = READ_ONCE(txq->write_ptr); |
2080 | 2080 | ||
2081 | while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && | 2081 | while (txq->read_ptr != READ_ONCE(txq->write_ptr) && |
2082 | !time_after(jiffies, | 2082 | !time_after(jiffies, |
2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { | 2083 | now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { |
2084 | u8 write_ptr = ACCESS_ONCE(txq->write_ptr); | 2084 | u8 write_ptr = READ_ONCE(txq->write_ptr); |
2085 | 2085 | ||
2086 | if (WARN_ONCE(wr_ptr != write_ptr, | 2086 | if (WARN_ONCE(wr_ptr != write_ptr, |
2087 | "WR pointer moved while flushing %d -> %d\n", | 2087 | "WR pointer moved while flushing %d -> %d\n", |
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, | |||
2553 | 2553 | ||
2554 | spin_lock(&rxq->lock); | 2554 | spin_lock(&rxq->lock); |
2555 | 2555 | ||
2556 | r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; | 2556 | r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; |
2557 | 2557 | ||
2558 | for (i = rxq->read, j = 0; | 2558 | for (i = rxq->read, j = 0; |
2559 | i != r && j < allocated_rb_nums; | 2559 | i != r && j < allocated_rb_nums; |
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data | |||
2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ | 2814 | /* Dump RBs is supported only for pre-9000 devices (1 queue) */ |
2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; | 2815 | struct iwl_rxq *rxq = &trans_pcie->rxq[0]; |
2816 | /* RBs */ | 2816 | /* RBs */ |
2817 | num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) | 2817 | num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) |
2818 | & 0x0FFF; | 2818 | & 0x0FFF; |
2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; | 2819 | num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; |
2820 | len += num_rbs * (sizeof(*data) + | 2820 | len += num_rbs * (sizeof(*data) + |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d2b3d6177a55 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, | |||
1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); | 1380 | mac80211_hwsim_monitor_rx(hw, skb, channel); |
1381 | 1381 | ||
1382 | /* wmediumd mode check */ | 1382 | /* wmediumd mode check */ |
1383 | _portid = ACCESS_ONCE(data->wmediumd); | 1383 | _portid = READ_ONCE(data->wmediumd); |
1384 | 1384 | ||
1385 | if (_portid) | 1385 | if (_portid) |
1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); | 1386 | return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); |
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, | |||
1477 | struct ieee80211_channel *chan) | 1477 | struct ieee80211_channel *chan) |
1478 | { | 1478 | { |
1479 | struct mac80211_hwsim_data *data = hw->priv; | 1479 | struct mac80211_hwsim_data *data = hw->priv; |
1480 | u32 _pid = ACCESS_ONCE(data->wmediumd); | 1480 | u32 _pid = READ_ONCE(data->wmediumd); |
1481 | 1481 | ||
1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { | 1482 | if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { |
1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); | 1483 | struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..f946bf889015 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work) | |||
996 | if (logout_started) { | 996 | if (logout_started) { |
997 | bool traced = false; | 997 | bool traced = false; |
998 | 998 | ||
999 | while (!ACCESS_ONCE(sess->logout_completed)) { | 999 | while (!READ_ONCE(sess->logout_completed)) { |
1000 | if (!traced) { | 1000 | if (!traced) { |
1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, | 1001 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, |
1002 | "%s: waiting for sess %p logout\n", | 1002 | "%s: waiting for sess %p logout\n", |
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..9469695f5871 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) | |||
985 | mb = udev->mb_addr; | 985 | mb = udev->mb_addr; |
986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); | 986 | tcmu_flush_dcache_range(mb, sizeof(*mb)); |
987 | 987 | ||
988 | while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { | 988 | while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { |
989 | 989 | ||
990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; | 990 | struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; |
991 | struct tcmu_cmd *cmd; | 991 | struct tcmu_cmd *cmd; |
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3e865dbf878c..fbaa2a90d25d 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
@@ -483,7 +483,7 @@ static ssize_t wdm_read | |||
483 | if (rv < 0) | 483 | if (rv < 0) |
484 | return -ERESTARTSYS; | 484 | return -ERESTARTSYS; |
485 | 485 | ||
486 | cntr = ACCESS_ONCE(desc->length); | 486 | cntr = READ_ONCE(desc->length); |
487 | if (cntr == 0) { | 487 | if (cntr == 0) { |
488 | desc->read = 0; | 488 | desc->read = 0; |
489 | retry: | 489 | retry: |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..4ae667d8c238 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount) | |||
150 | { | 150 | { |
151 | u64 lim; | 151 | u64 lim; |
152 | 152 | ||
153 | lim = ACCESS_ONCE(usbfs_memory_mb); | 153 | lim = READ_ONCE(usbfs_memory_mb); |
154 | lim <<= 20; | 154 | lim <<= 20; |
155 | 155 | ||
156 | atomic64_add(amount, &usbfs_memory_usage); | 156 | atomic64_add(amount, &usbfs_memory_usage); |
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d930bfda4010..58d59c5f8592 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c | |||
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr, | |||
973 | char *string; | 973 | char *string; |
974 | 974 | ||
975 | intf = to_usb_interface(dev); | 975 | intf = to_usb_interface(dev); |
976 | string = ACCESS_ONCE(intf->cur_altsetting->string); | 976 | string = READ_ONCE(intf->cur_altsetting->string); |
977 | if (!string) | 977 | if (!string) |
978 | return 0; | 978 | return 0; |
979 | return sprintf(buf, "%s\n", string); | 979 | return sprintf(buf, "%s\n", string); |
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | |||
989 | 989 | ||
990 | intf = to_usb_interface(dev); | 990 | intf = to_usb_interface(dev); |
991 | udev = interface_to_usbdev(intf); | 991 | udev = interface_to_usbdev(intf); |
992 | alt = ACCESS_ONCE(intf->cur_altsetting); | 992 | alt = READ_ONCE(intf->cur_altsetting); |
993 | 993 | ||
994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" | 994 | return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" |
995 | "ic%02Xisc%02Xip%02Xin%02X\n", | 995 | "ic%02Xisc%02Xip%02Xin%02X\n", |
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 1f9941145746..0b59fa50aa30 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c | |||
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep) | |||
1261 | if (!req->last_desc) | 1261 | if (!req->last_desc) |
1262 | return 0; | 1262 | return 0; |
1263 | 1263 | ||
1264 | if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) | 1264 | if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) |
1265 | return 0; /* Not put in hardware buffers yet */ | 1265 | return 0; /* Not put in hardware buffers yet */ |
1266 | 1266 | ||
1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) | 1267 | if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) |
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep) | |||
1290 | if (!req->curr_desc) | 1290 | if (!req->curr_desc) |
1291 | return 0; | 1291 | return 0; |
1292 | 1292 | ||
1293 | ctrl = ACCESS_ONCE(req->curr_desc->ctrl); | 1293 | ctrl = READ_ONCE(req->curr_desc->ctrl); |
1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) | 1294 | if (ctrl & GR_DESC_OUT_CTRL_EN) |
1295 | return 0; /* Not received yet */ | 1295 | return 0; /* Not received yet */ |
1296 | 1296 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 44924824fa41..c86f89babd57 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | /* find the last TD processed by the controller. */ | 787 | /* find the last TD processed by the controller. */ |
788 | head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK; | 788 | head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK; |
789 | td_start = td; | 789 | td_start = td; |
790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); | 790 | td_next = list_prepare_entry(td, &ed->td_list, td_list); |
791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { | 791 | list_for_each_entry_continue(td_next, &ed->td_list, td_list) { |
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index d97f0d9b3ce6..f1cc47292a59 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h | |||
@@ -187,7 +187,7 @@ struct uhci_qh { | |||
187 | * We need a special accessor for the element pointer because it is | 187 | * We need a special accessor for the element pointer because it is |
188 | * subject to asynchronous updates by the controller. | 188 | * subject to asynchronous updates by the controller. |
189 | */ | 189 | */ |
190 | #define qh_element(qh) ACCESS_ONCE((qh)->element) | 190 | #define qh_element(qh) READ_ONCE((qh)->element) |
191 | 191 | ||
192 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ | 192 | #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ |
193 | cpu_to_hc32((uhci), (qh)->dma_handle)) | 193 | cpu_to_hc32((uhci), (qh)->dma_handle)) |
@@ -275,7 +275,7 @@ struct uhci_td { | |||
275 | * subject to asynchronous updates by the controller. | 275 | * subject to asynchronous updates by the controller. |
276 | */ | 276 | */ |
277 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ | 277 | #define td_status(uhci, td) hc32_to_cpu((uhci), \ |
278 | ACCESS_ONCE((td)->status)) | 278 | READ_ONCE((td)->status)) |
279 | 279 | ||
280 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) | 280 | #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) |
281 | 281 | ||
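The uhci-hcd.h comments above spell out why these fields need a special accessor: the controller rewrites them asynchronously, so every poll must be a fresh, untorn load. A hedged sketch of that pattern follows; the struct, macro and bit below are hypothetical, not UHCI definitions.

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Hypothetical DMA descriptor whose status word the device updates while
     * the CPU polls it; READ_ONCE() stops the compiler from caching the
     * value across iterations.
     */
    struct example_td {
            u32 status;
    };

    #define EXAMPLE_TD_ACTIVE       (1U << 23)      /* hypothetical bit */
    #define example_td_status(td)   READ_ONCE((td)->status)

    static bool example_td_active(const struct example_td *td)
    {
            return example_td_status(td) & EXAMPLE_TD_ACTIVE;
    }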
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index f5a86f651f38..2bc3705a99bd 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data) | |||
665 | { | 665 | { |
666 | struct vfio_group *group = data; | 666 | struct vfio_group *group = data; |
667 | struct vfio_device *device; | 667 | struct vfio_device *device; |
668 | struct device_driver *drv = ACCESS_ONCE(dev->driver); | 668 | struct device_driver *drv = READ_ONCE(dev->driver); |
669 | struct vfio_unbound_dev *unbound; | 669 | struct vfio_unbound_dev *unbound; |
670 | int ret = -EINVAL; | 670 | int ret = -EINVAL; |
671 | 671 | ||
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..35e929f132e8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) | |||
929 | continue; | 929 | continue; |
930 | } | 930 | } |
931 | 931 | ||
932 | tpg = ACCESS_ONCE(vs_tpg[*target]); | 932 | tpg = READ_ONCE(vs_tpg[*target]); |
933 | if (unlikely(!tpg)) { | 933 | if (unlikely(!tpg)) { |
934 | /* Target does not exist, fail the request */ | 934 | /* Target does not exist, fail the request */ |
935 | vhost_scsi_send_bad_target(vs, vq, head, out); | 935 | vhost_scsi_send_bad_target(vs, vq, head, out); |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) | |||
576 | * actually has a cancel function, hence the cmpxchg() | 576 | * actually has a cancel function, hence the cmpxchg() |
577 | */ | 577 | */ |
578 | 578 | ||
579 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | 579 | cancel = READ_ONCE(kiocb->ki_cancel); |
580 | do { | 580 | do { |
581 | if (!cancel || cancel == KIOCB_CANCELLED) | 581 | if (!cancel || cancel == KIOCB_CANCELLED) |
582 | return -EINVAL; | 582 | return -EINVAL; |
diff --git a/fs/buffer.c b/fs/buffer.c index 170df856bdb9..32ce01f0f95f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1692,7 +1692,8 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode * | |||
1692 | BUG_ON(!PageLocked(page)); | 1692 | BUG_ON(!PageLocked(page)); |
1693 | 1693 | ||
1694 | if (!page_has_buffers(page)) | 1694 | if (!page_has_buffers(page)) |
1695 | create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); | 1695 | create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits), |
1696 | b_state); | ||
1696 | return page_buffers(page); | 1697 | return page_buffers(page); |
1697 | } | 1698 | } |
1698 | 1699 | ||
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index a38630214058..577dfaf0367f 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -374,7 +374,7 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) | |||
374 | struct fscrypt_info *prev; | 374 | struct fscrypt_info *prev; |
375 | 375 | ||
376 | if (ci == NULL) | 376 | if (ci == NULL) |
377 | ci = ACCESS_ONCE(inode->i_crypt_info); | 377 | ci = READ_ONCE(inode->i_crypt_info); |
378 | if (ci == NULL) | 378 | if (ci == NULL) |
379 | return; | 379 | return; |
380 | 380 | ||
diff --git a/fs/dcache.c b/fs/dcache.c index f90141387f01..bcc9f6981569 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c | |||
231 | { | 231 | { |
232 | /* | 232 | /* |
233 | * Be careful about RCU walk racing with rename: | 233 | * Be careful about RCU walk racing with rename: |
234 | * use 'lockless_dereference' to fetch the name pointer. | 234 | * use 'READ_ONCE' to fetch the name pointer. |
235 | * | 235 | * |
236 | * NOTE! Even if a rename will mean that the length | 236 | * NOTE! Even if a rename will mean that the length |
237 | * was not loaded atomically, we don't care. The | 237 | * was not loaded atomically, we don't care. The |
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c | |||
245 | * early because the data cannot match (there can | 245 | * early because the data cannot match (there can |
246 | * be no NUL in the ct/tcount data) | 246 | * be no NUL in the ct/tcount data) |
247 | */ | 247 | */ |
248 | const unsigned char *cs = lockless_dereference(dentry->d_name.name); | 248 | const unsigned char *cs = READ_ONCE(dentry->d_name.name); |
249 | 249 | ||
250 | return dentry_string_cmp(cs, ct, tcount); | 250 | return dentry_string_cmp(cs, ct, tcount); |
251 | } | 251 | } |
@@ -630,7 +630,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry) | |||
630 | rcu_read_lock(); | 630 | rcu_read_lock(); |
631 | spin_unlock(&dentry->d_lock); | 631 | spin_unlock(&dentry->d_lock); |
632 | again: | 632 | again: |
633 | parent = ACCESS_ONCE(dentry->d_parent); | 633 | parent = READ_ONCE(dentry->d_parent); |
634 | spin_lock(&parent->d_lock); | 634 | spin_lock(&parent->d_lock); |
635 | /* | 635 | /* |
636 | * We can't blindly lock dentry until we are sure | 636 | * We can't blindly lock dentry until we are sure |
@@ -721,7 +721,7 @@ static inline bool fast_dput(struct dentry *dentry) | |||
721 | * around with a zero refcount. | 721 | * around with a zero refcount. |
722 | */ | 722 | */ |
723 | smp_rmb(); | 723 | smp_rmb(); |
724 | d_flags = ACCESS_ONCE(dentry->d_flags); | 724 | d_flags = READ_ONCE(dentry->d_flags); |
725 | d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED; | 725 | d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED; |
726 | 726 | ||
727 | /* Nothing to do? Dropping the reference was all we needed? */ | 727 | /* Nothing to do? Dropping the reference was all we needed? */ |
@@ -850,11 +850,11 @@ struct dentry *dget_parent(struct dentry *dentry) | |||
850 | * locking. | 850 | * locking. |
851 | */ | 851 | */ |
852 | rcu_read_lock(); | 852 | rcu_read_lock(); |
853 | ret = ACCESS_ONCE(dentry->d_parent); | 853 | ret = READ_ONCE(dentry->d_parent); |
854 | gotref = lockref_get_not_zero(&ret->d_lockref); | 854 | gotref = lockref_get_not_zero(&ret->d_lockref); |
855 | rcu_read_unlock(); | 855 | rcu_read_unlock(); |
856 | if (likely(gotref)) { | 856 | if (likely(gotref)) { |
857 | if (likely(ret == ACCESS_ONCE(dentry->d_parent))) | 857 | if (likely(ret == READ_ONCE(dentry->d_parent))) |
858 | return ret; | 858 | return ret; |
859 | dput(ret); | 859 | dput(ret); |
860 | } | 860 | } |
@@ -3040,7 +3040,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen) | |||
3040 | * @buflen: allocated length of the buffer | 3040 | * @buflen: allocated length of the buffer |
3041 | * @name: name string and length qstr structure | 3041 | * @name: name string and length qstr structure |
3042 | * | 3042 | * |
3043 | * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to | 3043 | * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to |
3044 | * make sure that either the old or the new name pointer and length are | 3044 | * make sure that either the old or the new name pointer and length are |
3045 | * fetched. However, there may be mismatch between length and pointer. | 3045 | * fetched. However, there may be mismatch between length and pointer. |
3046 | * The length cannot be trusted, we need to copy it byte-by-byte until | 3046 | * The length cannot be trusted, we need to copy it byte-by-byte until |
@@ -3054,8 +3054,8 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen) | |||
3054 | */ | 3054 | */ |
3055 | static int prepend_name(char **buffer, int *buflen, const struct qstr *name) | 3055 | static int prepend_name(char **buffer, int *buflen, const struct qstr *name) |
3056 | { | 3056 | { |
3057 | const char *dname = ACCESS_ONCE(name->name); | 3057 | const char *dname = READ_ONCE(name->name); |
3058 | u32 dlen = ACCESS_ONCE(name->len); | 3058 | u32 dlen = READ_ONCE(name->len); |
3059 | char *p; | 3059 | char *p; |
3060 | 3060 | ||
3061 | smp_read_barrier_depends(); | 3061 | smp_read_barrier_depends(); |
@@ -3120,7 +3120,7 @@ restart: | |||
3120 | struct dentry * parent; | 3120 | struct dentry * parent; |
3121 | 3121 | ||
3122 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { | 3122 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { |
3123 | struct mount *parent = ACCESS_ONCE(mnt->mnt_parent); | 3123 | struct mount *parent = READ_ONCE(mnt->mnt_parent); |
3124 | /* Escaped? */ | 3124 | /* Escaped? */ |
3125 | if (dentry != vfsmnt->mnt_root) { | 3125 | if (dentry != vfsmnt->mnt_root) { |
3126 | bptr = *buffer; | 3126 | bptr = *buffer; |
@@ -3130,7 +3130,7 @@ restart: | |||
3130 | } | 3130 | } |
3131 | /* Global root? */ | 3131 | /* Global root? */ |
3132 | if (mnt != parent) { | 3132 | if (mnt != parent) { |
3133 | dentry = ACCESS_ONCE(mnt->mnt_mountpoint); | 3133 | dentry = READ_ONCE(mnt->mnt_mountpoint); |
3134 | mnt = parent; | 3134 | mnt = parent; |
3135 | vfsmnt = &mnt->mnt; | 3135 | vfsmnt = &mnt->mnt; |
3136 | continue; | 3136 | continue; |
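The prepend_name() comment above is the clearest statement of the rule these fs/dcache.c hunks follow: under RCU pathwalk a rename can change the name pointer and length independently, so each is fetched once and the copy stops at the first NUL regardless of the fetched length. A simplified, hedged sketch (not the kernel function, and eliding the dependency-barrier detail visible in the hunk):

    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Copy a possibly-racing (pointer, length) pair: read each exactly once,
     * never trust 'len' past the terminating NUL, and never overrun 'dst'.
     */
    static size_t copy_name_rcu(char *dst, size_t dstlen,
                                const char * const *namep, const u32 *lenp)
    {
            const char *name = READ_ONCE(*namep);
            u32 len = READ_ONCE(*lenp);
            size_t n = 0;

            if (!dstlen)
                    return 0;

            while (n < len && n + 1 < dstlen && name[n] != '\0') {
                    dst[n] = name[n];
                    n++;
            }
            dst[n] = '\0';
            return n;
    }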
diff --git a/fs/direct-io.c b/fs/direct-io.c index b53e66d9abd7..98fe1325da9d 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1152 | get_block_t get_block, dio_iodone_t end_io, | 1152 | get_block_t get_block, dio_iodone_t end_io, |
1153 | dio_submit_t submit_io, int flags) | 1153 | dio_submit_t submit_io, int flags) |
1154 | { | 1154 | { |
1155 | unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); | 1155 | unsigned i_blkbits = READ_ONCE(inode->i_blkbits); |
1156 | unsigned blkbits = i_blkbits; | 1156 | unsigned blkbits = i_blkbits; |
1157 | unsigned blocksize_mask = (1 << blkbits) - 1; | 1157 | unsigned blocksize_mask = (1 << blkbits) - 1; |
1158 | ssize_t retval = -EINVAL; | 1158 | ssize_t retval = -EINVAL; |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -1911,7 +1911,7 @@ void set_dumpable(struct mm_struct *mm, int value) | |||
1911 | return; | 1911 | return; |
1912 | 1912 | ||
1913 | do { | 1913 | do { |
1914 | old = ACCESS_ONCE(mm->flags); | 1914 | old = READ_ONCE(mm->flags); |
1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; | 1915 | new = (old & ~MMF_DUMPABLE_MASK) | value; |
1916 | } while (cmpxchg(&mm->flags, old, new) != old); | 1916 | } while (cmpxchg(&mm->flags, old, new) != old); |
1917 | } | 1917 | } |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 8d78ffd7b399..30f47d0f74a0 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -725,7 +725,7 @@ static void send_sigio_to_task(struct task_struct *p, | |||
725 | * F_SETSIG can change ->signum lockless in parallel, make | 725 | * F_SETSIG can change ->signum lockless in parallel, make |
726 | * sure we read it once and use the same value throughout. | 726 | * sure we read it once and use the same value throughout. |
727 | */ | 727 | */ |
728 | int signum = ACCESS_ONCE(fown->signum); | 728 | int signum = READ_ONCE(fown->signum); |
729 | 729 | ||
730 | if (!sigio_perm(p, fown, signum)) | 730 | if (!sigio_perm(p, fown, signum)) |
731 | return; | 731 | return; |
diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 0d285fd5b44a..a6497cf8ae53 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c | |||
@@ -79,7 +79,7 @@ void mnt_pin_kill(struct mount *m) | |||
79 | while (1) { | 79 | while (1) { |
80 | struct hlist_node *p; | 80 | struct hlist_node *p; |
81 | rcu_read_lock(); | 81 | rcu_read_lock(); |
82 | p = ACCESS_ONCE(m->mnt_pins.first); | 82 | p = READ_ONCE(m->mnt_pins.first); |
83 | if (!p) { | 83 | if (!p) { |
84 | rcu_read_unlock(); | 84 | rcu_read_unlock(); |
85 | break; | 85 | break; |
@@ -93,7 +93,7 @@ void group_pin_kill(struct hlist_head *p) | |||
93 | while (1) { | 93 | while (1) { |
94 | struct hlist_node *q; | 94 | struct hlist_node *q; |
95 | rcu_read_lock(); | 95 | rcu_read_lock(); |
96 | q = ACCESS_ONCE(p->first); | 96 | q = READ_ONCE(p->first); |
97 | if (!q) { | 97 | if (!q) { |
98 | rcu_read_unlock(); | 98 | rcu_read_unlock(); |
99 | break; | 99 | break; |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 13c65dd2d37d..a42d89371748 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file) | |||
33 | * Lockless access is OK, because file->private data is set | 33 | * Lockless access is OK, because file->private data is set |
34 | * once during mount and is valid until the file is released. | 34 | * once during mount and is valid until the file is released. |
35 | */ | 35 | */ |
36 | return ACCESS_ONCE(file->private_data); | 36 | return READ_ONCE(file->private_data); |
37 | } | 37 | } |
38 | 38 | ||
39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, | 39 | static void fuse_request_init(struct fuse_req *req, struct page **pages, |
diff --git a/fs/inode.c b/fs/inode.c index d1e35b53bb23..fd401028a309 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags, | |||
2090 | 2090 | ||
2091 | WARN_ON_ONCE(flags & ~mask); | 2091 | WARN_ON_ONCE(flags & ~mask); |
2092 | do { | 2092 | do { |
2093 | old_flags = ACCESS_ONCE(inode->i_flags); | 2093 | old_flags = READ_ONCE(inode->i_flags); |
2094 | new_flags = (old_flags & ~mask) | flags; | 2094 | new_flags = (old_flags & ~mask) | flags; |
2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, | 2095 | } while (unlikely(cmpxchg(&inode->i_flags, old_flags, |
2096 | new_flags) != old_flags)); | 2096 | new_flags) != old_flags)); |
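inode_set_flags() above (and set_dumpable() earlier) use the same lockless read-modify-write idiom; a hedged generic sketch of it, with names invented for illustration:

    #include <linux/atomic.h>
    #include <linux/compiler.h>

    /* Snapshot the word once with READ_ONCE(), compute the new value, and
     * let cmpxchg() retry the update if another CPU changed the word
     * meanwhile.
     */
    static void set_bits_lockless(unsigned long *word,
                                  unsigned long mask, unsigned long bits)
    {
            unsigned long old, new;

            do {
                    old = READ_ONCE(*word);
                    new = (old & ~mask) | bits;
            } while (cmpxchg(word, old, new) != old);
    }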
diff --git a/fs/namei.c b/fs/namei.c index ed8b9488a890..5424b10cfdc4 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1210,7 +1210,7 @@ static int follow_managed(struct path *path, struct nameidata *nd) | |||
1210 | /* Given that we're not holding a lock here, we retain the value in a | 1210 | /* Given that we're not holding a lock here, we retain the value in a |
1211 | * local variable for each dentry as we look at it so that we don't see | 1211 | * local variable for each dentry as we look at it so that we don't see |
1212 | * the components of that value change under us */ | 1212 | * the components of that value change under us */ |
1213 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1213 | while (managed = READ_ONCE(path->dentry->d_flags), |
1214 | managed &= DCACHE_MANAGED_DENTRY, | 1214 | managed &= DCACHE_MANAGED_DENTRY, |
1215 | unlikely(managed != 0)) { | 1215 | unlikely(managed != 0)) { |
1216 | /* Allow the filesystem to manage the transit without i_mutex | 1216 | /* Allow the filesystem to manage the transit without i_mutex |
@@ -1395,7 +1395,7 @@ int follow_down(struct path *path) | |||
1395 | unsigned managed; | 1395 | unsigned managed; |
1396 | int ret; | 1396 | int ret; |
1397 | 1397 | ||
1398 | while (managed = ACCESS_ONCE(path->dentry->d_flags), | 1398 | while (managed = READ_ONCE(path->dentry->d_flags), |
1399 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { | 1399 | unlikely(managed & DCACHE_MANAGED_DENTRY)) { |
1400 | /* Allow the filesystem to manage the transit without i_mutex | 1400 | /* Allow the filesystem to manage the transit without i_mutex |
1401 | * being held. | 1401 | * being held. |
diff --git a/fs/namespace.c b/fs/namespace.c index d18deb4c410b..e158ec6b527b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m) | |||
353 | * incremented count after it has set MNT_WRITE_HOLD. | 353 | * incremented count after it has set MNT_WRITE_HOLD. |
354 | */ | 354 | */ |
355 | smp_mb(); | 355 | smp_mb(); |
356 | while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) | 356 | while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) |
357 | cpu_relax(); | 357 | cpu_relax(); |
358 | /* | 358 | /* |
359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will | 359 | * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will |
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index b5ec1d980dc9..0c57c5c5d40a 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c | |||
@@ -120,10 +120,6 @@ static inline int ncp_case_sensitive(const struct inode *i) | |||
120 | /* | 120 | /* |
121 | * Note: leave the hash unchanged if the directory | 121 | * Note: leave the hash unchanged if the directory |
122 | * is case-sensitive. | 122 | * is case-sensitive. |
123 | * | ||
124 | * Accessing the parent inode can be racy under RCU pathwalking. | ||
125 | * Use ACCESS_ONCE() to make sure we use _one_ particular inode, | ||
126 | * the callers will handle races. | ||
127 | */ | 123 | */ |
128 | static int | 124 | static int |
129 | ncp_hash_dentry(const struct dentry *dentry, struct qstr *this) | 125 | ncp_hash_dentry(const struct dentry *dentry, struct qstr *this) |
@@ -148,11 +144,6 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this) | |||
148 | return 0; | 144 | return 0; |
149 | } | 145 | } |
150 | 146 | ||
151 | /* | ||
152 | * Accessing the parent inode can be racy under RCU pathwalking. | ||
153 | * Use ACCESS_ONCE() to make sure we use _one_ particular inode, | ||
154 | * the callers will handle races. | ||
155 | */ | ||
156 | static int | 147 | static int |
157 | ncp_compare_dentry(const struct dentry *dentry, | 148 | ncp_compare_dentry(const struct dentry *dentry, |
158 | unsigned int len, const char *str, const struct qstr *name) | 149 | unsigned int len, const char *str, const struct qstr *name) |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5ceaeb1f6fb6..f439f1c45008 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1081 | int error; | 1081 | int error; |
1082 | 1082 | ||
1083 | if (flags & LOOKUP_RCU) { | 1083 | if (flags & LOOKUP_RCU) { |
1084 | parent = ACCESS_ONCE(dentry->d_parent); | 1084 | parent = READ_ONCE(dentry->d_parent); |
1085 | dir = d_inode_rcu(parent); | 1085 | dir = d_inode_rcu(parent); |
1086 | if (!dir) | 1086 | if (!dir) |
1087 | return -ECHILD; | 1087 | return -ECHILD; |
@@ -1168,7 +1168,7 @@ out_set_verifier: | |||
1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); | 1168 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
1169 | out_valid: | 1169 | out_valid: |
1170 | if (flags & LOOKUP_RCU) { | 1170 | if (flags & LOOKUP_RCU) { |
1171 | if (parent != ACCESS_ONCE(dentry->d_parent)) | 1171 | if (parent != READ_ONCE(dentry->d_parent)) |
1172 | return -ECHILD; | 1172 | return -ECHILD; |
1173 | } else | 1173 | } else |
1174 | dput(parent); | 1174 | dput(parent); |
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1582 | struct inode *dir; | 1582 | struct inode *dir; |
1583 | 1583 | ||
1584 | if (flags & LOOKUP_RCU) { | 1584 | if (flags & LOOKUP_RCU) { |
1585 | parent = ACCESS_ONCE(dentry->d_parent); | 1585 | parent = READ_ONCE(dentry->d_parent); |
1586 | dir = d_inode_rcu(parent); | 1586 | dir = d_inode_rcu(parent); |
1587 | if (!dir) | 1587 | if (!dir) |
1588 | return -ECHILD; | 1588 | return -ECHILD; |
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) | |||
1596 | ret = -ECHILD; | 1596 | ret = -ECHILD; |
1597 | if (!(flags & LOOKUP_RCU)) | 1597 | if (!(flags & LOOKUP_RCU)) |
1598 | dput(parent); | 1598 | dput(parent); |
1599 | else if (parent != ACCESS_ONCE(dentry->d_parent)) | 1599 | else if (parent != READ_ONCE(dentry->d_parent)) |
1600 | return -ECHILD; | 1600 | return -ECHILD; |
1601 | goto out; | 1601 | goto out; |
1602 | } | 1602 | } |
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 25d9b5adcd42..36b49bd09264 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h | |||
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode) | |||
77 | 77 | ||
78 | static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi) | 78 | static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi) |
79 | { | 79 | { |
80 | return lockless_dereference(oi->__upperdentry); | 80 | return READ_ONCE(oi->__upperdentry); |
81 | } | 81 | } |
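The overlayfs hunks are part of retiring lockless_dereference(): with READ_ONCE() strengthened to imply the read-depends barrier, a plain READ_ONCE() is now sufficient for a dependent pointer load. A hedged consumer-side sketch, with invented names:

    #include <linux/compiler.h>

    struct example_state {
            int ready;
    };

    /* The publisher is assumed to install '*slot' with release semantics
     * (e.g. rcu_assign_pointer()); on the consumer side the address
     * dependency plus READ_ONCE() orders the dereference after the
     * pointer load.
     */
    static int read_published(struct example_state * const *slot)
    {
            struct example_state *st = READ_ONCE(*slot);

            return st ? st->ready : -1;
    }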
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 698b74dd750e..c310e3ff7f3f 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, | |||
754 | if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { | 754 | if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { |
755 | struct inode *inode = file_inode(file); | 755 | struct inode *inode = file_inode(file); |
756 | 756 | ||
757 | realfile = lockless_dereference(od->upperfile); | 757 | realfile = READ_ONCE(od->upperfile); |
758 | if (!realfile) { | 758 | if (!realfile) { |
759 | struct path upperpath; | 759 | struct path upperpath; |
760 | 760 | ||
diff --git a/fs/proc/array.c b/fs/proc/array.c index 9390032a11e1..d82549e80402 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | |||
454 | cutime = sig->cutime; | 454 | cutime = sig->cutime; |
455 | cstime = sig->cstime; | 455 | cstime = sig->cstime; |
456 | cgtime = sig->cgtime; | 456 | cgtime = sig->cgtime; |
457 | rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); | 457 | rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); |
458 | 458 | ||
459 | /* add up live thread stats at the group level */ | 459 | /* add up live thread stats at the group level */ |
460 | if (whole) { | 460 | if (whole) { |
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 7626ee11b06c..7b635d173213 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c | |||
@@ -28,7 +28,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) | |||
28 | 28 | ||
29 | poll_wait(file, &p->ns->poll, wait); | 29 | poll_wait(file, &p->ns->poll, wait); |
30 | 30 | ||
31 | event = ACCESS_ONCE(ns->event); | 31 | event = READ_ONCE(ns->event); |
32 | if (m->poll_event != event) { | 32 | if (m->poll_event != event) { |
33 | m->poll_event = event; | 33 | m->poll_event = event; |
34 | res |= POLLERR | POLLPRI; | 34 | res |= POLLERR | POLLPRI; |
diff --git a/fs/readdir.c b/fs/readdir.c index d336db65a33e..1b83b0ad183b 100644 --- a/fs/readdir.c +++ b/fs/readdir.c | |||
@@ -37,13 +37,12 @@ int iterate_dir(struct file *file, struct dir_context *ctx) | |||
37 | if (res) | 37 | if (res) |
38 | goto out; | 38 | goto out; |
39 | 39 | ||
40 | if (shared) { | 40 | if (shared) |
41 | inode_lock_shared(inode); | 41 | res = down_read_killable(&inode->i_rwsem); |
42 | } else { | 42 | else |
43 | res = down_write_killable(&inode->i_rwsem); | 43 | res = down_write_killable(&inode->i_rwsem); |
44 | if (res) | 44 | if (res) |
45 | goto out; | 45 | goto out; |
46 | } | ||
47 | 46 | ||
48 | res = -ENOENT; | 47 | res = -ENOENT; |
49 | if (!IS_DEADDIR(inode)) { | 48 | if (!IS_DEADDIR(inode)) { |
diff --git a/fs/splice.c b/fs/splice.c index f3084cce0ea6..39e2dc01ac12 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe); | |||
253 | */ | 253 | */ |
254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) | 254 | int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) |
255 | { | 255 | { |
256 | unsigned int buffers = ACCESS_ONCE(pipe->buffers); | 256 | unsigned int buffers = READ_ONCE(pipe->buffers); |
257 | 257 | ||
258 | spd->nr_pages_max = buffers; | 258 | spd->nr_pages_max = buffers; |
259 | if (buffers <= PIPE_DEF_BUFFERS) | 259 | if (buffers <= PIPE_DEF_BUFFERS) |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1c713fd5b3e6..f46d133c0949 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
381 | * in __get_user_pages if userfaultfd_release waits on the | 381 | * in __get_user_pages if userfaultfd_release waits on the |
382 | * caller of handle_userfault to release the mmap_sem. | 382 | * caller of handle_userfault to release the mmap_sem. |
383 | */ | 383 | */ |
384 | if (unlikely(ACCESS_ONCE(ctx->released))) { | 384 | if (unlikely(READ_ONCE(ctx->released))) { |
385 | /* | 385 | /* |
386 | * Don't return VM_FAULT_SIGBUS in this case, so a non | 386 | * Don't return VM_FAULT_SIGBUS in this case, so a non |
387 | * cooperative manager can close the uffd after the | 387 | * cooperative manager can close the uffd after the |
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason) | |||
477 | vmf->flags, reason); | 477 | vmf->flags, reason); |
478 | up_read(&mm->mmap_sem); | 478 | up_read(&mm->mmap_sem); |
479 | 479 | ||
480 | if (likely(must_wait && !ACCESS_ONCE(ctx->released) && | 480 | if (likely(must_wait && !READ_ONCE(ctx->released) && |
481 | (return_to_userland ? !signal_pending(current) : | 481 | (return_to_userland ? !signal_pending(current) : |
482 | !fatal_signal_pending(current)))) { | 482 | !fatal_signal_pending(current)))) { |
483 | wake_up_poll(&ctx->fd_wqh, POLLIN); | 483 | wake_up_poll(&ctx->fd_wqh, POLLIN); |
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
586 | set_current_state(TASK_KILLABLE); | 586 | set_current_state(TASK_KILLABLE); |
587 | if (ewq->msg.event == 0) | 587 | if (ewq->msg.event == 0) |
588 | break; | 588 | break; |
589 | if (ACCESS_ONCE(ctx->released) || | 589 | if (READ_ONCE(ctx->released) || |
590 | fatal_signal_pending(current)) { | 590 | fatal_signal_pending(current)) { |
591 | /* | 591 | /* |
592 | * &ewq->wq may be queued in fork_event, but | 592 | * &ewq->wq may be queued in fork_event, but |
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) | |||
833 | struct userfaultfd_wake_range range = { .len = 0, }; | 833 | struct userfaultfd_wake_range range = { .len = 0, }; |
834 | unsigned long new_flags; | 834 | unsigned long new_flags; |
835 | 835 | ||
836 | ACCESS_ONCE(ctx->released) = true; | 836 | WRITE_ONCE(ctx->released, true); |
837 | 837 | ||
838 | if (!mmget_not_zero(mm)) | 838 | if (!mmget_not_zero(mm)) |
839 | goto wakeup; | 839 | goto wakeup; |
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 51bf7b827387..129975970d99 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -592,9 +592,9 @@ xlog_valid_lsn( | |||
592 | * a transiently forward state. Instead, we can see the LSN in a | 592 | * a transiently forward state. Instead, we can see the LSN in a |
593 | * transiently behind state if we happen to race with a cycle wrap. | 593 | * transiently behind state if we happen to race with a cycle wrap. |
594 | */ | 594 | */ |
595 | cur_cycle = ACCESS_ONCE(log->l_curr_cycle); | 595 | cur_cycle = READ_ONCE(log->l_curr_cycle); |
596 | smp_rmb(); | 596 | smp_rmb(); |
597 | cur_block = ACCESS_ONCE(log->l_curr_block); | 597 | cur_block = READ_ONCE(log->l_curr_block); |
598 | 598 | ||
599 | if ((CYCLE_LSN(lsn) > cur_cycle) || | 599 | if ((CYCLE_LSN(lsn) > cur_cycle) || |
600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { | 600 | (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) { |
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 49be4bba1e96..34a028a7bcc5 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h | |||
@@ -244,4 +244,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) | |||
244 | #define atomic_long_inc_not_zero(l) \ | 244 | #define atomic_long_inc_not_zero(l) \ |
245 | ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) | 245 | ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) |
246 | 246 | ||
247 | #define atomic_long_cond_read_acquire(v, c) \ | ||
248 | ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) | ||
249 | |||
247 | #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ | 250 | #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ |
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 7d026bf27713..0f7062bd55e5 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h | |||
@@ -26,51 +26,20 @@ | |||
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Writer states & reader shift and bias. | 28 | * Writer states & reader shift and bias. |
29 | * | ||
30 | * | +0 | +1 | +2 | +3 | | ||
31 | * ----+----+----+----+----+ | ||
32 | * LE | 78 | 56 | 34 | 12 | 0x12345678 | ||
33 | * ----+----+----+----+----+ | ||
34 | * | wr | rd | | ||
35 | * +----+----+----+----+ | ||
36 | * | ||
37 | * ----+----+----+----+----+ | ||
38 | * BE | 12 | 34 | 56 | 78 | 0x12345678 | ||
39 | * ----+----+----+----+----+ | ||
40 | * | rd | wr | | ||
41 | * +----+----+----+----+ | ||
42 | */ | 29 | */ |
43 | #define _QW_WAITING 1 /* A writer is waiting */ | 30 | #define _QW_WAITING 0x100 /* A writer is waiting */ |
44 | #define _QW_LOCKED 0xff /* A writer holds the lock */ | 31 | #define _QW_LOCKED 0x0ff /* A writer holds the lock */ |
45 | #define _QW_WMASK 0xff /* Writer mask */ | 32 | #define _QW_WMASK 0x1ff /* Writer mask */ |
46 | #define _QR_SHIFT 8 /* Reader count shift */ | 33 | #define _QR_SHIFT 9 /* Reader count shift */ |
47 | #define _QR_BIAS (1U << _QR_SHIFT) | 34 | #define _QR_BIAS (1U << _QR_SHIFT) |
48 | 35 | ||
49 | /* | 36 | /* |
50 | * External function declarations | 37 | * External function declarations |
51 | */ | 38 | */ |
52 | extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts); | 39 | extern void queued_read_lock_slowpath(struct qrwlock *lock); |
53 | extern void queued_write_lock_slowpath(struct qrwlock *lock); | 40 | extern void queued_write_lock_slowpath(struct qrwlock *lock); |
54 | 41 | ||
55 | /** | 42 | /** |
56 | * queued_read_can_lock- would read_trylock() succeed? | ||
57 | * @lock: Pointer to queue rwlock structure | ||
58 | */ | ||
59 | static inline int queued_read_can_lock(struct qrwlock *lock) | ||
60 | { | ||
61 | return !(atomic_read(&lock->cnts) & _QW_WMASK); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * queued_write_can_lock- would write_trylock() succeed? | ||
66 | * @lock: Pointer to queue rwlock structure | ||
67 | */ | ||
68 | static inline int queued_write_can_lock(struct qrwlock *lock) | ||
69 | { | ||
70 | return !atomic_read(&lock->cnts); | ||
71 | } | ||
72 | |||
73 | /** | ||
74 | * queued_read_trylock - try to acquire read lock of a queue rwlock | 43 | * queued_read_trylock - try to acquire read lock of a queue rwlock |
75 | * @lock : Pointer to queue rwlock structure | 44 | * @lock : Pointer to queue rwlock structure |
76 | * Return: 1 if lock acquired, 0 if failed | 45 | * Return: 1 if lock acquired, 0 if failed |
@@ -118,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock) | |||
118 | return; | 87 | return; |
119 | 88 | ||
120 | /* The slowpath will decrement the reader count, if necessary. */ | 89 | /* The slowpath will decrement the reader count, if necessary. */ |
121 | queued_read_lock_slowpath(lock, cnts); | 90 | queued_read_lock_slowpath(lock); |
122 | } | 91 | } |
123 | 92 | ||
124 | /** | 93 | /** |
@@ -147,30 +116,18 @@ static inline void queued_read_unlock(struct qrwlock *lock) | |||
147 | } | 116 | } |
148 | 117 | ||
149 | /** | 118 | /** |
150 | * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock | ||
151 | * @lock : Pointer to queue rwlock structure | ||
152 | * Return: the write byte address of a queue rwlock | ||
153 | */ | ||
154 | static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) | ||
155 | { | ||
156 | return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * queued_write_unlock - release write lock of a queue rwlock | 119 | * queued_write_unlock - release write lock of a queue rwlock |
161 | * @lock : Pointer to queue rwlock structure | 120 | * @lock : Pointer to queue rwlock structure |
162 | */ | 121 | */ |
163 | static inline void queued_write_unlock(struct qrwlock *lock) | 122 | static inline void queued_write_unlock(struct qrwlock *lock) |
164 | { | 123 | { |
165 | smp_store_release(__qrwlock_write_byte(lock), 0); | 124 | smp_store_release(&lock->wlocked, 0); |
166 | } | 125 | } |
167 | 126 | ||
168 | /* | 127 | /* |
169 | * Remapping rwlock architecture specific functions to the corresponding | 128 | * Remapping rwlock architecture specific functions to the corresponding |
170 | * queue rwlock functions. | 129 | * queue rwlock functions. |
171 | */ | 130 | */ |
172 | #define arch_read_can_lock(l) queued_read_can_lock(l) | ||
173 | #define arch_write_can_lock(l) queued_write_can_lock(l) | ||
174 | #define arch_read_lock(l) queued_read_lock(l) | 131 | #define arch_read_lock(l) queued_read_lock(l) |
175 | #define arch_write_lock(l) queued_write_lock(l) | 132 | #define arch_write_lock(l) queued_write_lock(l) |
176 | #define arch_read_trylock(l) queued_read_trylock(l) | 133 | #define arch_read_trylock(l) queued_read_trylock(l) |
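The rewritten qrwlock packs a writer-locked byte and a separate writer-waiting bit below a 9-bit shift for the reader count, which is what lets queued_write_unlock() simply store zero to the wlocked byte. A hedged decoding sketch whose constants mirror the hunk above but whose helpers are illustrative, not kernel API:

    #include <linux/types.h>

    /* cnts layout after this change: bits 0-7 writer-locked byte
     * (_QW_LOCKED), bit 8 writer-waiting (_QW_WAITING), bits 9 and up the
     * reader count (_QR_SHIFT == 9).
     */
    #define EX_QW_LOCKED    0x0ff
    #define EX_QW_WAITING   0x100
    #define EX_QR_SHIFT     9

    static unsigned int ex_reader_count(u32 cnts)
    {
            return cnts >> EX_QR_SHIFT;
    }

    static bool ex_writer_active_or_waiting(u32 cnts)
    {
            return cnts & (EX_QW_LOCKED | EX_QW_WAITING);
    }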
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index d93573eff162..137ecdd16daa 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h | |||
@@ -10,12 +10,23 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | typedef struct qrwlock { | 12 | typedef struct qrwlock { |
13 | atomic_t cnts; | 13 | union { |
14 | atomic_t cnts; | ||
15 | struct { | ||
16 | #ifdef __LITTLE_ENDIAN | ||
17 | u8 wlocked; /* Locked for write? */ | ||
18 | u8 __lstate[3]; | ||
19 | #else | ||
20 | u8 __lstate[3]; | ||
21 | u8 wlocked; /* Locked for write? */ | ||
22 | #endif | ||
23 | }; | ||
24 | }; | ||
14 | arch_spinlock_t wait_lock; | 25 | arch_spinlock_t wait_lock; |
15 | } arch_rwlock_t; | 26 | } arch_rwlock_t; |
16 | 27 | ||
17 | #define __ARCH_RW_LOCK_UNLOCKED { \ | 28 | #define __ARCH_RW_LOCK_UNLOCKED { \ |
18 | .cnts = ATOMIC_INIT(0), \ | 29 | { .cnts = ATOMIC_INIT(0), }, \ |
19 | .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ | 30 | .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ |
20 | } | 31 | } |
21 | 32 | ||
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 66260777d644..b37b4ad7eb94 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h | |||
@@ -121,6 +121,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) | |||
121 | #define arch_spin_lock(l) queued_spin_lock(l) | 121 | #define arch_spin_lock(l) queued_spin_lock(l) |
122 | #define arch_spin_trylock(l) queued_spin_trylock(l) | 122 | #define arch_spin_trylock(l) queued_spin_trylock(l) |
123 | #define arch_spin_unlock(l) queued_spin_unlock(l) | 123 | #define arch_spin_unlock(l) queued_spin_unlock(l) |
124 | #define arch_spin_lock_flags(l, f) queued_spin_lock(l) | ||
125 | 124 | ||
126 | #endif /* __ASM_GENERIC_QSPINLOCK_H */ | 125 | #endif /* __ASM_GENERIC_QSPINLOCK_H */ |
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h index bdbe43bac230..93e67a055a4d 100644 --- a/include/asm-generic/rwsem.h +++ b/include/asm-generic/rwsem.h | |||
@@ -38,6 +38,16 @@ static inline void __down_read(struct rw_semaphore *sem) | |||
38 | rwsem_down_read_failed(sem); | 38 | rwsem_down_read_failed(sem); |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline int __down_read_killable(struct rw_semaphore *sem) | ||
42 | { | ||
43 | if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { | ||
44 | if (IS_ERR(rwsem_down_read_failed_killable(sem))) | ||
45 | return -EINTR; | ||
46 | } | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | |||
41 | static inline int __down_read_trylock(struct rw_semaphore *sem) | 51 | static inline int __down_read_trylock(struct rw_semaphore *sem) |
42 | { | 52 | { |
43 | long tmp; | 53 | long tmp; |
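__down_read_killable() here backs the down_read_killable() API that iterate_dir() switches to earlier in this series; a hedged caller-side sketch, with invented names:

    #include <linux/rwsem.h>

    /* down_read_killable() returns 0 with the lock held, or -EINTR if a
     * fatal signal arrived while sleeping, in which case the lock was not
     * taken and the caller must bail out.
     */
    static int read_under_sem(struct rw_semaphore *sem,
                              const int *value, int *out)
    {
            int ret = down_read_killable(sem);

            if (ret)
                    return ret;

            *out = *value;
            up_read(sem);
            return 0;
    }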
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8acfc1e099e1..e549bff87c5b 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h | |||
@@ -459,6 +459,7 @@ | |||
459 | #define TEXT_TEXT \ | 459 | #define TEXT_TEXT \ |
460 | ALIGN_FUNCTION(); \ | 460 | ALIGN_FUNCTION(); \ |
461 | *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ | 461 | *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ |
462 | *(.text..refcount) \ | ||
462 | *(.ref.text) \ | 463 | *(.ref.text) \ |
463 | MEM_KEEP(init.text) \ | 464 | MEM_KEEP(init.text) \ |
464 | MEM_KEEP(exit.text) \ | 465 | MEM_KEEP(exit.text) \ |
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index cd18203d6ff3..8b276fd9a127 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
@@ -654,6 +654,8 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
654 | } | 654 | } |
655 | #endif | 655 | #endif |
656 | 656 | ||
657 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
658 | |||
657 | #ifdef CONFIG_GENERIC_ATOMIC64 | 659 | #ifdef CONFIG_GENERIC_ATOMIC64 |
658 | #include <asm-generic/atomic64.h> | 660 | #include <asm-generic/atomic64.h> |
659 | #endif | 661 | #endif |
@@ -1073,6 +1075,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v | |||
1073 | } | 1075 | } |
1074 | #endif | 1076 | #endif |
1075 | 1077 | ||
1078 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
1079 | |||
1076 | #include <asm-generic/atomic-long.h> | 1080 | #include <asm-generic/atomic-long.h> |
1077 | 1081 | ||
1078 | #endif /* _LINUX_ATOMIC_H */ | 1082 | #endif /* _LINUX_ATOMIC_H */ |
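atomic_cond_read_acquire() is a thin wrapper over smp_cond_load_acquire(): it waits until the condition, written in terms of the magic VAL token, holds for the freshly loaded counter, and the satisfying load has acquire ordering. A hedged usage sketch with an invented function name:

#include <linux/atomic.h>

/* Wait until bit 0 of *state is set; everything the setter published
 * before setting the bit is visible after this returns.  Illustrative. */
static inline int example_wait_ready(atomic_t *state)
{
        return atomic_cond_read_acquire(state, VAL & 1);
}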
diff --git a/include/linux/average.h b/include/linux/average.h index 1b6f5560c264..a1a8f09631ce 100644 --- a/include/linux/average.h +++ b/include/linux/average.h | |||
@@ -2,6 +2,10 @@ | |||
2 | #ifndef _LINUX_AVERAGE_H | 2 | #ifndef _LINUX_AVERAGE_H |
3 | #define _LINUX_AVERAGE_H | 3 | #define _LINUX_AVERAGE_H |
4 | 4 | ||
5 | #include <linux/bug.h> | ||
6 | #include <linux/compiler.h> | ||
7 | #include <linux/log2.h> | ||
8 | |||
5 | /* | 9 | /* |
6 | * Exponentially weighted moving average (EWMA) | 10 | * Exponentially weighted moving average (EWMA) |
7 | * | 11 | * |
@@ -49,7 +53,7 @@ | |||
49 | static inline void ewma_##name##_add(struct ewma_##name *e, \ | 53 | static inline void ewma_##name##_add(struct ewma_##name *e, \ |
50 | unsigned long val) \ | 54 | unsigned long val) \ |
51 | { \ | 55 | { \ |
52 | unsigned long internal = ACCESS_ONCE(e->internal); \ | 56 | unsigned long internal = READ_ONCE(e->internal); \ |
53 | unsigned long weight_rcp = ilog2(_weight_rcp); \ | 57 | unsigned long weight_rcp = ilog2(_weight_rcp); \ |
54 | unsigned long precision = _precision; \ | 58 | unsigned long precision = _precision; \ |
55 | \ | 59 | \ |
@@ -58,10 +62,10 @@ | |||
58 | BUILD_BUG_ON((_precision) > 30); \ | 62 | BUILD_BUG_ON((_precision) > 30); \ |
59 | BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ | 63 | BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \ |
60 | \ | 64 | \ |
61 | ACCESS_ONCE(e->internal) = internal ? \ | 65 | WRITE_ONCE(e->internal, internal ? \ |
62 | (((internal << weight_rcp) - internal) + \ | 66 | (((internal << weight_rcp) - internal) + \ |
63 | (val << precision)) >> weight_rcp : \ | 67 | (val << precision)) >> weight_rcp : \ |
64 | (val << precision); \ | 68 | (val << precision)); \ |
65 | } | 69 | } |
66 | 70 | ||
67 | #endif /* _LINUX_AVERAGE_H */ | 71 | #endif /* _LINUX_AVERAGE_H */ |
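For context, the macro body converted above is generated by DECLARE_EWMA(). A usage sketch with made-up names, assuming ewma_sig_init() has been called once beforehand (precision 10 bits, reciprocal weight 8):

#include <linux/average.h>
#include <linux/printk.h>

DECLARE_EWMA(sig, 10, 8)        /* emits struct ewma_sig + ewma_sig_{init,add,read}() */

static void example_track(struct ewma_sig *avg, unsigned long sample)
{
        ewma_sig_add(avg, sample);
        pr_debug("ewma=%lu\n", ewma_sig_read(avg));
}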
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index d03c5dd6185d..c537ac7435ad 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -237,7 +237,7 @@ static inline unsigned long __ffs64(u64 word) | |||
237 | typeof(*ptr) old, new; \ | 237 | typeof(*ptr) old, new; \ |
238 | \ | 238 | \ |
239 | do { \ | 239 | do { \ |
240 | old = ACCESS_ONCE(*ptr); \ | 240 | old = READ_ONCE(*ptr); \ |
241 | new = (old & ~mask) | bits; \ | 241 | new = (old & ~mask) | bits; \ |
242 | } while (cmpxchg(ptr, old, new) != old); \ | 242 | } while (cmpxchg(ptr, old, new) != old); \ |
243 | \ | 243 | \ |
@@ -252,7 +252,7 @@ static inline unsigned long __ffs64(u64 word) | |||
252 | typeof(*ptr) old, new; \ | 252 | typeof(*ptr) old, new; \ |
253 | \ | 253 | \ |
254 | do { \ | 254 | do { \ |
255 | old = ACCESS_ONCE(*ptr); \ | 255 | old = READ_ONCE(*ptr); \ |
256 | new = old & ~clear; \ | 256 | new = old & ~clear; \ |
257 | } while (!(old & test) && \ | 257 | } while (!(old & test) && \ |
258 | cmpxchg(ptr, old, new) != old); \ | 258 | cmpxchg(ptr, old, new) != old); \ |
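The loops above are the standard lock-free update idiom, now spelled with READ_ONCE(): snapshot, compute, and retry while cmpxchg() reports that the value changed underneath. A stand-alone sketch of the same pattern (illustrative only):

#include <linux/atomic.h>
#include <linux/compiler.h>

static void example_or_bits(unsigned long *p, unsigned long bits)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*p);            /* stable snapshot for this attempt */
                new = old | bits;
        } while (cmpxchg(p, old, new) != old);  /* lost a race: retry */
}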
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 54dfef70a072..a06583e41f80 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __LINUX_COMPILER_H | 2 | #ifndef __LINUX_COMPILER_TYPES_H |
3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." |
4 | #endif | 4 | #endif |
5 | 5 | ||
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bb78e5bdff26..2272ded07496 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __LINUX_COMPILER_H | 2 | #ifndef __LINUX_COMPILER_TYPES_H |
3 | #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." |
4 | #endif | 4 | #endif |
5 | 5 | ||
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 523d1b74550f..bfa08160db3a 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __LINUX_COMPILER_H | 2 | #ifndef __LINUX_COMPILER_TYPES_H |
3 | #error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead." |
4 | #endif | 4 | #endif |
5 | 5 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 202710420d6d..3672353a0acd 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -2,111 +2,12 @@ | |||
2 | #ifndef __LINUX_COMPILER_H | 2 | #ifndef __LINUX_COMPILER_H |
3 | #define __LINUX_COMPILER_H | 3 | #define __LINUX_COMPILER_H |
4 | 4 | ||
5 | #ifndef __ASSEMBLY__ | 5 | #include <linux/compiler_types.h> |
6 | 6 | ||
7 | #ifdef __CHECKER__ | 7 | #ifndef __ASSEMBLY__ |
8 | # define __user __attribute__((noderef, address_space(1))) | ||
9 | # define __kernel __attribute__((address_space(0))) | ||
10 | # define __safe __attribute__((safe)) | ||
11 | # define __force __attribute__((force)) | ||
12 | # define __nocast __attribute__((nocast)) | ||
13 | # define __iomem __attribute__((noderef, address_space(2))) | ||
14 | # define __must_hold(x) __attribute__((context(x,1,1))) | ||
15 | # define __acquires(x) __attribute__((context(x,0,1))) | ||
16 | # define __releases(x) __attribute__((context(x,1,0))) | ||
17 | # define __acquire(x) __context__(x,1) | ||
18 | # define __release(x) __context__(x,-1) | ||
19 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | ||
20 | # define __percpu __attribute__((noderef, address_space(3))) | ||
21 | # define __rcu __attribute__((noderef, address_space(4))) | ||
22 | # define __private __attribute__((noderef)) | ||
23 | extern void __chk_user_ptr(const volatile void __user *); | ||
24 | extern void __chk_io_ptr(const volatile void __iomem *); | ||
25 | # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) | ||
26 | #else /* __CHECKER__ */ | ||
27 | # ifdef STRUCTLEAK_PLUGIN | ||
28 | # define __user __attribute__((user)) | ||
29 | # else | ||
30 | # define __user | ||
31 | # endif | ||
32 | # define __kernel | ||
33 | # define __safe | ||
34 | # define __force | ||
35 | # define __nocast | ||
36 | # define __iomem | ||
37 | # define __chk_user_ptr(x) (void)0 | ||
38 | # define __chk_io_ptr(x) (void)0 | ||
39 | # define __builtin_warning(x, y...) (1) | ||
40 | # define __must_hold(x) | ||
41 | # define __acquires(x) | ||
42 | # define __releases(x) | ||
43 | # define __acquire(x) (void)0 | ||
44 | # define __release(x) (void)0 | ||
45 | # define __cond_lock(x,c) (c) | ||
46 | # define __percpu | ||
47 | # define __rcu | ||
48 | # define __private | ||
49 | # define ACCESS_PRIVATE(p, member) ((p)->member) | ||
50 | #endif /* __CHECKER__ */ | ||
51 | |||
52 | /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ | ||
53 | #define ___PASTE(a,b) a##b | ||
54 | #define __PASTE(a,b) ___PASTE(a,b) | ||
55 | 8 | ||
56 | #ifdef __KERNEL__ | 9 | #ifdef __KERNEL__ |
57 | 10 | ||
58 | #ifdef __GNUC__ | ||
59 | #include <linux/compiler-gcc.h> | ||
60 | #endif | ||
61 | |||
62 | #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) | ||
63 | #define notrace __attribute__((hotpatch(0,0))) | ||
64 | #else | ||
65 | #define notrace __attribute__((no_instrument_function)) | ||
66 | #endif | ||
67 | |||
68 | /* Intel compiler defines __GNUC__. So we will overwrite implementations | ||
69 | * coming from above header files here | ||
70 | */ | ||
71 | #ifdef __INTEL_COMPILER | ||
72 | # include <linux/compiler-intel.h> | ||
73 | #endif | ||
74 | |||
75 | /* Clang compiler defines __GNUC__. So we will overwrite implementations | ||
76 | * coming from above header files here | ||
77 | */ | ||
78 | #ifdef __clang__ | ||
79 | #include <linux/compiler-clang.h> | ||
80 | #endif | ||
81 | |||
82 | /* | ||
83 | * Generic compiler-dependent macros required for kernel | ||
84 | * build go below this comment. Actual compiler/compiler version | ||
85 | * specific implementations come from the above header files | ||
86 | */ | ||
87 | |||
88 | struct ftrace_branch_data { | ||
89 | const char *func; | ||
90 | const char *file; | ||
91 | unsigned line; | ||
92 | union { | ||
93 | struct { | ||
94 | unsigned long correct; | ||
95 | unsigned long incorrect; | ||
96 | }; | ||
97 | struct { | ||
98 | unsigned long miss; | ||
99 | unsigned long hit; | ||
100 | }; | ||
101 | unsigned long miss_hit[2]; | ||
102 | }; | ||
103 | }; | ||
104 | |||
105 | struct ftrace_likely_data { | ||
106 | struct ftrace_branch_data data; | ||
107 | unsigned long constant; | ||
108 | }; | ||
109 | |||
110 | /* | 11 | /* |
111 | * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code | 12 | * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code |
112 | * to disable branch tracing on a per file basis. | 13 | * to disable branch tracing on a per file basis. |
@@ -333,6 +234,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
333 | * with an explicit memory barrier or atomic instruction that provides the | 234 | * with an explicit memory barrier or atomic instruction that provides the |
334 | * required ordering. | 235 | * required ordering. |
335 | */ | 236 | */ |
237 | #include <asm/barrier.h> | ||
336 | 238 | ||
337 | #define __READ_ONCE(x, check) \ | 239 | #define __READ_ONCE(x, check) \ |
338 | ({ \ | 240 | ({ \ |
@@ -341,6 +243,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
341 | __read_once_size(&(x), __u.__c, sizeof(x)); \ | 243 | __read_once_size(&(x), __u.__c, sizeof(x)); \ |
342 | else \ | 244 | else \ |
343 | __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ | 245 | __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ |
246 | smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ | ||
344 | __u.__val; \ | 247 | __u.__val; \ |
345 | }) | 248 | }) |
346 | #define READ_ONCE(x) __READ_ONCE(x, 1) | 249 | #define READ_ONCE(x) __READ_ONCE(x, 1) |
@@ -363,167 +266,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
363 | 266 | ||
364 | #endif /* __ASSEMBLY__ */ | 267 | #endif /* __ASSEMBLY__ */ |
365 | 268 | ||
366 | #ifdef __KERNEL__ | ||
367 | /* | ||
368 | * Allow us to mark functions as 'deprecated' and have gcc emit a nice | ||
369 | * warning for each use, in hopes of speeding the functions removal. | ||
370 | * Usage is: | ||
371 | * int __deprecated foo(void) | ||
372 | */ | ||
373 | #ifndef __deprecated | ||
374 | # define __deprecated /* unimplemented */ | ||
375 | #endif | ||
376 | |||
377 | #ifdef MODULE | ||
378 | #define __deprecated_for_modules __deprecated | ||
379 | #else | ||
380 | #define __deprecated_for_modules | ||
381 | #endif | ||
382 | |||
383 | #ifndef __must_check | ||
384 | #define __must_check | ||
385 | #endif | ||
386 | |||
387 | #ifndef CONFIG_ENABLE_MUST_CHECK | ||
388 | #undef __must_check | ||
389 | #define __must_check | ||
390 | #endif | ||
391 | #ifndef CONFIG_ENABLE_WARN_DEPRECATED | ||
392 | #undef __deprecated | ||
393 | #undef __deprecated_for_modules | ||
394 | #define __deprecated | ||
395 | #define __deprecated_for_modules | ||
396 | #endif | ||
397 | |||
398 | #ifndef __malloc | ||
399 | #define __malloc | ||
400 | #endif | ||
401 | |||
402 | /* | ||
403 | * Allow us to avoid 'defined but not used' warnings on functions and data, | ||
404 | * as well as force them to be emitted to the assembly file. | ||
405 | * | ||
406 | * As of gcc 3.4, static functions that are not marked with attribute((used)) | ||
407 | * may be elided from the assembly file. As of gcc 3.4, static data not so | ||
408 | * marked will not be elided, but this may change in a future gcc version. | ||
409 | * | ||
410 | * NOTE: Because distributions shipped with a backported unit-at-a-time | ||
411 | * compiler in gcc 3.3, we must define __used to be __attribute__((used)) | ||
412 | * for gcc >=3.3 instead of 3.4. | ||
413 | * | ||
414 | * In prior versions of gcc, such functions and data would be emitted, but | ||
415 | * would be warned about except with attribute((unused)). | ||
416 | * | ||
417 | * Mark functions that are referenced only in inline assembly as __used so | ||
418 | * the code is emitted even though it appears to be unreferenced. | ||
419 | */ | ||
420 | #ifndef __used | ||
421 | # define __used /* unimplemented */ | ||
422 | #endif | ||
423 | |||
424 | #ifndef __maybe_unused | ||
425 | # define __maybe_unused /* unimplemented */ | ||
426 | #endif | ||
427 | |||
428 | #ifndef __always_unused | ||
429 | # define __always_unused /* unimplemented */ | ||
430 | #endif | ||
431 | |||
432 | #ifndef noinline | ||
433 | #define noinline | ||
434 | #endif | ||
435 | |||
436 | /* | ||
437 | * Rather then using noinline to prevent stack consumption, use | ||
438 | * noinline_for_stack instead. For documentation reasons. | ||
439 | */ | ||
440 | #define noinline_for_stack noinline | ||
441 | |||
442 | #ifndef __always_inline | ||
443 | #define __always_inline inline | ||
444 | #endif | ||
445 | |||
446 | #endif /* __KERNEL__ */ | ||
447 | |||
448 | /* | ||
449 | * From the GCC manual: | ||
450 | * | ||
451 | * Many functions do not examine any values except their arguments, | ||
452 | * and have no effects except the return value. Basically this is | ||
453 | * just slightly more strict class than the `pure' attribute above, | ||
454 | * since function is not allowed to read global memory. | ||
455 | * | ||
456 | * Note that a function that has pointer arguments and examines the | ||
457 | * data pointed to must _not_ be declared `const'. Likewise, a | ||
458 | * function that calls a non-`const' function usually must not be | ||
459 | * `const'. It does not make sense for a `const' function to return | ||
460 | * `void'. | ||
461 | */ | ||
462 | #ifndef __attribute_const__ | ||
463 | # define __attribute_const__ /* unimplemented */ | ||
464 | #endif | ||
465 | |||
466 | #ifndef __designated_init | ||
467 | # define __designated_init | ||
468 | #endif | ||
469 | |||
470 | #ifndef __latent_entropy | ||
471 | # define __latent_entropy | ||
472 | #endif | ||
473 | |||
474 | #ifndef __randomize_layout | ||
475 | # define __randomize_layout __designated_init | ||
476 | #endif | ||
477 | |||
478 | #ifndef __no_randomize_layout | ||
479 | # define __no_randomize_layout | ||
480 | #endif | ||
481 | |||
482 | #ifndef randomized_struct_fields_start | ||
483 | # define randomized_struct_fields_start | ||
484 | # define randomized_struct_fields_end | ||
485 | #endif | ||
486 | |||
487 | /* | ||
488 | * Tell gcc if a function is cold. The compiler will assume any path | ||
489 | * directly leading to the call is unlikely. | ||
490 | */ | ||
491 | |||
492 | #ifndef __cold | ||
493 | #define __cold | ||
494 | #endif | ||
495 | |||
496 | /* Simple shorthand for a section definition */ | ||
497 | #ifndef __section | ||
498 | # define __section(S) __attribute__ ((__section__(#S))) | ||
499 | #endif | ||
500 | |||
501 | #ifndef __visible | ||
502 | #define __visible | ||
503 | #endif | ||
504 | |||
505 | #ifndef __nostackprotector | ||
506 | # define __nostackprotector | ||
507 | #endif | ||
508 | |||
509 | /* | ||
510 | * Assume alignment of return value. | ||
511 | */ | ||
512 | #ifndef __assume_aligned | ||
513 | #define __assume_aligned(a, ...) | ||
514 | #endif | ||
515 | |||
516 | |||
517 | /* Are two types/vars the same type (ignoring qualifiers)? */ | ||
518 | #ifndef __same_type | ||
519 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | ||
520 | #endif | ||
521 | |||
522 | /* Is this type a native word size -- useful for atomic operations */ | ||
523 | #ifndef __native_word | ||
524 | # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) | ||
525 | #endif | ||
526 | |||
527 | /* Compile time object size, -1 for unknown */ | 269 | /* Compile time object size, -1 for unknown */ |
528 | #ifndef __compiletime_object_size | 270 | #ifndef __compiletime_object_size |
529 | # define __compiletime_object_size(obj) -1 | 271 | # define __compiletime_object_size(obj) -1 |
@@ -605,24 +347,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
605 | (volatile typeof(x) *)&(x); }) | 347 | (volatile typeof(x) *)&(x); }) |
606 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) | 348 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) |
607 | 349 | ||
608 | /** | ||
609 | * lockless_dereference() - safely load a pointer for later dereference | ||
610 | * @p: The pointer to load | ||
611 | * | ||
612 | * Similar to rcu_dereference(), but for situations where the pointed-to | ||
613 | * object's lifetime is managed by something other than RCU. That | ||
614 | * "something other" might be reference counting or simple immortality. | ||
615 | * | ||
616 | * The seemingly unused variable ___typecheck_p validates that @p is | ||
617 | * indeed a pointer type by using a pointer to typeof(*p) as the type. | ||
618 | * Taking a pointer to typeof(*p) again is needed in case p is void *. | ||
619 | */ | ||
620 | #define lockless_dereference(p) \ | ||
621 | ({ \ | ||
622 | typeof(p) _________p1 = READ_ONCE(p); \ | ||
623 | typeof(*(p)) *___typecheck_p __maybe_unused; \ | ||
624 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ | ||
625 | (_________p1); \ | ||
626 | }) | ||
627 | |||
628 | #endif /* __LINUX_COMPILER_H */ | 350 | #endif /* __LINUX_COMPILER_H */ |
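The practical effect of folding smp_read_barrier_depends() into READ_ONCE() above is that former lockless_dereference() users convert one-for-one. A hedged before/after sketch with invented names:

#include <linux/compiler.h>

struct foo { int a; };

/* The dependent load of f->a is now ordered after the pointer load on
 * every architecture (Alpha included) without a separate barrier. */
static int example_read_foo(struct foo **gpp)
{
        struct foo *f = READ_ONCE(*gpp);        /* was: lockless_dereference(*gpp) */

        return f ? f->a : 0;
}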
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h new file mode 100644 index 000000000000..6b79a9bba9a7 --- /dev/null +++ b/include/linux/compiler_types.h | |||
@@ -0,0 +1,274 @@ | |||
1 | #ifndef __LINUX_COMPILER_TYPES_H | ||
2 | #define __LINUX_COMPILER_TYPES_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | |||
6 | #ifdef __CHECKER__ | ||
7 | # define __user __attribute__((noderef, address_space(1))) | ||
8 | # define __kernel __attribute__((address_space(0))) | ||
9 | # define __safe __attribute__((safe)) | ||
10 | # define __force __attribute__((force)) | ||
11 | # define __nocast __attribute__((nocast)) | ||
12 | # define __iomem __attribute__((noderef, address_space(2))) | ||
13 | # define __must_hold(x) __attribute__((context(x,1,1))) | ||
14 | # define __acquires(x) __attribute__((context(x,0,1))) | ||
15 | # define __releases(x) __attribute__((context(x,1,0))) | ||
16 | # define __acquire(x) __context__(x,1) | ||
17 | # define __release(x) __context__(x,-1) | ||
18 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | ||
19 | # define __percpu __attribute__((noderef, address_space(3))) | ||
20 | # define __rcu __attribute__((noderef, address_space(4))) | ||
21 | # define __private __attribute__((noderef)) | ||
22 | extern void __chk_user_ptr(const volatile void __user *); | ||
23 | extern void __chk_io_ptr(const volatile void __iomem *); | ||
24 | # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) | ||
25 | #else /* __CHECKER__ */ | ||
26 | # ifdef STRUCTLEAK_PLUGIN | ||
27 | # define __user __attribute__((user)) | ||
28 | # else | ||
29 | # define __user | ||
30 | # endif | ||
31 | # define __kernel | ||
32 | # define __safe | ||
33 | # define __force | ||
34 | # define __nocast | ||
35 | # define __iomem | ||
36 | # define __chk_user_ptr(x) (void)0 | ||
37 | # define __chk_io_ptr(x) (void)0 | ||
38 | # define __builtin_warning(x, y...) (1) | ||
39 | # define __must_hold(x) | ||
40 | # define __acquires(x) | ||
41 | # define __releases(x) | ||
42 | # define __acquire(x) (void)0 | ||
43 | # define __release(x) (void)0 | ||
44 | # define __cond_lock(x,c) (c) | ||
45 | # define __percpu | ||
46 | # define __rcu | ||
47 | # define __private | ||
48 | # define ACCESS_PRIVATE(p, member) ((p)->member) | ||
49 | #endif /* __CHECKER__ */ | ||
50 | |||
51 | /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ | ||
52 | #define ___PASTE(a,b) a##b | ||
53 | #define __PASTE(a,b) ___PASTE(a,b) | ||
54 | |||
55 | #ifdef __KERNEL__ | ||
56 | |||
57 | #ifdef __GNUC__ | ||
58 | #include <linux/compiler-gcc.h> | ||
59 | #endif | ||
60 | |||
61 | #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) | ||
62 | #define notrace __attribute__((hotpatch(0,0))) | ||
63 | #else | ||
64 | #define notrace __attribute__((no_instrument_function)) | ||
65 | #endif | ||
66 | |||
67 | /* Intel compiler defines __GNUC__. So we will overwrite implementations | ||
68 | * coming from above header files here | ||
69 | */ | ||
70 | #ifdef __INTEL_COMPILER | ||
71 | # include <linux/compiler-intel.h> | ||
72 | #endif | ||
73 | |||
74 | /* Clang compiler defines __GNUC__. So we will overwrite implementations | ||
75 | * coming from above header files here | ||
76 | */ | ||
77 | #ifdef __clang__ | ||
78 | #include <linux/compiler-clang.h> | ||
79 | #endif | ||
80 | |||
81 | /* | ||
82 | * Generic compiler-dependent macros required for kernel | ||
83 | * build go below this comment. Actual compiler/compiler version | ||
84 | * specific implementations come from the above header files | ||
85 | */ | ||
86 | |||
87 | struct ftrace_branch_data { | ||
88 | const char *func; | ||
89 | const char *file; | ||
90 | unsigned line; | ||
91 | union { | ||
92 | struct { | ||
93 | unsigned long correct; | ||
94 | unsigned long incorrect; | ||
95 | }; | ||
96 | struct { | ||
97 | unsigned long miss; | ||
98 | unsigned long hit; | ||
99 | }; | ||
100 | unsigned long miss_hit[2]; | ||
101 | }; | ||
102 | }; | ||
103 | |||
104 | struct ftrace_likely_data { | ||
105 | struct ftrace_branch_data data; | ||
106 | unsigned long constant; | ||
107 | }; | ||
108 | |||
109 | #endif /* __KERNEL__ */ | ||
110 | |||
111 | #endif /* __ASSEMBLY__ */ | ||
112 | |||
113 | #ifdef __KERNEL__ | ||
114 | /* | ||
115 | * Allow us to mark functions as 'deprecated' and have gcc emit a nice | ||
116 | * warning for each use, in hopes of speeding the functions removal. | ||
117 | * Usage is: | ||
118 | * int __deprecated foo(void) | ||
119 | */ | ||
120 | #ifndef __deprecated | ||
121 | # define __deprecated /* unimplemented */ | ||
122 | #endif | ||
123 | |||
124 | #ifdef MODULE | ||
125 | #define __deprecated_for_modules __deprecated | ||
126 | #else | ||
127 | #define __deprecated_for_modules | ||
128 | #endif | ||
129 | |||
130 | #ifndef __must_check | ||
131 | #define __must_check | ||
132 | #endif | ||
133 | |||
134 | #ifndef CONFIG_ENABLE_MUST_CHECK | ||
135 | #undef __must_check | ||
136 | #define __must_check | ||
137 | #endif | ||
138 | #ifndef CONFIG_ENABLE_WARN_DEPRECATED | ||
139 | #undef __deprecated | ||
140 | #undef __deprecated_for_modules | ||
141 | #define __deprecated | ||
142 | #define __deprecated_for_modules | ||
143 | #endif | ||
144 | |||
145 | #ifndef __malloc | ||
146 | #define __malloc | ||
147 | #endif | ||
148 | |||
149 | /* | ||
150 | * Allow us to avoid 'defined but not used' warnings on functions and data, | ||
151 | * as well as force them to be emitted to the assembly file. | ||
152 | * | ||
153 | * As of gcc 3.4, static functions that are not marked with attribute((used)) | ||
154 | * may be elided from the assembly file. As of gcc 3.4, static data not so | ||
155 | * marked will not be elided, but this may change in a future gcc version. | ||
156 | * | ||
157 | * NOTE: Because distributions shipped with a backported unit-at-a-time | ||
158 | * compiler in gcc 3.3, we must define __used to be __attribute__((used)) | ||
159 | * for gcc >=3.3 instead of 3.4. | ||
160 | * | ||
161 | * In prior versions of gcc, such functions and data would be emitted, but | ||
162 | * would be warned about except with attribute((unused)). | ||
163 | * | ||
164 | * Mark functions that are referenced only in inline assembly as __used so | ||
165 | * the code is emitted even though it appears to be unreferenced. | ||
166 | */ | ||
167 | #ifndef __used | ||
168 | # define __used /* unimplemented */ | ||
169 | #endif | ||
170 | |||
171 | #ifndef __maybe_unused | ||
172 | # define __maybe_unused /* unimplemented */ | ||
173 | #endif | ||
174 | |||
175 | #ifndef __always_unused | ||
176 | # define __always_unused /* unimplemented */ | ||
177 | #endif | ||
178 | |||
179 | #ifndef noinline | ||
180 | #define noinline | ||
181 | #endif | ||
182 | |||
183 | /* | ||
184 | * Rather then using noinline to prevent stack consumption, use | ||
185 | * noinline_for_stack instead. For documentation reasons. | ||
186 | */ | ||
187 | #define noinline_for_stack noinline | ||
188 | |||
189 | #ifndef __always_inline | ||
190 | #define __always_inline inline | ||
191 | #endif | ||
192 | |||
193 | #endif /* __KERNEL__ */ | ||
194 | |||
195 | /* | ||
196 | * From the GCC manual: | ||
197 | * | ||
198 | * Many functions do not examine any values except their arguments, | ||
199 | * and have no effects except the return value. Basically this is | ||
200 | * just slightly more strict class than the `pure' attribute above, | ||
201 | * since function is not allowed to read global memory. | ||
202 | * | ||
203 | * Note that a function that has pointer arguments and examines the | ||
204 | * data pointed to must _not_ be declared `const'. Likewise, a | ||
205 | * function that calls a non-`const' function usually must not be | ||
206 | * `const'. It does not make sense for a `const' function to return | ||
207 | * `void'. | ||
208 | */ | ||
209 | #ifndef __attribute_const__ | ||
210 | # define __attribute_const__ /* unimplemented */ | ||
211 | #endif | ||
212 | |||
213 | #ifndef __designated_init | ||
214 | # define __designated_init | ||
215 | #endif | ||
216 | |||
217 | #ifndef __latent_entropy | ||
218 | # define __latent_entropy | ||
219 | #endif | ||
220 | |||
221 | #ifndef __randomize_layout | ||
222 | # define __randomize_layout __designated_init | ||
223 | #endif | ||
224 | |||
225 | #ifndef __no_randomize_layout | ||
226 | # define __no_randomize_layout | ||
227 | #endif | ||
228 | |||
229 | #ifndef randomized_struct_fields_start | ||
230 | # define randomized_struct_fields_start | ||
231 | # define randomized_struct_fields_end | ||
232 | #endif | ||
233 | |||
234 | /* | ||
235 | * Tell gcc if a function is cold. The compiler will assume any path | ||
236 | * directly leading to the call is unlikely. | ||
237 | */ | ||
238 | |||
239 | #ifndef __cold | ||
240 | #define __cold | ||
241 | #endif | ||
242 | |||
243 | /* Simple shorthand for a section definition */ | ||
244 | #ifndef __section | ||
245 | # define __section(S) __attribute__ ((__section__(#S))) | ||
246 | #endif | ||
247 | |||
248 | #ifndef __visible | ||
249 | #define __visible | ||
250 | #endif | ||
251 | |||
252 | #ifndef __nostackprotector | ||
253 | # define __nostackprotector | ||
254 | #endif | ||
255 | |||
256 | /* | ||
257 | * Assume alignment of return value. | ||
258 | */ | ||
259 | #ifndef __assume_aligned | ||
260 | #define __assume_aligned(a, ...) | ||
261 | #endif | ||
262 | |||
263 | |||
264 | /* Are two types/vars the same type (ignoring qualifiers)? */ | ||
265 | #ifndef __same_type | ||
266 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | ||
267 | #endif | ||
268 | |||
269 | /* Is this type a native word size -- useful for atomic operations */ | ||
270 | #ifndef __native_word | ||
271 | # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) | ||
272 | #endif | ||
273 | |||
274 | #endif /* __LINUX_COMPILER_TYPES_H */ | ||
diff --git a/include/linux/completion.h b/include/linux/completion.h index 7828451e161a..0662a417febe 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
@@ -50,15 +50,23 @@ static inline void complete_release_commit(struct completion *x) | |||
50 | lock_commit_crosslock((struct lockdep_map *)&x->map); | 50 | lock_commit_crosslock((struct lockdep_map *)&x->map); |
51 | } | 51 | } |
52 | 52 | ||
53 | #define init_completion_map(x, m) \ | ||
54 | do { \ | ||
55 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | ||
56 | (m)->name, (m)->key, 0); \ | ||
57 | __init_completion(x); \ | ||
58 | } while (0) | ||
59 | |||
53 | #define init_completion(x) \ | 60 | #define init_completion(x) \ |
54 | do { \ | 61 | do { \ |
55 | static struct lock_class_key __key; \ | 62 | static struct lock_class_key __key; \ |
56 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | 63 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ |
57 | "(complete)" #x, \ | 64 | "(completion)" #x, \ |
58 | &__key, 0); \ | 65 | &__key, 0); \ |
59 | __init_completion(x); \ | 66 | __init_completion(x); \ |
60 | } while (0) | 67 | } while (0) |
61 | #else | 68 | #else |
69 | #define init_completion_map(x, m) __init_completion(x) | ||
62 | #define init_completion(x) __init_completion(x) | 70 | #define init_completion(x) __init_completion(x) |
63 | static inline void complete_acquire(struct completion *x) {} | 71 | static inline void complete_acquire(struct completion *x) {} |
64 | static inline void complete_release(struct completion *x) {} | 72 | static inline void complete_release(struct completion *x) {} |
@@ -68,12 +76,15 @@ static inline void complete_release_commit(struct completion *x) {} | |||
68 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | 76 | #ifdef CONFIG_LOCKDEP_COMPLETIONS |
69 | #define COMPLETION_INITIALIZER(work) \ | 77 | #define COMPLETION_INITIALIZER(work) \ |
70 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ | 78 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ |
71 | STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) } | 79 | STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } |
72 | #else | 80 | #else |
73 | #define COMPLETION_INITIALIZER(work) \ | 81 | #define COMPLETION_INITIALIZER(work) \ |
74 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } | 82 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
75 | #endif | 83 | #endif |
76 | 84 | ||
85 | #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ | ||
86 | (*({ init_completion_map(&(work), &(map)); &(work); })) | ||
87 | |||
77 | #define COMPLETION_INITIALIZER_ONSTACK(work) \ | 88 | #define COMPLETION_INITIALIZER_ONSTACK(work) \ |
78 | (*({ init_completion(&work); &work; })) | 89 | (*({ init_completion(&work); &work; })) |
79 | 90 | ||
@@ -103,8 +114,11 @@ static inline void complete_release_commit(struct completion *x) {} | |||
103 | #ifdef CONFIG_LOCKDEP | 114 | #ifdef CONFIG_LOCKDEP |
104 | # define DECLARE_COMPLETION_ONSTACK(work) \ | 115 | # define DECLARE_COMPLETION_ONSTACK(work) \ |
105 | struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) | 116 | struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) |
117 | # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ | ||
118 | struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) | ||
106 | #else | 119 | #else |
107 | # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) | 120 | # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) |
121 | # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) | ||
108 | #endif | 122 | #endif |
109 | 123 | ||
110 | /** | 124 | /** |
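A hedged sketch of what the new _map variants are for: the caller supplies an existing lockdep_map, so every wait on the completion is classified under that shared lock class instead of a per-callsite one. The structure and names below are invented:

#include <linux/completion.h>
#include <linux/lockdep.h>

struct example_dev {
        struct lockdep_map map;                 /* hypothetical per-device class */
        struct completion done;
};

static void example_init(struct example_dev *dev)
{
        /* 'done' now reports under dev->map's name and key in lockdep output */
        init_completion_map(&dev->done, &dev->map);
}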
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f05a659cdf34..65cd8ab60b7a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -520,7 +520,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) | |||
520 | } | 520 | } |
521 | 521 | ||
522 | /** | 522 | /** |
523 | * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE() | 523 | * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE() |
524 | * @dentry: The dentry to query | 524 | * @dentry: The dentry to query |
525 | * | 525 | * |
526 | * This is the helper normal filesystems should use to get at their own inodes | 526 | * This is the helper normal filesystems should use to get at their own inodes |
@@ -528,7 +528,7 @@ static inline struct inode *d_inode(const struct dentry *dentry) | |||
528 | */ | 528 | */ |
529 | static inline struct inode *d_inode_rcu(const struct dentry *dentry) | 529 | static inline struct inode *d_inode_rcu(const struct dentry *dentry) |
530 | { | 530 | { |
531 | return ACCESS_ONCE(dentry->d_inode); | 531 | return READ_ONCE(dentry->d_inode); |
532 | } | 532 | } |
533 | 533 | ||
534 | /** | 534 | /** |
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 34c0a5464c74..023eae69398c 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h | |||
@@ -89,7 +89,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) | |||
89 | /* Returns how many objects can be queued, < 0 indicates over limit. */ | 89 | /* Returns how many objects can be queued, < 0 indicates over limit. */ |
90 | static inline int dql_avail(const struct dql *dql) | 90 | static inline int dql_avail(const struct dql *dql) |
91 | { | 91 | { |
92 | return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued); | 92 | return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); |
93 | } | 93 | } |
94 | 94 | ||
95 | /* Record number of completed objects and recalculate the limit. */ | 95 | /* Record number of completed objects and recalculate the limit. */ |
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index b96dd4e1e663..ecc2928e8046 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h | |||
@@ -31,7 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq; | |||
31 | * @p: The pointer to read, prior to dereferencing | 31 | * @p: The pointer to read, prior to dereferencing |
32 | * | 32 | * |
33 | * Return the value of the specified RCU-protected pointer, but omit | 33 | * Return the value of the specified RCU-protected pointer, but omit |
34 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because | 34 | * both the smp_read_barrier_depends() and the READ_ONCE(), because |
35 | * caller holds genl mutex. | 35 | * caller holds genl mutex. |
36 | */ | 36 | */ |
37 | #define genl_dereference(p) \ | 37 | #define genl_dereference(p) \ |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 44790523057f..eaefb7a62f83 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -207,6 +207,7 @@ struct gendisk { | |||
207 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 207 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
208 | int node_id; | 208 | int node_id; |
209 | struct badblocks *bb; | 209 | struct badblocks *bb; |
210 | struct lockdep_map lockdep_map; | ||
210 | }; | 211 | }; |
211 | 212 | ||
212 | static inline struct gendisk *part_to_disk(struct hd_struct *part) | 213 | static inline struct gendisk *part_to_disk(struct hd_struct *part) |
@@ -591,8 +592,7 @@ extern void __delete_partition(struct percpu_ref *); | |||
591 | extern void delete_partition(struct gendisk *, int); | 592 | extern void delete_partition(struct gendisk *, int); |
592 | extern void printk_all_partitions(void); | 593 | extern void printk_all_partitions(void); |
593 | 594 | ||
594 | extern struct gendisk *alloc_disk_node(int minors, int node_id); | 595 | extern struct gendisk *__alloc_disk_node(int minors, int node_id); |
595 | extern struct gendisk *alloc_disk(int minors); | ||
596 | extern struct kobject *get_disk(struct gendisk *disk); | 596 | extern struct kobject *get_disk(struct gendisk *disk); |
597 | extern void put_disk(struct gendisk *disk); | 597 | extern void put_disk(struct gendisk *disk); |
598 | extern void blk_register_region(dev_t devt, unsigned long range, | 598 | extern void blk_register_region(dev_t devt, unsigned long range, |
@@ -616,6 +616,24 @@ extern ssize_t part_fail_store(struct device *dev, | |||
616 | const char *buf, size_t count); | 616 | const char *buf, size_t count); |
617 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ | 617 | #endif /* CONFIG_FAIL_MAKE_REQUEST */ |
618 | 618 | ||
619 | #define alloc_disk_node(minors, node_id) \ | ||
620 | ({ \ | ||
621 | static struct lock_class_key __key; \ | ||
622 | const char *__name; \ | ||
623 | struct gendisk *__disk; \ | ||
624 | \ | ||
625 | __name = "(gendisk_completion)"#minors"("#node_id")"; \ | ||
626 | \ | ||
627 | __disk = __alloc_disk_node(minors, node_id); \ | ||
628 | \ | ||
629 | if (__disk) \ | ||
630 | lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ | ||
631 | \ | ||
632 | __disk; \ | ||
633 | }) | ||
634 | |||
635 | #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) | ||
636 | |||
619 | static inline int hd_ref_init(struct hd_struct *part) | 637 | static inline int hd_ref_init(struct hd_struct *part) |
620 | { | 638 | { |
621 | if (percpu_ref_init(&part->ref, __delete_partition, 0, | 639 | if (percpu_ref_init(&part->ref, __delete_partition, 0, |
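Tying this to the completion changes earlier in the series: with a lockdep_map embedded in every gendisk, a block-layer wait can be classified per disk rather than per call site. A sketch, not necessarily one of the exact call sites the series converts:

#include <linux/genhd.h>
#include <linux/completion.h>

static void example_wait_on_disk(struct gendisk *disk)
{
        DECLARE_COMPLETION_ONSTACK_MAP(wait, disk->lockdep_map);

        /* ... issue a request whose end_io handler calls complete(&wait) ... */
        wait_for_completion(&wait);
}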
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 87067d23a48b..a8a126259bc4 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -222,7 +222,7 @@ extern struct page *huge_zero_page; | |||
222 | 222 | ||
223 | static inline bool is_huge_zero_page(struct page *page) | 223 | static inline bool is_huge_zero_page(struct page *page) |
224 | { | 224 | { |
225 | return ACCESS_ONCE(huge_zero_page) == page; | 225 | return READ_ONCE(huge_zero_page) == page; |
226 | } | 226 | } |
227 | 227 | ||
228 | static inline bool is_huge_zero_pmd(pmd_t pmd) | 228 | static inline bool is_huge_zero_pmd(pmd_t pmd) |
diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 30294603526f..d95cae09dea0 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h | |||
@@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team, | |||
247 | 247 | ||
248 | static inline int team_num_to_port_index(struct team *team, unsigned int num) | 248 | static inline int team_num_to_port_index(struct team *team, unsigned int num) |
249 | { | 249 | { |
250 | int en_port_count = ACCESS_ONCE(team->en_port_count); | 250 | int en_port_count = READ_ONCE(team->en_port_count); |
251 | 251 | ||
252 | if (unlikely(!en_port_count)) | 252 | if (unlikely(!en_port_count)) |
253 | return 0; | 253 | return 0; |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3b7675bcca64..c7b368c734af 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -82,9 +82,9 @@ | |||
82 | 82 | ||
83 | extern bool static_key_initialized; | 83 | extern bool static_key_initialized; |
84 | 84 | ||
85 | #define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \ | 85 | #define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \ |
86 | "%s used before call to jump_label_init", \ | 86 | "%s(): static key '%pS' used before call to jump_label_init()", \ |
87 | __func__) | 87 | __func__, (key)) |
88 | 88 | ||
89 | #ifdef HAVE_JUMP_LABEL | 89 | #ifdef HAVE_JUMP_LABEL |
90 | 90 | ||
@@ -212,13 +212,13 @@ static __always_inline bool static_key_true(struct static_key *key) | |||
212 | 212 | ||
213 | static inline void static_key_slow_inc(struct static_key *key) | 213 | static inline void static_key_slow_inc(struct static_key *key) |
214 | { | 214 | { |
215 | STATIC_KEY_CHECK_USE(); | 215 | STATIC_KEY_CHECK_USE(key); |
216 | atomic_inc(&key->enabled); | 216 | atomic_inc(&key->enabled); |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline void static_key_slow_dec(struct static_key *key) | 219 | static inline void static_key_slow_dec(struct static_key *key) |
220 | { | 220 | { |
221 | STATIC_KEY_CHECK_USE(); | 221 | STATIC_KEY_CHECK_USE(key); |
222 | atomic_dec(&key->enabled); | 222 | atomic_dec(&key->enabled); |
223 | } | 223 | } |
224 | 224 | ||
@@ -237,7 +237,7 @@ static inline int jump_label_apply_nops(struct module *mod) | |||
237 | 237 | ||
238 | static inline void static_key_enable(struct static_key *key) | 238 | static inline void static_key_enable(struct static_key *key) |
239 | { | 239 | { |
240 | STATIC_KEY_CHECK_USE(); | 240 | STATIC_KEY_CHECK_USE(key); |
241 | 241 | ||
242 | if (atomic_read(&key->enabled) != 0) { | 242 | if (atomic_read(&key->enabled) != 0) { |
243 | WARN_ON_ONCE(atomic_read(&key->enabled) != 1); | 243 | WARN_ON_ONCE(atomic_read(&key->enabled) != 1); |
@@ -248,7 +248,7 @@ static inline void static_key_enable(struct static_key *key) | |||
248 | 248 | ||
249 | static inline void static_key_disable(struct static_key *key) | 249 | static inline void static_key_disable(struct static_key *key) |
250 | { | 250 | { |
251 | STATIC_KEY_CHECK_USE(); | 251 | STATIC_KEY_CHECK_USE(key); |
252 | 252 | ||
253 | if (atomic_read(&key->enabled) != 1) { | 253 | if (atomic_read(&key->enabled) != 1) { |
254 | WARN_ON_ONCE(atomic_read(&key->enabled) != 0); | 254 | WARN_ON_ONCE(atomic_read(&key->enabled) != 0); |
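What the extra macro argument buys in practice, as a hypothetical example: a caller that flips a key before jump_label_init() now gets the offending key named via %pS rather than an anonymous warning.

#include <linux/jump_label.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_enable_feature(void)
{
        /* If this runs too early, the WARN now prints example_key's symbol */
        static_key_enable(&example_key);
}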
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index fc13ff289903..baa8eabbaa56 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h | |||
@@ -25,18 +25,18 @@ struct static_key_deferred { | |||
25 | }; | 25 | }; |
26 | static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) | 26 | static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) |
27 | { | 27 | { |
28 | STATIC_KEY_CHECK_USE(); | 28 | STATIC_KEY_CHECK_USE(key); |
29 | static_key_slow_dec(&key->key); | 29 | static_key_slow_dec(&key->key); |
30 | } | 30 | } |
31 | static inline void static_key_deferred_flush(struct static_key_deferred *key) | 31 | static inline void static_key_deferred_flush(struct static_key_deferred *key) |
32 | { | 32 | { |
33 | STATIC_KEY_CHECK_USE(); | 33 | STATIC_KEY_CHECK_USE(key); |
34 | } | 34 | } |
35 | static inline void | 35 | static inline void |
36 | jump_label_rate_limit(struct static_key_deferred *key, | 36 | jump_label_rate_limit(struct static_key_deferred *key, |
37 | unsigned long rl) | 37 | unsigned long rl) |
38 | { | 38 | { |
39 | STATIC_KEY_CHECK_USE(); | 39 | STATIC_KEY_CHECK_USE(key); |
40 | } | 40 | } |
41 | #endif /* HAVE_JUMP_LABEL */ | 41 | #endif /* HAVE_JUMP_LABEL */ |
42 | #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ | 42 | #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */ |
diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 2e6f90bd52aa..f68db9e450eb 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #ifndef _LINUX_LINKAGE_H | 2 | #ifndef _LINUX_LINKAGE_H |
3 | #define _LINUX_LINKAGE_H | 3 | #define _LINUX_LINKAGE_H |
4 | 4 | ||
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler_types.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include <linux/export.h> | 7 | #include <linux/export.h> |
8 | #include <asm/linkage.h> | 8 | #include <asm/linkage.h> |
diff --git a/include/linux/llist.h b/include/linux/llist.h index 1957635e6d5f..85abc2915e8d 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h | |||
@@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list) | |||
198 | */ | 198 | */ |
199 | static inline bool llist_empty(const struct llist_head *head) | 199 | static inline bool llist_empty(const struct llist_head *head) |
200 | { | 200 | { |
201 | return ACCESS_ONCE(head->first) == NULL; | 201 | return READ_ONCE(head->first) == NULL; |
202 | } | 202 | } |
203 | 203 | ||
204 | static inline struct llist_node *llist_next(struct llist_node *node) | 204 | static inline struct llist_node *llist_next(struct llist_node *node) |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f301d31b473c..a842551fe044 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -528,6 +528,11 @@ static inline void lockdep_on(void) | |||
528 | */ | 528 | */ |
529 | struct lock_class_key { }; | 529 | struct lock_class_key { }; |
530 | 530 | ||
531 | /* | ||
532 | * The lockdep_map takes no space if lockdep is disabled: | ||
533 | */ | ||
534 | struct lockdep_map { }; | ||
535 | |||
531 | #define lockdep_depth(tsk) (0) | 536 | #define lockdep_depth(tsk) (0) |
532 | 537 | ||
533 | #define lockdep_is_held_type(l, r) (1) | 538 | #define lockdep_is_held_type(l, r) (1) |
@@ -720,9 +725,24 @@ do { \ | |||
720 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ | 725 | lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ |
721 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ | 726 | lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
722 | } while (0) | 727 | } while (0) |
728 | |||
729 | #define lockdep_assert_irqs_enabled() do { \ | ||
730 | WARN_ONCE(debug_locks && !current->lockdep_recursion && \ | ||
731 | !current->hardirqs_enabled, \ | ||
732 | "IRQs not enabled as expected\n"); \ | ||
733 | } while (0) | ||
734 | |||
735 | #define lockdep_assert_irqs_disabled() do { \ | ||
736 | WARN_ONCE(debug_locks && !current->lockdep_recursion && \ | ||
737 | current->hardirqs_enabled, \ | ||
738 | "IRQs not disabled as expected\n"); \ | ||
739 | } while (0) | ||
740 | |||
723 | #else | 741 | #else |
724 | # define might_lock(lock) do { } while (0) | 742 | # define might_lock(lock) do { } while (0) |
725 | # define might_lock_read(lock) do { } while (0) | 743 | # define might_lock_read(lock) do { } while (0) |
744 | # define lockdep_assert_irqs_enabled() do { } while (0) | ||
745 | # define lockdep_assert_irqs_disabled() do { } while (0) | ||
726 | #endif | 746 | #endif |
727 | 747 | ||
728 | #ifdef CONFIG_LOCKDEP | 748 | #ifdef CONFIG_LOCKDEP |
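The conversion pattern these two macros enable, as a sketch: an unconditional runtime check such as WARN_ON_ONCE(!irqs_disabled()) in a hot path becomes an assertion that compiles away when lockdep is off (function name invented):

#include <linux/lockdep.h>

static void example_hardirq_context_work(void)
{
        lockdep_assert_irqs_disabled();         /* was: WARN_ON_ONCE(!irqs_disabled()); */

        /* ... code that must run with hardirqs off ... */
}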
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 414a5e769fde..495ba4dd9da5 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
@@ -67,7 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) | |||
67 | * @ss: The nfnetlink subsystem ID | 67 | * @ss: The nfnetlink subsystem ID |
68 | * | 68 | * |
69 | * Return the value of the specified RCU-protected pointer, but omit | 69 | * Return the value of the specified RCU-protected pointer, but omit |
70 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because | 70 | * both the smp_read_barrier_depends() and the READ_ONCE(), because |
71 | * caller holds the NFNL subsystem mutex. | 71 | * caller holds the NFNL subsystem mutex. |
72 | */ | 72 | */ |
73 | #define nfnl_dereference(p, ss) \ | 73 | #define nfnl_dereference(p, ss) \ |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 2efb08a60e63..f0fc4700b6ff 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) | |||
105 | 105 | ||
106 | static inline void pm_runtime_mark_last_busy(struct device *dev) | 106 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
107 | { | 107 | { |
108 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | 108 | WRITE_ONCE(dev->power.last_busy, jiffies); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) | 111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index c2cdd45a880a..127f534fec94 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, | |||
275 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 275 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
276 | */ | 276 | */ |
277 | #define list_entry_rcu(ptr, type, member) \ | 277 | #define list_entry_rcu(ptr, type, member) \ |
278 | container_of(lockless_dereference(ptr), type, member) | 278 | container_of(READ_ONCE(ptr), type, member) |
279 | 279 | ||
280 | /* | 280 | /* |
281 | * Where are list_empty_rcu() and list_first_entry_rcu()? | 281 | * Where are list_empty_rcu() and list_first_entry_rcu()? |
@@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, | |||
368 | * example is when items are added to the list, but never deleted. | 368 | * example is when items are added to the list, but never deleted. |
369 | */ | 369 | */ |
370 | #define list_entry_lockless(ptr, type, member) \ | 370 | #define list_entry_lockless(ptr, type, member) \ |
371 | container_of((typeof(ptr))lockless_dereference(ptr), type, member) | 371 | container_of((typeof(ptr))READ_ONCE(ptr), type, member) |
372 | 372 | ||
373 | /** | 373 | /** |
374 | * list_for_each_entry_lockless - iterate over rcu list of given type | 374 | * list_for_each_entry_lockless - iterate over rcu list of given type |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1a9f70d44af9..a6ddc42f87a5 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
346 | #define __rcu_dereference_check(p, c, space) \ | 346 | #define __rcu_dereference_check(p, c, space) \ |
347 | ({ \ | 347 | ({ \ |
348 | /* Dependency order vs. p above. */ \ | 348 | /* Dependency order vs. p above. */ \ |
349 | typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ | 349 | typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ |
350 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ | 350 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ |
351 | rcu_dereference_sparse(p, space); \ | 351 | rcu_dereference_sparse(p, space); \ |
352 | ((typeof(*p) __force __kernel *)(________p1)); \ | 352 | ((typeof(*p) __force __kernel *)(________p1)); \ |
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
360 | #define rcu_dereference_raw(p) \ | 360 | #define rcu_dereference_raw(p) \ |
361 | ({ \ | 361 | ({ \ |
362 | /* Dependency order vs. p above. */ \ | 362 | /* Dependency order vs. p above. */ \ |
363 | typeof(p) ________p1 = lockless_dereference(p); \ | 363 | typeof(p) ________p1 = READ_ONCE(p); \ |
364 | ((typeof(*p) __force __kernel *)(________p1)); \ | 364 | ((typeof(*p) __force __kernel *)(________p1)); \ |
365 | }) | 365 | }) |
366 | 366 | ||
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index ff3dd2ec44b4..54bcd970bfd3 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -68,7 +68,7 @@ static inline bool lockdep_rtnl_is_held(void) | |||
68 | * @p: The pointer to read, prior to dereferencing | 68 | * @p: The pointer to read, prior to dereferencing |
69 | * | 69 | * |
70 | * Return the value of the specified RCU-protected pointer, but omit | 70 | * Return the value of the specified RCU-protected pointer, but omit |
71 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because | 71 | * both the smp_read_barrier_depends() and the READ_ONCE(), because |
72 | * caller holds RTNL. | 72 | * caller holds RTNL. |
73 | */ | 73 | */ |
74 | #define rtnl_dereference(p) \ | 74 | #define rtnl_dereference(p) \ |
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index bc2994ed66e1..3dcd617e65ae 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h | |||
@@ -38,6 +38,15 @@ do { \ | |||
38 | extern int do_raw_write_trylock(rwlock_t *lock); | 38 | extern int do_raw_write_trylock(rwlock_t *lock); |
39 | extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); | 39 | extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); |
40 | #else | 40 | #else |
41 | |||
42 | #ifndef arch_read_lock_flags | ||
43 | # define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
44 | #endif | ||
45 | |||
46 | #ifndef arch_write_lock_flags | ||
47 | # define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
48 | #endif | ||
49 | |||
41 | # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) | 50 | # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0) |
42 | # define do_raw_read_lock_flags(lock, flags) \ | 51 | # define do_raw_read_lock_flags(lock, flags) \ |
43 | do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) | 52 | do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0) |
@@ -50,9 +59,6 @@ do { \ | |||
50 | # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) | 59 | # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0) |
51 | #endif | 60 | #endif |
52 | 61 | ||
53 | #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) | ||
54 | #define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) | ||
55 | |||
56 | /* | 62 | /* |
57 | * Define the various rw_lock methods. Note we define these | 63 | * Define the various rw_lock methods. Note we define these |
58 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 64 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index 5b9b84b20407..86ebb4bf9c6e 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h | |||
@@ -211,7 +211,7 @@ static inline void __raw_write_lock(rwlock_t *lock) | |||
211 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | 211 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); |
212 | } | 212 | } |
213 | 213 | ||
214 | #endif /* CONFIG_PREEMPT */ | 214 | #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ |
215 | 215 | ||
216 | static inline void __raw_write_unlock(rwlock_t *lock) | 216 | static inline void __raw_write_unlock(rwlock_t *lock) |
217 | { | 217 | { |
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index dfa34d803439..56707d5ff6ad 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h | |||
@@ -112,6 +112,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) | |||
112 | * lock for reading | 112 | * lock for reading |
113 | */ | 113 | */ |
114 | extern void down_read(struct rw_semaphore *sem); | 114 | extern void down_read(struct rw_semaphore *sem); |
115 | extern int __must_check down_read_killable(struct rw_semaphore *sem); | ||
115 | 116 | ||
116 | /* | 117 | /* |
117 | * trylock for reading -- returns 1 if successful, 0 if contention | 118 | * trylock for reading -- returns 1 if successful, 0 if contention |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 341e1a12bfc7..a39186194cd6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -166,6 +166,10 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) | |||
166 | arch_spin_lock(&lock->raw_lock); | 166 | arch_spin_lock(&lock->raw_lock); |
167 | } | 167 | } |
168 | 168 | ||
169 | #ifndef arch_spin_lock_flags | ||
170 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
171 | #endif | ||
172 | |||
169 | static inline void | 173 | static inline void |
170 | do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) | 174 | do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) |
171 | { | 175 | { |
@@ -279,12 +283,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) | |||
279 | 1 : ({ local_irq_restore(flags); 0; }); \ | 283 | 1 : ({ local_irq_restore(flags); 0; }); \ |
280 | }) | 284 | }) |
281 | 285 | ||
282 | /** | ||
283 | * raw_spin_can_lock - would raw_spin_trylock() succeed? | ||
284 | * @lock: the spinlock in question. | ||
285 | */ | ||
286 | #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) | ||
287 | |||
288 | /* Include rwlock functions */ | 286 | /* Include rwlock functions */ |
289 | #include <linux/rwlock.h> | 287 | #include <linux/rwlock.h> |
290 | 288 | ||
@@ -397,11 +395,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock) | |||
397 | return raw_spin_is_contended(&lock->rlock); | 395 | return raw_spin_is_contended(&lock->rlock); |
398 | } | 396 | } |
399 | 397 | ||
400 | static __always_inline int spin_can_lock(spinlock_t *lock) | ||
401 | { | ||
402 | return raw_spin_can_lock(&lock->rlock); | ||
403 | } | ||
404 | |||
405 | #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) | 398 | #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) |
406 | 399 | ||
407 | /* | 400 | /* |
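The removed spin_can_lock()/raw_spin_can_lock() helpers (and the read/write_can_lock() ones above) answered a question that can change before the caller acts on it. A minimal sketch of the non-racy replacement using the existing trylock API (my_lock is a placeholder spinlock_t, not from this patch):

    if (spin_trylock(&my_lock)) {
            /* test-and-acquire happened atomically */
            /* ... critical section ... */
            spin_unlock(&my_lock);
    } else {
            /* contended: back off, retry, or take a slow path */
    }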
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 612fb530af41..0ac9112c1bbe 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -32,14 +32,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
32 | barrier(); | 32 | barrier(); |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void | ||
36 | arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | ||
37 | { | ||
38 | local_irq_save(flags); | ||
39 | lock->slock = 0; | ||
40 | barrier(); | ||
41 | } | ||
42 | |||
43 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 35 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
44 | { | 36 | { |
45 | char oldval = lock->slock; | 37 | char oldval = lock->slock; |
@@ -77,7 +69,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
77 | 69 | ||
78 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) | 70 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) |
79 | 71 | ||
80 | #define arch_read_can_lock(lock) (((void)(lock), 1)) | ||
81 | #define arch_write_can_lock(lock) (((void)(lock), 1)) | ||
82 | |||
83 | #endif /* __LINUX_SPINLOCK_UP_H */ | 72 | #endif /* __LINUX_SPINLOCK_UP_H */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0eae11fc7a23..1cdabfb813ab 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -219,7 +219,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
219 | \ | 219 | \ |
220 | __init_work((_work), _onstack); \ | 220 | __init_work((_work), _onstack); \ |
221 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ | 221 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
222 | lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \ | 222 | lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \ |
223 | INIT_LIST_HEAD(&(_work)->entry); \ | 223 | INIT_LIST_HEAD(&(_work)->entry); \ |
224 | (_work)->func = (_func); \ | 224 | (_work)->func = (_func); \ |
225 | } while (0) | 225 | } while (0) |
@@ -399,7 +399,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | |||
399 | static struct lock_class_key __key; \ | 399 | static struct lock_class_key __key; \ |
400 | const char *__lock_name; \ | 400 | const char *__lock_name; \ |
401 | \ | 401 | \ |
402 | __lock_name = #fmt#args; \ | 402 | __lock_name = "(wq_completion)"#fmt#args; \ |
403 | \ | 403 | \ |
404 | __alloc_workqueue_key((fmt), (flags), (max_active), \ | 404 | __alloc_workqueue_key((fmt), (flags), (max_active), \ |
405 | &__key, __lock_name, ##args); \ | 405 | &__key, __lock_name, ##args); \ |
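The "(work_completion)" and "(wq_completion)" prefixes give the per-work and per-workqueue lockdep classes self-describing names, presumably so that cross-release reports triggered via flush_work()/flush_workqueue() point at the completion dependency rather than at a bare symbol name.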
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 5d08c1950e7d..ff68cf288f9b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -984,12 +984,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) | |||
984 | 984 | ||
985 | static inline int sysctl_sync_period(struct netns_ipvs *ipvs) | 985 | static inline int sysctl_sync_period(struct netns_ipvs *ipvs) |
986 | { | 986 | { |
987 | return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); | 987 | return READ_ONCE(ipvs->sysctl_sync_threshold[1]); |
988 | } | 988 | } |
989 | 989 | ||
990 | static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) | 990 | static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) |
991 | { | 991 | { |
992 | return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); | 992 | return READ_ONCE(ipvs->sysctl_sync_refresh_period); |
993 | } | 993 | } |
994 | 994 | ||
995 | static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) | 995 | static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) |
@@ -1014,7 +1014,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) | |||
1014 | 1014 | ||
1015 | static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) | 1015 | static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) |
1016 | { | 1016 | { |
1017 | return ACCESS_ONCE(ipvs->sysctl_sync_ports); | 1017 | return READ_ONCE(ipvs->sysctl_sync_ports); |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) | 1020 | static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 079c69cae2f6..470c1c71e7f4 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -1165,8 +1165,8 @@ static inline u8 nft_genmask_next(const struct net *net) | |||
1165 | 1165 | ||
1166 | static inline u8 nft_genmask_cur(const struct net *net) | 1166 | static inline u8 nft_genmask_cur(const struct net *net) |
1167 | { | 1167 | { |
1168 | /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */ | 1168 | /* Use READ_ONCE() to prevent refetching the value for atomicity */ |
1169 | return 1 << ACCESS_ONCE(net->nft.gencursor); | 1169 | return 1 << READ_ONCE(net->nft.gencursor); |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | #define NFT_GENMASK_ANY ((1 << 0) | (1 << 1)) | 1172 | #define NFT_GENMASK_ANY ((1 << 0) | (1 << 1)) |
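These conversions are mechanical: READ_ONCE() gives the same single, non-torn access guarantee ACCESS_ONCE() did, also works on non-scalar types, and pairs with WRITE_ONCE() on the store side; with the Alpha dependency barrier now folded into READ_ONCE(), it likewise absorbs the lockless_dereference() calls converted further down. A minimal sketch with a made-up shared_flag variable and consumer:

    static int shared_flag;

    /* publisher: one non-torn store the compiler may not split or elide */
    WRITE_ONCE(shared_flag, 1);

    /* consumer: one non-torn load that will not be silently refetched */
    if (READ_ONCE(shared_flag))
            handle_flag();          /* handle_flag() is illustrative only */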
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index f65b92e0e1f9..ee8220f8dcf5 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
2 | #include <linux/compiler.h> | 2 | #include <linux/compiler_types.h> |
3 | 3 | ||
4 | #ifndef __always_inline | 4 | #ifndef __always_inline |
5 | #define __always_inline inline | 5 | #define __always_inline inline |
diff --git a/kernel/acct.c b/kernel/acct.c index 6670fbd3e466..d15c0ee4d955 100644 --- a/kernel/acct.c +++ b/kernel/acct.c | |||
@@ -147,7 +147,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns) | |||
147 | again: | 147 | again: |
148 | smp_rmb(); | 148 | smp_rmb(); |
149 | rcu_read_lock(); | 149 | rcu_read_lock(); |
150 | res = to_acct(ACCESS_ONCE(ns->bacct)); | 150 | res = to_acct(READ_ONCE(ns->bacct)); |
151 | if (!res) { | 151 | if (!res) { |
152 | rcu_read_unlock(); | 152 | rcu_read_unlock(); |
153 | return NULL; | 153 | return NULL; |
@@ -159,7 +159,7 @@ again: | |||
159 | } | 159 | } |
160 | rcu_read_unlock(); | 160 | rcu_read_unlock(); |
161 | mutex_lock(&res->lock); | 161 | mutex_lock(&res->lock); |
162 | if (res != to_acct(ACCESS_ONCE(ns->bacct))) { | 162 | if (res != to_acct(READ_ONCE(ns->bacct))) { |
163 | mutex_unlock(&res->lock); | 163 | mutex_unlock(&res->lock); |
164 | acct_put(res); | 164 | acct_put(res); |
165 | goto again; | 165 | goto again; |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 10cdb9c26b5d..c298847d4b85 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -209,7 +209,7 @@ static int event_function(void *info) | |||
209 | struct perf_event_context *task_ctx = cpuctx->task_ctx; | 209 | struct perf_event_context *task_ctx = cpuctx->task_ctx; |
210 | int ret = 0; | 210 | int ret = 0; |
211 | 211 | ||
212 | WARN_ON_ONCE(!irqs_disabled()); | 212 | lockdep_assert_irqs_disabled(); |
213 | 213 | ||
214 | perf_ctx_lock(cpuctx, task_ctx); | 214 | perf_ctx_lock(cpuctx, task_ctx); |
215 | /* | 215 | /* |
@@ -306,7 +306,7 @@ static void event_function_local(struct perf_event *event, event_f func, void *d | |||
306 | struct task_struct *task = READ_ONCE(ctx->task); | 306 | struct task_struct *task = READ_ONCE(ctx->task); |
307 | struct perf_event_context *task_ctx = NULL; | 307 | struct perf_event_context *task_ctx = NULL; |
308 | 308 | ||
309 | WARN_ON_ONCE(!irqs_disabled()); | 309 | lockdep_assert_irqs_disabled(); |
310 | 310 | ||
311 | if (task) { | 311 | if (task) { |
312 | if (task == TASK_TOMBSTONE) | 312 | if (task == TASK_TOMBSTONE) |
@@ -1006,7 +1006,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr) | |||
1006 | struct perf_cpu_context *cpuctx; | 1006 | struct perf_cpu_context *cpuctx; |
1007 | int rotations = 0; | 1007 | int rotations = 0; |
1008 | 1008 | ||
1009 | WARN_ON(!irqs_disabled()); | 1009 | lockdep_assert_irqs_disabled(); |
1010 | 1010 | ||
1011 | cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); | 1011 | cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); |
1012 | rotations = perf_rotate_context(cpuctx); | 1012 | rotations = perf_rotate_context(cpuctx); |
@@ -1093,7 +1093,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx) | |||
1093 | { | 1093 | { |
1094 | struct list_head *head = this_cpu_ptr(&active_ctx_list); | 1094 | struct list_head *head = this_cpu_ptr(&active_ctx_list); |
1095 | 1095 | ||
1096 | WARN_ON(!irqs_disabled()); | 1096 | lockdep_assert_irqs_disabled(); |
1097 | 1097 | ||
1098 | WARN_ON(!list_empty(&ctx->active_ctx_list)); | 1098 | WARN_ON(!list_empty(&ctx->active_ctx_list)); |
1099 | 1099 | ||
@@ -1102,7 +1102,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx) | |||
1102 | 1102 | ||
1103 | static void perf_event_ctx_deactivate(struct perf_event_context *ctx) | 1103 | static void perf_event_ctx_deactivate(struct perf_event_context *ctx) |
1104 | { | 1104 | { |
1105 | WARN_ON(!irqs_disabled()); | 1105 | lockdep_assert_irqs_disabled(); |
1106 | 1106 | ||
1107 | WARN_ON(list_empty(&ctx->active_ctx_list)); | 1107 | WARN_ON(list_empty(&ctx->active_ctx_list)); |
1108 | 1108 | ||
@@ -1202,7 +1202,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting) | |||
1202 | 1202 | ||
1203 | again: | 1203 | again: |
1204 | rcu_read_lock(); | 1204 | rcu_read_lock(); |
1205 | ctx = ACCESS_ONCE(event->ctx); | 1205 | ctx = READ_ONCE(event->ctx); |
1206 | if (!atomic_inc_not_zero(&ctx->refcount)) { | 1206 | if (!atomic_inc_not_zero(&ctx->refcount)) { |
1207 | rcu_read_unlock(); | 1207 | rcu_read_unlock(); |
1208 | goto again; | 1208 | goto again; |
@@ -3523,7 +3523,7 @@ void perf_event_task_tick(void) | |||
3523 | struct perf_event_context *ctx, *tmp; | 3523 | struct perf_event_context *ctx, *tmp; |
3524 | int throttled; | 3524 | int throttled; |
3525 | 3525 | ||
3526 | WARN_ON(!irqs_disabled()); | 3526 | lockdep_assert_irqs_disabled(); |
3527 | 3527 | ||
3528 | __this_cpu_inc(perf_throttled_seq); | 3528 | __this_cpu_inc(perf_throttled_seq); |
3529 | throttled = __this_cpu_xchg(perf_throttled_count, 0); | 3529 | throttled = __this_cpu_xchg(perf_throttled_count, 0); |
@@ -4233,7 +4233,7 @@ static void perf_remove_from_owner(struct perf_event *event) | |||
4233 | * indeed free this event, otherwise we need to serialize on | 4233 | * indeed free this event, otherwise we need to serialize on |
4234 | * owner->perf_event_mutex. | 4234 | * owner->perf_event_mutex. |
4235 | */ | 4235 | */ |
4236 | owner = lockless_dereference(event->owner); | 4236 | owner = READ_ONCE(event->owner); |
4237 | if (owner) { | 4237 | if (owner) { |
4238 | /* | 4238 | /* |
4239 | * Since delayed_put_task_struct() also drops the last | 4239 | * Since delayed_put_task_struct() also drops the last |
@@ -4330,7 +4330,7 @@ again: | |||
4330 | * Cannot change, child events are not migrated, see the | 4330 | * Cannot change, child events are not migrated, see the |
4331 | * comment with perf_event_ctx_lock_nested(). | 4331 | * comment with perf_event_ctx_lock_nested(). |
4332 | */ | 4332 | */ |
4333 | ctx = lockless_dereference(child->ctx); | 4333 | ctx = READ_ONCE(child->ctx); |
4334 | /* | 4334 | /* |
4335 | * Since child_mutex nests inside ctx::mutex, we must jump | 4335 | * Since child_mutex nests inside ctx::mutex, we must jump |
4336 | * through hoops. We start by grabbing a reference on the ctx. | 4336 | * through hoops. We start by grabbing a reference on the ctx. |
@@ -5304,8 +5304,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
5304 | if (!rb) | 5304 | if (!rb) |
5305 | goto aux_unlock; | 5305 | goto aux_unlock; |
5306 | 5306 | ||
5307 | aux_offset = ACCESS_ONCE(rb->user_page->aux_offset); | 5307 | aux_offset = READ_ONCE(rb->user_page->aux_offset); |
5308 | aux_size = ACCESS_ONCE(rb->user_page->aux_size); | 5308 | aux_size = READ_ONCE(rb->user_page->aux_size); |
5309 | 5309 | ||
5310 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) | 5310 | if (aux_offset < perf_data_size(rb) + PAGE_SIZE) |
5311 | goto aux_unlock; | 5311 | goto aux_unlock; |
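The WARN_ON(!irqs_disabled()) checks become lockdep assertions, which compile away when lockdep's IRQ-state tracking is off and produce a proper one-shot warning when it is on. A sketch of both new assertions inside hypothetical helpers:

    static void runs_with_irqs_off(void)
    {
            lockdep_assert_irqs_disabled();   /* warn once if IRQs are enabled here */
            /* ... work that relies on local interrupts being off ... */
    }

    static void must_not_run_with_irqs_off(void)
    {
            lockdep_assert_irqs_enabled();    /* warn once if IRQs are disabled here */
            /* ... work that may wait or otherwise needs IRQs on ... */
    }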
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index f684d8e5fa2b..f3e37971c842 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -381,7 +381,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, | |||
381 | * (B) <-> (C) ordering is still observed by the pmu driver. | 381 | * (B) <-> (C) ordering is still observed by the pmu driver. |
382 | */ | 382 | */ |
383 | if (!rb->aux_overwrite) { | 383 | if (!rb->aux_overwrite) { |
384 | aux_tail = ACCESS_ONCE(rb->user_page->aux_tail); | 384 | aux_tail = READ_ONCE(rb->user_page->aux_tail); |
385 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; | 385 | handle->wakeup = rb->aux_wakeup + rb->aux_watermark; |
386 | if (aux_head - aux_tail < perf_aux_size(rb)) | 386 | if (aux_head - aux_tail < perf_aux_size(rb)) |
387 | handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); | 387 | handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb)); |
diff --git a/kernel/exit.c b/kernel/exit.c index f6cad39f35df..6b4298a41167 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1339,7 +1339,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, | |||
1339 | * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition | 1339 | * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition |
1340 | * can't confuse the checks below. | 1340 | * can't confuse the checks below. |
1341 | */ | 1341 | */ |
1342 | int exit_state = ACCESS_ONCE(p->exit_state); | 1342 | int exit_state = READ_ONCE(p->exit_state); |
1343 | int ret; | 1343 | int ret; |
1344 | 1344 | ||
1345 | if (unlikely(exit_state == EXIT_DEAD)) | 1345 | if (unlikely(exit_state == EXIT_DEAD)) |
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c index c8c1d073fbf1..e0923fa4927a 100644 --- a/kernel/irq/timings.c +++ b/kernel/irq/timings.c | |||
@@ -264,7 +264,7 @@ u64 irq_timings_next_event(u64 now) | |||
264 | * order to prevent the timings circular buffer to be updated | 264 | * order to prevent the timings circular buffer to be updated |
265 | * while we are reading it. | 265 | * while we are reading it. |
266 | */ | 266 | */ |
267 | WARN_ON_ONCE(!irqs_disabled()); | 267 | lockdep_assert_irqs_disabled(); |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * Number of elements in the circular buffer: If it happens it | 270 | * Number of elements in the circular buffer: If it happens it |
diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 9f20f6c72579..ec8ac337404d 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c | |||
@@ -191,7 +191,7 @@ void irq_work_tick(void) | |||
191 | */ | 191 | */ |
192 | void irq_work_sync(struct irq_work *work) | 192 | void irq_work_sync(struct irq_work *work) |
193 | { | 193 | { |
194 | WARN_ON_ONCE(irqs_disabled()); | 194 | lockdep_assert_irqs_enabled(); |
195 | 195 | ||
196 | while (work->flags & IRQ_WORK_BUSY) | 196 | while (work->flags & IRQ_WORK_BUSY) |
197 | cpu_relax(); | 197 | cpu_relax(); |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 0bf2e8f5244a..8ff4ca4665ff 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
@@ -83,7 +83,7 @@ static void static_key_slow_inc_cpuslocked(struct static_key *key) | |||
83 | { | 83 | { |
84 | int v, v1; | 84 | int v, v1; |
85 | 85 | ||
86 | STATIC_KEY_CHECK_USE(); | 86 | STATIC_KEY_CHECK_USE(key); |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * Careful if we get concurrent static_key_slow_inc() calls; | 89 | * Careful if we get concurrent static_key_slow_inc() calls; |
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); | |||
128 | 128 | ||
129 | void static_key_enable_cpuslocked(struct static_key *key) | 129 | void static_key_enable_cpuslocked(struct static_key *key) |
130 | { | 130 | { |
131 | STATIC_KEY_CHECK_USE(); | 131 | STATIC_KEY_CHECK_USE(key); |
132 | 132 | ||
133 | if (atomic_read(&key->enabled) > 0) { | 133 | if (atomic_read(&key->enabled) > 0) { |
134 | WARN_ON_ONCE(atomic_read(&key->enabled) != 1); | 134 | WARN_ON_ONCE(atomic_read(&key->enabled) != 1); |
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(static_key_enable); | |||
158 | 158 | ||
159 | void static_key_disable_cpuslocked(struct static_key *key) | 159 | void static_key_disable_cpuslocked(struct static_key *key) |
160 | { | 160 | { |
161 | STATIC_KEY_CHECK_USE(); | 161 | STATIC_KEY_CHECK_USE(key); |
162 | 162 | ||
163 | if (atomic_read(&key->enabled) != 1) { | 163 | if (atomic_read(&key->enabled) != 1) { |
164 | WARN_ON_ONCE(atomic_read(&key->enabled) != 0); | 164 | WARN_ON_ONCE(atomic_read(&key->enabled) != 0); |
@@ -224,21 +224,21 @@ static void jump_label_update_timeout(struct work_struct *work) | |||
224 | 224 | ||
225 | void static_key_slow_dec(struct static_key *key) | 225 | void static_key_slow_dec(struct static_key *key) |
226 | { | 226 | { |
227 | STATIC_KEY_CHECK_USE(); | 227 | STATIC_KEY_CHECK_USE(key); |
228 | __static_key_slow_dec(key, 0, NULL); | 228 | __static_key_slow_dec(key, 0, NULL); |
229 | } | 229 | } |
230 | EXPORT_SYMBOL_GPL(static_key_slow_dec); | 230 | EXPORT_SYMBOL_GPL(static_key_slow_dec); |
231 | 231 | ||
232 | void static_key_slow_dec_deferred(struct static_key_deferred *key) | 232 | void static_key_slow_dec_deferred(struct static_key_deferred *key) |
233 | { | 233 | { |
234 | STATIC_KEY_CHECK_USE(); | 234 | STATIC_KEY_CHECK_USE(key); |
235 | __static_key_slow_dec(&key->key, key->timeout, &key->work); | 235 | __static_key_slow_dec(&key->key, key->timeout, &key->work); |
236 | } | 236 | } |
237 | EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); | 237 | EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); |
238 | 238 | ||
239 | void static_key_deferred_flush(struct static_key_deferred *key) | 239 | void static_key_deferred_flush(struct static_key_deferred *key) |
240 | { | 240 | { |
241 | STATIC_KEY_CHECK_USE(); | 241 | STATIC_KEY_CHECK_USE(key); |
242 | flush_delayed_work(&key->work); | 242 | flush_delayed_work(&key->work); |
243 | } | 243 | } |
244 | EXPORT_SYMBOL_GPL(static_key_deferred_flush); | 244 | EXPORT_SYMBOL_GPL(static_key_deferred_flush); |
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(static_key_deferred_flush); | |||
246 | void jump_label_rate_limit(struct static_key_deferred *key, | 246 | void jump_label_rate_limit(struct static_key_deferred *key, |
247 | unsigned long rl) | 247 | unsigned long rl) |
248 | { | 248 | { |
249 | STATIC_KEY_CHECK_USE(); | 249 | STATIC_KEY_CHECK_USE(key); |
250 | key->timeout = rl; | 250 | key->timeout = rl; |
251 | INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); | 251 | INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); |
252 | } | 252 | } |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e36e652d996f..db933d063bfc 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -76,6 +76,19 @@ module_param(lock_stat, int, 0644); | |||
76 | #define lock_stat 0 | 76 | #define lock_stat 0 |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK | ||
80 | static int crossrelease_fullstack = 1; | ||
81 | #else | ||
82 | static int crossrelease_fullstack; | ||
83 | #endif | ||
84 | static int __init allow_crossrelease_fullstack(char *str) | ||
85 | { | ||
86 | crossrelease_fullstack = 1; | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | early_param("crossrelease_fullstack", allow_crossrelease_fullstack); | ||
91 | |||
79 | /* | 92 | /* |
80 | * lockdep_lock: protects the lockdep graph, the hashes and the | 93 | * lockdep_lock: protects the lockdep graph, the hashes and the |
81 | * class/list/hash allocators. | 94 | * class/list/hash allocators. |
@@ -4863,8 +4876,14 @@ static void add_xhlock(struct held_lock *hlock) | |||
4863 | xhlock->trace.nr_entries = 0; | 4876 | xhlock->trace.nr_entries = 0; |
4864 | xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES; | 4877 | xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES; |
4865 | xhlock->trace.entries = xhlock->trace_entries; | 4878 | xhlock->trace.entries = xhlock->trace_entries; |
4866 | xhlock->trace.skip = 3; | 4879 | |
4867 | save_stack_trace(&xhlock->trace); | 4880 | if (crossrelease_fullstack) { |
4881 | xhlock->trace.skip = 3; | ||
4882 | save_stack_trace(&xhlock->trace); | ||
4883 | } else { | ||
4884 | xhlock->trace.nr_entries = 1; | ||
4885 | xhlock->trace.entries[0] = hlock->acquire_ip; | ||
4886 | } | ||
4868 | } | 4887 | } |
4869 | 4888 | ||
4870 | static inline int same_context_xhlock(struct hist_lock *xhlock) | 4889 | static inline int same_context_xhlock(struct hist_lock *xhlock) |
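Note the new knob: by default each crosslock history entry now records only the acquisition IP; full stack traces are captured again when booting with crossrelease_fullstack on the kernel command line or building with CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK=y, trading collection overhead for more detailed cross-release reports.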
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index 2655f26ec882..c7471c3fb798 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c | |||
@@ -23,49 +23,11 @@ | |||
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <asm/qrwlock.h> | 24 | #include <asm/qrwlock.h> |
25 | 25 | ||
26 | /* | ||
27 | * This internal data structure is used for optimizing access to some of | ||
28 | * the subfields within the atomic_t cnts. | ||
29 | */ | ||
30 | struct __qrwlock { | ||
31 | union { | ||
32 | atomic_t cnts; | ||
33 | struct { | ||
34 | #ifdef __LITTLE_ENDIAN | ||
35 | u8 wmode; /* Writer mode */ | ||
36 | u8 rcnts[3]; /* Reader counts */ | ||
37 | #else | ||
38 | u8 rcnts[3]; /* Reader counts */ | ||
39 | u8 wmode; /* Writer mode */ | ||
40 | #endif | ||
41 | }; | ||
42 | }; | ||
43 | arch_spinlock_t lock; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * rspin_until_writer_unlock - inc reader count & spin until writer is gone | ||
48 | * @lock : Pointer to queue rwlock structure | ||
49 | * @writer: Current queue rwlock writer status byte | ||
50 | * | ||
51 | * In interrupt context or at the head of the queue, the reader will just | ||
52 | * increment the reader count & wait until the writer releases the lock. | ||
53 | */ | ||
54 | static __always_inline void | ||
55 | rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts) | ||
56 | { | ||
57 | while ((cnts & _QW_WMASK) == _QW_LOCKED) { | ||
58 | cpu_relax(); | ||
59 | cnts = atomic_read_acquire(&lock->cnts); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | /** | 26 | /** |
64 | * queued_read_lock_slowpath - acquire read lock of a queue rwlock | 27 | * queued_read_lock_slowpath - acquire read lock of a queue rwlock |
65 | * @lock: Pointer to queue rwlock structure | 28 | * @lock: Pointer to queue rwlock structure |
66 | * @cnts: Current qrwlock lock value | ||
67 | */ | 29 | */ |
68 | void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) | 30 | void queued_read_lock_slowpath(struct qrwlock *lock) |
69 | { | 31 | { |
70 | /* | 32 | /* |
71 | * Readers come here when they cannot get the lock without waiting | 33 | * Readers come here when they cannot get the lock without waiting |
@@ -73,13 +35,11 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) | |||
73 | if (unlikely(in_interrupt())) { | 35 | if (unlikely(in_interrupt())) { |
74 | /* | 36 | /* |
75 | * Readers in interrupt context will get the lock immediately | 37 | * Readers in interrupt context will get the lock immediately |
76 | * if the writer is just waiting (not holding the lock yet). | 38 | * if the writer is just waiting (not holding the lock yet), |
77 | * The rspin_until_writer_unlock() function returns immediately | 39 | * so spin with ACQUIRE semantics until the lock is available |
78 | * in this case. Otherwise, they will spin (with ACQUIRE | 40 | * without waiting in the queue. |
79 | * semantics) until the lock is available without waiting in | ||
80 | * the queue. | ||
81 | */ | 41 | */ |
82 | rspin_until_writer_unlock(lock, cnts); | 42 | atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); |
83 | return; | 43 | return; |
84 | } | 44 | } |
85 | atomic_sub(_QR_BIAS, &lock->cnts); | 45 | atomic_sub(_QR_BIAS, &lock->cnts); |
@@ -88,14 +48,14 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) | |||
88 | * Put the reader into the wait queue | 48 | * Put the reader into the wait queue |
89 | */ | 49 | */ |
90 | arch_spin_lock(&lock->wait_lock); | 50 | arch_spin_lock(&lock->wait_lock); |
51 | atomic_add(_QR_BIAS, &lock->cnts); | ||
91 | 52 | ||
92 | /* | 53 | /* |
93 | * The ACQUIRE semantics of the following spinning code ensure | 54 | * The ACQUIRE semantics of the following spinning code ensure |
94 | * that accesses can't leak upwards out of our subsequent critical | 55 | * that accesses can't leak upwards out of our subsequent critical |
95 | * section in the case that the lock is currently held for write. | 56 | * section in the case that the lock is currently held for write. |
96 | */ | 57 | */ |
97 | cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts); | 58 | atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); |
98 | rspin_until_writer_unlock(lock, cnts); | ||
99 | 59 | ||
100 | /* | 60 | /* |
101 | * Signal the next one in queue to become queue head | 61 | * Signal the next one in queue to become queue head |
@@ -110,8 +70,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); | |||
110 | */ | 70 | */ |
111 | void queued_write_lock_slowpath(struct qrwlock *lock) | 71 | void queued_write_lock_slowpath(struct qrwlock *lock) |
112 | { | 72 | { |
113 | u32 cnts; | ||
114 | |||
115 | /* Put the writer into the wait queue */ | 73 | /* Put the writer into the wait queue */ |
116 | arch_spin_lock(&lock->wait_lock); | 74 | arch_spin_lock(&lock->wait_lock); |
117 | 75 | ||
@@ -120,30 +78,14 @@ void queued_write_lock_slowpath(struct qrwlock *lock) | |||
120 | (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)) | 78 | (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)) |
121 | goto unlock; | 79 | goto unlock; |
122 | 80 | ||
123 | /* | 81 | /* Set the waiting flag to notify readers that a writer is pending */ |
124 | * Set the waiting flag to notify readers that a writer is pending, | 82 | atomic_add(_QW_WAITING, &lock->cnts); |
125 | * or wait for a previous writer to go away. | ||
126 | */ | ||
127 | for (;;) { | ||
128 | struct __qrwlock *l = (struct __qrwlock *)lock; | ||
129 | |||
130 | if (!READ_ONCE(l->wmode) && | ||
131 | (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0)) | ||
132 | break; | ||
133 | 83 | ||
134 | cpu_relax(); | 84 | /* When no more readers or writers, set the locked flag */ |
135 | } | 85 | do { |
136 | 86 | atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); | |
137 | /* When no more readers, set the locked flag */ | 87 | } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, |
138 | for (;;) { | 88 | _QW_LOCKED) != _QW_WAITING); |
139 | cnts = atomic_read(&lock->cnts); | ||
140 | if ((cnts == _QW_WAITING) && | ||
141 | (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING, | ||
142 | _QW_LOCKED) == _QW_WAITING)) | ||
143 | break; | ||
144 | |||
145 | cpu_relax(); | ||
146 | } | ||
147 | unlock: | 89 | unlock: |
148 | arch_spin_unlock(&lock->wait_lock); | 90 | arch_spin_unlock(&lock->wait_lock); |
149 | } | 91 | } |
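Both slowpaths now lean on atomic_cond_read_acquire(), introduced alongside this rewrite: it spins until a condition on the freshly loaded value (spelled VAL inside the macro) holds, and the final load carries ACQUIRE ordering. A minimal sketch with a hypothetical state word:

    static void wait_until_idle(atomic_t *state)
    {
            /* Spin until *state reads as zero; ACQUIRE on the final load keeps
             * the caller's subsequent accesses from floating above the wait. */
            atomic_cond_read_acquire(state, VAL == 0);
    }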
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 15b6a39366c6..6ee477765e6c 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h | |||
@@ -61,21 +61,50 @@ struct pv_node { | |||
61 | #include "qspinlock_stat.h" | 61 | #include "qspinlock_stat.h" |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Hybrid PV queued/unfair lock | ||
65 | * | ||
64 | * By replacing the regular queued_spin_trylock() with the function below, | 66 | * By replacing the regular queued_spin_trylock() with the function below, |
65 | * it will be called once when a lock waiter enter the PV slowpath before | 67 | * it will be called once when a lock waiter enter the PV slowpath before |
66 | * being queued. By allowing one lock stealing attempt here when the pending | 68 | * being queued. |
67 | * bit is off, it helps to reduce the performance impact of lock waiter | 69 | * |
68 | * preemption without the drawback of lock starvation. | 70 | * The pending bit is set by the queue head vCPU of the MCS wait queue in |
71 | * pv_wait_head_or_lock() to signal that it is ready to spin on the lock. | ||
72 | * When that bit becomes visible to the incoming waiters, no lock stealing | ||
73 | * is allowed. The function will return immediately to make the waiters | ||
74 | * enter the MCS wait queue. So lock starvation shouldn't happen as long | ||
75 | * as the queued mode vCPUs are actively running to set the pending bit | ||
76 | * and hence disabling lock stealing. | ||
77 | * | ||
78 | * When the pending bit isn't set, the lock waiters will stay in the unfair | ||
79 | * mode spinning on the lock unless the MCS wait queue is empty. In this | ||
80 | * case, the lock waiters will enter the queued mode slowpath trying to | ||
81 | * become the queue head and set the pending bit. | ||
82 | * | ||
83 | * This hybrid PV queued/unfair lock combines the best attributes of a | ||
84 | * queued lock (no lock starvation) and an unfair lock (good performance | ||
85 | * on not heavily contended locks). | ||
69 | */ | 86 | */ |
70 | #define queued_spin_trylock(l) pv_queued_spin_steal_lock(l) | 87 | #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l) |
71 | static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock) | 88 | static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) |
72 | { | 89 | { |
73 | struct __qspinlock *l = (void *)lock; | 90 | struct __qspinlock *l = (void *)lock; |
74 | 91 | ||
75 | if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) && | 92 | /* |
76 | (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) { | 93 | * Stay in unfair lock mode as long as queued mode waiters are |
77 | qstat_inc(qstat_pv_lock_stealing, true); | 94 | * present in the MCS wait queue but the pending bit isn't set. |
78 | return true; | 95 | */ |
96 | for (;;) { | ||
97 | int val = atomic_read(&lock->val); | ||
98 | |||
99 | if (!(val & _Q_LOCKED_PENDING_MASK) && | ||
100 | (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) { | ||
101 | qstat_inc(qstat_pv_lock_stealing, true); | ||
102 | return true; | ||
103 | } | ||
104 | if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK)) | ||
105 | break; | ||
106 | |||
107 | cpu_relax(); | ||
79 | } | 108 | } |
80 | 109 | ||
81 | return false; | 110 | return false; |
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index a6c76a4832b4..f549c552dbf1 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c | |||
@@ -29,6 +29,22 @@ void __sched down_read(struct rw_semaphore *sem) | |||
29 | 29 | ||
30 | EXPORT_SYMBOL(down_read); | 30 | EXPORT_SYMBOL(down_read); |
31 | 31 | ||
32 | int __sched down_read_killable(struct rw_semaphore *sem) | ||
33 | { | ||
34 | might_sleep(); | ||
35 | rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); | ||
36 | |||
37 | if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { | ||
38 | rwsem_release(&sem->dep_map, 1, _RET_IP_); | ||
39 | return -EINTR; | ||
40 | } | ||
41 | |||
42 | rwsem_set_reader_owned(sem); | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | EXPORT_SYMBOL(down_read_killable); | ||
47 | |||
32 | /* | 48 | /* |
33 | * trylock for reading -- returns 1 if successful, 0 if contention | 49 | * trylock for reading -- returns 1 if successful, 0 if contention |
34 | */ | 50 | */ |
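For callers the new primitive behaves like down_read() except that a fatal signal aborts the wait, so the return value must be checked. An illustrative (not in-tree) user:

    static int read_shared_state(struct rw_semaphore *sem)
    {
            if (down_read_killable(sem))
                    return -EINTR;          /* wait interrupted by a fatal signal */

            /* ... read-side critical section ... */

            up_read(sem);
            return 0;
    }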
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 6e40fdfba326..1fd1a7543cdd 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c | |||
@@ -30,11 +30,10 @@ | |||
30 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 30 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
31 | /* | 31 | /* |
32 | * The __lock_function inlines are taken from | 32 | * The __lock_function inlines are taken from |
33 | * include/linux/spinlock_api_smp.h | 33 | * spinlock : include/linux/spinlock_api_smp.h |
34 | * rwlock : include/linux/rwlock_api_smp.h | ||
34 | */ | 35 | */ |
35 | #else | 36 | #else |
36 | #define raw_read_can_lock(l) read_can_lock(l) | ||
37 | #define raw_write_can_lock(l) write_can_lock(l) | ||
38 | 37 | ||
39 | /* | 38 | /* |
40 | * Some architectures can relax in favour of the CPU owning the lock. | 39 | * Some architectures can relax in favour of the CPU owning the lock. |
@@ -69,7 +68,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ | |||
69 | \ | 68 | \ |
70 | if (!(lock)->break_lock) \ | 69 | if (!(lock)->break_lock) \ |
71 | (lock)->break_lock = 1; \ | 70 | (lock)->break_lock = 1; \ |
72 | while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ | 71 | while ((lock)->break_lock) \ |
73 | arch_##op##_relax(&lock->raw_lock); \ | 72 | arch_##op##_relax(&lock->raw_lock); \ |
74 | } \ | 73 | } \ |
75 | (lock)->break_lock = 0; \ | 74 | (lock)->break_lock = 0; \ |
@@ -89,7 +88,7 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ | |||
89 | \ | 88 | \ |
90 | if (!(lock)->break_lock) \ | 89 | if (!(lock)->break_lock) \ |
91 | (lock)->break_lock = 1; \ | 90 | (lock)->break_lock = 1; \ |
92 | while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ | 91 | while ((lock)->break_lock) \ |
93 | arch_##op##_relax(&lock->raw_lock); \ | 92 | arch_##op##_relax(&lock->raw_lock); \ |
94 | } \ | 93 | } \ |
95 | (lock)->break_lock = 0; \ | 94 | (lock)->break_lock = 0; \ |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e4fe06d42385..f9c0ca2ccf0c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp) | |||
734 | int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; | 734 | int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; |
735 | int *fp = &rnp->need_future_gp[idx]; | 735 | int *fp = &rnp->need_future_gp[idx]; |
736 | 736 | ||
737 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!"); | 737 | lockdep_assert_irqs_disabled(); |
738 | return READ_ONCE(*fp); | 738 | return READ_ONCE(*fp); |
739 | } | 739 | } |
740 | 740 | ||
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp) | |||
746 | static bool | 746 | static bool |
747 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | 747 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) |
748 | { | 748 | { |
749 | RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!"); | 749 | lockdep_assert_irqs_disabled(); |
750 | if (rcu_gp_in_progress(rsp)) | 750 | if (rcu_gp_in_progress(rsp)) |
751 | return false; /* No, a grace period is already in progress. */ | 751 | return false; /* No, a grace period is already in progress. */ |
752 | if (rcu_future_needs_gp(rsp)) | 752 | if (rcu_future_needs_gp(rsp)) |
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user) | |||
773 | struct rcu_data *rdp; | 773 | struct rcu_data *rdp; |
774 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); | 774 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
775 | 775 | ||
776 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!"); | 776 | lockdep_assert_irqs_disabled(); |
777 | trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); | 777 | trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0); |
778 | if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 778 | if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
779 | !user && !is_idle_task(current)) { | 779 | !user && !is_idle_task(current)) { |
@@ -843,7 +843,7 @@ static void rcu_eqs_enter(bool user) | |||
843 | */ | 843 | */ |
844 | void rcu_idle_enter(void) | 844 | void rcu_idle_enter(void) |
845 | { | 845 | { |
846 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!"); | 846 | lockdep_assert_irqs_disabled(); |
847 | rcu_eqs_enter(false); | 847 | rcu_eqs_enter(false); |
848 | } | 848 | } |
849 | 849 | ||
@@ -861,7 +861,7 @@ void rcu_idle_enter(void) | |||
861 | */ | 861 | */ |
862 | void rcu_user_enter(void) | 862 | void rcu_user_enter(void) |
863 | { | 863 | { |
864 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!"); | 864 | lockdep_assert_irqs_disabled(); |
865 | rcu_eqs_enter(true); | 865 | rcu_eqs_enter(true); |
866 | } | 866 | } |
867 | #endif /* CONFIG_NO_HZ_FULL */ | 867 | #endif /* CONFIG_NO_HZ_FULL */ |
@@ -889,7 +889,7 @@ void rcu_irq_exit(void) | |||
889 | { | 889 | { |
890 | struct rcu_dynticks *rdtp; | 890 | struct rcu_dynticks *rdtp; |
891 | 891 | ||
892 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); | 892 | lockdep_assert_irqs_disabled(); |
893 | rdtp = this_cpu_ptr(&rcu_dynticks); | 893 | rdtp = this_cpu_ptr(&rcu_dynticks); |
894 | 894 | ||
895 | /* Page faults can happen in NMI handlers, so check... */ | 895 | /* Page faults can happen in NMI handlers, so check... */ |
@@ -959,7 +959,7 @@ static void rcu_eqs_exit(bool user) | |||
959 | struct rcu_dynticks *rdtp; | 959 | struct rcu_dynticks *rdtp; |
960 | long long oldval; | 960 | long long oldval; |
961 | 961 | ||
962 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!"); | 962 | lockdep_assert_irqs_disabled(); |
963 | rdtp = this_cpu_ptr(&rcu_dynticks); | 963 | rdtp = this_cpu_ptr(&rcu_dynticks); |
964 | oldval = rdtp->dynticks_nesting; | 964 | oldval = rdtp->dynticks_nesting; |
965 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); | 965 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); |
@@ -1039,7 +1039,7 @@ void rcu_irq_enter(void) | |||
1039 | struct rcu_dynticks *rdtp; | 1039 | struct rcu_dynticks *rdtp; |
1040 | long long oldval; | 1040 | long long oldval; |
1041 | 1041 | ||
1042 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!"); | 1042 | lockdep_assert_irqs_disabled(); |
1043 | rdtp = this_cpu_ptr(&rcu_dynticks); | 1043 | rdtp = this_cpu_ptr(&rcu_dynticks); |
1044 | 1044 | ||
1045 | /* Page faults can happen in NMI handlers, so check... */ | 1045 | /* Page faults can happen in NMI handlers, so check... */ |
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 4c857e583802..dd4d0d390e5b 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -326,7 +326,7 @@ static void rcu_preempt_note_context_switch(bool preempt) | |||
326 | struct rcu_data *rdp; | 326 | struct rcu_data *rdp; |
327 | struct rcu_node *rnp; | 327 | struct rcu_node *rnp; |
328 | 328 | ||
329 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n"); | 329 | lockdep_assert_irqs_disabled(); |
330 | WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); | 330 | WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0); |
331 | if (t->rcu_read_lock_nesting > 0 && | 331 | if (t->rcu_read_lock_nesting > 0 && |
332 | !t->rcu_read_unlock_special.b.blocked) { | 332 | !t->rcu_read_unlock_special.b.blocked) { |
@@ -1420,7 +1420,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) | |||
1420 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); | 1420 | struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); |
1421 | unsigned long dj; | 1421 | unsigned long dj; |
1422 | 1422 | ||
1423 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!"); | 1423 | lockdep_assert_irqs_disabled(); |
1424 | 1424 | ||
1425 | /* Snapshot to detect later posting of non-lazy callback. */ | 1425 | /* Snapshot to detect later posting of non-lazy callback. */ |
1426 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; | 1426 | rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; |
@@ -1469,7 +1469,7 @@ static void rcu_prepare_for_idle(void) | |||
1469 | struct rcu_state *rsp; | 1469 | struct rcu_state *rsp; |
1470 | int tne; | 1470 | int tne; |
1471 | 1471 | ||
1472 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!"); | 1472 | lockdep_assert_irqs_disabled(); |
1473 | if (rcu_is_nocb_cpu(smp_processor_id())) | 1473 | if (rcu_is_nocb_cpu(smp_processor_id())) |
1474 | return; | 1474 | return; |
1475 | 1475 | ||
@@ -1524,7 +1524,7 @@ static void rcu_prepare_for_idle(void) | |||
1524 | */ | 1524 | */ |
1525 | static void rcu_cleanup_after_idle(void) | 1525 | static void rcu_cleanup_after_idle(void) |
1526 | { | 1526 | { |
1527 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!"); | 1527 | lockdep_assert_irqs_disabled(); |
1528 | if (rcu_is_nocb_cpu(smp_processor_id())) | 1528 | if (rcu_is_nocb_cpu(smp_processor_id())) |
1529 | return; | 1529 | return; |
1530 | if (rcu_try_advance_all_cbs()) | 1530 | if (rcu_try_advance_all_cbs()) |
@@ -2016,7 +2016,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp, | |||
2016 | struct rcu_data *rdp, | 2016 | struct rcu_data *rdp, |
2017 | unsigned long flags) | 2017 | unsigned long flags) |
2018 | { | 2018 | { |
2019 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!"); | 2019 | lockdep_assert_irqs_disabled(); |
2020 | if (!rcu_is_nocb_cpu(smp_processor_id())) | 2020 | if (!rcu_is_nocb_cpu(smp_processor_id())) |
2021 | return false; /* Not NOCBs CPU, caller must migrate CBs. */ | 2021 | return false; /* Not NOCBs CPU, caller must migrate CBs. */ |
2022 | __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist), | 2022 | __call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist), |
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index ca0f8fc945c6..e086babe6c61 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
@@ -388,7 +388,7 @@ void sched_clock_tick(void) | |||
388 | if (unlikely(!sched_clock_running)) | 388 | if (unlikely(!sched_clock_running)) |
389 | return; | 389 | return; |
390 | 390 | ||
391 | WARN_ON_ONCE(!irqs_disabled()); | 391 | lockdep_assert_irqs_disabled(); |
392 | 392 | ||
393 | scd = this_scd(); | 393 | scd = this_scd(); |
394 | __scd_stamp(scd); | 394 | __scd_stamp(scd); |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 14d2dbf97c53..9be8b68a66da 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -259,8 +259,7 @@ static inline u64 account_other_time(u64 max) | |||
259 | { | 259 | { |
260 | u64 accounted; | 260 | u64 accounted; |
261 | 261 | ||
262 | /* Shall be converted to a lockdep-enabled lightweight check */ | 262 | lockdep_assert_irqs_disabled(); |
263 | WARN_ON_ONCE(!irqs_disabled()); | ||
264 | 263 | ||
265 | accounted = steal_account_process_time(max); | 264 | accounted = steal_account_process_time(max); |
266 | 265 | ||
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 418a1c045933..5f0dfb2abb8d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd, | |||
190 | u32 ret = SECCOMP_RET_ALLOW; | 190 | u32 ret = SECCOMP_RET_ALLOW; |
191 | /* Make sure cross-thread synced filter points somewhere sane. */ | 191 | /* Make sure cross-thread synced filter points somewhere sane. */ |
192 | struct seccomp_filter *f = | 192 | struct seccomp_filter *f = |
193 | lockless_dereference(current->seccomp.filter); | 193 | READ_ONCE(current->seccomp.filter); |
194 | 194 | ||
195 | /* Ensure unexpected behavior doesn't result in failing open. */ | 195 | /* Ensure unexpected behavior doesn't result in failing open. */ |
196 | if (unlikely(WARN_ON(f == NULL))) | 196 | if (unlikely(WARN_ON(f == NULL))) |
diff --git a/kernel/smp.c b/kernel/smp.c index c94dd85c8d41..084c8b3a2681 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -213,7 +213,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) | |||
213 | call_single_data_t *csd, *csd_next; | 213 | call_single_data_t *csd, *csd_next; |
214 | static bool warned; | 214 | static bool warned; |
215 | 215 | ||
216 | WARN_ON(!irqs_disabled()); | 216 | lockdep_assert_irqs_disabled(); |
217 | 217 | ||
218 | head = this_cpu_ptr(&call_single_queue); | 218 | head = this_cpu_ptr(&call_single_queue); |
219 | entry = llist_del_all(head); | 219 | entry = llist_del_all(head); |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 4e09821f9d9e..662f7b1b7a78 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__local_bh_disable_ip); | |||
137 | 137 | ||
138 | static void __local_bh_enable(unsigned int cnt) | 138 | static void __local_bh_enable(unsigned int cnt) |
139 | { | 139 | { |
140 | WARN_ON_ONCE(!irqs_disabled()); | 140 | lockdep_assert_irqs_disabled(); |
141 | 141 | ||
142 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) | 142 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
143 | trace_softirqs_on(_RET_IP_); | 143 | trace_softirqs_on(_RET_IP_); |
@@ -158,7 +158,8 @@ EXPORT_SYMBOL(_local_bh_enable); | |||
158 | 158 | ||
159 | void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) | 159 | void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) |
160 | { | 160 | { |
161 | WARN_ON_ONCE(in_irq() || irqs_disabled()); | 161 | WARN_ON_ONCE(in_irq()); |
162 | lockdep_assert_irqs_enabled(); | ||
162 | #ifdef CONFIG_TRACE_IRQFLAGS | 163 | #ifdef CONFIG_TRACE_IRQFLAGS |
163 | local_irq_disable(); | 164 | local_irq_disable(); |
164 | #endif | 165 | #endif |
@@ -396,9 +397,8 @@ void irq_exit(void) | |||
396 | #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED | 397 | #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED |
397 | local_irq_disable(); | 398 | local_irq_disable(); |
398 | #else | 399 | #else |
399 | WARN_ON_ONCE(!irqs_disabled()); | 400 | lockdep_assert_irqs_disabled(); |
400 | #endif | 401 | #endif |
401 | |||
402 | account_irq_exit_time(current); | 402 | account_irq_exit_time(current); |
403 | preempt_count_sub(HARDIRQ_OFFSET); | 403 | preempt_count_sub(HARDIRQ_OFFSET); |
404 | if (!in_interrupt() && local_softirq_pending()) | 404 | if (!in_interrupt() && local_softirq_pending()) |
@@ -488,7 +488,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); | |||
488 | 488 | ||
489 | void __tasklet_hi_schedule_first(struct tasklet_struct *t) | 489 | void __tasklet_hi_schedule_first(struct tasklet_struct *t) |
490 | { | 490 | { |
491 | BUG_ON(!irqs_disabled()); | 491 | lockdep_assert_irqs_disabled(); |
492 | 492 | ||
493 | t->next = __this_cpu_read(tasklet_hi_vec.head); | 493 | t->next = __this_cpu_read(tasklet_hi_vec.head); |
494 | __this_cpu_write(tasklet_hi_vec.head, t); | 494 | __this_cpu_write(tasklet_hi_vec.head, t); |
diff --git a/kernel/task_work.c b/kernel/task_work.c index 5718b3ea202a..0fef395662a6 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c | |||
@@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) | |||
68 | * we raced with task_work_run(), *pprev == NULL/exited. | 68 | * we raced with task_work_run(), *pprev == NULL/exited. |
69 | */ | 69 | */ |
70 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 70 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
71 | while ((work = lockless_dereference(*pprev))) { | 71 | while ((work = READ_ONCE(*pprev))) { |
72 | if (work->func != func) | 72 | if (work->func != func) |
73 | pprev = &work->next; | 73 | pprev = &work->next; |
74 | else if (cmpxchg(pprev, work, work->next) == work) | 74 | else if (cmpxchg(pprev, work, work->next) == work) |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 88f75f92ef36..d32520840fde 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
@@ -758,9 +758,7 @@ void clock_was_set(void) | |||
758 | */ | 758 | */ |
759 | void hrtimers_resume(void) | 759 | void hrtimers_resume(void) |
760 | { | 760 | { |
761 | WARN_ONCE(!irqs_disabled(), | 761 | lockdep_assert_irqs_disabled(); |
762 | KERN_INFO "hrtimers_resume() called with IRQs enabled!"); | ||
763 | |||
764 | /* Retrigger on the local CPU */ | 762 | /* Retrigger on the local CPU */ |
765 | retrigger_next_event(NULL); | 763 | retrigger_next_event(NULL); |
766 | /* And schedule a retrigger for all others */ | 764 | /* And schedule a retrigger for all others */ |
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 5b117110b55b..1f27887aa194 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
@@ -603,7 +603,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, | |||
603 | /* | 603 | /* |
604 | * Disarm any old timer after extracting its expiry time. | 604 | * Disarm any old timer after extracting its expiry time. |
605 | */ | 605 | */ |
606 | WARN_ON_ONCE(!irqs_disabled()); | 606 | lockdep_assert_irqs_disabled(); |
607 | 607 | ||
608 | ret = 0; | 608 | ret = 0; |
609 | old_incr = timer->it.cpu.incr; | 609 | old_incr = timer->it.cpu.incr; |
@@ -1034,7 +1034,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer) | |||
1034 | /* | 1034 | /* |
1035 | * Now re-arm for the new expiry time. | 1035 | * Now re-arm for the new expiry time. |
1036 | */ | 1036 | */ |
1037 | WARN_ON_ONCE(!irqs_disabled()); | 1037 | lockdep_assert_irqs_disabled(); |
1038 | arm_timer(timer); | 1038 | arm_timer(timer); |
1039 | unlock: | 1039 | unlock: |
1040 | unlock_task_sighand(p, &flags); | 1040 | unlock_task_sighand(p, &flags); |
@@ -1125,7 +1125,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) | |||
1125 | struct k_itimer *timer, *next; | 1125 | struct k_itimer *timer, *next; |
1126 | unsigned long flags; | 1126 | unsigned long flags; |
1127 | 1127 | ||
1128 | WARN_ON_ONCE(!irqs_disabled()); | 1128 | lockdep_assert_irqs_disabled(); |
1129 | 1129 | ||
1130 | /* | 1130 | /* |
1131 | * The fast path checks that there are no expired thread or thread | 1131 | * The fast path checks that there are no expired thread or thread |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c7a899c5ce64..dd4b7b492c9b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -198,7 +198,7 @@ static bool check_tick_dependency(atomic_t *dep) | |||
198 | 198 | ||
199 | static bool can_stop_full_tick(int cpu, struct tick_sched *ts) | 199 | static bool can_stop_full_tick(int cpu, struct tick_sched *ts) |
200 | { | 200 | { |
201 | WARN_ON_ONCE(!irqs_disabled()); | 201 | lockdep_assert_irqs_disabled(); |
202 | 202 | ||
203 | if (unlikely(!cpu_online(cpu))) | 203 | if (unlikely(!cpu_online(cpu))) |
204 | return false; | 204 | return false; |
@@ -960,8 +960,7 @@ void tick_nohz_idle_enter(void) | |||
960 | { | 960 | { |
961 | struct tick_sched *ts; | 961 | struct tick_sched *ts; |
962 | 962 | ||
963 | WARN_ON_ONCE(irqs_disabled()); | 963 | lockdep_assert_irqs_enabled(); |
964 | |||
965 | /* | 964 | /* |
966 | * Update the idle state in the scheduler domain hierarchy | 965 | * Update the idle state in the scheduler domain hierarchy |
967 | * when tick_nohz_stop_sched_tick() is called from the idle loop. | 966 | * when tick_nohz_stop_sched_tick() is called from the idle loop. |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 81279c6602ff..845f3805c73d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2724,7 +2724,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2724 | * if it happened, we have to fail the write. | 2724 | * if it happened, we have to fail the write. |
2725 | */ | 2725 | */ |
2726 | barrier(); | 2726 | barrier(); |
2727 | if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { | 2727 | if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { |
2728 | local_dec(&cpu_buffer->committing); | 2728 | local_dec(&cpu_buffer->committing); |
2729 | local_dec(&cpu_buffer->commits); | 2729 | local_dec(&cpu_buffer->commits); |
2730 | return NULL; | 2730 | return NULL; |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 401b0639116f..6b0b343a36a2 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -1460,7 +1460,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr, | |||
1460 | 1460 | ||
1461 | static inline void *event_file_data(struct file *filp) | 1461 | static inline void *event_file_data(struct file *filp) |
1462 | { | 1462 | { |
1463 | return ACCESS_ONCE(file_inode(filp)->i_private); | 1463 | return READ_ONCE(file_inode(filp)->i_private); |
1464 | } | 1464 | } |
1465 | 1465 | ||
1466 | extern struct mutex event_mutex; | 1466 | extern struct mutex event_mutex; |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 719a52a4064a..734accc02418 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -78,7 +78,7 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
78 | { | 78 | { |
79 | unsigned long this_size, flags; unsigned long *p, *top, *start; | 79 | unsigned long this_size, flags; unsigned long *p, *top, *start; |
80 | static int tracer_frame; | 80 | static int tracer_frame; |
81 | int frame_size = ACCESS_ONCE(tracer_frame); | 81 | int frame_size = READ_ONCE(tracer_frame); |
82 | int i, x; | 82 | int i, x; |
83 | 83 | ||
84 | this_size = ((unsigned long)stack) & (THREAD_SIZE-1); | 84 | this_size = ((unsigned long)stack) & (THREAD_SIZE-1); |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index c490f1e4313b..d32b45662fb6 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -894,7 +894,7 @@ static bool new_idmap_permitted(const struct file *file, | |||
894 | int proc_setgroups_show(struct seq_file *seq, void *v) | 894 | int proc_setgroups_show(struct seq_file *seq, void *v) |
895 | { | 895 | { |
896 | struct user_namespace *ns = seq->private; | 896 | struct user_namespace *ns = seq->private; |
897 | unsigned long userns_flags = ACCESS_ONCE(ns->flags); | 897 | unsigned long userns_flags = READ_ONCE(ns->flags); |
898 | 898 | ||
899 | seq_printf(seq, "%s\n", | 899 | seq_printf(seq, "%s\n", |
900 | (userns_flags & USERNS_SETGROUPS_ALLOWED) ? | 900 | (userns_flags & USERNS_SETGROUPS_ALLOWED) ? |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a2dccfe1acec..13f67b5a0a0c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1376,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, | |||
1376 | * queued or lose PENDING. Grabbing PENDING and queueing should | 1376 | * queued or lose PENDING. Grabbing PENDING and queueing should |
1377 | * happen with IRQ disabled. | 1377 | * happen with IRQ disabled. |
1378 | */ | 1378 | */ |
1379 | WARN_ON_ONCE(!irqs_disabled()); | 1379 | lockdep_assert_irqs_disabled(); |
1380 | 1380 | ||
1381 | debug_work_activate(work); | 1381 | debug_work_activate(work); |
1382 | 1382 | ||
@@ -2491,15 +2491,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, | |||
2491 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); | 2491 | INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); |
2492 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); | 2492 | __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); |
2493 | 2493 | ||
2494 | /* | 2494 | init_completion_map(&barr->done, &target->lockdep_map); |
2495 | * Explicitly init the crosslock for wq_barrier::done, make its lock | 2495 | |
2496 | * key a subkey of the corresponding work. As a result we won't | ||
2497 | * build a dependency between wq_barrier::done and unrelated work. | ||
2498 | */ | ||
2499 | lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map, | ||
2500 | "(complete)wq_barr::done", | ||
2501 | target->lockdep_map.key, 1); | ||
2502 | __init_completion(&barr->done); | ||
2503 | barr->task = current; | 2496 | barr->task = current; |
2504 | 2497 | ||
2505 | /* | 2498 | /* |
@@ -2605,16 +2598,13 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
2605 | struct wq_flusher this_flusher = { | 2598 | struct wq_flusher this_flusher = { |
2606 | .list = LIST_HEAD_INIT(this_flusher.list), | 2599 | .list = LIST_HEAD_INIT(this_flusher.list), |
2607 | .flush_color = -1, | 2600 | .flush_color = -1, |
2608 | .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), | 2601 | .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), |
2609 | }; | 2602 | }; |
2610 | int next_color; | 2603 | int next_color; |
2611 | 2604 | ||
2612 | if (WARN_ON(!wq_online)) | 2605 | if (WARN_ON(!wq_online)) |
2613 | return; | 2606 | return; |
2614 | 2607 | ||
2615 | lock_map_acquire(&wq->lockdep_map); | ||
2616 | lock_map_release(&wq->lockdep_map); | ||
2617 | |||
2618 | mutex_lock(&wq->mutex); | 2608 | mutex_lock(&wq->mutex); |
2619 | 2609 | ||
2620 | /* | 2610 | /* |
@@ -2877,9 +2867,6 @@ bool flush_work(struct work_struct *work) | |||
2877 | if (WARN_ON(!wq_online)) | 2867 | if (WARN_ON(!wq_online)) |
2878 | return false; | 2868 | return false; |
2879 | 2869 | ||
2880 | lock_map_acquire(&work->lockdep_map); | ||
2881 | lock_map_release(&work->lockdep_map); | ||
2882 | |||
2883 | if (start_flush_work(work, &barr)) { | 2870 | if (start_flush_work(work, &barr)) { |
2884 | wait_for_completion(&barr.done); | 2871 | wait_for_completion(&barr.done); |
2885 | destroy_work_on_stack(&barr.work); | 2872 | destroy_work_on_stack(&barr.work); |
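The insert_wq_barrier() and flush_work() hunks are two halves of the same cross-release cleanup: the barrier completion now borrows its lockdep map from the work being flushed via init_completion_map() (and COMPLETION_INITIALIZER_ONSTACK_MAP() in flush_workqueue()), so wait_for_completion() itself records the dependency and the hand-rolled lock_map_acquire()/lock_map_release() pairs can go. A sketch showing only how such a barrier completion is set up and waited on (the real code also inserts the barrier into the right pool_workqueue, omitted here); it assumes CONFIG_LOCKDEP so that work_struct has a lockdep_map, and all my_ names are invented:

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

struct my_barrier {
	struct work_struct work;
	struct completion  done;
};

static void my_barrier_func(struct work_struct *work)
{
	struct my_barrier *b = container_of(work, struct my_barrier, work);

	complete(&b->done);
}

static void my_flush_after(struct work_struct *target)
{
	struct my_barrier barr;

	INIT_WORK_ONSTACK(&barr.work, my_barrier_func);
	/* Tie barr.done's lockdep class to the work being flushed. */
	init_completion_map(&barr.done, &target->lockdep_map);
	schedule_work(&barr.work);
	wait_for_completion(&barr.done);	/* dependency recorded here */
	destroy_work_on_stack(&barr.work);
}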
@@ -4640,7 +4627,7 @@ static void rebind_workers(struct worker_pool *pool) | |||
4640 | * concurrency management. Note that when or whether | 4627 | * concurrency management. Note that when or whether |
4641 | * @worker clears REBOUND doesn't affect correctness. | 4628 | * @worker clears REBOUND doesn't affect correctness. |
4642 | * | 4629 | * |
4643 | * ACCESS_ONCE() is necessary because @worker->flags may be | 4630 | * WRITE_ONCE() is necessary because @worker->flags may be |
4644 | * tested without holding any lock in | 4631 | * tested without holding any lock in |
4645 | * wq_worker_waking_up(). Without it, NOT_RUNNING test may | 4632 | * wq_worker_waking_up(). Without it, NOT_RUNNING test may |
4646 | * fail incorrectly leading to premature concurrency | 4633 | * fail incorrectly leading to premature concurrency |
@@ -4649,7 +4636,7 @@ static void rebind_workers(struct worker_pool *pool) | |||
4649 | WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); | 4636 | WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); |
4650 | worker_flags |= WORKER_REBOUND; | 4637 | worker_flags |= WORKER_REBOUND; |
4651 | worker_flags &= ~WORKER_UNBOUND; | 4638 | worker_flags &= ~WORKER_UNBOUND; |
4652 | ACCESS_ONCE(worker->flags) = worker_flags; | 4639 | WRITE_ONCE(worker->flags, worker_flags); |
4653 | } | 4640 | } |
4654 | 4641 | ||
4655 | spin_unlock_irq(&pool->lock); | 4642 | spin_unlock_irq(&pool->lock); |
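The rebind_workers() hunk is the store side of a store/load pair: worker->flags is tested without any lock in wq_worker_waking_up(), so the update is published with WRITE_ONCE() to stop the compiler from splitting or repeating the store. The same pattern, reduced to its essentials with invented names:

#include <linux/compiler.h>

/* Writer; runs under the pool lock in the real code. */
static void publish_flags(unsigned int *flags, unsigned int newval)
{
	WRITE_ONCE(*flags, newval);	/* emitted as a single store */
}

/* Lockless reader. */
static bool flags_test(unsigned int *flags, unsigned int mask)
{
	return READ_ONCE(*flags) & mask;
}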
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dfdad67d8f6c..2b439a515c30 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING | |||
1092 | select DEBUG_MUTEXES | 1092 | select DEBUG_MUTEXES |
1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES | 1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES |
1094 | select DEBUG_LOCK_ALLOC | 1094 | select DEBUG_LOCK_ALLOC |
1095 | select LOCKDEP_CROSSRELEASE if BROKEN | 1095 | select LOCKDEP_CROSSRELEASE |
1096 | select LOCKDEP_COMPLETIONS if BROKEN | 1096 | select LOCKDEP_COMPLETIONS |
1097 | select TRACE_IRQFLAGS | 1097 | select TRACE_IRQFLAGS |
1098 | default n | 1098 | default n |
1099 | help | 1099 | help |
@@ -1179,6 +1179,21 @@ config LOCKDEP_COMPLETIONS | |||
1179 | A deadlock caused by wait_for_completion() and complete() can be | 1179 | A deadlock caused by wait_for_completion() and complete() can be |
1180 | detected by lockdep using crossrelease feature. | 1180 | detected by lockdep using crossrelease feature. |
1181 | 1181 | ||
1182 | config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK | ||
1183 | bool "Enable the boot parameter, crossrelease_fullstack" | ||
1184 | depends on LOCKDEP_CROSSRELEASE | ||
1185 | default n | ||
1186 | help | ||
1187 | The lockdep "cross-release" feature needs to record stack traces | ||
1188 | (of calling functions) for all acquisitions, for eventual later | ||
1189 | use during analysis. By default only a single caller is recorded, | ||
1190 | because the unwind operation can be very expensive with deeper | ||
1191 | stack chains. | ||
1192 | |||
1193 | However a boot parameter, crossrelease_fullstack, was | ||
1194 | introduced since sometimes deeper traces are required for full | ||
1195 | analysis. This option turns on the boot parameter. | ||
1196 | |||
1182 | config DEBUG_LOCKDEP | 1197 | config DEBUG_LOCKDEP |
1183 | bool "Lock dependency engine debugging" | 1198 | bool "Lock dependency engine debugging" |
1184 | depends on DEBUG_KERNEL && LOCKDEP | 1199 | depends on DEBUG_KERNEL && LOCKDEP |
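BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK only flips the default of the crossrelease_fullstack boot parameter described in the help text above. For orientation, a boot-parameter toggle of this kind is normally wired up with early_param(); the handler below is a plausible sketch, not code quoted from this series:

#include <linux/init.h>

static int crossrelease_fullstack;

static int __init allow_crossrelease_fullstack(char *str)
{
	crossrelease_fullstack = 1;
	return 0;
}
early_param("crossrelease_fullstack", allow_crossrelease_fullstack);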
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 4e53be8bc590..b77d51da8c73 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -39,7 +39,7 @@ begin_node: | |||
39 | /* Descend through a shortcut */ | 39 | /* Descend through a shortcut */ |
40 | shortcut = assoc_array_ptr_to_shortcut(cursor); | 40 | shortcut = assoc_array_ptr_to_shortcut(cursor); |
41 | smp_read_barrier_depends(); | 41 | smp_read_barrier_depends(); |
42 | cursor = ACCESS_ONCE(shortcut->next_node); | 42 | cursor = READ_ONCE(shortcut->next_node); |
43 | } | 43 | } |
44 | 44 | ||
45 | node = assoc_array_ptr_to_node(cursor); | 45 | node = assoc_array_ptr_to_node(cursor); |
@@ -55,7 +55,7 @@ begin_node: | |||
55 | */ | 55 | */ |
56 | has_meta = 0; | 56 | has_meta = 0; |
57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
58 | ptr = ACCESS_ONCE(node->slots[slot]); | 58 | ptr = READ_ONCE(node->slots[slot]); |
59 | has_meta |= (unsigned long)ptr; | 59 | has_meta |= (unsigned long)ptr; |
60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
61 | /* We need a barrier between the read of the pointer | 61 | /* We need a barrier between the read of the pointer |
@@ -89,7 +89,7 @@ continue_node: | |||
89 | smp_read_barrier_depends(); | 89 | smp_read_barrier_depends(); |
90 | 90 | ||
91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
92 | ptr = ACCESS_ONCE(node->slots[slot]); | 92 | ptr = READ_ONCE(node->slots[slot]); |
93 | if (assoc_array_ptr_is_meta(ptr)) { | 93 | if (assoc_array_ptr_is_meta(ptr)) { |
94 | cursor = ptr; | 94 | cursor = ptr; |
95 | goto begin_node; | 95 | goto begin_node; |
@@ -98,7 +98,7 @@ continue_node: | |||
98 | 98 | ||
99 | finished_node: | 99 | finished_node: |
100 | /* Move up to the parent (may need to skip back over a shortcut) */ | 100 | /* Move up to the parent (may need to skip back over a shortcut) */ |
101 | parent = ACCESS_ONCE(node->back_pointer); | 101 | parent = READ_ONCE(node->back_pointer); |
102 | slot = node->parent_slot; | 102 | slot = node->parent_slot; |
103 | if (parent == stop) | 103 | if (parent == stop) |
104 | return 0; | 104 | return 0; |
@@ -107,7 +107,7 @@ finished_node: | |||
107 | shortcut = assoc_array_ptr_to_shortcut(parent); | 107 | shortcut = assoc_array_ptr_to_shortcut(parent); |
108 | smp_read_barrier_depends(); | 108 | smp_read_barrier_depends(); |
109 | cursor = parent; | 109 | cursor = parent; |
110 | parent = ACCESS_ONCE(shortcut->back_pointer); | 110 | parent = READ_ONCE(shortcut->back_pointer); |
111 | slot = shortcut->parent_slot; | 111 | slot = shortcut->parent_slot; |
112 | if (parent == stop) | 112 | if (parent == stop) |
113 | return 0; | 113 | return 0; |
@@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array, | |||
147 | void *iterator_data), | 147 | void *iterator_data), |
148 | void *iterator_data) | 148 | void *iterator_data) |
149 | { | 149 | { |
150 | struct assoc_array_ptr *root = ACCESS_ONCE(array->root); | 150 | struct assoc_array_ptr *root = READ_ONCE(array->root); |
151 | 151 | ||
152 | if (!root) | 152 | if (!root) |
153 | return 0; | 153 | return 0; |
@@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array, | |||
194 | 194 | ||
195 | pr_devel("-->%s()\n", __func__); | 195 | pr_devel("-->%s()\n", __func__); |
196 | 196 | ||
197 | cursor = ACCESS_ONCE(array->root); | 197 | cursor = READ_ONCE(array->root); |
198 | if (!cursor) | 198 | if (!cursor) |
199 | return assoc_array_walk_tree_empty; | 199 | return assoc_array_walk_tree_empty; |
200 | 200 | ||
@@ -220,7 +220,7 @@ consider_node: | |||
220 | 220 | ||
221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); | 221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); |
222 | slot &= ASSOC_ARRAY_FAN_MASK; | 222 | slot &= ASSOC_ARRAY_FAN_MASK; |
223 | ptr = ACCESS_ONCE(node->slots[slot]); | 223 | ptr = READ_ONCE(node->slots[slot]); |
224 | 224 | ||
225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", | 225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", |
226 | slot, level, (unsigned long)ptr & 3); | 226 | slot, level, (unsigned long)ptr & 3); |
@@ -294,7 +294,7 @@ follow_shortcut: | |||
294 | } while (sc_level < shortcut->skip_to_level); | 294 | } while (sc_level < shortcut->skip_to_level); |
295 | 295 | ||
296 | /* The shortcut matches the leaf's index to this point. */ | 296 | /* The shortcut matches the leaf's index to this point. */ |
297 | cursor = ACCESS_ONCE(shortcut->next_node); | 297 | cursor = READ_ONCE(shortcut->next_node); |
298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { | 298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { |
299 | level = sc_level; | 299 | level = sc_level; |
300 | goto jumped; | 300 | goto jumped; |
@@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array, | |||
337 | * the terminal node. | 337 | * the terminal node. |
338 | */ | 338 | */ |
339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
340 | ptr = ACCESS_ONCE(node->slots[slot]); | 340 | ptr = READ_ONCE(node->slots[slot]); |
341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
342 | /* We need a barrier between the read of the pointer | 342 | /* We need a barrier between the read of the pointer |
343 | * and dereferencing the pointer - but only if we are | 343 | * and dereferencing the pointer - but only if we are |
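The assoc_array hunks are all lockless readers of an RCU-managed tree: each pointer is fetched exactly once with READ_ONCE(), the dependent dereference is ordered behind that load (this file keeps its explicit smp_read_barrier_depends() calls for the purpose), and metadata encoded in the low pointer bits is examined on the snapshot. The reader pattern, stripped down to a sketch with invented names:

#include <linux/compiler.h>
#include <asm/barrier.h>	/* smp_read_barrier_depends() */

struct my_node { int value; struct my_node *next; };

static int peek_next_value(struct my_node *n)
{
	struct my_node *p = READ_ONCE(n->next);	/* one load, never refetched */

	if (!p)
		return -1;
	smp_read_barrier_depends();	/* order the dependent dereference (Alpha) */
	return p->value;
}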
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 6a406fafb5d6..da4672a50a54 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c | |||
@@ -21,7 +21,7 @@ void dql_completed(struct dql *dql, unsigned int count) | |||
21 | unsigned int ovlimit, completed, num_queued; | 21 | unsigned int ovlimit, completed, num_queued; |
22 | bool all_prev_completed; | 22 | bool all_prev_completed; |
23 | 23 | ||
24 | num_queued = ACCESS_ONCE(dql->num_queued); | 24 | num_queued = READ_ONCE(dql->num_queued); |
25 | 25 | ||
26 | /* Can't complete more than what's in queue */ | 26 | /* Can't complete more than what's in queue */ |
27 | BUG_ON(count > num_queued - dql->num_completed); | 27 | BUG_ON(count > num_queued - dql->num_completed); |
diff --git a/lib/llist.c b/lib/llist.c index ae5872b1df0c..7062e931a7bb 100644 --- a/lib/llist.c +++ b/lib/llist.c | |||
@@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | |||
41 | struct llist_node *first; | 41 | struct llist_node *first; |
42 | 42 | ||
43 | do { | 43 | do { |
44 | new_last->next = first = ACCESS_ONCE(head->first); | 44 | new_last->next = first = READ_ONCE(head->first); |
45 | } while (cmpxchg(&head->first, first, new_first) != first); | 45 | } while (cmpxchg(&head->first, first, new_first) != first); |
46 | 46 | ||
47 | return !first; | 47 | return !first; |
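llist_add_batch() is the generic lock-free push: one READ_ONCE() snapshot of head->first, a cmpxchg() retry loop, and a return value saying whether the list was empty beforehand. Callers typically use that return value to kick a consumer only on the empty-to-non-empty transition, roughly like this (names invented):

#include <linux/llist.h>
#include <linux/workqueue.h>

struct my_sink {
	struct llist_head  pending;
	struct work_struct drain;
};

static void my_sink_post(struct my_sink *s, struct llist_node *item)
{
	/* llist_add() is llist_add_batch(item, item, &s->pending). */
	if (llist_add(item, &s->pending))
		schedule_work(&s->drain);	/* list was empty: wake the consumer */
}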
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 86c3385b9eb3..1746bae94d41 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -620,8 +620,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp | |||
620 | 620 | ||
621 | rcu_read_lock(); | 621 | rcu_read_lock(); |
622 | for (i = 0; i < depth; i++, d = p) { | 622 | for (i = 0; i < depth; i++, d = p) { |
623 | p = ACCESS_ONCE(d->d_parent); | 623 | p = READ_ONCE(d->d_parent); |
624 | array[i] = ACCESS_ONCE(d->d_name.name); | 624 | array[i] = READ_ONCE(d->d_name.name); |
625 | if (p == d) { | 625 | if (p == d) { |
626 | if (i) | 626 | if (i) |
627 | array[i] = ""; | 627 | array[i] = ""; |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1981ed697dab..b521ed1170f9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -2718,7 +2718,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink, | |||
2718 | struct shrink_control *sc) | 2718 | struct shrink_control *sc) |
2719 | { | 2719 | { |
2720 | struct pglist_data *pgdata = NODE_DATA(sc->nid); | 2720 | struct pglist_data *pgdata = NODE_DATA(sc->nid); |
2721 | return ACCESS_ONCE(pgdata->split_queue_len); | 2721 | return READ_ONCE(pgdata->split_queue_len); |
2722 | } | 2722 | } |
2723 | 2723 | ||
2724 | static unsigned long deferred_split_scan(struct shrinker *shrink, | 2724 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
diff --git a/mm/memory.c b/mm/memory.c index a728bed16c20..cae514e7dcfc 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3891,9 +3891,9 @@ static int handle_pte_fault(struct vm_fault *vmf) | |||
3891 | /* | 3891 | /* |
3892 | * some architectures can have larger ptes than wordsize, | 3892 | * some architectures can have larger ptes than wordsize, |
3893 | * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and | 3893 | * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and |
3894 | * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee | 3894 | * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic |
3895 | * atomic accesses. The code below just needs a consistent | 3895 | * accesses. The code below just needs a consistent view |
3896 | * view for the ifs and we later double check anyway with the | 3896 | * for the ifs and we later double check anyway with the |
3897 | * ptl lock held. So here a barrier will do. | 3897 | * ptl lock held. So here a barrier will do. |
3898 | */ | 3898 | */ |
3899 | barrier(); | 3899 | barrier(); |
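The reworded comment makes a narrow point: no compiler annotation can make a 64-bit PTE read atomic on a 32-bit build, because it is inherently two word-sized loads. READ_ONCE(), like ACCESS_ONCE() before it, only stops the compiler from caching or re-reading the value; the fault path tolerates a torn snapshot and revalidates under the page-table lock. As a standalone illustration, not code from the patch:

#include <linux/types.h>
#include <linux/compiler.h>

static u64 snapshot64(u64 *p)
{
	/* On a 32-bit architecture this still compiles to two 32-bit loads,
	 * so the result may mix halves of two different writes; only the
	 * "read it exactly once, do not refetch" property is guaranteed. */
	return READ_ONCE(*p);
}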
@@ -259,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx) | |||
259 | * memcg_caches issues a write barrier to match this (see | 259 | * memcg_caches issues a write barrier to match this (see |
260 | * memcg_create_kmem_cache()). | 260 | * memcg_create_kmem_cache()). |
261 | */ | 261 | */ |
262 | cachep = lockless_dereference(arr->entries[idx]); | 262 | cachep = READ_ONCE(arr->entries[idx]); |
263 | rcu_read_unlock(); | 263 | rcu_read_unlock(); |
264 | 264 | ||
265 | return cachep; | 265 | return cachep; |
diff --git a/net/core/dev.c b/net/core/dev.c index 11596a302a26..61559ca3980b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3725,7 +3725,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | |||
3725 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | 3725 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
3726 | if (flow_table && flow_id <= flow_table->mask) { | 3726 | if (flow_table && flow_id <= flow_table->mask) { |
3727 | rflow = &flow_table->flows[flow_id]; | 3727 | rflow = &flow_table->flows[flow_id]; |
3728 | cpu = ACCESS_ONCE(rflow->cpu); | 3728 | cpu = READ_ONCE(rflow->cpu); |
3729 | if (rflow->filter == filter_id && cpu < nr_cpu_ids && | 3729 | if (rflow->filter == filter_id && cpu < nr_cpu_ids && |
3730 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - | 3730 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - |
3731 | rflow->last_qtail) < | 3731 | rflow->last_qtail) < |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 912731bed7b7..57557a6a950c 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -334,7 +334,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
334 | /* It is up to the caller to keep npinfo alive. */ | 334 | /* It is up to the caller to keep npinfo alive. */ |
335 | struct netpoll_info *npinfo; | 335 | struct netpoll_info *npinfo; |
336 | 336 | ||
337 | WARN_ON_ONCE(!irqs_disabled()); | 337 | lockdep_assert_irqs_disabled(); |
338 | 338 | ||
339 | npinfo = rcu_dereference_bh(np->dev->npinfo); | 339 | npinfo = rcu_dereference_bh(np->dev->npinfo); |
340 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { | 340 | if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 6e1e10ff433a..3b2034f6d49d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -3377,7 +3377,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) | |||
3377 | 3377 | ||
3378 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) | 3378 | static void pktgen_xmit(struct pktgen_dev *pkt_dev) |
3379 | { | 3379 | { |
3380 | unsigned int burst = ACCESS_ONCE(pkt_dev->burst); | 3380 | unsigned int burst = READ_ONCE(pkt_dev->burst); |
3381 | struct net_device *odev = pkt_dev->odev; | 3381 | struct net_device *odev = pkt_dev->odev; |
3382 | struct netdev_queue *txq; | 3382 | struct netdev_queue *txq; |
3383 | struct sk_buff *skb; | 3383 | struct sk_buff *skb; |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index af74d0433453..f9597ba26599 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -164,7 +164,7 @@ static void inet_frag_worker(struct work_struct *work) | |||
164 | 164 | ||
165 | local_bh_disable(); | 165 | local_bh_disable(); |
166 | 166 | ||
167 | for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) { | 167 | for (i = READ_ONCE(f->next_bucket); budget; --budget) { |
168 | evicted += inet_evict_bucket(f, &f->hash[i]); | 168 | evicted += inet_evict_bucket(f, &f->hash[i]); |
169 | i = (i + 1) & (INETFRAGS_HASHSZ - 1); | 169 | i = (i + 1) & (INETFRAGS_HASHSZ - 1); |
170 | if (evicted > INETFRAGS_EVICT_MAX) | 170 | if (evicted > INETFRAGS_EVICT_MAX) |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 3d9f1c2f81c5..c0864562083b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -495,7 +495,7 @@ u32 ip_idents_reserve(u32 hash, int segs) | |||
495 | { | 495 | { |
496 | u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; | 496 | u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; |
497 | atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; | 497 | atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; |
498 | u32 old = ACCESS_ONCE(*p_tstamp); | 498 | u32 old = READ_ONCE(*p_tstamp); |
499 | u32 now = (u32)jiffies; | 499 | u32 now = (u32)jiffies; |
500 | u32 new, delta = 0; | 500 | u32 new, delta = 0; |
501 | 501 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b6bb3cdfad09..887585045b27 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -816,12 +816,12 @@ static void tcp_update_pacing_rate(struct sock *sk) | |||
816 | if (likely(tp->srtt_us)) | 816 | if (likely(tp->srtt_us)) |
817 | do_div(rate, tp->srtt_us); | 817 | do_div(rate, tp->srtt_us); |
818 | 818 | ||
819 | /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate | 819 | /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate |
820 | * without any lock. We want to make sure compiler wont store | 820 | * without any lock. We want to make sure compiler wont store |
821 | * intermediate values in this location. | 821 | * intermediate values in this location. |
822 | */ | 822 | */ |
823 | ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, | 823 | WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate, |
824 | sk->sk_max_pacing_rate); | 824 | sk->sk_max_pacing_rate)); |
825 | } | 825 | } |
826 | 826 | ||
827 | /* Calculate rto without backoff. This is the second half of Van Jacobson's | 827 | /* Calculate rto without backoff. This is the second half of Van Jacobson's |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 478909f4694d..5a42e873d44a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1910,7 +1910,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, | |||
1910 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) | 1910 | if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) |
1911 | goto send_now; | 1911 | goto send_now; |
1912 | 1912 | ||
1913 | win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); | 1913 | win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor); |
1914 | if (win_divisor) { | 1914 | if (win_divisor) { |
1915 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); | 1915 | u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); |
1916 | 1916 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ebfbccae62fd..02ec9a349303 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1853,7 +1853,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
1853 | */ | 1853 | */ |
1854 | 1854 | ||
1855 | /* if we're overly short, let UDP handle it */ | 1855 | /* if we're overly short, let UDP handle it */ |
1856 | encap_rcv = ACCESS_ONCE(up->encap_rcv); | 1856 | encap_rcv = READ_ONCE(up->encap_rcv); |
1857 | if (encap_rcv) { | 1857 | if (encap_rcv) { |
1858 | int ret; | 1858 | int ret; |
1859 | 1859 | ||
@@ -2298,7 +2298,7 @@ void udp_destroy_sock(struct sock *sk) | |||
2298 | unlock_sock_fast(sk, slow); | 2298 | unlock_sock_fast(sk, slow); |
2299 | if (static_key_false(&udp_encap_needed) && up->encap_type) { | 2299 | if (static_key_false(&udp_encap_needed) && up->encap_type) { |
2300 | void (*encap_destroy)(struct sock *sk); | 2300 | void (*encap_destroy)(struct sock *sk); |
2301 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | 2301 | encap_destroy = READ_ONCE(up->encap_destroy); |
2302 | if (encap_destroy) | 2302 | if (encap_destroy) |
2303 | encap_destroy(sk); | 2303 | encap_destroy(sk); |
2304 | } | 2304 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a1c24443cd9e..dab946554157 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -490,7 +490,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, | |||
490 | if (!t) | 490 | if (!t) |
491 | goto out; | 491 | goto out; |
492 | 492 | ||
493 | tproto = ACCESS_ONCE(t->parms.proto); | 493 | tproto = READ_ONCE(t->parms.proto); |
494 | if (tproto != ipproto && tproto != 0) | 494 | if (tproto != ipproto && tproto != 0) |
495 | goto out; | 495 | goto out; |
496 | 496 | ||
@@ -899,7 +899,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto, | |||
899 | t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); | 899 | t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr); |
900 | 900 | ||
901 | if (t) { | 901 | if (t) { |
902 | u8 tproto = ACCESS_ONCE(t->parms.proto); | 902 | u8 tproto = READ_ONCE(t->parms.proto); |
903 | 903 | ||
904 | if (tproto != ipproto && tproto != 0) | 904 | if (tproto != ipproto && tproto != 0) |
905 | goto drop; | 905 | goto drop; |
@@ -1233,7 +1233,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1233 | 1233 | ||
1234 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1234 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
1235 | 1235 | ||
1236 | tproto = ACCESS_ONCE(t->parms.proto); | 1236 | tproto = READ_ONCE(t->parms.proto); |
1237 | if (tproto != IPPROTO_IPIP && tproto != 0) | 1237 | if (tproto != IPPROTO_IPIP && tproto != 0) |
1238 | return -1; | 1238 | return -1; |
1239 | 1239 | ||
@@ -1303,7 +1303,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1303 | u8 tproto; | 1303 | u8 tproto; |
1304 | int err; | 1304 | int err; |
1305 | 1305 | ||
1306 | tproto = ACCESS_ONCE(t->parms.proto); | 1306 | tproto = READ_ONCE(t->parms.proto); |
1307 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || | 1307 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || |
1308 | ip6_tnl_addr_conflict(t, ipv6h)) | 1308 | ip6_tnl_addr_conflict(t, ipv6h)) |
1309 | return -1; | 1309 | return -1; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 40d7234c27b9..3f30fa313bf2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -606,7 +606,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
606 | */ | 606 | */ |
607 | 607 | ||
608 | /* if we're overly short, let UDP handle it */ | 608 | /* if we're overly short, let UDP handle it */ |
609 | encap_rcv = ACCESS_ONCE(up->encap_rcv); | 609 | encap_rcv = READ_ONCE(up->encap_rcv); |
610 | if (encap_rcv) { | 610 | if (encap_rcv) { |
611 | int ret; | 611 | int ret; |
612 | 612 | ||
@@ -1432,7 +1432,7 @@ void udpv6_destroy_sock(struct sock *sk) | |||
1432 | 1432 | ||
1433 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { | 1433 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { |
1434 | void (*encap_destroy)(struct sock *sk); | 1434 | void (*encap_destroy)(struct sock *sk); |
1435 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | 1435 | encap_destroy = READ_ONCE(up->encap_destroy); |
1436 | if (encap_destroy) | 1436 | if (encap_destroy) |
1437 | encap_destroy(sk); | 1437 | encap_destroy(sk); |
1438 | } | 1438 | } |
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index dd3e83328ad5..82cb93f66b9b 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c | |||
@@ -193,7 +193,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
193 | */ | 193 | */ |
194 | rcv = rcu_dereference(sap->rcv_func); | 194 | rcv = rcu_dereference(sap->rcv_func); |
195 | dest = llc_pdu_type(skb); | 195 | dest = llc_pdu_type(skb); |
196 | sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; | 196 | sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL; |
197 | if (unlikely(!sap_handler)) { | 197 | if (unlikely(!sap_handler)) { |
198 | if (rcv) | 198 | if (rcv) |
199 | rcv(skb, dev, pt, orig_dev); | 199 | rcv(skb, dev, pt, orig_dev); |
@@ -214,7 +214,7 @@ drop: | |||
214 | kfree_skb(skb); | 214 | kfree_skb(skb); |
215 | goto out; | 215 | goto out; |
216 | handle_station: | 216 | handle_station: |
217 | sta_handler = ACCESS_ONCE(llc_station_handler); | 217 | sta_handler = READ_ONCE(llc_station_handler); |
218 | if (!sta_handler) | 218 | if (!sta_handler) |
219 | goto drop; | 219 | goto drop; |
220 | sta_handler(skb); | 220 | sta_handler(skb); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 69615016d5bf..214d2ba02877 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -2008,7 +2008,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate, | |||
2008 | 2008 | ||
2009 | static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) | 2009 | static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo) |
2010 | { | 2010 | { |
2011 | u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate); | 2011 | u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate); |
2012 | 2012 | ||
2013 | if (rate == STA_STATS_RATE_INVALID) | 2013 | if (rate == STA_STATS_RATE_INVALID) |
2014 | return -EINVAL; | 2014 | return -EINVAL; |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 13f740875507..9ee71cb276d7 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -458,7 +458,7 @@ static inline bool in_persistence(struct ip_vs_conn *cp) | |||
458 | static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, | 458 | static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, |
459 | struct ip_vs_conn *cp, int pkts) | 459 | struct ip_vs_conn *cp, int pkts) |
460 | { | 460 | { |
461 | unsigned long orig = ACCESS_ONCE(cp->sync_endtime); | 461 | unsigned long orig = READ_ONCE(cp->sync_endtime); |
462 | unsigned long now = jiffies; | 462 | unsigned long now = jiffies; |
463 | unsigned long n = (now + cp->timeout) & ~3UL; | 463 | unsigned long n = (now + cp->timeout) & ~3UL; |
464 | unsigned int sync_refresh_period; | 464 | unsigned int sync_refresh_period; |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index c9796629858f..a16356cacec3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -401,7 +401,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
401 | 401 | ||
402 | outdev = entry->state.out; | 402 | outdev = entry->state.out; |
403 | 403 | ||
404 | switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { | 404 | switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) { |
405 | case NFQNL_COPY_META: | 405 | case NFQNL_COPY_META: |
406 | case NFQNL_COPY_NONE: | 406 | case NFQNL_COPY_NONE: |
407 | break; | 407 | break; |
@@ -412,7 +412,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
412 | skb_checksum_help(entskb)) | 412 | skb_checksum_help(entskb)) |
413 | return NULL; | 413 | return NULL; |
414 | 414 | ||
415 | data_len = ACCESS_ONCE(queue->copy_range); | 415 | data_len = READ_ONCE(queue->copy_range); |
416 | if (data_len > entskb->len) | 416 | if (data_len > entskb->len) |
417 | data_len = entskb->len; | 417 | data_len = entskb->len; |
418 | 418 | ||
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c index d177dd066504..4d748975117d 100644 --- a/net/netlabel/netlabel_calipso.c +++ b/net/netlabel/netlabel_calipso.c | |||
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(netlbl_calipso_ops_register); | |||
393 | 393 | ||
394 | static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void) | 394 | static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void) |
395 | { | 395 | { |
396 | return ACCESS_ONCE(calipso_ops); | 396 | return READ_ONCE(calipso_ops); |
397 | } | 397 | } |
398 | 398 | ||
399 | /** | 399 | /** |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d396cb61a280..eb866647a27a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -14201,7 +14201,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, | |||
14201 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 14201 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
14202 | struct sk_buff *msg; | 14202 | struct sk_buff *msg; |
14203 | void *hdr; | 14203 | void *hdr; |
14204 | u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid); | 14204 | u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid); |
14205 | 14205 | ||
14206 | if (!nlportid) | 14206 | if (!nlportid) |
14207 | return false; | 14207 | return false; |
diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c index 49db1def1721..f42ce551bb48 100644 --- a/samples/mic/mpssd/mpssd.c +++ b/samples/mic/mpssd/mpssd.c | |||
@@ -65,7 +65,7 @@ static struct mic_info mic_list; | |||
65 | /* to align the pointer to the (next) page boundary */ | 65 | /* to align the pointer to the (next) page boundary */ |
66 | #define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) | 66 | #define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE) |
67 | 67 | ||
68 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | 68 | #define READ_ONCE(x) (*(volatile typeof(x) *)&(x)) |
69 | 69 | ||
70 | #define GSO_ENABLED 1 | 70 | #define GSO_ENABLED 1 |
71 | #define MAX_GSO_SIZE (64 * 1024) | 71 | #define MAX_GSO_SIZE (64 * 1024) |
@@ -382,7 +382,7 @@ disp_iovec(struct mic_info *mic, struct mic_copy_desc *copy, | |||
382 | 382 | ||
383 | static inline __u16 read_avail_idx(struct mic_vring *vr) | 383 | static inline __u16 read_avail_idx(struct mic_vring *vr) |
384 | { | 384 | { |
385 | return ACCESS_ONCE(vr->info->avail_idx); | 385 | return READ_ONCE(vr->info->avail_idx); |
386 | } | 386 | } |
387 | 387 | ||
388 | static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr, | 388 | static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr, |
@@ -523,7 +523,7 @@ spin_for_descriptors(struct mic_info *mic, struct mic_vring *vr) | |||
523 | { | 523 | { |
524 | __u16 avail_idx = read_avail_idx(vr); | 524 | __u16 avail_idx = read_avail_idx(vr); |
525 | 525 | ||
526 | while (avail_idx == le16toh(ACCESS_ONCE(vr->vr.avail->idx))) { | 526 | while (avail_idx == le16toh(READ_ONCE(vr->vr.avail->idx))) { |
527 | #ifdef DEBUG | 527 | #ifdef DEBUG |
528 | mpsslog("%s %s waiting for desc avail %d info_avail %d\n", | 528 | mpsslog("%s %s waiting for desc avail %d info_avail %d\n", |
529 | mic->name, __func__, | 529 | mic->name, __func__, |
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 4d1ea96e8794..a18bca720995 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh | |||
@@ -34,7 +34,7 @@ do | |||
34 | sed -r \ | 34 | sed -r \ |
35 | -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \ | 35 | -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \ |
36 | -e 's/__attribute_const__([ \t]|$)/\1/g' \ | 36 | -e 's/__attribute_const__([ \t]|$)/\1/g' \ |
37 | -e 's@^#include <linux/compiler.h>@@' \ | 37 | -e 's@^#include <linux/compiler(|_types).h>@@' \ |
38 | -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ | 38 | -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \ |
39 | -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \ | 39 | -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \ |
40 | -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \ | 40 | -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \ |
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h index 436b3a722357..f546707a2bbb 100644 --- a/security/apparmor/include/lib.h +++ b/security/apparmor/include/lib.h | |||
@@ -19,17 +19,6 @@ | |||
19 | 19 | ||
20 | #include "match.h" | 20 | #include "match.h" |
21 | 21 | ||
22 | /* Provide our own test for whether a write lock is held for asserts | ||
23 | * this is because on none SMP systems write_can_lock will always | ||
24 | * resolve to true, which is what you want for code making decisions | ||
25 | * based on it, but wrong for asserts checking that the lock is held | ||
26 | */ | ||
27 | #ifdef CONFIG_SMP | ||
28 | #define write_is_locked(X) !write_can_lock(X) | ||
29 | #else | ||
30 | #define write_is_locked(X) (1) | ||
31 | #endif /* CONFIG_SMP */ | ||
32 | |||
33 | /* | 22 | /* |
34 | * DEBUG remains global (no per profile flag) since it is mostly used in sysctl | 23 | * DEBUG remains global (no per profile flag) since it is mostly used in sysctl |
35 | * which is not related to profile accesses. | 24 | * which is not related to profile accesses. |
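The deleted write_is_locked() wrapper existed only because write_can_lock() always evaluates to true on !SMP builds, which makes it useless inside AA_BUG() assertions. The label.c hunks that follow switch those assertions to lockdep_assert_held_exclusive(), which asks lockdep whether the current task holds the lock for writing and therefore behaves the same on UP and SMP. The idiom, reduced to a sketch with invented names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void update_protected_state(rwlock_t *lock, int *state)
{
	/* Checked only when lockdep is enabled; documents that callers must
	 * hold the write side of *lock. */
	lockdep_assert_held_exclusive(lock);
	*state += 1;
}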
diff --git a/security/apparmor/label.c b/security/apparmor/label.c index c5b99b954580..ad28e03a6f30 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c | |||
@@ -80,7 +80,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new) | |||
80 | 80 | ||
81 | AA_BUG(!orig); | 81 | AA_BUG(!orig); |
82 | AA_BUG(!new); | 82 | AA_BUG(!new); |
83 | AA_BUG(!write_is_locked(&labels_set(orig)->lock)); | 83 | lockdep_assert_held_exclusive(&labels_set(orig)->lock); |
84 | 84 | ||
85 | tmp = rcu_dereference_protected(orig->proxy->label, | 85 | tmp = rcu_dereference_protected(orig->proxy->label, |
86 | &labels_ns(orig)->lock); | 86 | &labels_ns(orig)->lock); |
@@ -571,7 +571,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new) | |||
571 | 571 | ||
572 | AA_BUG(!ls); | 572 | AA_BUG(!ls); |
573 | AA_BUG(!label); | 573 | AA_BUG(!label); |
574 | AA_BUG(!write_is_locked(&ls->lock)); | 574 | lockdep_assert_held_exclusive(&ls->lock); |
575 | 575 | ||
576 | if (new) | 576 | if (new) |
577 | __aa_proxy_redirect(label, new); | 577 | __aa_proxy_redirect(label, new); |
@@ -608,7 +608,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new) | |||
608 | AA_BUG(!ls); | 608 | AA_BUG(!ls); |
609 | AA_BUG(!old); | 609 | AA_BUG(!old); |
610 | AA_BUG(!new); | 610 | AA_BUG(!new); |
611 | AA_BUG(!write_is_locked(&ls->lock)); | 611 | lockdep_assert_held_exclusive(&ls->lock); |
612 | AA_BUG(new->flags & FLAG_IN_TREE); | 612 | AA_BUG(new->flags & FLAG_IN_TREE); |
613 | 613 | ||
614 | if (!label_is_stale(old)) | 614 | if (!label_is_stale(old)) |
@@ -645,7 +645,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls, | |||
645 | AA_BUG(!ls); | 645 | AA_BUG(!ls); |
646 | AA_BUG(!label); | 646 | AA_BUG(!label); |
647 | AA_BUG(labels_set(label) != ls); | 647 | AA_BUG(labels_set(label) != ls); |
648 | AA_BUG(!write_is_locked(&ls->lock)); | 648 | lockdep_assert_held_exclusive(&ls->lock); |
649 | AA_BUG(label->flags & FLAG_IN_TREE); | 649 | AA_BUG(label->flags & FLAG_IN_TREE); |
650 | 650 | ||
651 | /* Figure out where to put new node */ | 651 | /* Figure out where to put new node */ |
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c index 23ccddb20de1..4210e5c6262e 100644 --- a/sound/firewire/amdtp-am824.c +++ b/sound/firewire/amdtp-am824.c | |||
@@ -247,7 +247,7 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port, | |||
247 | struct amdtp_am824 *p = s->protocol; | 247 | struct amdtp_am824 *p = s->protocol; |
248 | 248 | ||
249 | if (port < p->midi_ports) | 249 | if (port < p->midi_ports) |
250 | ACCESS_ONCE(p->midi[port]) = midi; | 250 | WRITE_ONCE(p->midi[port], midi); |
251 | } | 251 | } |
252 | EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger); | 252 | EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger); |
253 | 253 | ||
@@ -336,7 +336,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe | |||
336 | unsigned int data_blocks, unsigned int *syt) | 336 | unsigned int data_blocks, unsigned int *syt) |
337 | { | 337 | { |
338 | struct amdtp_am824 *p = s->protocol; | 338 | struct amdtp_am824 *p = s->protocol; |
339 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 339 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
340 | unsigned int pcm_frames; | 340 | unsigned int pcm_frames; |
341 | 341 | ||
342 | if (pcm) { | 342 | if (pcm) { |
@@ -357,7 +357,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe | |||
357 | unsigned int data_blocks, unsigned int *syt) | 357 | unsigned int data_blocks, unsigned int *syt) |
358 | { | 358 | { |
359 | struct amdtp_am824 *p = s->protocol; | 359 | struct amdtp_am824 *p = s->protocol; |
360 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 360 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
361 | unsigned int pcm_frames; | 361 | unsigned int pcm_frames; |
362 | 362 | ||
363 | if (pcm) { | 363 | if (pcm) { |
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 3fc581a5ad62..4a1dc145327b 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c | |||
@@ -376,7 +376,7 @@ static void update_pcm_pointers(struct amdtp_stream *s, | |||
376 | ptr = s->pcm_buffer_pointer + frames; | 376 | ptr = s->pcm_buffer_pointer + frames; |
377 | if (ptr >= pcm->runtime->buffer_size) | 377 | if (ptr >= pcm->runtime->buffer_size) |
378 | ptr -= pcm->runtime->buffer_size; | 378 | ptr -= pcm->runtime->buffer_size; |
379 | ACCESS_ONCE(s->pcm_buffer_pointer) = ptr; | 379 | WRITE_ONCE(s->pcm_buffer_pointer, ptr); |
380 | 380 | ||
381 | s->pcm_period_pointer += frames; | 381 | s->pcm_period_pointer += frames; |
382 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { | 382 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { |
@@ -388,7 +388,7 @@ static void update_pcm_pointers(struct amdtp_stream *s, | |||
388 | static void pcm_period_tasklet(unsigned long data) | 388 | static void pcm_period_tasklet(unsigned long data) |
389 | { | 389 | { |
390 | struct amdtp_stream *s = (void *)data; | 390 | struct amdtp_stream *s = (void *)data; |
391 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 391 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
392 | 392 | ||
393 | if (pcm) | 393 | if (pcm) |
394 | snd_pcm_period_elapsed(pcm); | 394 | snd_pcm_period_elapsed(pcm); |
@@ -453,7 +453,7 @@ static int handle_out_packet(struct amdtp_stream *s, | |||
453 | s->data_block_counter = | 453 | s->data_block_counter = |
454 | (s->data_block_counter + data_blocks) & 0xff; | 454 | (s->data_block_counter + data_blocks) & 0xff; |
455 | 455 | ||
456 | buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) | | 456 | buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | |
457 | (s->data_block_quadlets << CIP_DBS_SHIFT) | | 457 | (s->data_block_quadlets << CIP_DBS_SHIFT) | |
458 | ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | | 458 | ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | |
459 | s->data_block_counter); | 459 | s->data_block_counter); |
@@ -472,7 +472,7 @@ static int handle_out_packet(struct amdtp_stream *s, | |||
472 | if (queue_out_packet(s, payload_length) < 0) | 472 | if (queue_out_packet(s, payload_length) < 0) |
473 | return -EIO; | 473 | return -EIO; |
474 | 474 | ||
475 | pcm = ACCESS_ONCE(s->pcm); | 475 | pcm = READ_ONCE(s->pcm); |
476 | if (pcm && pcm_frames > 0) | 476 | if (pcm && pcm_frames > 0) |
477 | update_pcm_pointers(s, pcm, pcm_frames); | 477 | update_pcm_pointers(s, pcm, pcm_frames); |
478 | 478 | ||
@@ -504,7 +504,7 @@ static int handle_out_packet_without_header(struct amdtp_stream *s, | |||
504 | if (queue_out_packet(s, payload_length) < 0) | 504 | if (queue_out_packet(s, payload_length) < 0) |
505 | return -EIO; | 505 | return -EIO; |
506 | 506 | ||
507 | pcm = ACCESS_ONCE(s->pcm); | 507 | pcm = READ_ONCE(s->pcm); |
508 | if (pcm && pcm_frames > 0) | 508 | if (pcm && pcm_frames > 0) |
509 | update_pcm_pointers(s, pcm, pcm_frames); | 509 | update_pcm_pointers(s, pcm, pcm_frames); |
510 | 510 | ||
@@ -621,7 +621,7 @@ end: | |||
621 | if (queue_in_packet(s) < 0) | 621 | if (queue_in_packet(s) < 0) |
622 | return -EIO; | 622 | return -EIO; |
623 | 623 | ||
624 | pcm = ACCESS_ONCE(s->pcm); | 624 | pcm = READ_ONCE(s->pcm); |
625 | if (pcm && pcm_frames > 0) | 625 | if (pcm && pcm_frames > 0) |
626 | update_pcm_pointers(s, pcm, pcm_frames); | 626 | update_pcm_pointers(s, pcm, pcm_frames); |
627 | 627 | ||
@@ -649,7 +649,7 @@ static int handle_in_packet_without_header(struct amdtp_stream *s, | |||
649 | if (queue_in_packet(s) < 0) | 649 | if (queue_in_packet(s) < 0) |
650 | return -EIO; | 650 | return -EIO; |
651 | 651 | ||
652 | pcm = ACCESS_ONCE(s->pcm); | 652 | pcm = READ_ONCE(s->pcm); |
653 | if (pcm && pcm_frames > 0) | 653 | if (pcm && pcm_frames > 0) |
654 | update_pcm_pointers(s, pcm, pcm_frames); | 654 | update_pcm_pointers(s, pcm, pcm_frames); |
655 | 655 | ||
@@ -947,7 +947,7 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) | |||
947 | if (!in_interrupt() && amdtp_stream_running(s)) | 947 | if (!in_interrupt() && amdtp_stream_running(s)) |
948 | fw_iso_context_flush_completions(s->context); | 948 | fw_iso_context_flush_completions(s->context); |
949 | 949 | ||
950 | return ACCESS_ONCE(s->pcm_buffer_pointer); | 950 | return READ_ONCE(s->pcm_buffer_pointer); |
951 | } | 951 | } |
952 | EXPORT_SYMBOL(amdtp_stream_pcm_pointer); | 952 | EXPORT_SYMBOL(amdtp_stream_pcm_pointer); |
953 | 953 | ||
@@ -977,9 +977,8 @@ EXPORT_SYMBOL(amdtp_stream_pcm_ack); | |||
977 | void amdtp_stream_update(struct amdtp_stream *s) | 977 | void amdtp_stream_update(struct amdtp_stream *s) |
978 | { | 978 | { |
979 | /* Precomputing. */ | 979 | /* Precomputing. */ |
980 | ACCESS_ONCE(s->source_node_id_field) = | 980 | WRITE_ONCE(s->source_node_id_field, |
981 | (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & | 981 | (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); |
982 | CIP_SID_MASK; | ||
983 | } | 982 | } |
984 | EXPORT_SYMBOL(amdtp_stream_update); | 983 | EXPORT_SYMBOL(amdtp_stream_update); |
985 | 984 | ||
@@ -1022,7 +1021,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s) | |||
1022 | { | 1021 | { |
1023 | struct snd_pcm_substream *pcm; | 1022 | struct snd_pcm_substream *pcm; |
1024 | 1023 | ||
1025 | pcm = ACCESS_ONCE(s->pcm); | 1024 | pcm = READ_ONCE(s->pcm); |
1026 | if (pcm) | 1025 | if (pcm) |
1027 | snd_pcm_stop_xrun(pcm); | 1026 | snd_pcm_stop_xrun(pcm); |
1028 | } | 1027 | } |
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h index a608dae83348..e45de3eecfe3 100644 --- a/sound/firewire/amdtp-stream.h +++ b/sound/firewire/amdtp-stream.h | |||
@@ -221,7 +221,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s) | |||
221 | static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s, | 221 | static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s, |
222 | struct snd_pcm_substream *pcm) | 222 | struct snd_pcm_substream *pcm) |
223 | { | 223 | { |
224 | ACCESS_ONCE(s->pcm) = pcm; | 224 | WRITE_ONCE(s->pcm, pcm); |
225 | } | 225 | } |
226 | 226 | ||
227 | static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) | 227 | static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc) |
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c index 1453c34ce99f..4a884a335248 100644 --- a/sound/firewire/digi00x/amdtp-dot.c +++ b/sound/firewire/digi00x/amdtp-dot.c | |||
@@ -327,7 +327,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port, | |||
327 | struct amdtp_dot *p = s->protocol; | 327 | struct amdtp_dot *p = s->protocol; |
328 | 328 | ||
329 | if (port < MAX_MIDI_PORTS) | 329 | if (port < MAX_MIDI_PORTS) |
330 | ACCESS_ONCE(p->midi[port]) = midi; | 330 | WRITE_ONCE(p->midi[port], midi); |
331 | } | 331 | } |
332 | 332 | ||
333 | static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | 333 | static unsigned int process_tx_data_blocks(struct amdtp_stream *s, |
@@ -338,7 +338,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
338 | struct snd_pcm_substream *pcm; | 338 | struct snd_pcm_substream *pcm; |
339 | unsigned int pcm_frames; | 339 | unsigned int pcm_frames; |
340 | 340 | ||
341 | pcm = ACCESS_ONCE(s->pcm); | 341 | pcm = READ_ONCE(s->pcm); |
342 | if (pcm) { | 342 | if (pcm) { |
343 | read_pcm_s32(s, pcm, buffer, data_blocks); | 343 | read_pcm_s32(s, pcm, buffer, data_blocks); |
344 | pcm_frames = data_blocks; | 344 | pcm_frames = data_blocks; |
@@ -359,7 +359,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
359 | struct snd_pcm_substream *pcm; | 359 | struct snd_pcm_substream *pcm; |
360 | unsigned int pcm_frames; | 360 | unsigned int pcm_frames; |
361 | 361 | ||
362 | pcm = ACCESS_ONCE(s->pcm); | 362 | pcm = READ_ONCE(s->pcm); |
363 | if (pcm) { | 363 | if (pcm) { |
364 | write_pcm_s32(s, pcm, buffer, data_blocks); | 364 | write_pcm_s32(s, pcm, buffer, data_blocks); |
365 | pcm_frames = data_blocks; | 365 | pcm_frames = data_blocks; |
diff --git a/sound/firewire/fireface/amdtp-ff.c b/sound/firewire/fireface/amdtp-ff.c index 780da9deb2f0..77c7598b61ab 100644 --- a/sound/firewire/fireface/amdtp-ff.c +++ b/sound/firewire/fireface/amdtp-ff.c | |||
@@ -108,7 +108,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
108 | unsigned int data_blocks, | 108 | unsigned int data_blocks, |
109 | unsigned int *syt) | 109 | unsigned int *syt) |
110 | { | 110 | { |
111 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 111 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
112 | unsigned int pcm_frames; | 112 | unsigned int pcm_frames; |
113 | 113 | ||
114 | if (pcm) { | 114 | if (pcm) { |
@@ -127,7 +127,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
127 | unsigned int data_blocks, | 127 | unsigned int data_blocks, |
128 | unsigned int *syt) | 128 | unsigned int *syt) |
129 | { | 129 | { |
130 | struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm); | 130 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
131 | unsigned int pcm_frames; | 131 | unsigned int pcm_frames; |
132 | 132 | ||
133 | if (pcm) { | 133 | if (pcm) { |
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c index 949ee56b4e0e..6a49611ee462 100644 --- a/sound/firewire/fireface/ff-midi.c +++ b/sound/firewire/fireface/ff-midi.c | |||
@@ -22,7 +22,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream) | |||
22 | ff->running_status[substream->number] = 0; | 22 | ff->running_status[substream->number] = 0; |
23 | ff->rx_midi_error[substream->number] = false; | 23 | ff->rx_midi_error[substream->number] = false; |
24 | 24 | ||
25 | ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream; | 25 | WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream); |
26 | 26 | ||
27 | return 0; | 27 | return 0; |
28 | } | 28 | } |
@@ -38,7 +38,7 @@ static int midi_playback_close(struct snd_rawmidi_substream *substream) | |||
38 | struct snd_ff *ff = substream->rmidi->private_data; | 38 | struct snd_ff *ff = substream->rmidi->private_data; |
39 | 39 | ||
40 | cancel_work_sync(&ff->rx_midi_work[substream->number]); | 40 | cancel_work_sync(&ff->rx_midi_work[substream->number]); |
41 | ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL; | 41 | WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL); |
42 | 42 | ||
43 | return 0; | 43 | return 0; |
44 | } | 44 | } |
@@ -52,10 +52,10 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *substream, | |||
52 | spin_lock_irqsave(&ff->lock, flags); | 52 | spin_lock_irqsave(&ff->lock, flags); |
53 | 53 | ||
54 | if (up) | 54 | if (up) |
55 | ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = | 55 | WRITE_ONCE(ff->tx_midi_substreams[substream->number], |
56 | substream; | 56 | substream); |
57 | else | 57 | else |
58 | ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL; | 58 | WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL); |
59 | 59 | ||
60 | spin_unlock_irqrestore(&ff->lock, flags); | 60 | spin_unlock_irqrestore(&ff->lock, flags); |
61 | } | 61 | } |
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c index dd6c8e839647..332b29f8ed75 100644 --- a/sound/firewire/fireface/ff-transaction.c +++ b/sound/firewire/fireface/ff-transaction.c | |||
@@ -12,7 +12,7 @@ static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port, | |||
12 | int rcode) | 12 | int rcode) |
13 | { | 13 | { |
14 | struct snd_rawmidi_substream *substream = | 14 | struct snd_rawmidi_substream *substream = |
15 | ACCESS_ONCE(ff->rx_midi_substreams[port]); | 15 | READ_ONCE(ff->rx_midi_substreams[port]); |
16 | 16 | ||
17 | if (rcode_is_permanent_error(rcode)) { | 17 | if (rcode_is_permanent_error(rcode)) { |
18 | ff->rx_midi_error[port] = true; | 18 | ff->rx_midi_error[port] = true; |
@@ -60,7 +60,7 @@ static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port, | |||
60 | static void transmit_midi_msg(struct snd_ff *ff, unsigned int port) | 60 | static void transmit_midi_msg(struct snd_ff *ff, unsigned int port) |
61 | { | 61 | { |
62 | struct snd_rawmidi_substream *substream = | 62 | struct snd_rawmidi_substream *substream = |
63 | ACCESS_ONCE(ff->rx_midi_substreams[port]); | 63 | READ_ONCE(ff->rx_midi_substreams[port]); |
64 | u8 *buf = (u8 *)ff->msg_buf[port]; | 64 | u8 *buf = (u8 *)ff->msg_buf[port]; |
65 | int i, len; | 65 | int i, len; |
66 | 66 | ||
@@ -159,7 +159,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request, | |||
159 | */ | 159 | */ |
160 | index = (quad >> 8) & 0xff; | 160 | index = (quad >> 8) & 0xff; |
161 | if (index > 0) { | 161 | if (index > 0) { |
162 | substream = ACCESS_ONCE(ff->tx_midi_substreams[0]); | 162 | substream = READ_ONCE(ff->tx_midi_substreams[0]); |
163 | if (substream != NULL) { | 163 | if (substream != NULL) { |
164 | byte = quad & 0xff; | 164 | byte = quad & 0xff; |
165 | snd_rawmidi_receive(substream, &byte, 1); | 165 | snd_rawmidi_receive(substream, &byte, 1); |
@@ -169,7 +169,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request, | |||
169 | /* Message in second port. */ | 169 | /* Message in second port. */ |
170 | index = (quad >> 24) & 0xff; | 170 | index = (quad >> 24) & 0xff; |
171 | if (index > 0) { | 171 | if (index > 0) { |
172 | substream = ACCESS_ONCE(ff->tx_midi_substreams[1]); | 172 | substream = READ_ONCE(ff->tx_midi_substreams[1]); |
173 | if (substream != NULL) { | 173 | if (substream != NULL) { |
174 | byte = (quad >> 16) & 0xff; | 174 | byte = (quad >> 16) & 0xff; |
175 | snd_rawmidi_receive(substream, &byte, 1); | 175 | snd_rawmidi_receive(substream, &byte, 1); |
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c index 5826aa8362f1..46092fa3ff9b 100644 --- a/sound/firewire/isight.c +++ b/sound/firewire/isight.c | |||
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count) | |||
96 | ptr += count; | 96 | ptr += count; |
97 | if (ptr >= runtime->buffer_size) | 97 | if (ptr >= runtime->buffer_size) |
98 | ptr -= runtime->buffer_size; | 98 | ptr -= runtime->buffer_size; |
99 | ACCESS_ONCE(isight->buffer_pointer) = ptr; | 99 | WRITE_ONCE(isight->buffer_pointer, ptr); |
100 | 100 | ||
101 | isight->period_counter += count; | 101 | isight->period_counter += count; |
102 | if (isight->period_counter >= runtime->period_size) { | 102 | if (isight->period_counter >= runtime->period_size) { |
@@ -111,7 +111,7 @@ static void isight_samples(struct isight *isight, | |||
111 | struct snd_pcm_runtime *runtime; | 111 | struct snd_pcm_runtime *runtime; |
112 | unsigned int count1; | 112 | unsigned int count1; |
113 | 113 | ||
114 | if (!ACCESS_ONCE(isight->pcm_running)) | 114 | if (!READ_ONCE(isight->pcm_running)) |
115 | return; | 115 | return; |
116 | 116 | ||
117 | runtime = isight->pcm->runtime; | 117 | runtime = isight->pcm->runtime; |
@@ -131,7 +131,7 @@ static void isight_samples(struct isight *isight, | |||
131 | 131 | ||
132 | static void isight_pcm_abort(struct isight *isight) | 132 | static void isight_pcm_abort(struct isight *isight) |
133 | { | 133 | { |
134 | if (ACCESS_ONCE(isight->pcm_active)) | 134 | if (READ_ONCE(isight->pcm_active)) |
135 | snd_pcm_stop_xrun(isight->pcm); | 135 | snd_pcm_stop_xrun(isight->pcm); |
136 | } | 136 | } |
137 | 137 | ||
@@ -141,7 +141,7 @@ static void isight_dropped_samples(struct isight *isight, unsigned int total) | |||
141 | u32 dropped; | 141 | u32 dropped; |
142 | unsigned int count1; | 142 | unsigned int count1; |
143 | 143 | ||
144 | if (!ACCESS_ONCE(isight->pcm_running)) | 144 | if (!READ_ONCE(isight->pcm_running)) |
145 | return; | 145 | return; |
146 | 146 | ||
147 | runtime = isight->pcm->runtime; | 147 | runtime = isight->pcm->runtime; |
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream, | |||
293 | if (err < 0) | 293 | if (err < 0) |
294 | return err; | 294 | return err; |
295 | 295 | ||
296 | ACCESS_ONCE(isight->pcm_active) = true; | 296 | WRITE_ONCE(isight->pcm_active, true); |
297 | 297 | ||
298 | return 0; | 298 | return 0; |
299 | } | 299 | } |
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream) | |||
331 | { | 331 | { |
332 | struct isight *isight = substream->private_data; | 332 | struct isight *isight = substream->private_data; |
333 | 333 | ||
334 | ACCESS_ONCE(isight->pcm_active) = false; | 334 | WRITE_ONCE(isight->pcm_active, false); |
335 | 335 | ||
336 | mutex_lock(&isight->mutex); | 336 | mutex_lock(&isight->mutex); |
337 | isight_stop_streaming(isight); | 337 | isight_stop_streaming(isight); |
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd) | |||
424 | 424 | ||
425 | switch (cmd) { | 425 | switch (cmd) { |
426 | case SNDRV_PCM_TRIGGER_START: | 426 | case SNDRV_PCM_TRIGGER_START: |
427 | ACCESS_ONCE(isight->pcm_running) = true; | 427 | WRITE_ONCE(isight->pcm_running, true); |
428 | break; | 428 | break; |
429 | case SNDRV_PCM_TRIGGER_STOP: | 429 | case SNDRV_PCM_TRIGGER_STOP: |
430 | ACCESS_ONCE(isight->pcm_running) = false; | 430 | WRITE_ONCE(isight->pcm_running, false); |
431 | break; | 431 | break; |
432 | default: | 432 | default: |
433 | return -EINVAL; | 433 | return -EINVAL; |
@@ -439,7 +439,7 @@ static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream) | |||
439 | { | 439 | { |
440 | struct isight *isight = substream->private_data; | 440 | struct isight *isight = substream->private_data; |
441 | 441 | ||
442 | return ACCESS_ONCE(isight->buffer_pointer); | 442 | return READ_ONCE(isight->buffer_pointer); |
443 | } | 443 | } |
444 | 444 | ||
445 | static int isight_create_pcm(struct isight *isight) | 445 | static int isight_create_pcm(struct isight *isight) |
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c index 96f0091144bb..f0555a24d90e 100644 --- a/sound/firewire/motu/amdtp-motu.c +++ b/sound/firewire/motu/amdtp-motu.c | |||
@@ -310,7 +310,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
310 | if (p->midi_ports) | 310 | if (p->midi_ports) |
311 | read_midi_messages(s, buffer, data_blocks); | 311 | read_midi_messages(s, buffer, data_blocks); |
312 | 312 | ||
313 | pcm = ACCESS_ONCE(s->pcm); | 313 | pcm = READ_ONCE(s->pcm); |
314 | if (data_blocks > 0 && pcm) | 314 | if (data_blocks > 0 && pcm) |
315 | read_pcm_s32(s, pcm->runtime, buffer, data_blocks); | 315 | read_pcm_s32(s, pcm->runtime, buffer, data_blocks); |
316 | 316 | ||
@@ -374,7 +374,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
374 | if (p->midi_ports) | 374 | if (p->midi_ports) |
375 | write_midi_messages(s, buffer, data_blocks); | 375 | write_midi_messages(s, buffer, data_blocks); |
376 | 376 | ||
377 | pcm = ACCESS_ONCE(s->pcm); | 377 | pcm = READ_ONCE(s->pcm); |
378 | if (pcm) | 378 | if (pcm) |
379 | write_pcm_s32(s, pcm->runtime, buffer, data_blocks); | 379 | write_pcm_s32(s, pcm->runtime, buffer, data_blocks); |
380 | else | 380 | else |
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c index 02d595665898..f33497cdc706 100644 --- a/sound/firewire/oxfw/oxfw-scs1x.c +++ b/sound/firewire/oxfw/oxfw-scs1x.c | |||
@@ -112,7 +112,7 @@ static void handle_hss(struct fw_card *card, struct fw_request *request, | |||
112 | } | 112 | } |
113 | 113 | ||
114 | if (length >= 1) { | 114 | if (length >= 1) { |
115 | stream = ACCESS_ONCE(scs->input); | 115 | stream = READ_ONCE(scs->input); |
116 | if (stream) | 116 | if (stream) |
117 | midi_input_packet(scs, stream, data, length); | 117 | midi_input_packet(scs, stream, data, length); |
118 | } | 118 | } |
@@ -183,7 +183,7 @@ static void scs_output_work(struct work_struct *work) | |||
183 | if (scs->transaction_running) | 183 | if (scs->transaction_running) |
184 | return; | 184 | return; |
185 | 185 | ||
186 | stream = ACCESS_ONCE(scs->output); | 186 | stream = READ_ONCE(scs->output); |
187 | if (!stream || scs->error) { | 187 | if (!stream || scs->error) { |
188 | scs->output_idle = true; | 188 | scs->output_idle = true; |
189 | wake_up(&scs->idle_wait); | 189 | wake_up(&scs->idle_wait); |
@@ -291,9 +291,9 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *stream, int up) | |||
291 | 291 | ||
292 | if (up) { | 292 | if (up) { |
293 | scs->input_escape_count = 0; | 293 | scs->input_escape_count = 0; |
294 | ACCESS_ONCE(scs->input) = stream; | 294 | WRITE_ONCE(scs->input, stream); |
295 | } else { | 295 | } else { |
296 | ACCESS_ONCE(scs->input) = NULL; | 296 | WRITE_ONCE(scs->input, NULL); |
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
@@ -319,10 +319,10 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *stream, int up) | |||
319 | scs->transaction_bytes = 0; | 319 | scs->transaction_bytes = 0; |
320 | scs->error = false; | 320 | scs->error = false; |
321 | 321 | ||
322 | ACCESS_ONCE(scs->output) = stream; | 322 | WRITE_ONCE(scs->output, stream); |
323 | schedule_work(&scs->work); | 323 | schedule_work(&scs->work); |
324 | } else { | 324 | } else { |
325 | ACCESS_ONCE(scs->output) = NULL; | 325 | WRITE_ONCE(scs->output, NULL); |
326 | } | 326 | } |
327 | } | 327 | } |
328 | static void midi_playback_drain(struct snd_rawmidi_substream *stream) | 328 | static void midi_playback_drain(struct snd_rawmidi_substream *stream) |
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c index 6aff1fc1c72d..ab482423c165 100644 --- a/sound/firewire/tascam/amdtp-tascam.c +++ b/sound/firewire/tascam/amdtp-tascam.c | |||
@@ -124,7 +124,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, | |||
124 | { | 124 | { |
125 | struct snd_pcm_substream *pcm; | 125 | struct snd_pcm_substream *pcm; |
126 | 126 | ||
127 | pcm = ACCESS_ONCE(s->pcm); | 127 | pcm = READ_ONCE(s->pcm); |
128 | if (data_blocks > 0 && pcm) | 128 | if (data_blocks > 0 && pcm) |
129 | read_pcm_s32(s, pcm, buffer, data_blocks); | 129 | read_pcm_s32(s, pcm, buffer, data_blocks); |
130 | 130 | ||
@@ -143,7 +143,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, | |||
143 | /* This field is not used. */ | 143 | /* This field is not used. */ |
144 | *syt = 0x0000; | 144 | *syt = 0x0000; |
145 | 145 | ||
146 | pcm = ACCESS_ONCE(s->pcm); | 146 | pcm = READ_ONCE(s->pcm); |
147 | if (pcm) | 147 | if (pcm) |
148 | write_pcm_s32(s, pcm, buffer, data_blocks); | 148 | write_pcm_s32(s, pcm, buffer, data_blocks); |
149 | else | 149 | else |
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c index 8967c52f5032..2ad692dd4b13 100644 --- a/sound/firewire/tascam/tascam-transaction.c +++ b/sound/firewire/tascam/tascam-transaction.c | |||
@@ -148,7 +148,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode, | |||
148 | void *callback_data) | 148 | void *callback_data) |
149 | { | 149 | { |
150 | struct snd_fw_async_midi_port *port = callback_data; | 150 | struct snd_fw_async_midi_port *port = callback_data; |
151 | struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream); | 151 | struct snd_rawmidi_substream *substream = READ_ONCE(port->substream); |
152 | 152 | ||
153 | /* This port is closed. */ | 153 | /* This port is closed. */ |
154 | if (substream == NULL) | 154 | if (substream == NULL) |
@@ -173,7 +173,7 @@ static void midi_port_work(struct work_struct *work) | |||
173 | { | 173 | { |
174 | struct snd_fw_async_midi_port *port = | 174 | struct snd_fw_async_midi_port *port = |
175 | container_of(work, struct snd_fw_async_midi_port, work); | 175 | container_of(work, struct snd_fw_async_midi_port, work); |
176 | struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream); | 176 | struct snd_rawmidi_substream *substream = READ_ONCE(port->substream); |
177 | int generation; | 177 | int generation; |
178 | 178 | ||
179 | /* Under transacting or error state. */ | 179 | /* Under transacting or error state. */ |
@@ -282,7 +282,7 @@ static void handle_midi_tx(struct fw_card *card, struct fw_request *request, | |||
282 | bytes = 3; | 282 | bytes = 3; |
283 | } | 283 | } |
284 | 284 | ||
285 | substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]); | 285 | substream = READ_ONCE(tscm->tx_midi_substreams[port]); |
286 | if (substream != NULL) | 286 | if (substream != NULL) |
287 | snd_rawmidi_receive(substream, b + 1, bytes); | 287 | snd_rawmidi_receive(substream, b + 1, bytes); |
288 | } | 288 | } |
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c index 8382ffa3bcaf..2472144b329e 100644 --- a/sound/soc/xtensa/xtfpga-i2s.c +++ b/sound/soc/xtensa/xtfpga-i2s.c | |||
@@ -165,7 +165,7 @@ static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s) | |||
165 | tx_substream = rcu_dereference(i2s->tx_substream); | 165 | tx_substream = rcu_dereference(i2s->tx_substream); |
166 | tx_active = tx_substream && snd_pcm_running(tx_substream); | 166 | tx_active = tx_substream && snd_pcm_running(tx_substream); |
167 | if (tx_active) { | 167 | if (tx_active) { |
168 | unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr); | 168 | unsigned tx_ptr = READ_ONCE(i2s->tx_ptr); |
169 | unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime, | 169 | unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime, |
170 | tx_ptr); | 170 | tx_ptr); |
171 | 171 | ||
@@ -437,7 +437,7 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
437 | case SNDRV_PCM_TRIGGER_START: | 437 | case SNDRV_PCM_TRIGGER_START: |
438 | case SNDRV_PCM_TRIGGER_RESUME: | 438 | case SNDRV_PCM_TRIGGER_RESUME: |
439 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 439 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
440 | ACCESS_ONCE(i2s->tx_ptr) = 0; | 440 | WRITE_ONCE(i2s->tx_ptr, 0); |
441 | rcu_assign_pointer(i2s->tx_substream, substream); | 441 | rcu_assign_pointer(i2s->tx_substream, substream); |
442 | xtfpga_pcm_refill_fifo(i2s); | 442 | xtfpga_pcm_refill_fifo(i2s); |
443 | break; | 443 | break; |
@@ -459,7 +459,7 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream) | |||
459 | { | 459 | { |
460 | struct snd_pcm_runtime *runtime = substream->runtime; | 460 | struct snd_pcm_runtime *runtime = substream->runtime; |
461 | struct xtfpga_i2s *i2s = runtime->private_data; | 461 | struct xtfpga_i2s *i2s = runtime->private_data; |
462 | snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr); | 462 | snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr); |
463 | 463 | ||
464 | return pos < runtime->buffer_size ? pos : 0; | 464 | return pos < runtime->buffer_size ? pos : 0; |
465 | } | 465 | } |
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c index 7371e5b06035..fc579f330601 100644 --- a/sound/usb/bcd2000/bcd2000.c +++ b/sound/usb/bcd2000/bcd2000.c | |||
@@ -108,7 +108,7 @@ static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k, | |||
108 | unsigned int payload_length, tocopy; | 108 | unsigned int payload_length, tocopy; |
109 | struct snd_rawmidi_substream *midi_receive_substream; | 109 | struct snd_rawmidi_substream *midi_receive_substream; |
110 | 110 | ||
111 | midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream); | 111 | midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream); |
112 | if (!midi_receive_substream) | 112 | if (!midi_receive_substream) |
113 | return; | 113 | return; |
114 | 114 | ||
@@ -139,7 +139,7 @@ static void bcd2000_midi_send(struct bcd2000 *bcd2k) | |||
139 | 139 | ||
140 | BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); | 140 | BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); |
141 | 141 | ||
142 | midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream); | 142 | midi_out_substream = READ_ONCE(bcd2k->midi_out_substream); |
143 | if (!midi_out_substream) | 143 | if (!midi_out_substream) |
144 | return; | 144 | return; |
145 | 145 | ||
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h index 7d8c3261a50d..1f5e26aae9fc 100644 --- a/tools/arch/x86/include/asm/atomic.h +++ b/tools/arch/x86/include/asm/atomic.h | |||
@@ -25,7 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | static inline int atomic_read(const atomic_t *v) | 26 | static inline int atomic_read(const atomic_t *v) |
27 | { | 27 | { |
28 | return ACCESS_ONCE((v)->counter); | 28 | return READ_ONCE((v)->counter); |
29 | } | 29 | } |
30 | 30 | ||
31 | /** | 31 | /** |
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h index 40b231fb95bd..4c1966f7c77a 100644 --- a/tools/include/asm-generic/atomic-gcc.h +++ b/tools/include/asm-generic/atomic-gcc.h | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | static inline int atomic_read(const atomic_t *v) | 23 | static inline int atomic_read(const atomic_t *v) |
24 | { | 24 | { |
25 | return ACCESS_ONCE((v)->counter); | 25 | return READ_ONCE((v)->counter); |
26 | } | 26 | } |
27 | 27 | ||
28 | /** | 28 | /** |
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index 33b5e6cdf38c..d19e11b68de7 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h | |||
@@ -378,7 +378,7 @@ struct addr_filters { | |||
378 | static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) | 378 | static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) |
379 | { | 379 | { |
380 | struct perf_event_mmap_page *pc = mm->userpg; | 380 | struct perf_event_mmap_page *pc = mm->userpg; |
381 | u64 head = ACCESS_ONCE(pc->aux_head); | 381 | u64 head = READ_ONCE(pc->aux_head); |
382 | 382 | ||
383 | /* Ensure all reads are done after we read the head */ | 383 | /* Ensure all reads are done after we read the head */ |
384 | rmb(); | 384 | rmb(); |
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm) | |||
389 | { | 389 | { |
390 | struct perf_event_mmap_page *pc = mm->userpg; | 390 | struct perf_event_mmap_page *pc = mm->userpg; |
391 | #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) | 391 | #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) |
392 | u64 head = ACCESS_ONCE(pc->aux_head); | 392 | u64 head = READ_ONCE(pc->aux_head); |
393 | #else | 393 | #else |
394 | u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); | 394 | u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); |
395 | #endif | 395 | #endif |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 41caa098ed15..3f63ee12471d 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session, | |||
114 | 114 | ||
115 | extern volatile int session_done; | 115 | extern volatile int session_done; |
116 | 116 | ||
117 | #define session_done() ACCESS_ONCE(session_done) | 117 | #define session_done() READ_ONCE(session_done) |
118 | 118 | ||
119 | int perf_session__deliver_synth_event(struct perf_session *session, | 119 | int perf_session__deliver_synth_event(struct perf_session *session, |
120 | union perf_event *event, | 120 | union perf_event *event, |
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h index 18ea223bd398..cdb840bc54f2 100644 --- a/tools/testing/selftests/powerpc/dscr/dscr.h +++ b/tools/testing/selftests/powerpc/dscr/dscr.h | |||
@@ -39,7 +39,7 @@ | |||
39 | #define rmb() asm volatile("lwsync":::"memory") | 39 | #define rmb() asm volatile("lwsync":::"memory") |
40 | #define wmb() asm volatile("lwsync":::"memory") | 40 | #define wmb() asm volatile("lwsync":::"memory") |
41 | 41 | ||
42 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | 42 | #define READ_ONCE(x) (*(volatile typeof(x) *)&(x)) |
43 | 43 | ||
44 | /* Prilvilege state DSCR access */ | 44 | /* Prilvilege state DSCR access */ |
45 | inline unsigned long get_dscr(void) | 45 | inline unsigned long get_dscr(void) |
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c index df17c3bab0a7..9e1a37e93b63 100644 --- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c +++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c | |||
@@ -27,7 +27,7 @@ static void *do_test(void *in) | |||
27 | unsigned long d, cur_dscr, cur_dscr_usr; | 27 | unsigned long d, cur_dscr, cur_dscr_usr; |
28 | unsigned long s1, s2; | 28 | unsigned long s1, s2; |
29 | 29 | ||
30 | s1 = ACCESS_ONCE(sequence); | 30 | s1 = READ_ONCE(sequence); |
31 | if (s1 & 1) | 31 | if (s1 & 1) |
32 | continue; | 32 | continue; |
33 | rmb(); | 33 | rmb(); |
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h index be3fdd351937..3f95a768a03b 100644 --- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h +++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h | |||
@@ -35,8 +35,7 @@ | |||
35 | #define rs_smp_mb() do {} while (0) | 35 | #define rs_smp_mb() do {} while (0) |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x)) | 38 | #define READ_ONCE(x) (*(volatile typeof(x) *) &(x)) |
39 | #define READ_ONCE(x) ACCESS_ONCE(x) | 39 | #define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) |
40 | #define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val)) | ||
41 | 40 | ||
42 | #endif | 41 | #endif |
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h index 90b0133004e1..5706e075adf2 100644 --- a/tools/virtio/ringtest/main.h +++ b/tools/virtio/ringtest/main.h | |||
@@ -110,11 +110,15 @@ static inline void busy_wait(void) | |||
110 | barrier(); | 110 | barrier(); |
111 | } | 111 | } |
112 | 112 | ||
113 | #if defined(__x86_64__) || defined(__i386__) | ||
114 | #define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc") | ||
115 | #else | ||
113 | /* | 116 | /* |
114 | * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized | 117 | * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized |
115 | * with other __ATOMIC_SEQ_CST calls. | 118 | * with other __ATOMIC_SEQ_CST calls. |
116 | */ | 119 | */ |
117 | #define smp_mb() __sync_synchronize() | 120 | #define smp_mb() __sync_synchronize() |
121 | #endif | ||
118 | 122 | ||
119 | /* | 123 | /* |
120 | * This abuses the atomic builtins for thread fences, and | 124 | * This abuses the atomic builtins for thread fences, and |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 9deb5a245b83..ce507ae1d4f5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -2302,7 +2302,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode) | |||
2302 | continue; | 2302 | continue; |
2303 | } else if (pass && i > last_boosted_vcpu) | 2303 | } else if (pass && i > last_boosted_vcpu) |
2304 | break; | 2304 | break; |
2305 | if (!ACCESS_ONCE(vcpu->preempted)) | 2305 | if (!READ_ONCE(vcpu->preempted)) |
2306 | continue; | 2306 | continue; |
2307 | if (vcpu == me) | 2307 | if (vcpu == me) |
2308 | continue; | 2308 | continue; |