author		Davidlohr Bueso <dave@stgolabs.net>	2015-02-22 22:31:41 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-02-24 02:44:16 -0500
commit		4d3199e4ca8e6670b54dc5ee070ffd54385988e9 (patch)
tree		5529bcb16c3217c02416e0d17d7c28f277c63581
parent		2ae79026818e7d49fead82b79b1a543e3b9c8a23 (diff)
locking: Remove ACCESS_ONCE() usage
With the new standardized functions, we can replace all ACCESS_ONCE() calls
across the relevant locking code - this includes lockref and seqlock while
we are at it.

ACCESS_ONCE() does not work reliably on non-scalar types. For example,
gcc 4.6 and 4.7 might remove the volatile tag for such accesses during the
SRA (scalar replacement of aggregates) step:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Use the new calls regardless of whether the accessed type is scalar; this
is cleaner than having three alternatives.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
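To make the conversion pattern concrete, here is a minimal, stand-alone C sketch
(not part of this patch). The ACCESS_ONCE()/READ_ONCE()/WRITE_ONCE() definitions
below are deliberately simplified stand-ins for illustration only; the kernel's
own READ_ONCE()/WRITE_ONCE() in <linux/compiler.h> go further and dispatch on the
access size, so non-scalar types are copied without depending on a
volatile-qualified aggregate access surviving gcc's SRA pass.

#include <stdio.h>

/* Simplified, illustrative stand-ins for the kernel macros. */
#define ACCESS_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct node {
	struct node *next;
	int locked;
};

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct node *next;

	/* Old style: one macro used as both an rvalue and an lvalue. */
	ACCESS_ONCE(a.next) = &b;
	next = ACCESS_ONCE(a.next);

	/* New style: separate read and write primitives. */
	WRITE_ONCE(a.next, &b);
	next = READ_ONCE(a.next);

	printf("a.next points to b: %d\n", next == &b);
	return 0;
}

The visible difference at the call sites is that the write side now passes the new
value as a macro argument instead of assigning through a volatile-cast lvalue,
which is what lets the real implementation handle aggregates such as the lockref
union safely.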
-rw-r--r--	include/linux/seqlock.h	6
-rw-r--r--	kernel/locking/mcs_spinlock.h	6
-rw-r--r--	kernel/locking/mutex.c	8
-rw-r--r--	kernel/locking/osq_lock.c	14
-rw-r--r--	kernel/locking/rwsem-xadd.c	10
-rw-r--r--	lib/lockref.c	2
6 files changed, 23 insertions, 23 deletions
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f5df8f687b4d..5f68d0a391ce 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -108,7 +108,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 	unsigned ret;
 
 repeat:
-	ret = ACCESS_ONCE(s->sequence);
+	ret = READ_ONCE(s->sequence);
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
@@ -127,7 +127,7 @@ repeat:
  */
 static inline unsigned raw_read_seqcount(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret;
 }
@@ -179,7 +179,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = ACCESS_ONCE(s->sequence);
+	unsigned ret = READ_ONCE(s->sequence);
 	smp_rmb();
 	return ret & ~1;
 }
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index d1fe2ba5bac9..75e114bdf3f2 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		 */
 		return;
 	}
-	ACCESS_ONCE(prev->next) = node;
+	WRITE_ONCE(prev->next, node);
 
 	/* Wait until the lock holder passes the lock down. */
 	arch_mcs_spin_lock_contended(&node->locked);
@@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
-	struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+	struct mcs_spinlock *next = READ_ONCE(node->next);
 
 	if (likely(!next)) {
 		/*
@@ -100,7 +100,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		if (likely(cmpxchg(lock, node, NULL) == node))
 			return;
 		/* Wait until the next pointer is set */
-		while (!(next = ACCESS_ONCE(node->next)))
+		while (!(next = READ_ONCE(node->next)))
 			cpu_relax_lowlatency();
 	}
 
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 43bf25ef3c81..16b2d3cc88b0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -266,7 +266,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 		return 0;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(lock->owner);
+	owner = READ_ONCE(lock->owner);
 	if (owner)
 		retval = owner->on_cpu;
 	rcu_read_unlock();
@@ -340,7 +340,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			 * As such, when deadlock detection needs to be
 			 * performed the optimistic spinning cannot be done.
 			 */
-			if (ACCESS_ONCE(ww->ctx))
+			if (READ_ONCE(ww->ctx))
 				break;
 		}
 
@@ -348,7 +348,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		owner = ACCESS_ONCE(lock->owner);
+		owner = READ_ONCE(lock->owner);
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
@@ -487,7 +487,7 @@ static inline int __sched
 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 
 	if (!hold_ctx)
 		return 0;
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index c112d00341b0..dc85ee23a26f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 	prev = decode_cpu(old);
 	node->prev = prev;
-	ACCESS_ONCE(prev->next) = node;
+	WRITE_ONCE(prev->next, node);
 
 	/*
 	 * Normally @prev is untouchable after the above store; because at that
@@ -109,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * cmpxchg in an attempt to undo our queueing.
 	 */
 
-	while (!ACCESS_ONCE(node->locked)) {
+	while (!READ_ONCE(node->locked)) {
 		/*
 		 * If we need to reschedule bail... so we can block.
 		 */
@@ -148,7 +148,7 @@ unqueue:
 		 * Or we race against a concurrent unqueue()'s step-B, in which
 		 * case its step-C will write us a new @node->prev pointer.
 		 */
-		prev = ACCESS_ONCE(node->prev);
+		prev = READ_ONCE(node->prev);
 	}
 
 	/*
@@ -170,8 +170,8 @@ unqueue:
 	 * it will wait in Step-A.
 	 */
 
-	ACCESS_ONCE(next->prev) = prev;
-	ACCESS_ONCE(prev->next) = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 
 	return false;
 }
@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 	node = this_cpu_ptr(&osq_node);
 	next = xchg(&node->next, NULL);
 	if (next) {
-		ACCESS_ONCE(next->locked) = 1;
+		WRITE_ONCE(next->locked, 1);
 		return;
 	}
 
 	next = osq_wait_next(lock, node, NULL);
 	if (next)
-		ACCESS_ONCE(next->locked) = 1;
+		WRITE_ONCE(next->locked, 1);
 }
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index e4ad019e23f5..06e2214edf98 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -279,7 +279,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = ACCESS_ONCE(sem->count);
+	long old, count = READ_ONCE(sem->count);
 
 	while (true) {
 		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
@@ -304,9 +304,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 
 	rcu_read_lock();
-	owner = ACCESS_ONCE(sem->owner);
+	owner = READ_ONCE(sem->owner);
 	if (!owner) {
-		long count = ACCESS_ONCE(sem->count);
+		long count = READ_ONCE(sem->count);
 		/*
 		 * If sem->owner is not set, yet we have just recently entered the
 		 * slowpath with the lock being active, then there is a possibility
@@ -385,7 +385,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		goto done;
 
 	while (true) {
-		owner = ACCESS_ONCE(sem->owner);
+		owner = READ_ONCE(sem->owner);
 		if (owner && !rwsem_spin_on_owner(sem, owner))
 			break;
 
@@ -459,7 +459,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
-		count = ACCESS_ONCE(sem->count);
+		count = READ_ONCE(sem->count);
 
 		/*
 		 * If there were already threads queued before us and there are
diff --git a/lib/lockref.c b/lib/lockref.c
index ecb9a665ec19..494994bf17c8 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -18,7 +18,7 @@
 #define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
 	struct lockref old;							\
 	BUILD_BUG_ON(sizeof(old) != 8);						\
-	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
+	old.lock_count = READ_ONCE(lockref->lock_count);			\
 	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
 		struct lockref new = old, prev = old;				\
 		CODE								\