author    Chris Metcalf <cmetcalf@ezchip.com>  2015-04-28 13:00:42 -0400
committer Chris Metcalf <cmetcalf@ezchip.com>  2015-04-28 22:43:16 -0400
commit    627ae54854edfbf29d5997015c190de22eef497f (patch)
tree      efa812a770bc06ce188f77262b5916e171ceaca1
parent    14c3dec2a875d898262be79c0f85e5f2b70a71b0 (diff)
tile: use READ_ONCE() in arch_spin_is_locked()
This avoids potential issues if callers were to loop on these
routines without some kind of memory barrier.  Currently there
are no such users in-tree, but it seems better safe than sorry.

Also, in the tilepro case we read "current" before "next", which
gives us a slightly better guarantee that the lock was actually
unlocked at least momentarily if we return claiming that it is
not locked.  None of the callers actually rely on this behavior,
as far as I know, however.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
-rw-r--r--  arch/tile/include/asm/spinlock_32.h  6
-rw-r--r--  arch/tile/include/asm/spinlock_64.h  5
2 files changed, 9 insertions, 2 deletions
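For context, a hedged sketch of the kind of caller this change guards against. No such
caller exists in-tree (as the commit message notes); wait_until_unlocked() below is
hypothetical and simply assumes the tilepro definitions from the patched headers.
Without READ_ONCE() inside arch_spin_is_locked(), the compiler would be free to load
next_ticket and current_ticket once and spin forever on the cached values.

#include <asm/processor.h>	/* cpu_relax() */
#include <asm/spinlock.h>	/* arch_spinlock_t, arch_spin_is_locked() */

/* Hypothetical caller polling the lock; not part of this patch. */
static void wait_until_unlocked(arch_spinlock_t *lock)
{
	/*
	 * READ_ONCE() in arch_spin_is_locked() forces a fresh load of the
	 * ticket fields on every iteration, so this loop can actually
	 * observe an unlock instead of spinning on stale values.
	 */
	while (arch_spin_is_locked(lock))
		cpu_relax();
}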
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index c0a77b38d39a..b14b1ba5bf9c 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -41,8 +41,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 	 * to claim the lock is held, since it will be momentarily
 	 * if not already.  There's no need to wait for a "valid"
 	 * lock->next_ticket to become available.
+	 * Use READ_ONCE() to ensure that calling this in a loop is OK.
 	 */
-	return lock->next_ticket != lock->current_ticket;
+	int curr = READ_ONCE(lock->current_ticket);
+	int next = READ_ONCE(lock->next_ticket);
+
+	return next != curr;
 }
 
 void arch_spin_lock(arch_spinlock_t *lock);
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a12b9c7e5d3..b9718fb4e74a 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -18,6 +18,8 @@
 #ifndef _ASM_TILE_SPINLOCK_64_H
 #define _ASM_TILE_SPINLOCK_64_H
 
+#include <linux/compiler.h>
+
 /* Shifts and masks for the various fields in "lock". */
 #define __ARCH_SPIN_CURRENT_SHIFT	17
 #define __ARCH_SPIN_NEXT_MASK		0x7fff
@@ -44,7 +46,8 @@ static inline u32 arch_spin_next(u32 val)
 /* The lock is locked if a task would have to wait to get it. */
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	u32 val = lock->lock;
+	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
+	u32 val = READ_ONCE(lock->lock);
 	return arch_spin_current(val) != arch_spin_next(val);
 }
 
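For readers unfamiliar with READ_ONCE() (pulled in via the new <linux/compiler.h>
include above): for simple scalar fields it behaves roughly like a read through a
volatile pointer, which keeps the compiler from caching the value or re-reading it,
and keeps the access a single load for word-sized fields. A minimal standalone
sketch of that idea, not the kernel's actual definition:

/* Rough illustration only; the real READ_ONCE() lives in <linux/compiler.h>. */
#define READ_ONCE_SKETCH(x)  (*(const volatile typeof(x) *)&(x))

With something like this, u32 val = READ_ONCE_SKETCH(lock->lock); is guaranteed to
perform a load of lock->lock at that point in the code, which is the property the
diff above relies on when callers loop on these routines.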