author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-06-29 18:53:02 -0400
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2017-08-17 11:08:59 -0400
commit    | 952111d7db02573e7165e338de8d4871fa447b21 (patch)
tree      | 9b66fe680204959a22ef8cf72c97180de2a6643b /arch/tile
parent    | d3a024abbc438277851c510b51ec9b158821488b (diff)
arch: Remove spin_unlock_wait() arch-specific definitions
There is no agreed-upon definition of spin_unlock_wait()'s semantics,
and it appears that all callers could do just as well with a lock/unlock
pair. This commit therefore removes the underlying arch-specific
arch_spin_unlock_wait() definitions from all architectures providing them.
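
For illustration, a minimal sketch of the conversion the message
describes (the lock and function names below are hypothetical, not
part of this commit): code that previously called spin_unlock_wait()
to wait for the current holder to release the lock can instead briefly
acquire and release the lock, which provides at least as strong
ordering guarantees.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical, for illustration */

	static void example_wait_for_unlock(void)
	{
		/*
		 * Previously: spin_unlock_wait(&example_lock);
		 *
		 * A lock/unlock pair also waits for any current holder to
		 * release the lock, and additionally orders this CPU's
		 * later accesses after that holder's critical section.
		 */
		spin_lock(&example_lock);
		spin_unlock(&example_lock);
	}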
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
Diffstat (limited to 'arch/tile')
-rw-r--r-- | arch/tile/include/asm/spinlock_32.h |  2
-rw-r--r-- | arch/tile/include/asm/spinlock_64.h |  2
-rw-r--r-- | arch/tile/lib/spinlock_32.c         | 23
-rw-r--r-- | arch/tile/lib/spinlock_64.c         | 22
4 files changed, 0 insertions, 49 deletions
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index b14b1ba5bf9c..cba8ba9b8da6 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -64,8 +64,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	lock->current_ticket = old_ticket + TICKET_QUANTUM;
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index b9718fb4e74a..9a2c2d605752 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -58,8 +58,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
 
 /* Grab the "next" ticket number and bump it atomically.
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 076c6cc43113..db9333f2447c 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	int curr = READ_ONCE(lock->current_ticket);
-	int next = READ_ONCE(lock->next_ticket);
-
-	/* Return immediately if unlocked. */
-	if (next == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (READ_ONCE(lock->current_ticket) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
-
 /*
  * The low byte is always reserved to be the marker for a "tns" operation
  * since the low bit is set to "1" by a tns. The next seven bits are
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index a4b5b2cbce93..de414c22892f 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	u32 val = READ_ONCE(lock->lock);
-	u32 curr = arch_spin_current(val);
-
-	/* Return immediately if unlocked. */
-	if (arch_spin_next(val) == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
 
 /*
  * If the read lock fails due to a writer, we retry periodically