about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/arc/include/asm/spinlock.h174
-rw-r--r--arch/arc/include/asm/spinlock_types.h2
2 files changed, 166 insertions, 10 deletions
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 4f6c90a0a68a..9fd5a0221671 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -75,6 +75,164 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
75 smp_mb(); 75 smp_mb();
76} 76}
77 77
78/*
79 * Read-write spinlocks, allowing multiple readers but only one writer.
80 * Unfair locking as Writers could be starved indefinitely by Reader(s)
81 */
82
83static inline void arch_read_lock(arch_rwlock_t *rw)
84{
85 unsigned int val;
86
87 smp_mb();
88
89 /*
90 * zero means writer holds the lock exclusively, deny Reader.
91 * Otherwise grant lock to first/subseq reader
92 *
93 * if (rw->counter > 0) {
94 * rw->counter--;
95 * ret = 1;
96 * }
97 */
98
99 __asm__ __volatile__(
100 "1: llock %[val], [%[rwlock]] \n"
101 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
102 " sub %[val], %[val], 1 \n" /* reader lock */
103 " scond %[val], [%[rwlock]] \n"
104 " bnz 1b \n"
105 " \n"
106 : [val] "=&r" (val)
107 : [rwlock] "r" (&(rw->counter)),
108 [WR_LOCKED] "ir" (0)
109 : "memory", "cc");
110
111 smp_mb();
112}
113
114/* 1 - lock taken successfully */
115static inline int arch_read_trylock(arch_rwlock_t *rw)
116{
117 unsigned int val, got_it = 0;
118
119 smp_mb();
120
121 __asm__ __volatile__(
122 "1: llock %[val], [%[rwlock]] \n"
123 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
124 " sub %[val], %[val], 1 \n" /* counter-- */
125 " scond %[val], [%[rwlock]] \n"
126 " bnz 1b \n" /* retry if collided with someone */
127 " mov %[got_it], 1 \n"
128 " \n"
129 "4: ; --- done --- \n"
130
131 : [val] "=&r" (val),
132 [got_it] "+&r" (got_it)
133 : [rwlock] "r" (&(rw->counter)),
134 [WR_LOCKED] "ir" (0)
135 : "memory", "cc");
136
137 smp_mb();
138
139 return got_it;
140}
141
142static inline void arch_write_lock(arch_rwlock_t *rw)
143{
144 unsigned int val;
145
146 smp_mb();
147
148 /*
149 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
150 * deny writer. Otherwise if unlocked grant to writer
151 * Hence the claim that Linux rwlocks are unfair to writers.
152 * (can be starved for an indefinite time by readers).
153 *
154 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
155 * rw->counter = 0;
156 * ret = 1;
157 * }
158 */
159
160 __asm__ __volatile__(
161 "1: llock %[val], [%[rwlock]] \n"
162 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
163 " mov %[val], %[WR_LOCKED] \n"
164 " scond %[val], [%[rwlock]] \n"
165 " bnz 1b \n"
166 " \n"
167 : [val] "=&r" (val)
168 : [rwlock] "r" (&(rw->counter)),
169 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
170 [WR_LOCKED] "ir" (0)
171 : "memory", "cc");
172
173 smp_mb();
174}
175
176/* 1 - lock taken successfully */
177static inline int arch_write_trylock(arch_rwlock_t *rw)
178{
179 unsigned int val, got_it = 0;
180
181 smp_mb();
182
183 __asm__ __volatile__(
184 "1: llock %[val], [%[rwlock]] \n"
185 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
186 " mov %[val], %[WR_LOCKED] \n"
187 " scond %[val], [%[rwlock]] \n"
188 " bnz 1b \n" /* retry if collided with someone */
189 " mov %[got_it], 1 \n"
190 " \n"
191 "4: ; --- done --- \n"
192
193 : [val] "=&r" (val),
194 [got_it] "+&r" (got_it)
195 : [rwlock] "r" (&(rw->counter)),
196 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
197 [WR_LOCKED] "ir" (0)
198 : "memory", "cc");
199
200 smp_mb();
201
202 return got_it;
203}
204
205static inline void arch_read_unlock(arch_rwlock_t *rw)
206{
207 unsigned int val;
208
209 smp_mb();
210
211 /*
212 * rw->counter++;
213 */
214 __asm__ __volatile__(
215 "1: llock %[val], [%[rwlock]] \n"
216 " add %[val], %[val], 1 \n"
217 " scond %[val], [%[rwlock]] \n"
218 " bnz 1b \n"
219 " \n"
220 : [val] "=&r" (val)
221 : [rwlock] "r" (&(rw->counter))
222 : "memory", "cc");
223
224 smp_mb();
225}
226
227static inline void arch_write_unlock(arch_rwlock_t *rw)
228{
229 smp_mb();
230
231 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
232
233 smp_mb();
234}
235
78#else /* !CONFIG_ARC_HAS_LLSC */ 236#else /* !CONFIG_ARC_HAS_LLSC */
79 237
80static inline void arch_spin_lock(arch_spinlock_t *lock) 238static inline void arch_spin_lock(arch_spinlock_t *lock)
@@ -148,23 +306,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
148 smp_mb(); 306 smp_mb();
149} 307}
150 308
151#endif
152
153/* 309/*
154 * Read-write spinlocks, allowing multiple readers but only one writer. 310 * Read-write spinlocks, allowing multiple readers but only one writer.
311 * Unfair locking as Writers could be starved indefinitely by Reader(s)
155 * 312 *
156 * The spinlock itself is contained in @counter and access to it is 313 * The spinlock itself is contained in @counter and access to it is
157 * serialized with @lock_mutex. 314 * serialized with @lock_mutex.
158 *
159 * Unfair locking as Writers could be starved indefinitely by Reader(s)
160 */ 315 */
161 316
162/* Would read_trylock() succeed? */
163#define arch_read_can_lock(x) ((x)->counter > 0)
164
165/* Would write_trylock() succeed? */
166#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
167
168/* 1 - lock taken successfully */ 317/* 1 - lock taken successfully */
169static inline int arch_read_trylock(arch_rwlock_t *rw) 318static inline int arch_read_trylock(arch_rwlock_t *rw)
170{ 319{
@@ -235,6 +384,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
235 arch_spin_unlock(&(rw->lock_mutex)); 384 arch_spin_unlock(&(rw->lock_mutex));
236} 385}
237 386
387#endif
388
389#define arch_read_can_lock(x) ((x)->counter > 0)
390#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
391
238#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 392#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
239#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 393#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
240 394
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
29#ifndef CONFIG_ARC_HAS_LLSC
29 arch_spinlock_t lock_mutex; 30 arch_spinlock_t lock_mutex;
31#endif
30} arch_rwlock_t; 32} arch_rwlock_t;
31 33
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 34#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000