path: root/include/asm-parisc/spinlock.h
Diffstat (limited to 'include/asm-parisc/spinlock.h')
-rw-r--r--  include/asm-parisc/spinlock.h | 163
1 file changed, 28 insertions(+), 135 deletions(-)
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 679ea1c651ef..43eaa6e742e0 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -2,30 +2,25 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,
  * the semaphore address has to be 16-byte aligned.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x)
 	mb();
 }
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
 	mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
 	return ret;
 }
-
-#define spin_lock_own(LOCK, LOCATION)	((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC	0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)							\
-	do {								\
-	 	if (unlikely((x)->magic != SPINLOCK_MAGIC)) {		\
-			printk(KERN_ERR "%s:%d: spin_is_locked"		\
-			" on uninitialized spinlock %p.\n",		\
-				__FILE__, __LINE__, (x));		\
-		}							\
-	} while(0)
-
-#define spin_is_locked(x)						\
-	({								\
-	CHECK_LOCK(x);							\
-	volatile unsigned int *a = __ldcw_align(x);			\
-	if (unlikely((*a == 0) && (x)->babble)) {			\
-		(x)->babble--;						\
-		printk("KERN_WARNING					\
-			%s:%d: spin_is_locked(%s/%p) already"		\
-			" locked by %s:%d in %s at %p(%d)\n",		\
-			__FILE__,__LINE__, (x)->module, (x),		\
-			(x)->bfile, (x)->bline, (x)->task->comm,	\
-			(x)->previous, (x)->oncpu);			\
-	}								\
-	*a == 0;							\
-	})
-
-#define spin_unlock_wait(x)						\
-	do {								\
-	CHECK_LOCK(x);							\
-	volatile unsigned int *a = __ldcw_align(x);			\
-	if (unlikely((*a == 0) && (x)->babble)) {			\
-		(x)->babble--;						\
-		printk("KERN_WARNING					\
-			%s:%d: spin_unlock_wait(%s/%p)"			\
-			" owned by %s:%d in %s at %p(%d)\n",		\
-			__FILE__,__LINE__, (x)->module, (x),		\
-			(x)->bfile, (x)->bline, (x)->task->comm,	\
-			(x)->previous, (x)->oncpu);			\
-	}								\
-	barrier();							\
-	} while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)					\
-do {									\
-	volatile unsigned int *a = __ldcw_align(LOCK);			\
-	if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))	\
-		printk("KERN_WARNING					\
-			%s: called on %d from %p but lock %s on %d\n",	\
-			LOCATION, smp_processor_id(),			\
-			__builtin_return_address(0),			\
-			(*a == 0) ? "taken" : "freed", (LOCK)->on_cpu);	\
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
 
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  */
-typedef struct {
-	spinlock_t lock;
-	volatile int counter;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 /* read_lock, read_unlock are pretty straightforward.  Of course it somehow
  * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
  * writers) in interrupt handlers someone fucked up and we'd dead-lock
  * sooner or later anyway.   prumpf */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 retry:
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	if(rw->counter != 0) {
 		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
+		__raw_spin_unlock(&rw->lock);
 
-		while(rw->counter != 0);
+		while (rw->counter != 0)
+			cpu_relax();
 
 		goto retry;
 	}
@@ -215,26 +114,21 @@ retry:
 	/* got it.  now leave without unlocking */
 	rw->counter = -1; /* remember we are locked */
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
 
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	rw->counter = 0;
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 	if (rw->counter != 0) {
 		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
+		__raw_spin_unlock(&rw->lock);
 
 		return 0;
 	}
@@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
 	rw->counter = -1; /* remember we are locked */
 	return 1;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
 {
 	return rw->counter > 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
 {
 	return rw->counter < 0;
 }
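
For readers unfamiliar with the load-and-zero (`ldcw') idiom that the header comment refers to, the sketch below is not part of the patch: it models the same "1 means unlocked, 0 means locked" discipline in portable C11 atomics. The toy_* names are hypothetical and atomic_exchange stands in for the real ldcw instruction, so treat it as an illustration of the technique rather than the kernel's implementation.

/* Illustrative sketch only (not from the patch above): emulates the
 * PA-RISC load-and-zero locking discipline with C11 atomics.
 * 1 == unlocked, 0 == locked, because the only atomic primitive
 * available reads the old word and leaves 0 behind. */
#include <stdatomic.h>

typedef struct { _Atomic unsigned int word; } toy_ldcw_lock_t;	/* hypothetical type */

#define TOY_LDCW_LOCK_INIT { 1 }	/* starts unlocked, like __SPIN_LOCK_UNLOCKED */

static void toy_lock(toy_ldcw_lock_t *l)
{
	/* "ldcw": atomically fetch the word and leave 0 (locked) behind.
	 * A non-zero result means we just acquired a free lock. */
	while (atomic_exchange_explicit(&l->word, 0, memory_order_acquire) == 0) {
		/* Lock already held: spin with plain loads until it looks
		 * free again, then retry the atomic exchange. */
		while (atomic_load_explicit(&l->word, memory_order_relaxed) == 0)
			;
	}
}

static void toy_unlock(toy_ldcw_lock_t *l)
{
	/* Unlocking is just storing 1 back with release ordering, mirroring
	 * the barrier-then-store sequence in __raw_spin_unlock(). */
	atomic_store_explicit(&l->word, 1, memory_order_release);
}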