Diffstat (limited to 'include/asm-ppc/spinlock.h')
-rw-r--r--   include/asm-ppc/spinlock.h   215
1 file changed, 215 insertions, 0 deletions
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
new file mode 100644
index 000000000000..909199aae104
--- /dev/null
+++ b/include/asm-ppc/spinlock.h
@@ -0,0 +1,215 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/*
 * Simple spin lock operations.
 */

typedef struct {
        volatile unsigned long lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        volatile unsigned long owner_pc;
        volatile unsigned long owner_cpu;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#ifdef __KERNEL__
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_DEBUG_INIT     , 0, 0
#else
#define SPINLOCK_DEBUG_INIT     /* */
#endif

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 SPINLOCK_DEBUG_INIT }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

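For orientation, a minimal usage sketch (not part of this header; demo_lock, demo_count and demo_bump are made-up names): callers normally go through the generic spin_lock()/spin_unlock() wrappers from <linux/spinlock.h>, which on SMP builds end up in the _raw_ primitives defined below.

/* Illustrative only: hypothetical caller of the API declared above. */
static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_bump(void)
{
        spin_lock(&demo_lock);          /* on SMP this reaches _raw_spin_lock() */
        demo_count++;
        spin_unlock(&demo_lock);
}
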
#ifndef CONFIG_DEBUG_SPINLOCK

static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
        "b      1f              # spin_lock\n\
2:      lwzx    %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne+    2b\n\
1:      lwarx   %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne-    2b\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %2,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&lock->lock), "r"(1)
        : "cr0", "memory");
}
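
In rough terms, the sequence above spins on cheap lwzx loads while the lock word is non-zero, then uses the lwarx/stwcx. reservation pair to store 1 atomically, retrying if another CPU's store intervened; the trailing isync keeps the critical section from starting before the lock is really held. A user-space C11 sketch of the same shape (names hypothetical, illustration only; the kernel must use the assembler above):

#include <stdatomic.h>

/* Sketch of the acquire loop: try to swap 0 -> 1; on contention, spin on
 * relaxed loads (the lwzx loop) until the word looks free, then retry. */
static void sketch_spin_lock(atomic_ulong *word)
{
        unsigned long expected = 0;

        while (!atomic_compare_exchange_weak_explicit(word, &expected, 1UL,
                        memory_order_acquire, memory_order_relaxed)) {
                while (atomic_load_explicit(word, memory_order_relaxed) != 0)
                        ;               /* busy-wait until the lock looks free */
                expected = 0;
        }
}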

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("eieio             # spin_unlock": : :"memory");
        lock->lock = 0;
}

#define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
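
The unlock side only needs the eieio to order the critical section's accesses before the store that clears the word, and trylock is a single test-and-set of bit 0. A rough C11 analogue (illustrative names, not kernel code):

#include <stdatomic.h>

/* Sketch of the release and trylock paths above. */
static void sketch_spin_unlock(atomic_ulong *word)
{
        /* eieio followed by a plain store of 0 behaves like a release store */
        atomic_store_explicit(word, 0, memory_order_release);
}

static int sketch_spin_trylock(atomic_ulong *word)
{
        /* succeed only if the word was 0 (unlocked) before the swap */
        return atomic_exchange_explicit(word, 1UL, memory_order_acquire) == 0;
}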

#else

extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern int _raw_spin_trylock(spinlock_t *lock);

#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
        volatile signed int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

#define read_can_lock(rw)       ((rw)->lock >= 0)
#define write_can_lock(rw)      (!(rw)->lock)
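
The lock word encodes the rwlock state: 0 is free, a positive value is the number of active readers, and -1 marks a writer. As a concrete example of the mixing described in the NOTE above (hypothetical driver code; it uses the standard read_lock()/write_lock_irqsave() wrappers that sit on top of these primitives):

/* Hypothetical illustration of the irq-mixing pattern: the writer runs in
 * process context and blocks local interrupts, while readers (possibly in
 * interrupt handlers on other CPUs) take the plain read lock. */
static rwlock_t demo_rwlock = RW_LOCK_UNLOCKED;
static int demo_state;

static void demo_writer(int new_state)          /* process context */
{
        unsigned long flags;

        write_lock_irqsave(&demo_rwlock, flags);
        demo_state = new_state;
        write_unlock_irqrestore(&demo_rwlock, flags);
}

static int demo_reader(void)                    /* may run from an interrupt */
{
        int val;

        read_lock(&demo_rwlock);
        val = demo_state;
        read_unlock(&demo_rwlock);
        return val;
}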

#ifndef CONFIG_DEBUG_SPINLOCK

static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
        signed int tmp;

        __asm__ __volatile__(
"2:     lwarx   %0,0,%1         # read_trylock\n\
        addic.  %0,%0,1\n\
        ble-    1f\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    2b\n\
        isync\n\
1:"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return tmp > 0;
}
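
The read path atomically adds 1 to the lock word and backs off if the result is not positive, which can only happen when a writer (value -1) holds the lock. In C11 terms, roughly (hypothetical names, illustration only):

#include <stdatomic.h>

/* Sketch of read_trylock: bump the reader count only if no writer
 * (negative value) holds the word; report whether we got in. */
static int sketch_read_trylock(_Atomic int *word)
{
        int old = atomic_load_explicit(word, memory_order_relaxed);

        do {
                if (old < 0)
                        return 0;       /* write-locked: give up */
        } while (!atomic_compare_exchange_weak_explicit(word, &old, old + 1,
                        memory_order_acquire, memory_order_relaxed));
        return 1;
}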

static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        signed int tmp;

        __asm__ __volatile__(
        "b      2f              # read_lock\n\
1:      lwzx    %0,0,%1\n\
        cmpwi   0,%0,0\n\
        blt+    1b\n\
2:      lwarx   %0,0,%1\n\
        addic.  %0,%0,1\n\
        ble-    1b\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        signed int tmp;

        __asm__ __volatile__(
        "eieio                  # read_unlock\n\
1:      lwarx   %0,0,%1\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}
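
Unlike spin_unlock, releasing a read lock cannot be a plain store, since other readers may be updating the count at the same time; it has to be an atomic decrement, ordered after the critical section by the eieio. Roughly, in C11 (illustration only):

#include <stdatomic.h>

/* Sketch of read_unlock: atomically drop this reader from the count,
 * with release ordering standing in for the eieio barrier. */
static void sketch_read_unlock(_Atomic int *word)
{
        atomic_fetch_sub_explicit(word, 1, memory_order_release);
}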

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
        signed int tmp;

        __asm__ __volatile__(
"2:     lwarx   %0,0,%1         # write_trylock\n\
        cmpwi   0,%0,0\n\
        bne-    1f\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %2,0,%1\n\
        bne-    2b\n\
        isync\n\
1:"
        : "=&r"(tmp)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");

        return tmp == 0;
}
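
A writer only gets in when the word is exactly 0 (no readers and no writer) and claims it by storing -1. A rough C11 analogue (hypothetical names, illustration only):

#include <stdatomic.h>

/* Sketch of write_trylock: claim the lock by swapping 0 -> -1; fail if
 * any reader (positive count) or another writer (-1) is present. */
static int sketch_write_trylock(_Atomic int *word)
{
        int expected = 0;

        return atomic_compare_exchange_strong_explicit(word, &expected, -1,
                        memory_order_acquire, memory_order_relaxed);
}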

static __inline__ void _raw_write_lock(rwlock_t *rw)
{
        signed int tmp;

        __asm__ __volatile__(
        "b      2f              # write_lock\n\
1:      lwzx    %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne+    1b\n\
2:      lwarx   %0,0,%1\n\
        cmpwi   0,%0,0\n\
        bne-    1b\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %2,0,%1\n\
        bne-    2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");
}

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__("eieio             # write_unlock": : :"memory");
        rw->lock = 0;
}

#else

extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);
extern void _raw_write_unlock(rwlock_t *rw);
extern int _raw_read_trylock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);

#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */