Diffstat (limited to 'include/asm-ppc/spinlock.h')
-rw-r--r--   include/asm-ppc/spinlock.h   91
1 files changed, 20 insertions, 71 deletions
diff --git a/include/asm-ppc/spinlock.h b/include/asm-ppc/spinlock.h
index 909199aae104..20edcf2a6e0c 100644
--- a/include/asm-ppc/spinlock.h
+++ b/include/asm-ppc/spinlock.h
@@ -5,41 +5,21 @@
 
 /*
  * Simple spin lock operations.
+ *
+ * (the type definitions are in asm/raw_spinlock_types.h)
  */
 
-typedef struct {
-        volatile unsigned long lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
-        volatile unsigned long owner_pc;
-        volatile unsigned long owner_cpu;
-#endif
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#ifdef __KERNEL__
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_DEBUG_INIT     , 0, 0
-#else
-#define SPINLOCK_DEBUG_INIT     /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 SPINLOCK_DEBUG_INIT }
-
-#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x)       ((x)->lock != 0)
-#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+#define __raw_spin_is_locked(x)         ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
         unsigned long tmp;
 
         __asm__ __volatile__(
-        "b      1f              # spin_lock\n\
+        "b      1f              # __raw_spin_lock\n\
 2:      lwzx    %0,0,%1\n\
         cmpwi   0,%0,0\n\
         bne+    2b\n\
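For orientation: the acquire path above spins reading the lock word (lwzx) until it is zero and only then attempts the atomic claim; the lwarx/stwcx. sequence that performs the claim sits in the lines elided between this hunk and the next. Below is a rough, illustrative C sketch of that shape using GCC's __atomic builtins; the demo_* names are hypothetical and are not part of the patch or the kernel.

/* Illustrative only: a portable approximation of the acquire loop above. */
typedef struct { volatile unsigned long lock; } demo_spinlock_t;   /* hypothetical stand-in */

static inline void demo_spin_lock(demo_spinlock_t *l)
{
        /* Try to claim the word; on failure, spin with plain loads
         * (the lwzx loop) until it looks free, then retry. */
        while (__atomic_exchange_n(&l->lock, 1UL, __ATOMIC_ACQUIRE) != 0)
                while (l->lock != 0)
                        ;
}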
@@ -55,21 +35,13 @@ static inline void _raw_spin_lock(spinlock_t *lock)
         : "cr0", "memory");
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-        __asm__ __volatile__("eieio             # spin_unlock": : :"memory");
+        __asm__ __volatile__("eieio             # __raw_spin_unlock": : :"memory");
         lock->lock = 0;
 }
 
-#define _raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
-
-#else
-
-extern void _raw_spin_lock(spinlock_t *lock);
-extern void _raw_spin_unlock(spinlock_t *lock);
-extern int _raw_spin_trylock(spinlock_t *lock);
-
-#endif
+#define __raw_spin_trylock(l) (!test_and_set_bit(0,&(l)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers
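The unlock path issues eieio so that every store made inside the critical section is ordered before the store that clears the lock word, and __raw_spin_trylock succeeds only if bit 0 was previously clear. A hedged C sketch of the same pair, continuing the hypothetical demo_* naming used above:

static inline int demo_spin_trylock(demo_spinlock_t *l)
{
        /* Succeed only if the old value was 0, i.e. the lock was free. */
        return __atomic_exchange_n(&l->lock, 1UL, __ATOMIC_ACQUIRE) == 0;
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
        /* Release ordering stands in for the eieio barrier. */
        __atomic_store_n(&l->lock, 0UL, __ATOMIC_RELEASE);
}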
@@ -81,22 +53,11 @@ extern int _raw_spin_trylock(spinlock_t *lock);
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-        volatile signed int lock;
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} rwlock_t;
 
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
+#define __raw_read_can_lock(rw)         ((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)        (!(rw)->lock)
 
-#define read_can_lock(rw)       ((rw)->lock >= 0)
-#define write_can_lock(rw)      (!(rw)->lock)
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-static __inline__ int _raw_read_trylock(rwlock_t *rw)
+static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 {
         signed int tmp;
 
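The rwlock word is a signed counter: positive values count active readers, zero means unlocked, and a negative value marks a writer, which is why __raw_read_can_lock tests for >= 0 and __raw_write_can_lock tests for zero. A rough, illustrative C rendering of the reader trylock (the demo_rwlock_t type is hypothetical, not from the patch):

typedef struct { volatile signed int lock; } demo_rwlock_t;   /* hypothetical stand-in */

static inline int demo_read_trylock(demo_rwlock_t *rw)
{
        signed int old, newval;

        do {
                old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
                if (old < 0)            /* a writer holds the lock */
                        return 0;
                newval = old + 1;       /* one more reader */
        } while (!__atomic_compare_exchange_n(&rw->lock, &old, newval, 0,
                                              __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
        return 1;                       /* success: counter is now > 0 */
}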
@@ -116,7 +77,7 @@ static __inline__ int _raw_read_trylock(rwlock_t *rw)
         return tmp > 0;
 }
 
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
         signed int tmp;
 
@@ -137,7 +98,7 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
         : "cr0", "memory");
 }
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
         signed int tmp;
 
@@ -153,7 +114,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
         : "cr0", "memory");
 }
 
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
         signed int tmp;
 
@@ -173,7 +134,7 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
         return tmp == 0;
 }
 
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
         signed int tmp;
 
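__raw_write_trylock, whose tail is visible above as "return tmp == 0", succeeds only when the counter was exactly zero, meaning there were neither readers nor a writer; the unchanged (and therefore elided) assembly bodies then install a negative value to mark the writer. An illustrative C equivalent under that assumption:

static inline int demo_write_trylock(demo_rwlock_t *rw)
{
        signed int expected = 0;

        /* Swing the counter from 0 to -1; using -1 as the writer marker is
         * an assumption made for this sketch, not something shown in the hunk. */
        return __atomic_compare_exchange_n(&rw->lock, &expected, -1, 0,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}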
@@ -194,22 +155,10 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
         : "cr0", "memory");
 }
 
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
         __asm__ __volatile__("eieio             # write_unlock": : :"memory");
         rw->lock = 0;
 }
 
-#else
-
-extern void _raw_read_lock(rwlock_t *rw);
-extern void _raw_read_unlock(rwlock_t *rw);
-extern void _raw_write_lock(rwlock_t *rw);
-extern void _raw_write_unlock(rwlock_t *rw);
-extern int _raw_read_trylock(rwlock_t *rw);
-extern int _raw_write_trylock(rwlock_t *rw);
-
-#endif
-
 #endif /* __ASM_SPINLOCK_H */
-#endif /* __KERNEL__ */
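With this patch the lock structures are no longer defined in this header: the comment added at the top points to asm/raw_spinlock_types.h, and the CONFIG_DEBUG_SPINLOCK branch of extern declarations is gone because lock debugging is layered on top of the __raw_* primitives in generic code. Purely as a sketch of what the externalized types might look like, inferred from the typedefs removed above (the real header may differ):

/* Hypothetical reconstruction, for illustration only. */
typedef struct {
        volatile unsigned long lock;
} raw_spinlock_t;

typedef struct {
        volatile signed int lock;
} raw_rwlock_t;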