Diffstat (limited to 'include/asm-sparc/spinlock_32.h')
-rw-r--r--  include/asm-sparc/spinlock_32.h | 192
1 files changed, 192 insertions, 0 deletions
diff --git a/include/asm-sparc/spinlock_32.h b/include/asm-sparc/spinlock_32.h
new file mode 100644
index 000000000000..de2249b267c6
--- /dev/null
+++ b/include/asm-sparc/spinlock_32.h
@@ -0,0 +1,192 @@
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

#include <asm/psr.h>

#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub [%0], %%g2\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2f\n\t"
	" ldub [%0], %%g2\n\t"
	".subsection 2\n"
	"2:\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2b\n\t"
	" ldub [%0], %%g2\n\t"
	"b,a 1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
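
/*
 * Illustrative sketch, not part of the original file: roughly what the
 * ldstub loop above does, expressed in plain C.  ldstub atomically reads
 * the lock byte and writes 0xff into it, so a zero result means the lock
 * was free and is now ours; otherwise we spin on ordinary loads until the
 * byte reads zero again, then retry the atomic grab.  The helper name
 * below is hypothetical, standing in for the ldstub instruction itself:
 *
 *	volatile unsigned char *p = (volatile unsigned char *)lock;
 *	while (ldstub_byte(p) != 0)	// atomic: returns old value, stores 0xff
 *		while (*p != 0)
 *			cpu_relax();
 */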

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}
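
/*
 * Annotation, not part of the original file: ldstub returns the previous
 * value of the lock byte, so result == 0 means the byte was clear and the
 * single atomic above has already marked the lock as taken; a non-zero
 * result means someone else holds it, and re-writing 0xff over 0xff
 * changes nothing, so the failed trylock needs no cleanup.
 */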

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
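
/*
 * Usage sketch, illustrative only: kernel code never calls these __raw_*
 * primitives directly; they sit underneath the generic spinlock API,
 * roughly (the lock itself is hypothetical):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */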

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  raw_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. If a writer successfully acquires the wlock
 * but the counter is non-zero, it has to release the lock and wait
 * until both the counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
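
/*
 * Illustrative model, not part of the original file, of the protocol the
 * comment above describes.  The inline fast paths below only do the first
 * "ldstub [lock + 3]" (grab the wlock byte) and then call out-of-line
 * helpers (___rw_read_enter and friends) for the rest; the helper names
 * are real, the C below is only a sketch of the idea:
 *
 *	read_lock:    grab the wlock byte; bump the 24-bit counter
 *	              (bits 31..8); release the wlock byte.
 *	read_unlock:  grab the wlock byte; drop the counter; release it.
 *	write_lock:   grab the wlock byte; if the counter is non-zero,
 *	              release the byte and retry until both are zero.
 *
 * Calling convention of those helpers: the lock pointer is pinned in %g1,
 * and the caller's return address is stashed in %g4 because the call
 * instruction clobbers %o7; the ldstub sits in the call's delay slot, so
 * the first test-and-set of the wlock byte has already happened by the
 * time the helper starts spinning.
 */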
static inline void __read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
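
/*
 * Usage sketch, illustrative only, of the mixing the NOTE above allows:
 * readers take plain read locks while every writer uses the irq-safe
 * form (the generic rwlock API names below are standard, the lock itself
 * is hypothetical):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	reader:	read_lock(&my_rwlock);
 *		... look, don't touch ...
 *		read_unlock(&my_rwlock);
 *
 *	writer:	write_lock_irqsave(&my_rwlock, flags);
 *		... modify ...
 *		write_unlock_irqrestore(&my_rwlock, flags);
 */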

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

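/*
 * Annotation, not part of the original file, on the trylock below: ldstub
 * grabs the wlock byte (offset 3, the low byte of the big-endian word).
 * If that succeeds, "rw->lock & ~0xff" isolates the 24-bit reader count:
 * with readers still present the wlock byte is released again and, since
 * val is now non-zero, the function reports failure; with no readers the
 * whole word is set to ~0 to mark the write lock held, and it reports
 * success.
 */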
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

static inline int __read_trylock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_try\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define __raw_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

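/*
 * Annotation, not part of the original file: a lock-holding writer owns
 * the whole word (it was set to ~0 above), so write-unlock can simply
 * store 0, clearing the counter field and the wlock byte in one go.
 */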
#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

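/*
 * Annotation, not part of the original file: per the layout comment above,
 * the low byte is the wlock byte and the upper 24 bits count readers, so
 * "can read" only needs the wlock byte to be clear, while "can write"
 * needs the entire word (writer byte and reader count) to be zero.
 */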
#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */