author		Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:19:08 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-14 08:19:08 -0400
commit		905ec87e93bc9e01b15c60035cd6a50c636cbaef (patch)
tree		46fd7618d6511611ffc19eb0dd4d7bc6b90a41c2 /include/asm-sparc64
parent		1d6ae775d7a948c9575658eb41184fd2e506c0df (diff)
parent		2f4ba45a75d6383b4a1201169a808ffea416ffa0 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--	include/asm-sparc64/pci.h	2
-rw-r--r--	include/asm-sparc64/spinlock.h	160
-rw-r--r--	include/asm-sparc64/spinlock_types.h	20
-rw-r--r--	include/asm-sparc64/system.h	49
4 files changed, 86 insertions(+), 145 deletions(-)
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index a4ab0ec7143a..89bd71b1c0d8 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -269,6 +269,8 @@ extern void
 pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
 			struct pci_bus_region *region);
 
+extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
+
 static inline void pcibios_add_platform_entries(struct pci_dev *dev)
 {
 }
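The hunk above only adds the declaration; the definition arrives elsewhere in the merge. As a rough illustration of the typical pattern for this hook (not the sparc64 code from this merge), pcibios_select_root() picks the root resource tree that a device resource should be clipped against:

#include <linux/pci.h>
#include <linux/ioport.h>

/* Illustrative sketch only: choose the root resource tree for a
 * device BAR, so callers can clamp allocation requests against the
 * right space (I/O ports vs. memory). */
struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
{
	struct resource *root = NULL;

	if (r->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (r->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}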
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index a02c4370eb42..ec85d12d73b9 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -29,24 +29,13 @@
  * must be pre-V9 branches.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
+#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
 
-typedef struct {
-	volatile unsigned char lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}
+#define __raw_spin_unlock_wait(lp)	\
+	do {	rmb();			\
+	} while((lp)->lock)
 
-#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(lp)  ((lp)->lock != 0)
-
-#define spin_unlock_wait(lp)	\
-do {	rmb();			\
-} while((lp)->lock)
-
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +56,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
67 : "memory"); 56 : "memory");
68} 57}
69 58
70static inline int _raw_spin_trylock(spinlock_t *lock) 59static inline int __raw_spin_trylock(raw_spinlock_t *lock)
71{ 60{
72 unsigned long result; 61 unsigned long result;
73 62
@@ -81,7 +70,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	membar		#StoreStore | #LoadStore\n"
@@ -91,7 +80,7 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
91 : "memory"); 80 : "memory");
92} 81}
93 82
94static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) 83static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
95{ 84{
96 unsigned long tmp1, tmp2; 85 unsigned long tmp1, tmp2;
97 86
@@ -115,51 +104,9 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
115 : "memory"); 104 : "memory");
116} 105}
117 106
118#else /* !(CONFIG_DEBUG_SPINLOCK) */
119
120typedef struct {
121 volatile unsigned char lock;
122 unsigned int owner_pc, owner_cpu;
123#ifdef CONFIG_PREEMPT
124 unsigned int break_lock;
125#endif
126} spinlock_t;
127#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
128#define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
129#define spin_is_locked(__lock) ((__lock)->lock != 0)
130#define spin_unlock_wait(__lock) \
131do { \
132 rmb(); \
133} while((__lock)->lock)
134
135extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
136extern void _do_spin_unlock(spinlock_t *lock);
137extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
138
139#define _raw_spin_trylock(lp) \
140 _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
141#define _raw_spin_lock(lock) \
142 _do_spin_lock(lock, "spin_lock", \
143 (unsigned long) __builtin_return_address(0))
144#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
145#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
146
147#endif /* CONFIG_DEBUG_SPINLOCK */
148
149/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 107/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
150 108
151#ifndef CONFIG_DEBUG_SPINLOCK 109static void inline __read_lock(raw_rwlock_t *lock)
152
153typedef struct {
154 volatile unsigned int lock;
155#ifdef CONFIG_PREEMPT
156 unsigned int break_lock;
157#endif
158} rwlock_t;
159#define RW_LOCK_UNLOCKED (rwlock_t) {0,}
160#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
161
162static void inline __read_lock(rwlock_t *lock)
163{ 110{
164 unsigned long tmp1, tmp2; 111 unsigned long tmp1, tmp2;
165 112
@@ -184,7 +131,7 @@ static void inline __read_lock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __read_unlock(rwlock_t *lock)
+static void inline __read_unlock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -201,7 +148,7 @@ static void inline __read_unlock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_lock(rwlock_t *lock)
+static void inline __write_lock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -228,7 +175,7 @@ static void inline __write_lock(rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_unlock(rwlock_t *lock)
+static void inline __write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	membar		#LoadStore | #StoreStore\n"
@@ -238,7 +185,7 @@ static void inline __write_unlock(rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __write_trylock(rwlock_t *lock)
+static int inline __write_trylock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -263,78 +210,15 @@ static int inline __write_trylock(rwlock_t *lock)
 	return result;
 }
 
-#define _raw_read_lock(p)	__read_lock(p)
-#define _raw_read_unlock(p)	__read_unlock(p)
-#define _raw_write_lock(p)	__write_lock(p)
-#define _raw_write_unlock(p)	__write_unlock(p)
-#define _raw_write_trylock(p)	__write_trylock(p)
+#define __raw_read_lock(p)	__read_lock(p)
+#define __raw_read_unlock(p)	__read_unlock(p)
+#define __raw_write_lock(p)	__write_lock(p)
+#define __raw_write_unlock(p)	__write_unlock(p)
+#define __raw_write_trylock(p)	__write_trylock(p)
 
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-	volatile unsigned long lock;
-	unsigned int writer_pc, writer_cpu;
-	unsigned int reader_pc[NR_CPUS];
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
-extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
-extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
-
-#define _raw_read_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_lock(lock, "read_lock", \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_unlock(lock, "read_unlock", \
-		      (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_lock(lock, "write_lock", \
-		       (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_unlock(lock, \
-			 (unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_trylock(lock) \
-({	unsigned long flags; \
-	int val; \
-	local_irq_save(flags); \
-	val = _do_write_trylock(lock, "write_trylock", \
-				(unsigned long) __builtin_return_address(0)); \
-	local_irq_restore(flags); \
-	val; \
-})
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
-#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
-#define write_can_lock(rw)	(!(rw)->lock)
+#define __raw_read_trylock(lock)	generic__raw_read_trylock(lock)
+#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
+#define __raw_write_can_lock(rw)	(!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
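The __raw_read_can_lock()/__raw_write_can_lock() tests above rely on the rwlock state being packed into a single word: readers count up from zero in the low bits, and a writer sets bit 31. A minimal userspace model of just those two predicates (hypothetical names, not kernel code):

#include <stdio.h>

#define WRITER_BIT 0x80000000UL

static unsigned long rw_word;	/* stands in for raw_rwlock_t.lock */

/* A reader may enter whenever no writer holds bit 31. */
static int read_can_lock(void)  { return !(rw_word & WRITER_BIT); }
/* A writer may enter only when the whole word is zero. */
static int write_can_lock(void) { return !rw_word; }

int main(void)
{
	rw_word = 3;		/* three readers hold the lock */
	printf("readers=3: read_can_lock=%d write_can_lock=%d\n",
	       read_can_lock(), write_can_lock());

	rw_word = WRITER_BIT;	/* a writer holds the lock */
	printf("writer:    read_can_lock=%d write_can_lock=%d\n",
	       read_can_lock(), write_can_lock());
	return 0;
}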
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
new file mode 100644
index 000000000000..e128112a0d7c
--- /dev/null
+++ b/include/asm-sparc64/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __SPARC64_SPINLOCK_TYPES_H
+#define __SPARC64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned char lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+
+#endif
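This new header is the point of the whole spinlock.h cleanup above: the architecture now supplies only the raw lock word, and the generic <linux/spinlock_types.h> wraps it, so the per-arch #ifdef CONFIG_DEBUG_SPINLOCK type variants deleted above become unnecessary. A simplified sketch of that layering (field set abbreviated, not the exact generic definition):

/* Arch part: exactly the raw type defined in the new header above. */
typedef struct {
	volatile unsigned char lock;
} raw_spinlock_t;

/* Generic wrapper, roughly what <linux/spinlock_types.h> builds on
 * top of the raw type: debug bookkeeping lives here once, for every
 * architecture, instead of in each asm-*/spinlock.h. */
typedef struct {
	raw_spinlock_t raw_lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
} spinlock_t;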
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 5e94c05dc2fc..b5417529f6f1 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -28,13 +28,48 @@ enum sparc_cpu {
 #define ARCH_SUN4C_SUN4 0
 #define ARCH_SUN4 0
 
-extern void mb(void);
-extern void rmb(void);
-extern void wmb(void);
-extern void membar_storeload(void);
-extern void membar_storeload_storestore(void);
-extern void membar_storeload_loadload(void);
-extern void membar_storestore_loadstore(void);
+/* These are here in an effort to more fully work around Spitfire Errata
+ * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *	call	sym_ccb_from_dsa, 0
+ *	 movge	%icc, 0, %l0
+ *	brz,pn	%o0, .LL1303
+ *	 mov	%o0, %l2
+ *	membar	#LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.  Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
+			     " membar	" type "\n" \
+			     "1:\n" \
+			     : : : "memory"); \
+} while (0)
+
+#define mb()	\
+	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+#define rmb()	\
+	membar_safe("#LoadLoad")
+#define wmb()	\
+	membar_safe("#StoreStore")
+#define membar_storeload() \
+	membar_safe("#StoreLoad")
+#define membar_storeload_storestore() \
+	membar_safe("#StoreLoad | #StoreStore")
+#define membar_storeload_loadload() \
+	membar_safe("#StoreLoad | #LoadLoad")
+#define membar_storestore_loadstore() \
+	membar_safe("#StoreStore | #LoadStore")
 
 #endif
 
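To see the workaround in context, here is a sketch of a classic flag/data consumer built on the rmb() defined above; the membar lands in the delay slot of a "branch always, predicted taken", which is exactly how membar_safe() sidesteps the mispredicted-branch hang described in the Errata #51 comment. This is sparc64-only and illustrative; the function name wait_and_read is hypothetical, while the macro body is copied from the hunk above.

#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* Consumer side of a producer/consumer pair: the #LoadLoad membar
 * orders the read of *flag before the read of *data, and because it
 * sits in an always-taken delay slot it cannot follow a mispredicted
 * branch. */
void wait_and_read(volatile int *flag, volatile int *data, int *out)
{
	while (!*flag)
		;			/* spin until the producer publishes */
	membar_safe("#LoadLoad");	/* this is what rmb() expands to */
	*out = *data;
}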