author    Bob Breuer <breuerr@mc.net>            2006-03-24 01:36:19 -0500
committer David S. Miller <davem@davemloft.net>  2006-03-24 01:36:19 -0500
commit    a54123e27779049d27d21e6c8adfee73aa2c0734 (patch)
tree      265849e706e4ebe3b75127ebe6e3cbfe2a78850a  /include/asm-sparc/spinlock.h
parent    674a396c6d2ba0341ebdd7c1c9950f32f018e2dd (diff)
[SPARC]: Try to start getting SMP back into shape.
Todo items:
 - IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
 - sun4d
 - re-indent large chunks of sun4m_smp.c
 - some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)

Last I checked (with 2.6.14), random programs segfault with dual HyperSPARC. And with SuperSPARC II's, it seems stable but will eventually die from a write lock error (wrong lock owner or something). I haven't tried the HyperSPARC + highmem combination recently, so that may still be a problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc/spinlock.h')
 include/asm-sparc/spinlock.h | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index e344c98a6f5f..3350c90c7869 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -94,7 +94,7 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_lock(lock); \
+	__read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -114,11 +114,11 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_unlock(lock); \
+	__read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -131,9 +131,28 @@ extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	unsigned int val;
+
+	__asm__ __volatile__("ldstub [%1 + 3], %0"
+			     : "=r" (val)
+			     : "r" (&rw->lock)
+			     : "memory");
+
+	if (val == 0) {
+		val = rw->lock & ~0xff;
+		if (val)
+			((volatile u8*)&rw->lock)[3] = 0;
+	}
+
+	return (val == 0);
+}
+
 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* !(__ASSEMBLY__) */
 
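The new __raw_write_trylock() leans on the sparc32 rwlock layout implied by the patch: ldstub atomically sets the low-order byte of ->lock (byte [3] on big-endian SPARC) to 0xff to claim write ownership, and a non-zero value in the upper 24 bits means readers still hold the lock, in which case the byte is dropped again and the trylock fails. Below is a minimal, hypothetical C sketch of that logic, not kernel code: it substitutes GCC/Clang __atomic builtins for ldstub and uses made-up demo_* names.

#include <stdint.h>

struct demo_rwlock {
	volatile uint32_t lock;	/* upper 24 bits: reader count, low byte: writer byte */
};

/* Stand-in for the sparc ldstub instruction: atomically set one byte to
 * 0xff and return its previous value (assumes GCC/Clang __atomic builtins). */
static inline uint8_t demo_ldstub(volatile uint8_t *p)
{
	return __atomic_exchange_n(p, 0xff, __ATOMIC_ACQUIRE);
}

/* Byte that aliases the low-order 8 bits of the lock word; on sparc32
 * (big-endian) this is byte [3], matching "ldstub [%1 + 3]" in the patch. */
static inline volatile uint8_t *demo_writer_byte(volatile uint32_t *word)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (volatile uint8_t *)word + 3;
#else
	return (volatile uint8_t *)word;
#endif
}

/* Returns 1 on success, 0 if the lock is held by a writer or by readers. */
static int demo_write_trylock(struct demo_rwlock *rw)
{
	volatile uint8_t *wbyte = demo_writer_byte(&rw->lock);

	if (demo_ldstub(wbyte) != 0)
		return 0;		/* another writer already owns the byte */

	if (rw->lock & ~0xffu) {	/* readers counted in the upper 24 bits */
		*wbyte = 0;		/* back out and report failure */
		return 0;
	}
	return 1;			/* byte claimed and no readers: success */
}

The back-off store mirrors the patch: if the reader count in the upper bits is non-zero after the exchange, the writer byte is released immediately so readers are not held up while the trylock reports failure, which is the behaviour a trylock caller expects.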