Diffstat (limited to 'arch/sparc64/lib')
 arch/sparc64/lib/Makefile     |  2 +-
 arch/sparc64/lib/debuglocks.c | 56 ++++++++++++++----------------
 arch/sparc64/lib/mb.S         | 73 ++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+), 35 deletions(-)
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..6201f1040982 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
 	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
 	 copy_in_user.o user_fixup.o memmove.o \
-	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
 
 lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
index f03344cf784e..f5f0b5586f01 100644
--- a/arch/sparc64/lib/debuglocks.c
+++ b/arch/sparc64/lib/debuglocks.c
@@ -12,8 +12,6 @@
 
 #ifdef CONFIG_SMP
 
-#define GET_CALLER(PC)	__asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
-
 static inline void show (char *str, spinlock_t *lock, unsigned long caller)
 {
 	int cpu = smp_processor_id();
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
 #undef INIT_STUCK
 #define INIT_STUCK 100000000
 
-void _do_spin_lock(spinlock_t *lock, char *str)
+void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int stuck = INIT_STUCK;
 	int cpu = get_cpu();
 	int shown = 0;
 
-	GET_CALLER(caller);
 again:
 	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (val)
			     : "r" (&(lock->lock))
			     : "memory");
-	membar("#StoreLoad | #StoreStore");
+	membar_storeload_storestore();
 	if (val) {
 		while (lock->lock) {
 			if (!--stuck) {
@@ -72,7 +69,7 @@ again:
 				show(str, lock, caller);
 				stuck = INIT_STUCK;
 			}
-			membar("#LoadLoad");
+			rmb();
 		}
 		goto again;
 	}
@@ -84,17 +81,16 @@ again:
 	put_cpu();
 }
 
-int _do_spin_trylock(spinlock_t *lock)
+int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
 {
-	unsigned long val, caller;
+	unsigned long val;
 	int cpu = get_cpu();
 
-	GET_CALLER(caller);
 	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (val)
			     : "r" (&(lock->lock))
			     : "memory");
-	membar("#StoreLoad | #StoreStore");
+	membar_storeload_storestore();
 	if (!val) {
 		lock->owner_pc = ((unsigned int)caller);
 		lock->owner_cpu = cpu;
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock)
 {
 	lock->owner_pc = 0;
 	lock->owner_cpu = NO_PROC_ID;
-	membar("#StoreStore | #LoadStore");
+	membar_storestore_loadstore();
 	lock->lock = 0;
 	current->thread.smp_lock_count--;
 }
 
 /* Keep INIT_STUCK the same... */
 
-void _do_read_lock (rwlock_t *rw, char *str)
+void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int stuck = INIT_STUCK;
 	int cpu = get_cpu();
 	int shown = 0;
 
-	GET_CALLER(caller);
 wlock_again:
 	/* Wait for any writer to go away. */
 	while (((long)(rw->lock)) < 0) {
@@ -134,7 +129,7 @@ wlock_again:
 			show_read(str, rw, caller);
 			stuck = INIT_STUCK;
 		}
-		membar("#LoadLoad");
+		rmb();
 	}
 	/* Try once to increment the counter. */
 	__asm__ __volatile__(
@@ -147,7 +142,7 @@ wlock_again:
 "2:"	: "=r" (val)
 	: "0" (&(rw->lock))
 	: "g1", "g7", "memory");
-	membar("#StoreLoad | #StoreStore");
+	membar_storeload_storestore();
 	if (val)
 		goto wlock_again;
 	rw->reader_pc[cpu] = ((unsigned int)caller);
@@ -157,15 +152,13 @@ wlock_again:
 	put_cpu();
 }
 
-void _do_read_unlock (rwlock_t *rw, char *str)
+void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int stuck = INIT_STUCK;
 	int cpu = get_cpu();
 	int shown = 0;
 
-	GET_CALLER(caller);
-
 	/* Drop our identity _first_. */
 	rw->reader_pc[cpu] = 0;
 	current->thread.smp_lock_count--;
@@ -193,14 +186,13 @@ runlock_again:
 	put_cpu();
 }
 
-void _do_write_lock (rwlock_t *rw, char *str)
+void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int stuck = INIT_STUCK;
 	int cpu = get_cpu();
 	int shown = 0;
 
-	GET_CALLER(caller);
 wlock_again:
 	/* Spin while there is another writer. */
 	while (((long)rw->lock) < 0) {
@@ -209,7 +201,7 @@ wlock_again:
 			show_write(str, rw, caller);
 			stuck = INIT_STUCK;
 		}
-		membar("#LoadLoad");
+		rmb();
 	}
 
 	/* Try to acuire the write bit. */
@@ -264,7 +256,7 @@ wlock_again:
 			show_write(str, rw, caller);
 			stuck = INIT_STUCK;
 		}
-		membar("#LoadLoad");
+		rmb();
 	}
 	goto wlock_again;
 }
@@ -278,14 +270,12 @@ wlock_again:
 	put_cpu();
 }
 
-void _do_write_unlock(rwlock_t *rw)
+void _do_write_unlock(rwlock_t *rw, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int stuck = INIT_STUCK;
 	int shown = 0;
 
-	GET_CALLER(caller);
-
 	/* Drop our identity _first_ */
 	rw->writer_pc = 0;
 	rw->writer_cpu = NO_PROC_ID;
@@ -313,13 +303,11 @@ wlock_again:
 	}
 }
 
-int _do_write_trylock (rwlock_t *rw, char *str)
+int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
 {
-	unsigned long caller, val;
+	unsigned long val;
 	int cpu = get_cpu();
 
-	GET_CALLER(caller);
-
 	/* Try to acuire the write bit. */
 	__asm__ __volatile__(
 " mov 1, %%g3\n"
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S
new file mode 100644
index 000000000000..4004f748619f
--- /dev/null
+++ b/arch/sparc64/lib/mb.S
@@ -0,0 +1,73 @@
+/* mb.S: Out of line memory barriers.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+	/* These are here in an effort to more fully work around
+	 * Spitfire Errata #51.  Essentially, if a memory barrier
+	 * occurs soon after a mispredicted branch, the chip can stop
+	 * executing instructions until a trap occurs.  Therefore, if
+	 * interrupts are disabled, the chip can hang forever.
+	 *
+	 * It used to be believed that the memory barrier had to be
+	 * right in the delay slot, but a case has been traced
+	 * recently wherein the memory barrier was one instruction
+	 * after the branch delay slot and the chip still hung.  The
+	 * offending sequence was the following in sym_wakeup_done()
+	 * of the sym53c8xx_2 driver:
+	 *
+	 *	call	sym_ccb_from_dsa, 0
+	 *	 movge	%icc, 0, %l0
+	 *	brz,pn	%o0, .LL1303
+	 *	 mov	%o0, %l2
+	 *	membar	#LoadLoad
+	 *
+	 * The branch has to be mispredicted for the bug to occur.
+	 * Therefore, we put the memory barrier explicitly into a
+	 * "branch always, predicted taken" delay slot to avoid the
+	 * problem case.
+	 */
+
+	.text
+
+99:	retl
+	 nop
+
+	.globl	mb
+mb:	ba,pt	%xcc, 99b
+	 membar	#LoadLoad | #LoadStore | #StoreStore | #StoreLoad
+	.size	mb, .-mb
+
+	.globl	rmb
+rmb:	ba,pt	%xcc, 99b
+	 membar	#LoadLoad
+	.size	rmb, .-rmb
+
+	.globl	wmb
+wmb:	ba,pt	%xcc, 99b
+	 membar	#StoreStore
+	.size	wmb, .-wmb
+
+	.globl	membar_storeload
+membar_storeload:
+	ba,pt	%xcc, 99b
+	 membar	#StoreLoad
+	.size	membar_storeload, .-membar_storeload
+
+	.globl	membar_storeload_storestore
+membar_storeload_storestore:
+	ba,pt	%xcc, 99b
+	 membar	#StoreLoad | #StoreStore
+	.size	membar_storeload_storestore, .-membar_storeload_storestore
+
+	.globl	membar_storeload_loadload
+membar_storeload_loadload:
+	ba,pt	%xcc, 99b
+	 membar	#StoreLoad | #LoadLoad
+	.size	membar_storeload_loadload, .-membar_storeload_loadload
+
+	.globl	membar_storestore_loadstore
+membar_storestore_loadstore:
+	ba,pt	%xcc, 99b
+	 membar	#StoreStore | #LoadStore
+	.size	membar_storestore_loadstore, .-membar_storestore_loadstore
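
Note: the symbols defined in mb.S above are intended to be called from C in place of the old inline membar() strings used in debuglocks.c. Their declarations belong in the sparc64 headers, which are outside this diff; a minimal sketch of what callers are assumed to see:

/* Sketch only -- assumed prototypes for the out-of-line barriers
 * defined in mb.S; the real declarations live in the arch headers.
 */
extern void mb(void);
extern void rmb(void);
extern void wmb(void);
extern void membar_storeload(void);
extern void membar_storeload_storestore(void);
extern void membar_storeload_loadload(void);
extern void membar_storestore_loadstore(void);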