Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/rwsem.h    |  3
-rw-r--r--  include/asm-sparc64/spinlock.h | 29
-rw-r--r--  include/asm-sparc64/spitfire.h |  1
3 files changed, 21 insertions, 12 deletions
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index bf2ae90ed3df..a1cc94f95984 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -55,8 +55,9 @@ static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
55 "add %%g1, %1, %%g7\n\t" 55 "add %%g1, %1, %%g7\n\t"
56 "cas [%2], %%g1, %%g7\n\t" 56 "cas [%2], %%g1, %%g7\n\t"
57 "cmp %%g1, %%g7\n\t" 57 "cmp %%g1, %%g7\n\t"
58 "membar #StoreLoad | #StoreStore\n\t"
58 "bne,pn %%icc, 1b\n\t" 59 "bne,pn %%icc, 1b\n\t"
59 " membar #StoreLoad | #StoreStore\n\t" 60 " nop\n\t"
60 "mov %%g7, %0\n\t" 61 "mov %%g7, %0\n\t"
61 : "=&r" (tmp) 62 : "=&r" (tmp)
62 : "0" (tmp), "r" (sem) 63 : "0" (tmp), "r" (sem)
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index db7581bdb531..9cb93a5c2b4f 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -52,12 +52,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 
 	__asm__ __volatile__(
 "1:	ldstub		[%1], %0\n"
+"	membar		#StoreLoad | #StoreStore\n"
 "	brnz,pn		%0, 2f\n"
-"	 membar		#StoreLoad | #StoreStore\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	ldub		[%1], %0\n"
+"	membar		#LoadLoad\n"
 "	brnz,pt		%0, 2b\n"
-"	 membar		#LoadLoad\n"
+"	 nop\n"
 "	ba,a,pt		%%xcc, 1b\n"
 "	.previous"
 	: "=&r" (tmp)
@@ -95,16 +97,18 @@ static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
 
 	__asm__ __volatile__(
 "1:	ldstub		[%2], %0\n"
-"	brnz,pn		%0, 2f\n"
 "	membar		#StoreLoad | #StoreStore\n"
+"	brnz,pn		%0, 2f\n"
+"	 nop\n"
 "	.subsection	2\n"
 "2:	rdpr		%%pil, %1\n"
 "	wrpr		%3, %%pil\n"
 "3:	ldub		[%2], %0\n"
-"	brnz,pt		%0, 3b\n"
 "	membar		#LoadLoad\n"
+"	brnz,pt		%0, 3b\n"
+"	 nop\n"
 "	ba,pt		%%xcc, 1b\n"
 "	wrpr		%1, %%pil\n"
 "	.previous"
 	: "=&r" (tmp1), "=&r" (tmp2)
 	: "r"(lock), "r"(flags)
@@ -162,12 +166,14 @@ static void inline __read_lock(rwlock_t *lock)
162"4: add %0, 1, %1\n" 166"4: add %0, 1, %1\n"
163" cas [%2], %0, %1\n" 167" cas [%2], %0, %1\n"
164" cmp %0, %1\n" 168" cmp %0, %1\n"
169" membar #StoreLoad | #StoreStore\n"
165" bne,pn %%icc, 1b\n" 170" bne,pn %%icc, 1b\n"
166" membar #StoreLoad | #StoreStore\n" 171" nop\n"
167" .subsection 2\n" 172" .subsection 2\n"
168"2: ldsw [%2], %0\n" 173"2: ldsw [%2], %0\n"
174" membar #LoadLoad\n"
169" brlz,pt %0, 2b\n" 175" brlz,pt %0, 2b\n"
170" membar #LoadLoad\n" 176" nop\n"
171" ba,a,pt %%xcc, 4b\n" 177" ba,a,pt %%xcc, 4b\n"
172" .previous" 178" .previous"
173 : "=&r" (tmp1), "=&r" (tmp2) 179 : "=&r" (tmp1), "=&r" (tmp2)
@@ -204,12 +210,14 @@ static void inline __write_lock(rwlock_t *lock)
204"4: or %0, %3, %1\n" 210"4: or %0, %3, %1\n"
205" cas [%2], %0, %1\n" 211" cas [%2], %0, %1\n"
206" cmp %0, %1\n" 212" cmp %0, %1\n"
213" membar #StoreLoad | #StoreStore\n"
207" bne,pn %%icc, 1b\n" 214" bne,pn %%icc, 1b\n"
208" membar #StoreLoad | #StoreStore\n" 215" nop\n"
209" .subsection 2\n" 216" .subsection 2\n"
210"2: lduw [%2], %0\n" 217"2: lduw [%2], %0\n"
218" membar #LoadLoad\n"
211" brnz,pt %0, 2b\n" 219" brnz,pt %0, 2b\n"
212" membar #LoadLoad\n" 220" nop\n"
213" ba,a,pt %%xcc, 4b\n" 221" ba,a,pt %%xcc, 4b\n"
214" .previous" 222" .previous"
215 : "=&r" (tmp1), "=&r" (tmp2) 223 : "=&r" (tmp1), "=&r" (tmp2)
@@ -240,8 +248,9 @@ static int inline __write_trylock(rwlock_t *lock)
240" or %0, %4, %1\n" 248" or %0, %4, %1\n"
241" cas [%3], %0, %1\n" 249" cas [%3], %0, %1\n"
242" cmp %0, %1\n" 250" cmp %0, %1\n"
251" membar #StoreLoad | #StoreStore\n"
243" bne,pn %%icc, 1b\n" 252" bne,pn %%icc, 1b\n"
244" membar #StoreLoad | #StoreStore\n" 253" nop\n"
245" mov 1, %2\n" 254" mov 1, %2\n"
246"2:" 255"2:"
247 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result) 256 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 9d7613eea812..1aa932773af8 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -111,7 +111,6 @@ static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long
111 "membar #Sync" 111 "membar #Sync"
112 : /* No outputs */ 112 : /* No outputs */
113 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG)); 113 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
114 __asm__ __volatile__ ("membar #Sync" : : : "memory");
115} 114}
116 115
117/* The instruction cache lines are flushed with this, but note that 116/* The instruction cache lines are flushed with this, but note that
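
The spitfire.h hunk is a cleanup rather than a reordering: the asm statement already ends with a membar #Sync, so the standalone membar that followed it was redundant and is simply dropped. A minimal sketch of the surviving pattern, sparc64-only; the function name and the spelled-out ASI constant below are illustrative:

/* Sketch of a diagnostic store via an alternate address space,
 * ordered by a single trailing membar #Sync (sparc64 only).
 * DCACHE_TAG_ASI is written out here purely for illustration.
 */
#define DCACHE_TAG_ASI	0x47	/* D-cache tag diagnostic ASI */

static inline void put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"	/* store tag to ASI space */
			     "membar	#Sync"			/* one #Sync is enough */
			     : /* no outputs */
			     : "r" (tag), "r" (addr), "i" (DCACHE_TAG_ASI)
			     : "memory");
}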