author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2009-12-08 12:19:05 -0500
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2009-12-08 12:47:46 -0500
commit    bc5deca4e88b2b816dd44a9d6ce685b8c0fc8844 (patch)
tree      bed855ae9bfae422061ba3c823f8307d5bc232b2
parent    0443a922ed2475c192087825d0e24f2d5cc5d3bf (diff)
use FIFO spin locks on sparc64
-rw-r--r--    include/asm-sparc64/spinlock.h          113
-rw-r--r--    include/asm-sparc64/spinlock_types.h      5
2 files changed, 53 insertions(+), 65 deletions(-)
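
The patch replaces sparc64's test-and-set (ldstub) spinlocks with FIFO ticket locks: the lock becomes a pair of counters, and waiting CPUs acquire the lock strictly in the order in which they took a ticket, so no CPU can starve. As a minimal sketch of the idea in portable C11 atomics (illustrative names, not code from the patch):

#include <stdatomic.h>

/* A ticket lock: 'tail' counts tickets handed out, 'head' counts
 * tickets served. The lock is held iff tail != head, and waiters
 * acquire the lock strictly in the order they took a ticket. */
struct ticket_lock {
	atomic_int tail;   /* next ticket to hand out */
	atomic_int head;   /* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* grab the next ticket: an atomic tail++ */
	int me = atomic_fetch_add_explicit(&l->tail, 1,
					   memory_order_relaxed);

	/* spin until our ticket comes up */
	while (atomic_load_explicit(&l->head, memory_order_acquire) != me)
		; /* busy-wait */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	/* serve the next ticket; only the holder ever writes head */
	int h = atomic_load_explicit(&l->head, memory_order_relaxed);
	atomic_store_explicit(&l->head, h + 1, memory_order_release);
}

The patch below expresses the tail increment as a cas retry loop, compare-and-swap being the atomic read-modify-write primitive it uses on sparc64; the fetch_add above is the same operation written portably.
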
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 0006fe9f8c7a..16931d4cd928 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -15,93 +15,80 @@
  * and rebuild your kernel.
  */
 
-/* All of these locking primitives are expected to work properly
- * even in an RMO memory model, which currently is what the kernel
- * runs in.
- *
- * There is another issue. Because we play games to save cycles
- * in the non-contention case, we need to be extra careful about
- * branch targets into the "spinning" code. They live in their
- * own section, but the newer V9 branches have a shorter range
- * than the traditional 32-bit sparc branch variants. The rule
- * is that the branches that go into and out of the spinner sections
- * must be pre-V9 branches.
- */
-
-#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
+#define __raw_spin_is_locked(lp)	((lp)->tail != (lp)->head)
 
 #define __raw_spin_unlock_wait(lp)	\
 	do {	rmb();			\
-	} while((lp)->lock)
+	} while((lp)->tail != (lp)->head)
+
+
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	unsigned long tmp;
-
+	int ticket, tmp;
 	__asm__ __volatile__(
-"1:	ldstub		[%1], %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	ldub		[%1], %0\n"
-"	membar		#LoadLoad\n"
-"	brnz,pt		%0, 2b\n"
-"	 nop\n"
-"	ba,a,pt		%%xcc, 1b\n"
-"	.previous"
-	: "=&r" (tmp)
-	: "r" (lock)
+"1:	lduw		[%2], %0	\n" /* read ticket */
+"	add		%0, 1, %1	\n"
+"	cas		[%2], %0, %1	\n"
+"	cmp		%0, %1		\n"
+"	be,a,pt		%%icc, 2f	\n"
+"	 nop				\n"
+"	membar		#LoadLoad | #StoreLoad | #LoadStore\n"
+"	ba		1b		\n"
+"	 nop				\n"
+"2:	lduw		[%3], %1	\n"
+"	cmp		%0, %1		\n"
+"	be,a,pt		%%icc, 3f	\n"
+"	 nop				\n"
+"	membar		#LoadLoad | #StoreLoad | #LoadStore\n"
+"	ba		2b		\n"
+"3:	membar		#StoreStore | #StoreLoad"
+	: "=&r" (ticket), "=&r" (tmp)
+	: "r" (&lock->tail), "r" (&lock->head)
 	: "memory");
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	unsigned long result;
-
+	int tail, head;
 	__asm__ __volatile__(
-"	ldstub		[%1], %0\n"
-"	membar		#StoreLoad | #StoreStore"
-	: "=r" (result)
-	: "r" (lock)
+"	lduw		[%2], %0	\n" /* read tail */
+"	lduw		[%3], %1	\n" /* read head */
+"	cmp		%0, %1		\n"
+"	bne,a,pn	%%icc, 1f	\n"
+"	 nop				\n"
+"	inc		%1		\n"
+"	cas		[%2], %0, %1	\n" /* try to inc ticket */
+"	membar		#StoreStore | #StoreLoad\n"
+"1:	"
+	: "=&r" (tail), "=&r" (head)
+	: "r" (&lock->tail), "r" (&lock->head)
 	: "memory");
 
-	return (result == 0UL);
+	return tail == head;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+	int tmp;
 	__asm__ __volatile__(
-"	membar		#StoreStore | #LoadStore\n"
-"	stb		%%g0, [%0]"
-	: /* No outputs */
-	: "r" (lock)
+"	membar		#StoreStore | #LoadStore\n"
+"	lduw		[%1], %0	\n"
+"	inc		%0		\n"
+"	st		%0, [%1]	\n"
+"	membar		#StoreStore | #StoreLoad"
+	: "=&r" (tmp)
+	: "r" (&lock->head)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
-{
-	unsigned long tmp1, tmp2;
+/* We don't handle this yet, but it looks like not re-enabling the interrupts
+ * works fine, too. For example, lockdep also does it like this.
+ */
+#define __raw_spin_lock_flags(l, f)	__raw_spin_lock(l)
+
+
 
-	__asm__ __volatile__(
-"1:	ldstub		[%2], %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
-"	brnz,pn		%0, 2f\n"
-"	 nop\n"
-"	.subsection	2\n"
-"2:	rdpr		%%pil, %1\n"
-"	wrpr		%3, %%pil\n"
-"3:	ldub		[%2], %0\n"
-"	membar		#LoadLoad\n"
-"	brnz,pt		%0, 3b\n"
-"	 nop\n"
-"	ba,pt		%%xcc, 1b\n"
-"	 wrpr		%1, %%pil\n"
-"	.previous"
-	: "=&r" (tmp1), "=&r" (tmp2)
-	: "r"(lock), "r"(flags)
-	: "memory");
-}
-
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
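Read back as C, the new inline assembly above corresponds roughly to the sketch below. This is my paraphrase in C11 atomics, under the assumption that acquire/release orderings capture what the explicit membar sequences achieve under sparc64's RMO memory model; it is not code from the patch.

#include <stdatomic.h>
#include <stdbool.h>

struct raw_ticket_lock {
	atomic_int tail;
	atomic_int head;
};

/* __raw_spin_lock: cas-loop to take a ticket (tail++), then spin
 * until head catches up to our ticket. */
static void raw_ticket_lock(struct raw_ticket_lock *l)
{
	int t = atomic_load_explicit(&l->tail, memory_order_relaxed);

	/* the "lduw/add/cas" retry loop: atomically bump tail by one */
	while (!atomic_compare_exchange_weak_explicit(&l->tail, &t, t + 1,
						      memory_order_relaxed,
						      memory_order_relaxed))
		; /* t was reloaded by the failed cas; retry */

	/* the "lduw [head]/cmp" spin: wait until our ticket is served */
	while (atomic_load_explicit(&l->head, memory_order_acquire) != t)
		; /* busy-wait */
}

/* __raw_spin_trylock: take a ticket only if the lock looks free
 * (tail == head); a single cas attempt, no retry loop. */
static bool raw_ticket_trylock(struct raw_ticket_lock *l)
{
	int t = atomic_load_explicit(&l->tail, memory_order_relaxed);
	int h = atomic_load_explicit(&l->head, memory_order_relaxed);

	if (t != h)
		return false; /* held, or another ticket is in flight */

	/* the "inc/cas" pair: success iff tail is still t */
	return atomic_compare_exchange_strong_explicit(&l->tail, &t, t + 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* __raw_spin_unlock: a plain load/inc/store of head suffices because
 * only the lock holder writes head; release ordering stands in for
 * the leading membar #StoreStore|#LoadStore. */
static void raw_ticket_unlock(struct raw_ticket_lock *l)
{
	int h = atomic_load_explicit(&l->head, memory_order_relaxed);

	atomic_store_explicit(&l->head, h + 1, memory_order_release);
}

Note also the __raw_spin_lock_flags() change: the old code restored the interrupt level (%pil) while spinning, whereas the new version simply maps to __raw_spin_lock(), with the comment in the patch arguing that leaving interrupts disabled while waiting is acceptable (lockdep behaves the same way).
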
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h
index e128112a0d7c..1a2e24b29c4d 100644
--- a/include/asm-sparc64/spinlock_types.h
+++ b/include/asm-sparc64/spinlock_types.h
@@ -6,10 +6,11 @@
 #endif
 
 typedef struct {
-	volatile unsigned char lock;
+	int tail;
+	int head;
 } raw_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __RAW_SPIN_LOCK_UNLOCKED	{ 0, 0 }
 
 typedef struct {
 	volatile unsigned int lock;
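
The type change is what makes the algorithm possible: raw_spinlock_t grows from a single byte to a pair of int counters, and the unlocked initializer becomes { 0, 0 } so that tail == head holds initially. A trivial standalone illustration of the new __raw_spin_is_locked() invariant, using a plain-C mirror of the struct rather than the kernel type:

#include <assert.h>

/* plain-C mirror of the new raw_spinlock_t layout */
typedef struct {
	int tail;
	int head;
} ticket_spinlock_t;

#define TICKET_SPIN_LOCK_UNLOCKED	{ 0, 0 }

int main(void)
{
	ticket_spinlock_t lock = TICKET_SPIN_LOCK_UNLOCKED;

	/* the new __raw_spin_is_locked() test: held iff counters differ */
	assert(lock.tail == lock.head);	/* starts out unlocked */

	lock.tail++;			/* a ticket taken: now locked */
	assert(lock.tail != lock.head);

	lock.head++;			/* ticket served: unlocked again */
	assert(lock.tail == lock.head);

	return 0;
}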