diff options
author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2008-09-05 17:33:10 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2008-09-05 17:33:10 -0400 |
commit | ac0b75f5a73015913424c94ece9ac192fd61c80d (patch) | |
tree | 84a9103c32541e639bedd835d36f7c9378141aca | |
parent | 34770c2be5a07a7d83a0cb63ca4f2dbbd2c83fcb (diff) |
replace TTS locks with ticket locks
-rw-r--r-- | include/asm-sparc64/spinlock.h | 108 | ||||
-rw-r--r-- | include/asm-sparc64/spinlock_types.h | 5 |
2 files changed, 48 insertions, 65 deletions
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index 0006fe9f8c..abbf12b4d0 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h | |||
@@ -15,93 +15,75 @@ | |||
15 | * and rebuild your kernel. | 15 | * and rebuild your kernel. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* All of these locking primitives are expected to work properly | 18 | #define __raw_spin_is_locked(lp) ((lp)->tail != (lp)->head) |
19 | * even in an RMO memory model, which currently is what the kernel | ||
20 | * runs in. | ||
21 | * | ||
22 | * There is another issue. Because we play games to save cycles | ||
23 | * in the non-contention case, we need to be extra careful about | ||
24 | * branch targets into the "spinning" code. They live in their | ||
25 | * own section, but the newer V9 branches have a shorter range | ||
26 | * than the traditional 32-bit sparc branch variants. The rule | ||
27 | * is that the branches that go into and out of the spinner sections | ||
28 | * must be pre-V9 branches. | ||
29 | */ | ||
30 | |||
31 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) | ||
32 | 19 | ||
33 | #define __raw_spin_unlock_wait(lp) \ | 20 | #define __raw_spin_unlock_wait(lp) \ |
34 | do { rmb(); \ | 21 | do { rmb(); \ |
35 | } while((lp)->lock) | 22 | } while((lp)->tail != (lp)->head) |
23 | |||
24 | |||
36 | 25 | ||
37 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 26 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
38 | { | 27 | { |
39 | unsigned long tmp; | 28 | int ticket, tmp; |
40 | |||
41 | __asm__ __volatile__( | 29 | __asm__ __volatile__( |
42 | "1: ldstub [%1], %0\n" | 30 | "1: lduw [%2], %0 \n" /* read ticket */ |
43 | " membar #StoreLoad | #StoreStore\n" | 31 | " add %0, 1, %1 \n" |
44 | " brnz,pn %0, 2f\n" | 32 | " cas [%2], %0, %1 \n" |
45 | " nop\n" | 33 | " cmp %0, %1 \n" |
46 | " .subsection 2\n" | 34 | " bne,a,pn %%icc, 1b \n" |
47 | "2: ldub [%1], %0\n" | 35 | " nop \n" |
48 | " membar #LoadLoad\n" | 36 | "2: lduw [%3], %1 \n" |
49 | " brnz,pt %0, 2b\n" | 37 | " cmp %0, %1 \n" |
50 | " nop\n" | 38 | " bne,a,pn %%icc, 2b \n" |
51 | " ba,a,pt %%xcc, 1b\n" | 39 | " nop \n" |
52 | " .previous" | 40 | " membar #StoreStore | #StoreLoad" |
53 | : "=&r" (tmp) | 41 | : "=&r" (ticket), "=&r" (tmp) |
54 | : "r" (lock) | 42 | : "r" (&lock->tail), "r" (&lock->head) |
55 | : "memory"); | 43 | : "memory"); |
56 | } | 44 | } |
57 | 45 | ||
58 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 46 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
59 | { | 47 | { |
60 | unsigned long result; | 48 | int tail, head; |
61 | |||
62 | __asm__ __volatile__( | 49 | __asm__ __volatile__( |
63 | " ldstub [%1], %0\n" | 50 | " lduw [%2], %0 \n" /* read tail */ |
64 | " membar #StoreLoad | #StoreStore" | 51 | " lduw [%3], %1 \n" /* read head */ |
65 | : "=r" (result) | 52 | " cmp %0, %1 \n" |
66 | : "r" (lock) | 53 | " bne,a,pn %%icc, 1f \n" |
54 | " nop \n" | ||
55 | " inc %1 \n" | ||
56 | " cas [%2], %0, %1 \n" /* try to inc ticket */ | ||
57 | " membar #StoreStore | #StoreLoad \n" | ||
58 | "1: " | ||
59 | : "=&r" (tail), "=&r" (head) | ||
60 | : "r" (&lock->tail), "r" (&lock->head) | ||
67 | : "memory"); | 61 | : "memory"); |
68 | 62 | ||
69 | return (result == 0UL); | 63 | return tail == head; |
70 | } | 64 | } |
71 | 65 | ||
72 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 66 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
73 | { | 67 | { |
68 | int tmp; | ||
74 | __asm__ __volatile__( | 69 | __asm__ __volatile__( |
75 | " membar #StoreStore | #LoadStore\n" | 70 | " membar #StoreStore | #LoadStore \n" |
76 | " stb %%g0, [%0]" | 71 | " lduw [%1], %0 \n" |
77 | : /* No outputs */ | 72 | " inc %0 \n" |
78 | : "r" (lock) | 73 | " st %0, [%1] \n" |
74 | " membar #StoreStore | #StoreLoad" | ||
75 | : "=&r" (tmp) | ||
76 | : "r" (&lock->head) | ||
79 | : "memory"); | 77 | : "memory"); |
80 | } | 78 | } |
81 | 79 | ||
82 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 80 | /* We don't handle this yet, but it looks like not re-enabling the interrupts |
83 | { | 81 | * works fine, too. For example, lockdep also does it like this. |
84 | unsigned long tmp1, tmp2; | 82 | */ |
83 | #define __raw_spin_lock_flags(l, f) __raw_spin_lock(l) | ||
84 | |||
85 | |||
85 | 86 | ||
86 | __asm__ __volatile__( | ||
87 | "1: ldstub [%2], %0\n" | ||
88 | " membar #StoreLoad | #StoreStore\n" | ||
89 | " brnz,pn %0, 2f\n" | ||
90 | " nop\n" | ||
91 | " .subsection 2\n" | ||
92 | "2: rdpr %%pil, %1\n" | ||
93 | " wrpr %3, %%pil\n" | ||
94 | "3: ldub [%2], %0\n" | ||
95 | " membar #LoadLoad\n" | ||
96 | " brnz,pt %0, 3b\n" | ||
97 | " nop\n" | ||
98 | " ba,pt %%xcc, 1b\n" | ||
99 | " wrpr %1, %%pil\n" | ||
100 | " .previous" | ||
101 | : "=&r" (tmp1), "=&r" (tmp2) | ||
102 | : "r"(lock), "r"(flags) | ||
103 | : "memory"); | ||
104 | } | ||
105 | 87 | ||
106 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 88 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
107 | 89 | ||
diff --git a/include/asm-sparc64/spinlock_types.h b/include/asm-sparc64/spinlock_types.h index e128112a0d..1a2e24b29c 100644 --- a/include/asm-sparc64/spinlock_types.h +++ b/include/asm-sparc64/spinlock_types.h | |||
@@ -6,10 +6,11 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned char lock; | 9 | int tail; |
10 | int head; | ||
10 | } raw_spinlock_t; | 11 | } raw_spinlock_t; |
11 | 12 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 13 | #define __RAW_SPIN_LOCK_UNLOCKED { 0, 0 } |
13 | 14 | ||
14 | typedef struct { | 15 | typedef struct { |
15 | volatile unsigned int lock; | 16 | volatile unsigned int lock; |