Diffstat (limited to 'arch/tile/include/asm/spinlock_32.h')
-rw-r--r--	arch/tile/include/asm/spinlock_32.h	| 200
1 file changed, 200 insertions(+), 0 deletions(-)
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
new file mode 100644
index 000000000000..f3a8473c68da
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -0,0 +1,200 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2


/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        /*
         * Note that even if a new ticket is in the process of being
         * acquired, so lock->next_ticket is 1, it's still reasonable
         * to claim the lock is held, since it will be momentarily
         * if not already. There's no need to wait for a "valid"
         * lock->next_ticket to become available.
         */
        return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

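/*
 * Illustrative sketch only, not part of the original header: the
 * out-of-line arch_spin_lock() plausibly claims a ticket with tns and
 * spins until its turn.  The '1' a concurrent tns leaves in next_ticket
 * is odd, so it can never be mistaken for a valid (even) ticket.
 * Details such as the use of cpu_relax() are assumptions here.
 */
#if 0
void arch_spin_lock(arch_spinlock_t *lock)
{
        int my_ticket;

        /* Retry while another cpu's tns has next_ticket marked busy. */
        while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
                cpu_relax();

        /* Publish the following even ticket, clearing the busy flag. */
        lock->next_ticket = my_ticket + TICKET_QUANTUM;

        /* Wait until the current ticket reaches ours. */
        while (lock->current_ticket != my_ticket)
                cpu_relax();
}
#endif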
/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

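/*
 * Sketch of a plausible trylock, again an assumption rather than the
 * real out-of-line code: grab next_ticket once; if the lock turns out
 * to be held (or a tns is in flight), put the word back and fail.
 */
#if 0
int arch_spin_trylock(arch_spinlock_t *lock)
{
        int my_ticket = __insn_tns((void *)&lock->next_ticket);

        if (my_ticket == lock->current_ticket) {
                /* Uncontended: keep the ticket, i.e. take the lock. */
                lock->next_ticket = my_ticket + TICKET_QUANTUM;
                return 1;
        }

        /* If we didn't race with another tns, restore next_ticket. */
        if (!(my_ticket & 1))
                lock->next_ticket = my_ticket;
        return 0;
}
#endif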
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* For efficiency, overlap fetching the old ticket with the wmb(). */
        int old_ticket = lock->current_ticket;
        wmb(); /* guarantee anything modified under the lock is visible */
        lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

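/*
 * A minimal sketch of what unlock_wait might look like, assuming a
 * simple poll on arch_spin_is_locked(); the real routine may back off
 * between polls rather than spinning tightly.
 */
#if 0
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                cpu_relax();
}
#endif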
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT 8
#define _WR_CURR_SHIFT 16
#define _WR_WIDTH 8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8
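
/*
 * Word layout implied by the shifts above.  Bits 7:1 appear unused;
 * bit 0 is the flag that a tns atomically sets while an update is in
 * flight.  (This diagram is an inference from the defines and the
 * "& 1" tests below, not taken from the original file.)
 *
 *   31        24 23        16 15         8 7          0
 *  +------------+------------+------------+------------+
 *  | read count | curr write | next write |  tns flag  |
 *  +------------+------------+------------+------------+
 */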

/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
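        /*
         * Shifting left by _RD_COUNT_WIDTH discards the reader count,
         * so this is true only when no writer holds or is waiting for
         * the lock and no tns is in flight; existing readers do not
         * block a new read lock.
         */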
        return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
        return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val << _RD_COUNT_WIDTH)) {
                arch_read_lock_slow(rwlock, val);
                return;
        }
        rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}
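
/*
 * Hypothetical shape of the out-of-line slow path, shown only to make
 * the tns/store-back protocol concrete; the real arch_read_lock_slow()
 * in the corresponding .c file may differ (e.g. it may back off).
 */
#if 0
void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
        for (;;) {
                if (val & 1) {
                        /* Another cpu's tns is in flight; don't store back. */
                } else if (val << _RD_COUNT_WIDTH) {
                        /* Writer active or waiting: restore the word. */
                        rwlock->lock = val;
                } else {
                        /* No writers: bump the reader count and return. */
                        rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
                        return;
                }
                cpu_relax();
                val = __insn_tns((int *)&rwlock->lock);
        }
}
#endif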

/**
 * arch_write_lock() - acquire a write lock.
 */
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val != 0)) {
                arch_write_lock_slow(rwlock, val);
                return;
        }
        rwlock->lock = 1 << _WR_NEXT_SHIFT;
}

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
        int locked;
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val & 1)) {
                return arch_read_trylock_slow(rwlock);
        }
        locked = (val << _RD_COUNT_WIDTH) == 0;
        rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
        return locked;
}

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);

        /*
         * If a tns is in progress, or there's a waiting or active locker,
         * or active readers, we can't take the lock, so give up.
         */
        if (unlikely(val != 0)) {
                if (!(val & 1))
                        rwlock->lock = val;
                return 0;
        }

        /* Set the "next" field to mark it locked. */
        rwlock->lock = 1 << _WR_NEXT_SHIFT;
        return 1;
}

/**
 * arch_read_unlock() - release a read lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
        u32 val;
        mb(); /* guarantee anything modified under the lock is visible */
        val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val & 1)) {
                arch_read_unlock_slow(rwlock);
                return;
        }
        rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_unlock() - release a write lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
        u32 val;
        mb(); /* guarantee anything modified under the lock is visible */
        val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
                arch_write_unlock_slow(rwlock, val);
                return;
        }
        rwlock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_32_H */