author    Chris Metcalf <cmetcalf@tilera.com>  2010-05-28 23:09:12 -0400
committer Chris Metcalf <cmetcalf@tilera.com>  2010-06-04 17:11:18 -0400
commit    867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree      c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/lib/spinlock_32.c
parent    5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips. No driver support (except the console driver) is included yet. This includes the relevant Linux headers in asm/; the low-level "Tile architecture" headers in arch/, which are shared with the hypervisor, etc., and are build-system agnostic; and the relevant hypervisor headers in hv/.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/lib/spinlock_32.c')
-rw-r--r--  arch/tile/lib/spinlock_32.c  221
1 file changed, 221 insertions(+), 0 deletions(-)
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
new file mode 100644
index 000000000000..485e24d62c6b
--- /dev/null
+++ b/arch/tile/lib/spinlock_32.c
@@ -0,0 +1,221 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>

#include "spinlock_common.h"

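/*
 * A sketch of the ticket-lock scheme used below (this note assumes
 * TICKET_QUANTUM is 2, so that ticket values stay even and bit 0 is
 * free to serve as the tns marker): "tns" atomically writes 1 to
 * next_ticket and returns the old value, so an odd return means some
 * other cpu currently has the word tns'd and we must back off and
 * retry.  Once we own the word, storing back my_ticket plus
 * TICKET_QUANTUM both publishes the next available ticket and clears
 * the tns bit; we then spin until current_ticket reaches our ticket,
 * relaxing in proportion to how far back in line we are.
 */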
void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int iterations = 0;
	int delta;

	while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
		delay_backoff(iterations++);

	/* Increment the next ticket number, implicitly releasing tns lock. */
	lock->next_ticket = my_ticket + TICKET_QUANTUM;

	/* Wait until it's our turn. */
	while ((delta = my_ticket - lock->current_ticket) != 0)
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);

int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * Grab a ticket; no need to retry if it's busy, we'll just
	 * treat that the same as "locked", since someone else
	 * will lock it momentarily anyway.
	 */
	int my_ticket = __insn_tns((void *)&lock->next_ticket);

	if (my_ticket == lock->current_ticket) {
		/* Not currently locked, so lock it by keeping this ticket. */
		lock->next_ticket = my_ticket + TICKET_QUANTUM;
		/* Success! */
		return 1;
	}

	if (!(my_ticket & 1)) {
		/* Release next_ticket. */
		lock->next_ticket = my_ticket;
	}

	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	while (arch_spin_is_locked(lock))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns. The next seven bits are
 * zeroes. The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write. The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT	_WR_NEXT_SHIFT
#define WR_CURR_SHIFT	_WR_CURR_SHIFT
#define WR_WIDTH	_WR_WIDTH
#define WR_MASK		((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count. This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT	_RD_COUNT_SHIFT
#define RD_COUNT_WIDTH	_RD_COUNT_WIDTH
#define RD_COUNT_MASK	((1 << RD_COUNT_WIDTH) - 1)

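/*
 * A sketch of the resulting word layout, assuming the conventional
 * 8-bit fields (i.e. _WR_NEXT_SHIFT == 8, _WR_CURR_SHIFT == 16,
 * _RD_COUNT_SHIFT == 24):
 *
 *   31       24 23       16 15        8 7      1    0
 *  +-----------+-----------+-----------+--------+-----+
 *  | rd count  | wr curr   | wr next   | zeroes | tns |
 *  +-----------+-----------+-----------+--------+-----+
 */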

/* Lock the word, spinning until there are no tns-ers. */
static inline u32 get_rwlock(arch_rwlock_t *rwlock)
{
	u32 iterations = 0;
	for (;;) {
		u32 val = __insn_tns((int *)&rwlock->lock);
		if (unlikely(val & 1)) {
			delay_backoff(iterations++);
			continue;
		}
		return val;
	}
}

int arch_read_trylock_slow(arch_rwlock_t *rwlock)
{
	u32 val = get_rwlock(rwlock);
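	/*
	 * Shifting out the reader count leaves just the writer fields
	 * and the (currently clear) tns byte; if those are all zero,
	 * no writer is active or waiting, so the read lock is ours.
	 * Other readers don't matter.  Writing val back releases the
	 * tns bit, with the reader count bumped if we succeeded.
	 */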
	int locked = (val << RD_COUNT_WIDTH) == 0;
	rwlock->lock = val + (locked << RD_COUNT_SHIFT);
	return locked;
}
EXPORT_SYMBOL(arch_read_trylock_slow);

void arch_read_unlock_slow(arch_rwlock_t *rwlock)
{
	u32 val = get_rwlock(rwlock);
	rwlock->lock = val - (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_unlock_slow);

void arch_write_unlock_slow(arch_rwlock_t *rwlock, u32 val)
{
	u32 eq, mask = 1 << WR_CURR_SHIFT;
	while (unlikely(val & 1)) {
		/* Limited backoff since we are the highest-priority task. */
		relax(4);
		val = __insn_tns((int *)&rwlock->lock);
	}
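	/*
	 * Bump the "current" writer ticket with a byte-wise add (addb),
	 * which cannot carry into the reader count.  seqb then compares
	 * the new word against itself shifted so that the "next" field
	 * lines up under "curr"; the curr byte of the result is nonzero
	 * iff curr == next, i.e. no other writer is waiting.  In that
	 * case mz resets the whole word to the fully-unlocked state
	 * (assuming mz yields zero when its condition operand is
	 * nonzero, and its second operand otherwise).
	 */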
	val = __insn_addb(val, mask);
	eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
	val = __insn_mz(eq & mask, val);
	rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock_slow);

/*
 * We spin until everything but the reader bits (which are in the high
 * part of the word) are zero, i.e. no active or waiting writers, no tns.
 *
 * ISSUE: This approach can permanently starve readers. A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers will always make forward progress
 * in a finite time.
 */
void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
	u32 iterations = 0;
	do {
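		/*
		 * If the last tns (ours, or the caller's fast-path
		 * attempt) actually took the word, put the old value
		 * back before backing off, so we don't stall writers
		 * while we wait.
		 */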
		if (!(val & 1))
			rwlock->lock = val;
		delay_backoff(iterations++);
		val = __insn_tns((int *)&rwlock->lock);
	} while ((val << RD_COUNT_WIDTH) != 0);
	rwlock->lock = val + (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_lock_slow);

void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
	/*
	 * The trailing underscore on this variable (and curr_ below)
	 * reminds us that the high bits are garbage; we mask them out
	 * when we compare them.
	 */
	u32 my_ticket_;

	/* Take out the next ticket; this will also stop would-be readers. */
	if (val & 1)
		val = get_rwlock(rwlock);
	rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);

	/* Extract my ticket value from the original word. */
	my_ticket_ = val >> WR_NEXT_SHIFT;

	/*
	 * Wait until the "current" field matches our ticket, and
	 * there are no remaining readers.
	 */
	for (;;) {
		u32 curr_ = val >> WR_CURR_SHIFT;
		u32 readers = val >> RD_COUNT_SHIFT;
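		/*
		 * Writers still queued ahead of us, plus one more unit
		 * if any readers are still draining.
		 */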
		u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
		if (likely(delta == 0))
			break;

		/* Delay based on how many lock-holders are still out there. */
		relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

		/*
		 * Get a non-tns value to check; we don't need to tns
		 * it ourselves. Since we're not tns'ing, we retry
		 * more rapidly to get a valid value.
		 */
		while ((val = rwlock->lock) & 1)
			relax(4);
	}
}
EXPORT_SYMBOL(arch_write_lock_slow);

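/*
 * Slow-path helpers for the atomic_t fast path (a sketch of the
 * intent, inferred from the code): tns the counter word itself, with
 * the value 1 reserved as the "busy" marker, so a tns that returns 1
 * means another cpu holds the word and we back off and retry.
 * Setting SPR_INTERRUPT_CRITICAL_SECTION suppresses interrupts while
 * we hold the word, so an interrupt handler touching the same atomic
 * cannot deadlock against us.
 */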
int __tns_atomic_acquire(atomic_t *lock)
{
	int ret;
	u32 iterations = 0;

	BUG_ON(__insn_mfspr(SPR_INTERRUPT_CRITICAL_SECTION));
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);

	while ((ret = __insn_tns((void *)&lock->counter)) == 1)
		delay_backoff(iterations++);
	return ret;
}

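/* Store the new value, releasing the tns hold, then re-enable interrupts. */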
void __tns_atomic_release(atomic_t *p, int v)
{
	p->counter = v;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
}