author     Chris Metcalf <cmetcalf@tilera.com>  2011-05-04 14:38:26 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>  2011-05-12 15:52:12 -0400
commit     18aecc2b645bbb07851b196452a2af314222069b (patch)
tree       959f765f69af01046c6e26db12b45c3390799d3e /arch/tile/include/asm/spinlock_64.h
parent     be84cb43833ee40a42e08f5425d20310f16229c7 (diff)
arch/tile: finish enabling support for TILE-Gx 64-bit chip
This support was partially present in the existing code (look for
"__tilegx__" ifdefs) but with this change you can build a working
kernel using the TILE-Gx toolchain and ARCH=tilegx.
Most of these files are new, generally adding a foo_64.c file
where previously there was just a foo_32.c file.
The ARCH=tilegx directive redirects to arch/tile, not arch/tilegx,
using the existing SRCARCH mechanism in the top-level Makefile.
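
For reference, this is the same SRCARCH override pattern the top-level Makefile already uses to map ARCH=i386/x86_64 onto arch/x86. A minimal sketch of what the tilegx case looks like (the exact comment text and placement in the Makefile may differ):

	# Additional ARCH settings for tile: ARCH=tilegx builds from arch/tile
	ifeq ($(ARCH),tilegx)
	        SRCARCH := tile
	endif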
Changes to existing files:
- <asm/bitops.h> and <asm/bitops_32.h> changed to factor the
include of <asm-generic/bitops/non-atomic.h> in the common header.
- <asm/compat.h> and arch/tile/kernel/compat.c changed to remove
the "const" markers I had put on compat_sys_execve() when trying
to match some recent similar changes to the non-compat execve.
It turns out the compat version wasn't "upgraded" to use const.
- <asm/opcode-tile_64.h> and <asm/opcode_constants_64.h> were
previously included accidentally, with the 32-bit contents. Now
they have the proper 64-bit contents.
Finally, I had to hack the existing hacky drivers/input/input-compat.h
to add yet another "#ifdef" for INPUT_COMPAT_TEST (same as x86_64).
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com> [drivers/input]
Diffstat (limited to 'arch/tile/include/asm/spinlock_64.h')
-rw-r--r--  arch/tile/include/asm/spinlock_64.h  161
1 file changed, 161 insertions, 0 deletions
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
new file mode 100644
index 000000000000..72be5904e020
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -0,0 +1,161 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000

/*
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline int arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

/*
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline int arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;
}

/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 val = lock->lock;
	return arch_spin_current(val) != arch_spin_next(val);
}

/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);

/* Grab the "next" ticket number and bump it atomically.
 * If the current ticket is not ours, go to the slow path.
 * We also take the slow path if the "next" value overflows.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = __insn_fetchadd4(&lock->lock, 1);
	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
	if (unlikely(arch_spin_current(val) != ticket))
		arch_spin_lock_slow(lock, ticket);
}

/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers.
 */

#define __WRITE_LOCK_BIT (1 << 31)

static inline int arch_write_val_locked(int val)
{
	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
}

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(rw->lock);
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;
}

extern void __read_lock_failed(arch_rwlock_t *rw);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
	if (unlikely(arch_write_val_locked(val)))
		__read_lock_failed(rw);
}

extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (unlikely(val != 0))
		__write_lock_failed(rw, val);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_fetchadd4(&rw->lock, -1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	rw->lock = 0;
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (likely(val == 0))
		return 1;
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */