author		Richard Kuo <rkuo@codeaurora.org>	2011-10-31 19:28:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-01 10:34:18 -0400
commit		750850189b2b7e43c03f33bf5741887e8ca07d16 (patch)
tree		ea5b45d48d910c778e52b2ea27d93c93863480c9 /arch/hexagon
parent		a86a7ce30ac04cfd6775dc9a0114d9c3924e682a (diff)
Hexagon: Add atomic ops support
Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/hexagon')
-rw-r--r--	arch/hexagon/include/asm/atomic.h	164
1 file changed, 164 insertions, 0 deletions
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
new file mode 100644
index 000000000000..e220f9053035
--- /dev/null
+++ b/arch/hexagon/include/asm/atomic.h
@@ -0,0 +1,164 @@
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_set(v, i)	((v)->counter = (i))
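
/*
 * Editor's illustrative sketch, not part of the original commit
 * ("example_counter" and "example_reset" are hypothetical names):
 * ATOMIC_INIT initializes an atomic_t at definition time, while
 * atomic_set stores a new value at runtime.
 */
static atomic_t example_counter = ATOMIC_INIT(0);

static inline void example_reset(void)
{
	atomic_set(&example_counter, 0);
}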

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value of @v.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
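
/*
 * Editor's illustrative sketch, not part of the original commit
 * (hypothetical helper name; assumes xchg() itself is in scope, as it
 * lives in another Hexagon header): because atomic_xchg() returns the
 * previous value, it can implement a one-shot claim on a flag word.
 */
static inline int example_try_claim(atomic_t *flag)
{
	/* An old value of 0 means the flag was clear and this caller won it */
	return atomic_xchg(flag, 1) == 0;
}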

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * This is harder on architectures that lack a load-locked/store-
 * conditional primitive like our memw_locked; here it maps directly
 * onto the hardware loop below.
 *
 * This is the linchpin of the rest of the generically defined routines.
 * Remember that Hexagon V2 had a bug with the .new form of a predicate
 * set by memw_locked, which is why the check after the store below
 * reads the plain predicate.
 *
 * "old" is the "expected" old value; __oldval is the actual old value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
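
/*
 * Editor's illustrative sketch, not part of the original commit
 * (hypothetical helper name): atomic_cmpxchg() is enough to build any
 * other read-modify-write operation by retrying until no other CPU
 * raced with us, e.g. an OR that also returns the previous value.
 */
static inline int example_atomic_fetch_or(atomic_t *v, int mask)
{
	int old;

	/* Retry while another CPU changes v between the read and the cmpxchg */
	do {
		old = atomic_read(v);
	} while (atomic_cmpxchg(v, old, old | mask) != old);

	return old;
}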

static inline int atomic_add_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3) = %0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_add(i, v)	atomic_add_return(i, (v))

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = sub(%0,%2);\n"
		"	memw_locked(%1,P3) = %0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_sub(i, v)	atomic_sub_return(i, (v))
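
/*
 * Editor's illustrative sketch, not part of the original commit
 * (hypothetical helper name): the *_return forms hand back the
 * post-operation value, so a transition can be detected without a
 * second, racy read.
 */
static inline int example_first_waiter(atomic_t *nwaiters)
{
	/* True only for the caller that moved the count from 0 to 1 */
	return atomic_add_return(1, nwaiters) == 1;
}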

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: unless value is equal to u
 *
 * Returns the old value of @v; the add happened iff that value
 * differs from @u, which is exactly what the generic
 * atomic_add_unless() wrapper tests for.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	int tmp;

	/*
	 * The sum goes through %1 (tmp) so that %0 still holds the old
	 * value for the return, per the kernel-wide contract for this
	 * routine.
	 */
	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if !p3 jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#define atomic_inc(v)	atomic_add(1, (v))
#define atomic_dec(v)	atomic_sub(1, (v))

#define atomic_inc_and_test(v)	(atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v)	(atomic_add_return(i, (v)) < 0)
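
/*
 * Editor's illustrative sketch, not part of the original commit
 * (hypothetical type and helper names): add-unless and dec-and-test
 * together implement the classic refcount pattern -- take a reference
 * only while the object is live, and let exactly one releaser observe
 * the final drop.  __atomic_add_unless() is used directly here since
 * it is defined above; in real callers this is spelled
 * atomic_inc_not_zero().
 */
struct example_obj {
	atomic_t refcount;
};

static inline int example_obj_tryget(struct example_obj *obj)
{
	/* A nonzero old count means the reference was taken; zero means
	 * a concurrent put already dropped the object. */
	return __atomic_add_unless(&obj->refcount, 1, 0) != 0;
}

static inline void example_obj_put(struct example_obj *obj,
				   void (*release)(struct example_obj *))
{
	/* Only the caller that makes the count hit zero frees the object */
	if (atomic_dec_and_test(&obj->refcount))
		release(obj);
}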

#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))

#endif