path: root/include/asm-alpha/atomic.h
author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-alpha/atomic.h
tag	v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-alpha/atomic.h')
-rw-r--r--	include/asm-alpha/atomic.h	198
1 file changed, 198 insertions, 0 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
new file mode 100644
index 000000000000..1b383e3cb68c
--- /dev/null
+++ b/include/asm-alpha/atomic.h
@@ -0,0 +1,198 @@
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )

#define atomic_read(v) ((v)->counter + 0)
#define atomic64_read(v) ((v)->counter + 0)

#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))

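/*
 * Usage sketch (editor's illustration, not part of the original file):
 * declaring, initializing, and accessing a counter with the macros
 * above.  The name `nr_events' is hypothetical.
 */
#if 0	/* sketch only */
static atomic_t nr_events = ATOMIC_INIT(0);

static void example_account(void)
{
	atomic_set(&nr_events, 0);	/* plain store, no implied barrier */
	if (atomic_read(&nr_events) == 0)
		;			/* plain load through the volatile member */
}
#endif
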
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

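/*
 * Conceptual sketch (editor's illustration, not part of the original
 * file): the ldl_l/stl_c loops below behave like this compare-and-swap
 * retry loop.  GCC's __sync builtin merely stands in for the
 * load-locked/store-conditional pair; the real code never calls it.
 */
#if 0	/* sketch only */
static __inline__ void atomic_add_sketch(int i, atomic_t * v)
{
	int old, new;
	do {
		old = v->counter;	/* like ldl_l */
		new = old + i;		/* like addl */
		/* like stl_c: succeeds only if no one raced with us */
	} while (!__sync_bool_compare_and_swap(&v->counter, old, new));
}
#endif
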
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked: temp = v->counter */
	"	addl %0,%2,%0\n"	/* temp += i */
	"	stl_c %0,%1\n"		/* store-conditional; temp = 0 on failure */
	"	beq %0,2f\n"		/* lost the reservation: branch out to retry */
	".subsection 2\n"		/* retry path lives out of line... */
	"2:	br 1b\n"		/* ...so the hot path falls straight through */
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked: temp = v->counter */
	"	addl %0,%3,%2\n"	/* result = temp + i (survives stl_c) */
	"	addl %0,%3,%0\n"	/* temp = temp + i (consumed by stl_c) */
	"	stl_c %0,%1\n"		/* store-conditional; temp = 0 on failure */
	"	beq %0,2f\n"
	"	mb\n"			/* full memory barrier on the success path */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

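/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the _return variants make "modify, then act on the new value" a
 * single race-free step.  The `pool' counter is hypothetical.
 */
#if 0	/* sketch only */
static atomic_t pool = ATOMIC_INIT(8);

static int example_take_one(void)
{
	/* drop the count and test the sign in one atomic operation */
	if (atomic_add_negative(-1, &pool)) {
		atomic_add(1, &pool);	/* went below zero: undo and fail */
		return 0;
	}
	return 1;
}
#endif
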
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

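/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the classic reference-count pattern built from the helpers above.
 * `struct obj' and obj_free() are hypothetical.
 */
#if 0	/* sketch only */
struct obj { atomic_t refcnt; /* ... payload ... */ };
extern void obj_free(struct obj *);

static void example_put(struct obj *p)
{
	/* atomic_dec_and_test returns true only for the caller that
	 * drops the count to zero, so exactly one caller frees */
	if (atomic_dec_and_test(&p->refcnt))
		obj_free(p);
}
#endif
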
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
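
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the barrier macros above pair with atomic_inc/atomic_dec when the
 * atomic op must be ordered against surrounding accesses; note that
 * the plain atomic_add/atomic_sub above imply no barrier.  `struct
 * work' is hypothetical.
 */
#if 0	/* sketch only */
struct work { volatile int status; atomic_t pending; };

static void example_publish_and_release(struct work *w)
{
	w->status = 1;			/* publish the result first */
	smp_mb__before_atomic_dec();	/* order the store before the dec */
	atomic_dec(&w->pending);	/* a waiter polling pending now sees status */
}
#endif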

#endif /* _ALPHA_ATOMIC_H */