Diffstat (limited to 'include/asm-x86/local_64.h')
-rw-r--r--	include/asm-x86/local_64.h	222
1 file changed, 222 insertions, 0 deletions
diff --git a/include/asm-x86/local_64.h b/include/asm-x86/local_64.h
new file mode 100644
index 000000000000..e87492bb0693
--- /dev/null
+++ b/include/asm-x86/local_64.h
@@ -0,0 +1,222 @@
#ifndef _ARCH_X8664_LOCAL_H
#define _ARCH_X8664_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set(&(l)->a, (i))
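
/*
 * Usage sketch (the variable name is illustrative, not part of this
 * patch): a local_t is declared and initialized like an atomic_long_t,
 * but must only ever be updated by the CPU that owns it:
 *
 *	static local_t hits = LOCAL_INIT(0);
 *
 *	local_set(&hits, 0);
 *	printk(KERN_DEBUG "hits: %ld\n", local_read(&hits));
 */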

static inline void local_inc(local_t *l)
{
	__asm__ __volatile__(
		"incq %0"
		:"=m" (l->a.counter)
		:"m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	__asm__ __volatile__(
		"decq %0"
		:"=m" (l->a.counter)
		:"m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	__asm__ __volatile__(
		"addq %1,%0"
		:"=m" (l->a.counter)
		:"ir" (i), "m" (l->a.counter));
}

static inline void local_sub(long i, local_t *l)
{
	__asm__ __volatile__(
		"subq %1,%0"
		:"=m" (l->a.counter)
		:"ir" (i), "m" (l->a.counter));
}
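
/*
 * These single-instruction read-modify-write ops are atomic with
 * respect to the local CPU (including its interrupts) but carry no
 * lock prefix, so they must not be used across CPUs. A minimal sketch
 * of safe use from preemptible context (the counter is illustrative):
 *
 *	static DEFINE_PER_CPU(local_t, nr_events);
 *
 *	preempt_disable();
 *	local_inc(&__get_cpu_var(nr_events));
 *	preempt_enable();
 */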

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"subq %2,%0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"ir" (i), "m" (l->a.counter) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int local_dec_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"decq %0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"m" (l->a.counter) : "memory");
	return c != 0;
}
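
/*
 * Typical pattern (names are illustrative): drop a CPU-local reference
 * count and tear the object down on the final decrement:
 *
 *	if (local_dec_and_test(&obj->refs))
 *		free_object(obj);
 */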

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_inc_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"incq %0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"m" (l->a.counter) : "memory");
	return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"addq %2,%0; sets %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"ir" (i), "m" (l->a.counter) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static __inline__ long local_add_return(long i, local_t *l)
{
	long __i = i;
	__asm__ __volatile__(
		"xaddq %0, %1;"
		:"+r" (i), "+m" (l->a.counter)
		: : "memory");
	return i + __i;
}
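
/*
 * How the return value falls out: xaddq leaves the old counter value
 * in %0, so old + __i is the freshly written value. A sketch of use
 * as a cheap CPU-local sequence allocator (the variable name is
 * illustrative):
 *
 *	long seq = local_add_return(1, &next_seq);
 */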

static __inline__ long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i,l);
}

#define local_inc_return(l)  (local_add_return(1,l))
#define local_dec_return(l)  (local_sub_return(1,l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)			\
({							\
	long c, old;					\
	c = local_read(l);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = local_cmpxchg((l), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
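
/*
 * local_inc_not_zero() is the usual "take a reference only while the
 * object is still live" idiom (names are illustrative):
 *
 *	if (!local_inc_not_zero(&obj->refs))
 *		return NULL;
 */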

/* On x86-64 these are better than the atomic variants on SMP kernels
   because they don't use a lock prefix. */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i,l)	local_add((i),(l))
#define __local_sub(i,l)	local_sub((i),(l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations. Note they take
 * a variable, not an address.
 *
 * This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
({	local_t res__;			\
	preempt_disable();		\
	res__ = (l);			\
	preempt_enable();		\
	res__; })
#define cpu_local_wrap(l)		\
({	preempt_disable();		\
	l;				\
	preempt_enable(); })

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

#define __cpu_local_inc(l)	cpu_local_inc(l)
#define __cpu_local_dec(l)	cpu_local_dec(l)
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
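
/*
 * Per-cpu usage sketch (the counter name is illustrative). Note these
 * take the per-cpu variable itself, not its address:
 *
 *	static DEFINE_PER_CPU(local_t, nr_irqs_handled);
 *
 *	cpu_local_inc(nr_irqs_handled);
 */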

#endif /* _ARCH_X8664_LOCAL_H */