author	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2007-05-08 03:34:44 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:20 -0400
commit	a075227948636e10aa2cc2d8725fbbab27681d4a (patch)
tree	6b459363916c4db4cc62a293f122f4c9172b1d6a /include/asm-i386
parent	f43f7b46eb101f50950cfcead0cb0b7a9c4f6823 (diff)
local_t: i386 extension
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-i386')
-rw-r--r--	include/asm-i386/local.h	205
-rw-r--r--	include/asm-i386/system.h	59
2 files changed, 236 insertions(+), 28 deletions(-)
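For context, here is a minimal usage sketch of the local_t interface this patch extends. It is illustrative only: the per-CPU counter name sample_events and the summing loop are assumptions made for the example, not part of the patch.

#include <linux/percpu.h>
#include <asm/local.h>

/* One counter per CPU; LOCAL_INIT() is defined in asm/local.h. */
static DEFINE_PER_CPU(local_t, sample_events) = LOCAL_INIT(0);

static void sample_hit(void)
{
	/* Update only this CPU's copy: no lock prefix, no cache-line bouncing. */
	cpu_local_inc(sample_events);
}

static long sample_total(void)
{
	long sum = 0;
	int cpu;

	/* Fold all per-CPU counters; a reader may see a slightly stale sum. */
	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(sample_events, cpu));
	return sum;
}

Writers pay a single non-locked instruction on the fast path; readers tolerate a slightly stale total, which is the usual trade-off for local_t counters.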
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 12060e22f7e2..e13d3e98823f 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -2,47 +2,198 @@
 #define _ARCH_I386_LOCAL_H
 
 #include <linux/percpu.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
 
 typedef struct
 {
-	volatile long counter;
+	atomic_long_t a;
 } local_t;
 
-#define LOCAL_INIT(i)	{ (i) }
+#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }
 
-#define local_read(v)	((v)->counter)
-#define local_set(v,i)	(((v)->counter) = (i))
+#define local_read(l)	atomic_long_read(&(l)->a)
+#define local_set(l,i)	atomic_long_set(&(l)->a, (i))
 
-static __inline__ void local_inc(local_t *v)
+static __inline__ void local_inc(local_t *l)
 {
 	__asm__ __volatile__(
 		"incl %0"
-		:"+m" (v->counter));
+		:"+m" (l->a.counter));
 }
 
-static __inline__ void local_dec(local_t *v)
+static __inline__ void local_dec(local_t *l)
 {
 	__asm__ __volatile__(
 		"decl %0"
-		:"+m" (v->counter));
+		:"+m" (l->a.counter));
 }
 
-static __inline__ void local_add(long i, local_t *v)
+static __inline__ void local_add(long i, local_t *l)
 {
 	__asm__ __volatile__(
 		"addl %1,%0"
-		:"+m" (v->counter)
+		:"+m" (l->a.counter)
 		:"ir" (i));
 }
 
-static __inline__ void local_sub(long i, local_t *v)
+static __inline__ void local_sub(long i, local_t *l)
 {
 	__asm__ __volatile__(
 		"subl %1,%0"
-		:"+m" (v->counter)
+		:"+m" (l->a.counter)
 		:"ir" (i));
 }
 
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_sub_and_test(long i, local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"subl %2,%0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		:"ir" (i) : "memory");
+	return c;
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int local_dec_and_test(local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"decl %0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		: : "memory");
+	return c != 0;
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_inc_and_test(local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"incl %0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		: : "memory");
+	return c != 0;
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int local_add_negative(long i, local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"addl %2,%0; sets %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		:"ir" (i) : "memory");
+	return c;
+}
+
+/**
+ * local_add_return - add and return
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static __inline__ long local_add_return(long i, local_t *l)
+{
+	long __i;
+#ifdef CONFIG_M386
+	unsigned long flags;
+	if(unlikely(boot_cpu_data.x86==3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor */
+	__i = i;
+	__asm__ __volatile__(
+		"xaddl %0, %1;"
+		:"+r" (i), "+m" (l->a.counter)
+		: : "memory");
+	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_save(flags);
+	__i = local_read(l);
+	local_set(l, i + __i);
+	local_irq_restore(flags);
+	return i + __i;
+#endif
+}
+
+static __inline__ long local_sub_return(long i, local_t *l)
+{
+	return local_add_return(-i,l);
+}
+
+#define local_inc_return(l)  (local_add_return(1,l))
+#define local_dec_return(l)  (local_sub_return(1,l))
+
+#define local_cmpxchg(l, o, n) \
+	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+/* Always has a lock prefix */
+#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u)				\
+({								\
+	long c, old;						\
+	c = local_read(l);					\
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = local_cmpxchg((l), c, c + (a));		\
+		if (likely(old == c))				\
+			break;					\
+		c = old;					\
+	}							\
+	c != (u);						\
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
 /* On x86, these are no better than the atomic variants. */
 #define __local_inc(l)	local_inc(l)
 #define __local_dec(l)	local_dec(l)
@@ -56,27 +207,27 @@ static __inline__ void local_sub(long i, local_t *v)
 
 /* Need to disable preemption for the cpu local counters otherwise we could
    still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(v)		\
+#define cpu_local_wrap_v(l)		\
 	({ local_t res__;		\
 	   preempt_disable(); 		\
-	   res__ = (v);			\
+	   res__ = (l);			\
 	   preempt_enable();		\
 	   res__; })
-#define cpu_local_wrap(v)		\
+#define cpu_local_wrap(l)		\
 	({ preempt_disable();		\
-	   v;				\
+	   l;				\
 	   preempt_enable(); })		\
 
-#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
-#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
-#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
-#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
-#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
-#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
+#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
 
-#define __cpu_local_inc(v)	cpu_local_inc(v)
-#define __cpu_local_dec(v)	cpu_local_dec(v)
-#define __cpu_local_add(i, v)	cpu_local_add((i), (v))
-#define __cpu_local_sub(i, v)	cpu_local_sub((i), (v))
+#define __cpu_local_inc(l)	cpu_local_inc(l)
+#define __cpu_local_dec(l)	cpu_local_dec(l)
+#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
 
 #endif /* _ARCH_I386_LOCAL_H */
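A note on the new local_cmpxchg(): besides backing local_add_unless() above, it lets callers compose their own interrupt-safe read-modify-write helpers with the same retry loop. The sketch below is hypothetical, not something defined by this patch; local_clamp_add() and its saturation policy are invented purely for illustration.

/* Add i to *l but never let the counter exceed max (hypothetical helper). */
static inline long local_clamp_add(local_t *l, long i, long max)
{
	long old, new, cur = local_read(l);

	for (;;) {
		new = cur + i;
		if (new > max)
			new = max;
		/* local_cmpxchg() returns the previous value of the counter. */
		old = local_cmpxchg(l, cur, new);
		if (likely(old == cur))
			return new;
		/* An interrupt on this CPU updated the counter; retry. */
		cur = old;
	}
}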
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index c3a58c08c495..e0454afb950f 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -305,6 +305,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define sync_cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -367,6 +370,33 @@ static inline unsigned long __sync_cmpxchg(volatile void *ptr,
 	return old;
 }
 
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+			unsigned long old, unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to
@@ -403,6 +433,17 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 				(unsigned long)(n), sizeof(*(ptr)));	\
 	__ret;								\
 })
+#define cmpxchg_local(ptr,o,n)						\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 3))				\
+		__ret = __cmpxchg_local((ptr), (unsigned long)(o),	\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	else								\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -421,10 +462,26 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 	return prev;
 }
 
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+			unsigned long long old, unsigned long long new)
+{
+	unsigned long long prev;
+	__asm__ __volatile__("cmpxchg8b %3"
+			     : "=A"(prev)
+			     : "b"((unsigned long)new),
+			       "c"((unsigned long)(new >> 32)),
+			       "m"(*__xg(ptr)),
+			       "0"(old)
+			     : "memory");
+	return prev;
+}
+
 #define cmpxchg64(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
 					(unsigned long long)(n)))
-
+#define cmpxchg64_local(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
+					(unsigned long long)(n)))
 #endif
 
 /*
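Finally, a usage sketch for the cmpxchg_local() primitive added to system.h. It is only atomic with respect to code running on the same CPU (for example interrupt handlers), which is exactly why it can drop the lock prefix. Everything below is an assumption made for illustration: the per-CPU variable budget, the helper name, and the requirement that the caller already runs with preemption disabled.

#include <linux/percpu.h>
#include <asm/system.h>

static DEFINE_PER_CPU(unsigned long, budget);

/* Caller is assumed to run with preemption disabled (e.g. under get_cpu()). */
static int try_consume_budget(unsigned long amount)
{
	unsigned long *b = &__get_cpu_var(budget);
	unsigned long old, cur = *b;

	while (cur >= amount) {
		/* No lock prefix: only interrupts on this CPU can race with us. */
		old = cmpxchg_local(b, cur, cur - amount);
		if (old == cur)
			return 1;	/* reservation succeeded */
		cur = old;		/* raced with an interrupt; retry */
	}
	return 0;			/* not enough budget left on this CPU */
}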