about summary refs log tree commit diff stats
path: root/include/asm-x86_64/local.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-x86_64/local.h
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-x86_64/local.h')
-rw-r--r--include/asm-x86_64/local.h73
1 files changed, 73 insertions, 0 deletions
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
new file mode 100644
index 000000000000..169c223a8452
--- /dev/null
+++ b/include/asm-x86_64/local.h
@@ -0,0 +1,73 @@
1#ifndef _ARCH_X8664_LOCAL_H
2#define _ARCH_X8664_LOCAL_H
3
4#include <linux/percpu.h>
5
6typedef struct
7{
8 volatile unsigned int counter;
9} local_t;
10
11#define LOCAL_INIT(i) { (i) }
12
13#define local_read(v) ((v)->counter)
14#define local_set(v,i) (((v)->counter) = (i))
15
16static __inline__ void local_inc(local_t *v)
17{
18 __asm__ __volatile__(
19 "incl %0"
20 :"=m" (v->counter)
21 :"m" (v->counter));
22}
23
24static __inline__ void local_dec(local_t *v)
25{
26 __asm__ __volatile__(
27 "decl %0"
28 :"=m" (v->counter)
29 :"m" (v->counter));
30}
31
32static __inline__ void local_add(unsigned long i, local_t *v)
33{
34 __asm__ __volatile__(
35 "addl %1,%0"
36 :"=m" (v->counter)
37 :"ir" (i), "m" (v->counter));
38}
39
40static __inline__ void local_sub(unsigned long i, local_t *v)
41{
42 __asm__ __volatile__(
43 "subl %1,%0"
44 :"=m" (v->counter)
45 :"ir" (i), "m" (v->counter));
46}
47
48/* On x86, these are no better than the atomic variants. */
49#define __local_inc(l) local_inc(l)
50#define __local_dec(l) local_dec(l)
51#define __local_add(i,l) local_add((i),(l))
52#define __local_sub(i,l) local_sub((i),(l))
53
54/* Use these for per-cpu local_t variables: on some archs they are
55 * much more efficient than these naive implementations. Note they take
56 * a variable, not an address.
57 *
58 * This could be done better if we moved the per cpu data directly
59 * after GS.
60 */
61#define cpu_local_read(v) local_read(&__get_cpu_var(v))
62#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
63#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
64#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
65#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
66#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
67
68#define __cpu_local_inc(v) cpu_local_inc(v)
69#define __cpu_local_dec(v) cpu_local_dec(v)
70#define __cpu_local_add(i, v) cpu_local_add((i), (v))
71#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
72
73#endif /* _ARCH_I386_LOCAL_H */