Diffstat (limited to 'arch/tile/include/asm/atomic_64.h')
-rw-r--r--  arch/tile/include/asm/atomic_64.h  156
1 file changed, 156 insertions, 0 deletions
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 000000000000..1c1e60d8ccb6
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,156 @@
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * Linux requires memory barriers before and after any routine that
 * updates memory and returns a value, so the smp_mb() calls throughout
 * this file provide those semantics.
 */

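/*
 * TILE-Gx compare-and-exchange takes its "expected" value from the
 * SPR_CMPEXCH_VALUE special-purpose register: we write the old value o
 * there, then __insn_cmpexch4() atomically compares the 4-byte word at
 * &v->counter against it, stores n on a match, and returns whatever the
 * memory location held beforehand.
 */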
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int val;
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	smp_mb();  /* barrier for proper semantics */
	val = __insn_cmpexch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

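/*
 * atomic_add() returns nothing, so per the barrier rule above it needs
 * no smp_mb(): the fetchadd is simply issued, and the caller observes
 * no result that other ordering could depend on.
 */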
static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

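/*
 * __insn_fetchadd4() returns the value the word held *before* the add,
 * so "+ i" reconstructs the post-add value.  Because that addition has
 * a data dependency on the memory result, the compiler-only barrier()
 * suffices in place of a trailing smp_mb().
 */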
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

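/*
 * Classic cmpxchg retry loop: guess the current value, try to replace
 * it with guess + a, and retry with the value cmpxchg actually observed
 * until either the swap succeeds (cmpxchg returns our guess) or the
 * counter is found to equal u.
 */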
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v, i)	((v)->counter = (i))

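/*
 * The 64-bit variants mirror the 32-bit ones above, using the full-word
 * forms of the same instructions (__insn_cmpexch, __insn_exch,
 * __insn_fetchadd) against a 64-bit counter.
 */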
static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	val = __insn_cmpexch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;  /* must be long, not int: an int would truncate the 64-bit result */
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic64_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

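/*
 * Illustrative only, not part of this header: the inc_not_zero /
 * dec_and_test pair is the usual refcounting idiom, e.g.
 *
 *	if (atomic64_inc_not_zero(&obj->refs)) {
 *		use(obj);			// took a reference
 *		if (atomic64_dec_and_test(&obj->refs))
 *			free_obj(obj);		// we dropped the last one
 *	}
 *
 * where obj, use(), and free_obj() are hypothetical.
 */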
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */