Diffstat (limited to 'include/asm-sh64/atomic.h')
-rw-r--r-- | include/asm-sh64/atomic.h | 158
1 file changed, 0 insertions(+), 158 deletions(-)
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
deleted file mode 100644
index 28f2ea9b567b..000000000000
--- a/include/asm-sh64/atomic.h
+++ /dev/null
@@ -1,158 +0,0 @@
-#ifndef __ASM_SH64_ATOMIC_H
-#define __ASM_SH64_ATOMIC_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * include/asm-sh64/atomic.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- *
- */
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- *
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) ((v)->counter = (i))
-
-#include <asm/system.h>
-
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v += i;
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v -= i;
-	local_irq_restore(flags);
-}
-
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
-	unsigned long temp, flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp += i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
-	unsigned long temp, flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp -= i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-
-	return temp;
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic_dec(v) atomic_sub(1,(v))
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (likely(ret == old))
-		v->counter = new;
-	local_irq_restore(flags);
-
-	return ret;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int ret;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	ret = v->counter;
-	if (ret != u)
-		v->counter += a;
-	local_irq_restore(flags);
-
-	return ret != u;
-}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v &= ~mask;
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v |= mask;
-	local_irq_restore(flags);
-}
-
-/* Atomic operations are already serializing on SH */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#include <asm-generic/atomic.h>
-#endif /* __ASM_SH64_ATOMIC_H */
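
Note on the removed implementation: every arithmetic primitive in the deleted header follows the same pattern, masking local interrupts with local_irq_save()/local_irq_restore() around a plain read-modify-write. That is sufficient only on a uniprocessor, where the sole concurrent writer is an interrupt handler on the same CPU. A minimal usage sketch of the API this header exported (not part of the patch; get_ref() and put_ref() are hypothetical names chosen for illustration):

#include <asm/atomic.h>

static atomic_t refcount = ATOMIC_INIT(1);	/* one initial reference */

/* get_ref() is a hypothetical helper, not from this header */
static void get_ref(void)
{
	atomic_inc(&refcount);
}

/* put_ref() returns true when the last reference is dropped,
 * i.e. when the caller may free the protected object */
static int put_ref(void)
{
	return atomic_dec_and_test(&refcount);
}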
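
The atomic_cmpxchg() primitive is the building block for read-modify-write operations the header does not provide directly; atomic_add_unless() above is one instance of that idiom. A sketch of the same retry-loop pattern, with the caveat that atomic_inc_below() is an invented example helper, not something this header or patch defines:

/* atomic_inc_below(): hypothetical helper built on atomic_cmpxchg().
 * Increments *v only if it is currently below limit; returns 1 on
 * success, 0 if the limit was already reached. */
static int atomic_inc_below(atomic_t *v, int limit)
{
	int old, new;

	for (;;) {
		old = atomic_read(v);
		if (old >= limit)
			return 0;
		new = old + 1;
		/* succeeds only if nothing changed v between the
		 * read above and the compare-and-swap */
		if (atomic_cmpxchg(v, old, new) == old)
			return 1;
	}
}

On a collision, atomic_cmpxchg() returns the value another path stored, the comparison with old fails, and the loop re-reads and retries.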