author		Lennert Buytenhek <buytenh@wantstofly.org>	2006-09-16 05:47:18 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-09-25 05:25:24 -0400
commit		8dd5c845bbc26c3517398abc3e5477b4b42e7176
tree		76f9a6d4aba323653a65a4354bf7fe65805a7808
parent		34148c6990d2f0107b53fe4ddf29b1ba30e613d3
[ARM] 3810/1: switch atomic helpers over to raw_local_irq_{save,restore}
Now that we have raw_* variants of local_irq_$FOO(), switch the atomic
helpers over to use those raw_* variants. This is necessary when using
lockdep on pre-ARMv6 hardware, as lockdep uses atomic_t counters in the
trace_hardirqs_off() path.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
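Why the raw variants matter here (an illustrative sketch, simplified from the lockdep-era headers, not the verbatim kernel code): with CONFIG_TRACE_IRQFLAGS enabled, the traced local_irq_save() expands to roughly the macro below, and trace_hardirqs_off() is lockdep's hook, which bumps atomic_t bookkeeping counters. On pre-ARMv6 those atomic ops are themselves implemented by disabling interrupts, so an atomic helper built on the traced variant would call back into itself; the raw_* variants disable interrupts without the hook and break the cycle.

/* Simplified sketch of the traced variant from <linux/irqflags.h>: */
#define local_irq_save(flags)					\
	do {							\
		raw_local_irq_save(flags);			\
		trace_hardirqs_off();	/* lockdep hook; updates atomic_t counters */ \
	} while (0)

/*
 * Recursion hazard on pre-ARMv6 if atomic_add_return() kept local_irq_save():
 *
 *   atomic_add_return()
 *     -> local_irq_save()
 *       -> trace_hardirqs_off()          (lockdep)
 *         -> atomic_t counter update     (IRQ-disable based on pre-ARMv6)
 *           -> local_irq_save()          -> ...
 *
 * raw_local_irq_save()/raw_local_irq_restore() skip the tracing hook.
 */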
-rw-r--r--	include/asm-arm/atomic.h	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 4b0ce3e7de9a..ea88aa6bfc78 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -128,10 +128,10 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	unsigned long flags;
 	int val;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	val = v->counter;
 	v->counter = val += i;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return val;
 }
@@ -141,10 +141,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	unsigned long flags;
 	int val;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	val = v->counter;
 	v->counter = val -= i;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return val;
 }
@@ -154,11 +154,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	int ret;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -167,9 +167,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	*addr &= ~mask;
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 
 #endif /* __LINUX_ARM_ARCH__ */