diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 09:48:00 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 09:48:00 -0400 |
commit | dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch) | |
tree | 9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/x86/include | |
parent | d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff) | |
parent | 2291059c852706c6f5ffb400366042b7625066cd (diff) |
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/x86/include')
-rw-r--r-- | arch/x86/include/asm/atomic.h | 17 | ||||
-rw-r--r-- | arch/x86/include/asm/atomic64_64.h | 2 |
2 files changed, 2 insertions, 17 deletions
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 6dd1c7dd0473..5e5cd123fdfb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
24 | */ | 24 | */ |
25 | static inline int atomic_read(const atomic_t *v) | 25 | static inline int atomic_read(const atomic_t *v) |
26 | { | 26 | { |
27 | return (*(volatile int *)&(v)->counter); | 27 | return ACCESS_ONCE((v)->counter); |
28 | } | 28 | } |
29 | 29 | ||
30 | /** | 30 | /** |
@@ -219,21 +219,6 @@ static inline short int atomic_inc_short(short int *v)
219 | return *v; | 219 | return *v; |
220 | } | 220 | } |
221 | 221 | ||
222 | #ifdef CONFIG_X86_64 | ||
223 | /** | ||
224 | * atomic_or_long - OR of two long integers | ||
225 | * @v1: pointer to type unsigned long | ||
226 | * @v2: pointer to type unsigned long | ||
227 | * | ||
228 | * Atomically ORs @v1 and @v2 | ||
229 | * Returns the result of the OR | ||
230 | */ | ||
231 | static inline void atomic_or_long(unsigned long *v1, unsigned long v2) | ||
232 | { | ||
233 | asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2)); | ||
234 | } | ||
235 | #endif | ||
236 | |||
237 | /* These are x86-specific, used by some header files */ | 222 | /* These are x86-specific, used by some header files */ |
238 | #define atomic_clear_mask(mask, addr) \ | 223 | #define atomic_clear_mask(mask, addr) \ |
239 | asm volatile(LOCK_PREFIX "andl %0,%1" \ | 224 | asm volatile(LOCK_PREFIX "andl %0,%1" \ |
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 46e9052bbd28..f8d273e18516 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
18 | */ | 18 | */ |
19 | static inline long atomic64_read(const atomic64_t *v) | 19 | static inline long atomic64_read(const atomic64_t *v) |
20 | { | 20 | { |
21 | return (*(volatile long *)&(v)->counter); | 21 | return ACCESS_ONCE((v)->counter); |
22 | } | 22 | } |
23 | 23 | ||
24 | /** | 24 | /** |