path: root/arch/hexagon
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/hexagon
parent	d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent	2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
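[Editor's note: a minimal sketch of the "only require cmpxchg()" approach described above, assuming the architecture supplies atomic_read() and atomic_cmpxchg(); it mirrors the shape of the asm-generic fallback rather than quoting the code that landed. Every read-modify-write op becomes a compare-and-swap retry loop:]

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = atomic_read(v);						\
	/* retry until no other CPU modified v between read and CAS */	\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)		\
		c = old;						\
	return c c_op i;						\
}

ATOMIC_OP_RETURN(add, +)	/* generates atomic_add_return() */
ATOMIC_OP_RETURN(sub, -)	/* generates atomic_sub_return() */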
Diffstat (limited to 'arch/hexagon')
-rw-r--r--	arch/hexagon/include/asm/atomic.h	68
1 file changed, 37 insertions(+), 31 deletions(-)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b11bff5..93d07025f183 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return __oldval;
 }
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int output;
-
-	__asm__ __volatile__ (
-		"1:	%0 = memw_locked(%1);\n"
-		"	%0 = add(%0,%2);\n"
-		"	memw_locked(%1,P3)=%0;\n"
-		"	if !P3 jump 1b;\n"
-		: "=&r" (output)
-		: "r" (&v->counter), "r" (i)
-		: "memory", "p3"
-	);
-	return output;
-
-}
-
-#define atomic_add(i, v) atomic_add_return(i, (v))
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int output;
-	__asm__ __volatile__ (
-		"1:	%0 = memw_locked(%1);\n"
-		"	%0 = sub(%0,%2);\n"
-		"	memw_locked(%1,P3)=%0\n"
-		"	if !P3 jump 1b;\n"
-		: "=&r" (output)
-		: "r" (&v->counter), "r" (i)
-		: "memory", "p3"
-	);
-	return output;
-}
-
-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int output;							\
+									\
+	__asm__ __volatile__ (						\
+		"1:	%0 = memw_locked(%1);\n"			\
+		"	%0 = "#op "(%0,%2);\n"				\
+		"	memw_locked(%1,P3)=%0;\n"			\
+		"	if !P3 jump 1b;\n"				\
+		: "=&r" (output)					\
+		: "r" (&v->counter), "r" (i)				\
+		: "memory", "p3"					\
+	);								\
+}									\
+
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int output;							\
+									\
+	__asm__ __volatile__ (						\
+		"1:	%0 = memw_locked(%1);\n"			\
+		"	%0 = "#op "(%0,%2);\n"				\
+		"	memw_locked(%1,P3)=%0;\n"			\
+		"	if !P3 jump 1b;\n"				\
+		: "=&r" (output)					\
+		: "r" (&v->counter), "r" (i)				\
+		: "memory", "p3"					\
+	);								\
+	return output;							\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * __atomic_add_unless - add unless the number is a given value
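[Editor's note: for reference, ATOMIC_OPS(add) above expands, roughly and modulo whitespace, into an atomic_add() and an atomic_add_return() with "add" stamped into the instruction by the #op stringification. The memw_locked pair is Hexagon's load-locked/store-conditional: the store sets predicate P3 only if no other thread wrote the word since the locked load, and the loop retries otherwise.]

/* Approximate expansion of ATOMIC_OP(add) -- illustrative only */
static inline void atomic_add(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"	/* load-locked read of v->counter */
		"	%0 = add(%0,%2);\n"		/* #op pasted "add" here */
		"	memw_locked(%1,P3)=%0;\n"	/* store succeeds only if unchanged */
		"	if !P3 jump 1b;\n"		/* otherwise retry the whole RMW */
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
}

ATOMIC_OP_RETURN(add) expands the same way, except the function returns int and ends with "return output;".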