Diffstat (limited to 'arch/alpha/include/asm/atomic.h')
-rw-r--r--  arch/alpha/include/asm/atomic.h  13
1 file changed, 13 insertions, 0 deletions
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
  * than regular operations.
  */
 
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
 
 #define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
+        smp_read_barrier_depends(); \
         return result; \
 }
 
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
+        smp_read_barrier_depends(); \
         return result; \
 }
 
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
+        smp_read_barrier_depends(); \
         return result; \
 }
 
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
+        smp_read_barrier_depends(); \
         return result; \
 }
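For readers following the reasoning in the new comment block: on Alpha, smp_read_barrier_depends() expands to a full "mb", so once the _relaxed variants end with it, acquire ordering comes for free and the fully ordered variants only need the leading barrier that the _release path already supplies. Below is a rough sketch of how the generic wrappers in include/linux/atomic.h of this era are assumed to pick up the two overrides; the fallback macros are simplified, illustrative only, and not part of this patch, and exact definitions vary by kernel version.

/*
 * Illustrative sketch, simplified from the generic fallbacks assumed in
 * include/linux/atomic.h; not part of the Alpha patch above.
 */
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)                        \
({                                                              \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);  \
        smp_mb__after_atomic();                                 \
        __ret;                                                  \
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)                        \
({                                                              \
        smp_mb__before_atomic();                                \
        op##_relaxed(args);                                     \
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)                          \
({                                                              \
        typeof(op##_relaxed(args)) __ret;                       \
        smp_mb__before_atomic();                                \
        __ret = op##_relaxed(args);                             \
        smp_mb__after_atomic();                                 \
        __ret;                                                  \
})
#endif

/* The named variants are then built from the relaxed op: */
#define atomic_add_return_acquire(...)                          \
        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#define atomic_add_return(...)                                  \
        __atomic_op_fence(atomic_add_return, __VA_ARGS__)

/*
 * With the Alpha overrides from this patch:
 *
 *   atomic_add_return_acquire(i, v)
 *     -> __atomic_op_acquire(atomic_add_return, i, v)
 *     -> atomic_add_return_relaxed(i, v)
 *        The relaxed op now ends in smp_read_barrier_depends() (a full
 *        "mb" on Alpha), so no extra smp_mb__after_atomic() is emitted.
 *
 *   atomic_add_return(i, v)        (the fully ordered variant)
 *     -> __atomic_op_fence(...) == __atomic_op_release(...)
 *     -> smp_mb__before_atomic(); atomic_add_return_relaxed(i, v);
 *        The trailing barrier comes from the smp_read_barrier_depends()
 *        inside the relaxed op, avoiding back-to-back "mb" instructions.
 */

This is also why only the _acquire and _fence builders are overridden: the _release variants still need their leading smp_mb(), which the generic __atomic_op_release() definition already provides.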
