author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit		dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree		9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /lib
parent		d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent		2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
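The last item in the quoted message refers to the cmpxchg()-based fallback: once an architecture supplies cmpxchg(), every other read-modify-write operation can be generated as a retry loop around it. A minimal sketch of that idea, assuming the usual kernel atomic_t and cmpxchg() definitions (illustrative only, not the exact include/asm-generic/atomic.h code):

	/*
	 * Sketch: build atomic_add_return()/atomic_sub_return() from
	 * cmpxchg() alone.  Read the counter, compute the new value,
	 * and retry the compare-and-swap until no other CPU modified
	 * the counter in between.
	 */
	#define ATOMIC_OP_RETURN(op, c_op)					\
	static inline int atomic_##op##_return(int i, atomic_t *v)		\
	{									\
		int c, old;							\
										\
		c = v->counter;							\
		while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)		\
			c = old;						\
										\
		return c c_op i;						\
	}

	ATOMIC_OP_RETURN(add, +)	/* generates atomic_add_return() */
	ATOMIC_OP_RETURN(sub, -)	/* generates atomic_sub_return() */

Each architecture then only has to provide a working cmpxchg(); the generic layer derives add, sub and the rest from it.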
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'lib')
-rw-r--r--	lib/atomic64.c | 83
1 file changed, 36 insertions(+), 47 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
 }
 EXPORT_SYMBOL(atomic64_set);
 
-void atomic64_add(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter += a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-
-	raw_spin_lock_irqsave(lock, flags);
-	v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
-	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
-
-	raw_spin_lock_irqsave(lock, flags);
-	val = v->counter -= a;
-	raw_spin_unlock_irqrestore(lock, flags);
-	return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op)						\
+void atomic64_##op(long long a, atomic64_t *v)				\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	v->counter c_op a;						\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+}									\
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+	raw_spinlock_t *lock = lock_addr(v);				\
+	long long val;							\
+									\
+	raw_spin_lock_irqsave(lock, flags);				\
+	val = (v->counter c_op a);					\
+	raw_spin_unlock_irqrestore(lock, flags);			\
+	return val;							\
+}									\
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
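To see what the folding buys in lib/atomic64.c itself, ATOMIC64_OPS(add, +=) pastes "add" in for op and "+=" in for c_op, so the preprocessor regenerates roughly the functions the old code spelled out by hand (illustrative expansion, not captured compiler output):

	void atomic64_add(long long a, atomic64_t *v)
	{
		unsigned long flags;
		raw_spinlock_t *lock = lock_addr(v);

		raw_spin_lock_irqsave(lock, flags);
		v->counter += a;	/* c_op was += */
		raw_spin_unlock_irqrestore(lock, flags);
	}
	EXPORT_SYMBOL(atomic64_add);

	/* ...plus atomic64_add_return(), and the same pair again for sub. */

Adding another operation later only needs one more ATOMIC64_OPS() line (for example, a hypothetical ATOMIC64_OPS(and, &=) would generate atomic64_and() and atomic64_and_return()), which is what makes it easier to add new ops across architectures.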