diff options
| author | Yong Zhang <yong.zhang0@gmail.com> | 2011-09-14 03:49:24 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2011-09-14 07:14:11 -0400 |
| commit | cb475de3d12df6912bc95048202ae8c280d4cad5 (patch) | |
| tree | 31c799e706534bb2e3387a544ff4784e74d63967 /lib | |
| parent | f59ca05871a055a73f8e626f2d868f0da248e22c (diff) | |
lib: atomic64: Change the type of local lock to raw_spinlock_t
There are still some leftovers of commit f59ca058
("locking, lib/atomic64: Annotate atomic64_lock::lock as raw"):
several declarations still use spinlock_t where raw_spinlock_t is required.
[ tglx: Seems I picked the wrong version of that patch :( ]
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Shan Hai <haishan.bai@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20110914074924.GA16096@zhy
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/atomic64.c | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/lib/atomic64.c b/lib/atomic64.c index 82b33134e141..3975470caf4f 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c | |||
| @@ -33,7 +33,7 @@ static union { | |||
| 33 | char pad[L1_CACHE_BYTES]; | 33 | char pad[L1_CACHE_BYTES]; |
| 34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; | 34 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; |
| 35 | 35 | ||
| 36 | static inline spinlock_t *lock_addr(const atomic64_t *v) | 36 | static inline raw_spinlock_t *lock_addr(const atomic64_t *v) |
| 37 | { | 37 | { |
| 38 | unsigned long addr = (unsigned long) v; | 38 | unsigned long addr = (unsigned long) v; |
| 39 | 39 | ||
| @@ -45,7 +45,7 @@ static inline spinlock_t *lock_addr(const atomic64_t *v) | |||
| 45 | long long atomic64_read(const atomic64_t *v) | 45 | long long atomic64_read(const atomic64_t *v) |
| 46 | { | 46 | { |
| 47 | unsigned long flags; | 47 | unsigned long flags; |
| 48 | spinlock_t *lock = lock_addr(v); | 48 | raw_spinlock_t *lock = lock_addr(v); |
| 49 | long long val; | 49 | long long val; |
| 50 | 50 | ||
| 51 | raw_spin_lock_irqsave(lock, flags); | 51 | raw_spin_lock_irqsave(lock, flags); |
| @@ -58,7 +58,7 @@ EXPORT_SYMBOL(atomic64_read); | |||
| 58 | void atomic64_set(atomic64_t *v, long long i) | 58 | void atomic64_set(atomic64_t *v, long long i) |
| 59 | { | 59 | { |
| 60 | unsigned long flags; | 60 | unsigned long flags; |
| 61 | spinlock_t *lock = lock_addr(v); | 61 | raw_spinlock_t *lock = lock_addr(v); |
| 62 | 62 | ||
| 63 | raw_spin_lock_irqsave(lock, flags); | 63 | raw_spin_lock_irqsave(lock, flags); |
| 64 | v->counter = i; | 64 | v->counter = i; |
| @@ -69,7 +69,7 @@ EXPORT_SYMBOL(atomic64_set); | |||
| 69 | void atomic64_add(long long a, atomic64_t *v) | 69 | void atomic64_add(long long a, atomic64_t *v) |
| 70 | { | 70 | { |
| 71 | unsigned long flags; | 71 | unsigned long flags; |
| 72 | spinlock_t *lock = lock_addr(v); | 72 | raw_spinlock_t *lock = lock_addr(v); |
| 73 | 73 | ||
| 74 | raw_spin_lock_irqsave(lock, flags); | 74 | raw_spin_lock_irqsave(lock, flags); |
| 75 | v->counter += a; | 75 | v->counter += a; |
| @@ -80,7 +80,7 @@ EXPORT_SYMBOL(atomic64_add); | |||
| 80 | long long atomic64_add_return(long long a, atomic64_t *v) | 80 | long long atomic64_add_return(long long a, atomic64_t *v) |
| 81 | { | 81 | { |
| 82 | unsigned long flags; | 82 | unsigned long flags; |
| 83 | spinlock_t *lock = lock_addr(v); | 83 | raw_spinlock_t *lock = lock_addr(v); |
| 84 | long long val; | 84 | long long val; |
| 85 | 85 | ||
| 86 | raw_spin_lock_irqsave(lock, flags); | 86 | raw_spin_lock_irqsave(lock, flags); |
| @@ -93,7 +93,7 @@ EXPORT_SYMBOL(atomic64_add_return); | |||
| 93 | void atomic64_sub(long long a, atomic64_t *v) | 93 | void atomic64_sub(long long a, atomic64_t *v) |
| 94 | { | 94 | { |
| 95 | unsigned long flags; | 95 | unsigned long flags; |
| 96 | spinlock_t *lock = lock_addr(v); | 96 | raw_spinlock_t *lock = lock_addr(v); |
| 97 | 97 | ||
| 98 | raw_spin_lock_irqsave(lock, flags); | 98 | raw_spin_lock_irqsave(lock, flags); |
| 99 | v->counter -= a; | 99 | v->counter -= a; |
| @@ -104,7 +104,7 @@ EXPORT_SYMBOL(atomic64_sub); | |||
| 104 | long long atomic64_sub_return(long long a, atomic64_t *v) | 104 | long long atomic64_sub_return(long long a, atomic64_t *v) |
| 105 | { | 105 | { |
| 106 | unsigned long flags; | 106 | unsigned long flags; |
| 107 | spinlock_t *lock = lock_addr(v); | 107 | raw_spinlock_t *lock = lock_addr(v); |
| 108 | long long val; | 108 | long long val; |
| 109 | 109 | ||
| 110 | raw_spin_lock_irqsave(lock, flags); | 110 | raw_spin_lock_irqsave(lock, flags); |
| @@ -117,7 +117,7 @@ EXPORT_SYMBOL(atomic64_sub_return); | |||
| 117 | long long atomic64_dec_if_positive(atomic64_t *v) | 117 | long long atomic64_dec_if_positive(atomic64_t *v) |
| 118 | { | 118 | { |
| 119 | unsigned long flags; | 119 | unsigned long flags; |
| 120 | spinlock_t *lock = lock_addr(v); | 120 | raw_spinlock_t *lock = lock_addr(v); |
| 121 | long long val; | 121 | long long val; |
| 122 | 122 | ||
| 123 | raw_spin_lock_irqsave(lock, flags); | 123 | raw_spin_lock_irqsave(lock, flags); |
| @@ -132,7 +132,7 @@ EXPORT_SYMBOL(atomic64_dec_if_positive); | |||
| 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | 132 | long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) |
| 133 | { | 133 | { |
| 134 | unsigned long flags; | 134 | unsigned long flags; |
| 135 | spinlock_t *lock = lock_addr(v); | 135 | raw_spinlock_t *lock = lock_addr(v); |
| 136 | long long val; | 136 | long long val; |
| 137 | 137 | ||
| 138 | raw_spin_lock_irqsave(lock, flags); | 138 | raw_spin_lock_irqsave(lock, flags); |
| @@ -147,7 +147,7 @@ EXPORT_SYMBOL(atomic64_cmpxchg); | |||
| 147 | long long atomic64_xchg(atomic64_t *v, long long new) | 147 | long long atomic64_xchg(atomic64_t *v, long long new) |
| 148 | { | 148 | { |
| 149 | unsigned long flags; | 149 | unsigned long flags; |
| 150 | spinlock_t *lock = lock_addr(v); | 150 | raw_spinlock_t *lock = lock_addr(v); |
| 151 | long long val; | 151 | long long val; |
| 152 | 152 | ||
| 153 | raw_spin_lock_irqsave(lock, flags); | 153 | raw_spin_lock_irqsave(lock, flags); |
| @@ -161,7 +161,7 @@ EXPORT_SYMBOL(atomic64_xchg); | |||
| 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) | 161 | int atomic64_add_unless(atomic64_t *v, long long a, long long u) |
| 162 | { | 162 | { |
| 163 | unsigned long flags; | 163 | unsigned long flags; |
| 164 | spinlock_t *lock = lock_addr(v); | 164 | raw_spinlock_t *lock = lock_addr(v); |
| 165 | int ret = 0; | 165 | int ret = 0; |
| 166 | 166 | ||
| 167 | raw_spin_lock_irqsave(lock, flags); | 167 | raw_spin_lock_irqsave(lock, flags); |
