diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2009-12-02 14:01:25 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500 |
commit | 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch) | |
tree | e371d17bd73d64332349debbf45962ec67e7269d /include/linux/spinlock.h | |
parent | edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 (diff) |
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r-- | include/linux/spinlock.h | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 5ef7a4c060b5..de3a022489c6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -14,7 +14,7 @@ | |||
14 | * linux/spinlock_types.h: | 14 | * linux/spinlock_types.h: |
15 | * defines the generic type and initializers | 15 | * defines the generic type and initializers |
16 | * | 16 | * |
17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | 17 | * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel |
18 | * implementations, mostly inline assembly code | 18 | * implementations, mostly inline assembly code |
19 | * | 19 | * |
20 | * (also included on UP-debug builds:) | 20 | * (also included on UP-debug builds:) |
@@ -34,7 +34,7 @@ | |||
34 | * defines the generic type and initializers | 34 | * defines the generic type and initializers |
35 | * | 35 | * |
36 | * linux/spinlock_up.h: | 36 | * linux/spinlock_up.h: |
37 | * contains the __raw_spin_*()/etc. version of UP | 37 | * contains the arch_spin_*()/etc. version of UP |
38 | * builds. (which are NOPs on non-debug, non-preempt | 38 | * builds. (which are NOPs on non-debug, non-preempt |
39 | * builds) | 39 | * builds) |
40 | * | 40 | * |
@@ -103,17 +103,17 @@ do { \ | |||
103 | do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) | 103 | do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | 106 | #define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) |
107 | 107 | ||
108 | #ifdef CONFIG_GENERIC_LOCKBREAK | 108 | #ifdef CONFIG_GENERIC_LOCKBREAK |
109 | #define spin_is_contended(lock) ((lock)->break_lock) | 109 | #define spin_is_contended(lock) ((lock)->break_lock) |
110 | #else | 110 | #else |
111 | 111 | ||
112 | #ifdef __raw_spin_is_contended | 112 | #ifdef arch_spin_is_contended |
113 | #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) | 113 | #define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) |
114 | #else | 114 | #else |
115 | #define spin_is_contended(lock) (((void)(lock), 0)) | 115 | #define spin_is_contended(lock) (((void)(lock), 0)) |
116 | #endif /*__raw_spin_is_contended*/ | 116 | #endif /*arch_spin_is_contended*/ |
117 | #endif | 117 | #endif |
118 | 118 | ||
119 | /* The lock does not imply full memory barrier. */ | 119 | /* The lock does not imply full memory barrier. */ |
@@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } | |||
125 | * spin_unlock_wait - wait until the spinlock gets unlocked | 125 | * spin_unlock_wait - wait until the spinlock gets unlocked |
126 | * @lock: the spinlock in question. | 126 | * @lock: the spinlock in question. |
127 | */ | 127 | */ |
128 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | 128 | #define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) |
129 | 129 | ||
130 | #ifdef CONFIG_DEBUG_SPINLOCK | 130 | #ifdef CONFIG_DEBUG_SPINLOCK |
131 | extern void _raw_spin_lock(spinlock_t *lock); | 131 | extern void _raw_spin_lock(spinlock_t *lock); |
@@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } | |||
133 | extern int _raw_spin_trylock(spinlock_t *lock); | 133 | extern int _raw_spin_trylock(spinlock_t *lock); |
134 | extern void _raw_spin_unlock(spinlock_t *lock); | 134 | extern void _raw_spin_unlock(spinlock_t *lock); |
135 | #else | 135 | #else |
136 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | 136 | # define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock) |
137 | # define _raw_spin_lock_flags(lock, flags) \ | 137 | # define _raw_spin_lock_flags(lock, flags) \ |
138 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) | 138 | arch_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
139 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) | 139 | # define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock) |
140 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) | 140 | # define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock) |
141 | #endif | 141 | #endif |
142 | 142 | ||
143 | /* | 143 | /* |