author    | Thomas Gleixner <tglx@linutronix.de> | 2009-12-02 14:01:25 -0500
committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500
commit    | 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch)
tree      | e371d17bd73d64332349debbf45962ec67e7269d /include/linux
parent    | edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 (diff)
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
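
The conversion is mechanical: every arch-provided __raw_spin_*() primitive becomes arch_spin_*(), while the kernel-internal _raw_spin_*() wrappers keep their names. A minimal sketch of the before/after calling pattern (hypothetical wrapper names, not code from this patch):

/* Before this patch: generic code reached the arch primitive as */
static inline void wrapper_old(spinlock_t *lock)
{
	__raw_spin_lock(&lock->raw_lock);	/* old arch-level name */
}

/* After this patch: same call site, arch_ prefix */
static inline void wrapper_new(spinlock_t *lock)
{
	arch_spin_lock(&lock->raw_lock);	/* new arch-level name */
}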
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/spinlock.h    | 22
-rw-r--r-- | include/linux/spinlock_up.h | 26
2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5ef7a4c060b5..de3a022489c6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -14,7 +14,7 @@
  *  linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
- *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
+ *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
  *                        implementations, mostly inline assembly code
  *
  *   (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
  *                        defines the generic type and initializers
  *
  *  linux/spinlock_up.h:
- *                        contains the __raw_spin_*()/etc. version of UP
+ *                        contains the arch_spin_*()/etc. version of UP
  *                        builds. (which are NOPs on non-debug, non-preempt
  *                        builds)
  *
@@ -103,17 +103,17 @@ do { \
 	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+#define spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define spin_is_contended(lock) ((lock)->break_lock)
 #else
 
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define spin_is_contended(lock)	(((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#endif /*arch_spin_is_contended*/
 #endif
 
 /* The lock does not imply full memory barrier. */
@@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+#define spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 extern void _raw_spin_lock(spinlock_t *lock);
@@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock)		arch_spin_lock(&(lock)->raw_lock)
 # define _raw_spin_lock_flags(lock, flags) \
-	__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)	__raw_spin_unlock(&(lock)->raw_lock)
+	arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock)	arch_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock)	arch_spin_unlock(&(lock)->raw_lock)
 #endif
 
 /*
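
For orientation, the locking layers on a non-debug build now stack as sketched below; demo() and demo_lock are hypothetical, but DEFINE_SPINLOCK(), spin_lock() and spin_unlock() are the real generic API:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical example lock */

static void demo(void)
{
	spin_lock(&demo_lock);
	/* spin_lock() expands (roughly) to _raw_spin_lock(&demo_lock),
	 * which without CONFIG_DEBUG_SPINLOCK now maps to
	 * arch_spin_lock(&demo_lock.raw_lock) -- the renamed primitive. */
	spin_unlock(&demo_lock);
}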
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 8ee2ac1bf636..1d3bcc3cf7c6 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x)	((x)->slock == 0)
+#define arch_spin_is_locked(x)		((x)->slock == 0)
 
-static inline void __raw_spin_lock(arch_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 	return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	lock->slock = 1;
 }
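
The UP-debug arch_spin_trylock() above keeps just enough state to catch recursive locking on a single CPU: slock is nonzero while the lock is free and 0 while it is held, so trylock reports success only if the lock was free beforehand. A freestanding userspace analogue of those semantics (hypothetical up_* names; no atomics needed, mirroring the UP assumption that nothing runs concurrently):

#include <assert.h>

struct up_spinlock { volatile char slock; };	/* 1 = free, 0 = held */

static int up_trylock(struct up_spinlock *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;	/* take the lock unconditionally */
	return oldval > 0;	/* succeeded only if it was free */
}

static void up_unlock(struct up_spinlock *lock)
{
	lock->slock = 1;
}

int main(void)
{
	struct up_spinlock l = { .slock = 1 };

	assert(up_trylock(&l));		/* first acquisition succeeds */
	assert(!up_trylock(&l));	/* second fails: recursion caught */
	up_unlock(&l);
	return 0;
}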
@@ -57,20 +57,20 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 #define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+#define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock)	do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
-#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+#define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
 #define __raw_read_can_lock(lock)	(((void)(lock), 1))
 #define __raw_write_can_lock(lock)	(((void)(lock), 1))
 
-#define __raw_spin_unlock_wait(lock) \
-		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+		do { cpu_relax(); } while (arch_spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_UP_H */
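
Note that the rwlock helpers (__raw_read_can_lock(), __raw_write_can_lock()) are left with the old prefix in this hunk; only the spinlock namespace is converted by this patch. The renamed arch_spin_unlock_wait() is a plain polling loop; a userspace analogue (hypothetical names, spin_hint() standing in for the kernel's cpu_relax()):

#include <stdatomic.h>

static atomic_int slock = 1;		/* 1 = free, 0 = held (hypothetical) */

static inline void spin_hint(void) { }	/* kernel would use cpu_relax() */

/* Same shape as arch_spin_unlock_wait(): poll until the holder releases. */
static void unlock_wait(void)
{
	do {
		spin_hint();
	} while (atomic_load(&slock) == 0);
}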