path: root/arch/m32r/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
commit	8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree	b907c35c79caadafff6ad46a91614e30afd2f967 /arch/m32r/include
parent	050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent	b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
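The series merged here splits the locking name space in two: each architecture now supplies arch_spinlock_t/arch_rwlock_t and the arch_spin_*()/arch_read_*()/arch_write_*() primitives, while the core locking code re-introduces raw_spinlock_t as a generic wrapper around the arch type. A minimal sketch of the resulting layering, not taken from this page (the real core struct also carries lockdep and debug fields):

	/* Arch code (e.g. the m32r header below) supplies the low-level type... */
	typedef struct {
		volatile int slock;		/* m32r: 1 = unlocked, <= 0 = locked */
	} arch_spinlock_t;

	/* ...and core code wraps it, roughly: */
	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;	/* debug/lockdep fields omitted */
	} raw_spinlock_t;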
Diffstat (limited to 'arch/m32r/include')
-rw-r--r--	arch/m32r/include/asm/spinlock.h	48
-rw-r--r--	arch/m32r/include/asm/spinlock_types.h	8
2 files changed, 28 insertions, 28 deletions
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b2..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
  * We make no fairness assumptions. They have a cost.
  */

-#define __raw_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (arch_spin_is_locked(x))

 /**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
  * @lock: Pointer to the lock variable
  *
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * }
  */
 	__asm__ __volatile__ (
-		"# __raw_spin_trylock		\n\t"
+		"# arch_spin_trylock		\n\t"
 		"ldi	%1, #0;			\n\t"
 		"mvfc	%2, psw;		\n\t"
 		"clrpsw	#0x40 -> nop;	\n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (oldval > 0);
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;

@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
  * }
  */
 	__asm__ __volatile__ (
-		"# __raw_spin_lock		\n\t"
+		"# arch_spin_lock		\n\t"
 		".fillinsn			\n"
 		"1:				\n\t"
 		"mvfc	%1, psw;		\n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	mb();
 	lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((int)(x)->lock > 0)
+#define arch_read_can_lock(x)	((int)(x)->lock > 0)

 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;

@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;

@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;

@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	);
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1, tmp2;

@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 	);
 }

-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t*)lock;
 	if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }

-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }

-#define __raw_read_lock_flags(lock, flags)	__raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags)	__raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif	/* _ASM_M32R_SPINLOCK_H */
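The asm bodies in this file implement the algorithms given in its own pseudocode comments. For arch_spin_trylock(), the sequence bracketed by the m32r LOCK/UNLOCK instructions (with interrupts masked via clrpsw/mvtc) amounts to an atomic exchange; a plain-C sketch of the same logic, for illustration only since written this way it is not atomic:

	/* What the trylock asm does, minus the atomicity: read the old
	 * value and store 0 ("locked") as one indivisible step. */
	static inline int arch_spin_trylock_sketch(arch_spinlock_t *lock)
	{
		int oldval = lock->slock;	/* LOCK: fetch current state */
		lock->slock = 0;		/* UNLOCK: mark as locked */
		return oldval > 0;		/* > 0 means it was free */
	}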
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e4..92e27672661f 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@

 typedef struct {
 	volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 1 }

 typedef struct {
 	volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

 #define RW_LOCK_BIAS			0x01000000
 #define RW_LOCK_BIAS_STR		"0x01000000"

-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

 #endif /* _ASM_M32R_SPINLOCK_TYPES_H */
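RW_LOCK_BIAS encodes the whole rwlock state in one counter: the lock starts at 0x01000000, each reader subtracts 1, and a writer subtracts the entire bias, so a write lock can only succeed when the counter is exactly RW_LOCK_BIAS (no readers, no writer). The trylock hunks in the spinlock.h diff above elide their failure paths at the hunk boundaries; sketched in full, assuming the kernel's atomic_t helpers, the logic is:

	#define RW_LOCK_BIAS	0x01000000

	static inline int arch_read_trylock_sketch(arch_rwlock_t *lock)
	{
		atomic_t *count = (atomic_t *)lock;
		if (atomic_dec_return(count) >= 0)	/* claimed a reader slot */
			return 1;
		atomic_inc(count);			/* writer holds it: undo and fail */
		return 0;
	}

	static inline int arch_write_trylock_sketch(arch_rwlock_t *lock)
	{
		atomic_t *count = (atomic_t *)lock;
		if (atomic_sub_and_test(RW_LOCK_BIAS, count))	/* was exactly BIAS */
			return 1;
		atomic_add(RW_LOCK_BIAS, count);	/* readers/writer present: undo */
		return 0;
	}

atomic_sub_and_test() returns true when the result is zero, which is exactly the "counter equals RW_LOCK_BIAS" condition that arch_write_can_lock() tests.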