author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-07-24 07:13:40 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-07-24 07:13:40 -0400
commit		4e8fd22bd421d7aa279bcb76189505a1f96bb7bf
tree		f23106b362b242eec555b16b34f8ad22f035c65a
parent		2c2a68b84752cb1090fd2456e8b83e5bcc0e73c4
[PATCH] ARM SMP: Fix ARMv6 spinlock and semaphore implementations
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--	include/asm-arm/locks.h		4
-rw-r--r--	include/asm-arm/spinlock.h	35
2 files changed, 21 insertions, 18 deletions
diff --git a/include/asm-arm/locks.h b/include/asm-arm/locks.h
index c26298f3891f..9cb33fcc06c1 100644
--- a/include/asm-arm/locks.h
+++ b/include/asm-arm/locks.h
@@ -61,7 +61,7 @@
61" strex ip, lr, [%0]\n" \ 61" strex ip, lr, [%0]\n" \
62" teq ip, #0\n" \ 62" teq ip, #0\n" \
63" bne 1b\n" \ 63" bne 1b\n" \
64" teq lr, #0\n" \ 64" cmp lr, #0\n" \
65" movle ip, %0\n" \ 65" movle ip, %0\n" \
66" blle " #wake \ 66" blle " #wake \
67 : \ 67 : \
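This hunk fixes the semaphore wakeup test. "teq lr, #0" only updates N and Z (from an exclusive-OR) and leaves the V flag untouched, so the signed "le" condition used by the following "movle"/"blle" could act on a stale overflow flag left by earlier code. "cmp lr, #0" performs a real subtraction and sets all of N, Z, C and V, making the "count <= 0" test reliable. A stand-alone C model of the flag semantics (hypothetical helper names, not kernel code) shows a stale V flag turning a positive count into a spurious wakeup:

#include <stdio.h>
#include <stdint.h>

struct flags { int n, z, c, v; };

/* CMP a, b: all four flags from the subtraction a - b. */
static struct flags arm_cmp(int32_t a, int32_t b, struct flags f)
{
	int64_t wide = (int64_t)a - (int64_t)b;
	int32_t res = (int32_t)wide;

	f.n = res < 0;
	f.z = res == 0;
	f.c = (uint32_t)a >= (uint32_t)b;	/* no borrow */
	f.v = wide != res;			/* signed overflow */
	return f;
}

/* TEQ a, b: N and Z from a ^ b; C and V keep their old values. */
static struct flags arm_teq(int32_t a, int32_t b, struct flags f)
{
	int32_t res = a ^ b;

	f.n = res < 0;
	f.z = res == 0;
	return f;
}

/* The ARM "le" condition: Z set, or N != V. */
static int cond_le(struct flags f) { return f.z || (f.n != f.v); }

int main(void)
{
	struct flags stale = { .v = 1 };	/* V left over from earlier code */
	int32_t count = 1;			/* positive: nobody to wake */

	printf("teq: le=%d (spurious wakeup)\n", cond_le(arm_teq(count, 0, stale)));
	printf("cmp: le=%d (correct)\n", cond_le(arm_cmp(count, 0, stale)));
	return 0;
}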
@@ -100,7 +100,7 @@
 	__asm__ __volatile__(	\
 	"@ up_op_read\n"	\
 "1:	ldrex	lr, [%0]\n"	\
-"	add	lr, lr, %1\n"	\
+"	adds	lr, lr, %1\n"	\
 "	strex	ip, lr, [%0]\n"	\
 "	teq	ip, #0\n"	\
 "	bne	1b\n"	\
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 182323619caa..9705d5eec94c 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -79,7 +79,8 @@ typedef struct {
 } rwlock_t;
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(x) do { *(x) + RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
 
 /*
  * Write locks are easy - we just set bit 31. When unlocking, we can
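Two fixes land in this spinlock.h hunk. The rwlock_init change corrects a typo with real consequences: rwlock_t is a structure, so "*(x) + RW_LOCK_UNLOCKED" is not valid C at all (addition is undefined for struct types) and any translation unit expanding rwlock_init() would fail to compile; "=" performs the intended structure assignment. The hunk also adds an rwlock_is_locked() predicate. A stand-alone sketch of both macros over a stand-in rwlock_t, not the kernel header:

typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

/* The broken form expanded to "*(x) + RW_LOCK_UNLOCKED;", which tries
 * to add two structs and is rejected as soon as the macro is used. */
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)

int main(void)
{
	rwlock_t rw;

	rwlock_init(&rw);		/* stores { 0 }: unlocked */
	return rwlock_is_locked(&rw);	/* 0 */
}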
@@ -100,6 +101,21 @@ static inline void _raw_write_lock(rwlock_t *rw)
100 : "cc", "memory"); 101 : "cc", "memory");
101} 102}
102 103
104static inline int _raw_write_trylock(rwlock_t *rw)
105{
106 unsigned long tmp;
107
108 __asm__ __volatile__(
109"1: ldrex %0, [%1]\n"
110" teq %0, #0\n"
111" strexeq %0, %2, [%1]"
112 : "=&r" (tmp)
113 : "r" (&rw->lock), "r" (0x80000000)
114 : "cc", "memory");
115
116 return tmp == 0;
117}
118
103static inline void _raw_write_unlock(rwlock_t *rw) 119static inline void _raw_write_unlock(rwlock_t *rw)
104{ 120{
105 __asm__ __volatile__( 121 __asm__ __volatile__(
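The relocated _raw_write_trylock makes a single load-exclusive/store-exclusive attempt: ldrex reads the lock word, and only if it was zero does strexeq try to store 0x80000000 (bit 31, the write-lock bit). A successful strex writes 0 back into %0, so "tmp == 0" means the lock was free and has been claimed. For comparison, a hypothetical user-space analogue using C11 atomics, which compile down to an ldrex/strex sequence on ARMv6 and later:

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_uint lock; } rwlock_t;

static int raw_write_trylock(rwlock_t *rw)
{
	unsigned int expected = 0;

	/* Succeed only if the lock word was 0; store bit 31 on success. */
	return atomic_compare_exchange_strong(&rw->lock, &expected,
					      0x80000000u);
}

int main(void)
{
	rwlock_t rw = { .lock = 0 };

	printf("first try : %d\n", raw_write_trylock(&rw));	/* 1 */
	printf("second try: %d\n", raw_write_trylock(&rw));	/* 0 */
	return 0;
}

One difference: atomic_compare_exchange_strong retries internally when the exclusive store fails spuriously, while the kernel version above simply reports failure, which trylock semantics permit.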
@@ -138,6 +154,8 @@ static inline void _raw_read_lock(rwlock_t *rw)
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
+	unsigned long tmp, tmp2;
+
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -151,19 +169,4 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
-
-	return tmp == 0;
-}
-
 #endif /* __ASM_SPINLOCK_H */