author    | Jesper Nilsson <jesper.nilsson@axis.com> | 2008-01-28 12:07:58 -0500
committer | Jesper Nilsson <jesper.nilsson@axis.com> | 2008-02-08 05:06:39 -0500
commit    | de1c1419f9eb8f9d719aaaa3e3f3073069ecd657 (patch)
tree      | 5f8e201b62ae6c197d383333b426870413a57a2c /include/asm-cris
parent    | 4258fb19320ae083064aef21b042a02a509119b9 (diff)
CRIS v32: Completely rework spinlocks for ETRAX FS and ARTPEC-3
Diffstat (limited to 'include/asm-cris')
-rw-r--r-- | include/asm-cris/arch-v32/spinlock.h | 172
1 file changed, 67 insertions(+), 105 deletions(-)
diff --git a/include/asm-cris/arch-v32/spinlock.h b/include/asm-cris/arch-v32/spinlock.h
index 5f43df0a5fb4..0d5709b983a1 100644
--- a/include/asm-cris/arch-v32/spinlock.h
+++ b/include/asm-cris/arch-v32/spinlock.h
@@ -1,40 +1,47 @@
 #ifndef __ASM_ARCH_SPINLOCK_H
 #define __ASM_ARCH_SPINLOCK_H
 
-#include <asm/system.h>
+#include <linux/spinlock_types.h>
 
 #define RW_LOCK_BIAS 0x01000000
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
 
 extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
-extern int cris_spin_trylock(void* l);
+extern int cris_spin_trylock(void *l);
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+	return *(volatile signed char *)(&(x)->slock) <= 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
-			  : "=m" (lock->lock) \
+			  : "=m" (lock->slock) \
 			  : "r" (1) \
 			  : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return cris_spin_trylock((void*)&lock->lock);
+	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	cris_spin_lock((void*)&lock->lock);
+	cris_spin_lock((void *)&lock->slock);
 }
 
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+static inline void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	_raw_spin_lock(lock);
+	__raw_spin_lock(lock);
 }
 
 /*
@@ -46,120 +53,75 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
  */
-typedef struct {
-	spinlock_t lock;
-	volatile int counter;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
-
-#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
-
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define read_can_lock(x) ((int)(x)->counter >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define write_can_lock(x) ((x)->counter == 0)
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-/* read_lock, read_unlock are pretty straightforward. Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
 {
-	unsigned long flags;
-	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
-
-	rw->counter++;
-
-	_raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
+	return (int)(x)->lock > 0;
 }
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
 {
-	unsigned long flags;
-	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
-
-	rw->counter--;
-
-	_raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
+	return (x)->lock == RW_LOCK_BIAS;
 }
 
-/* write_lock is less trivial. We optimistically grab the lock and check
- * if we surprised any readers. If so we release the lock and wait till
- * they're all gone before trying again
- *
- * Also note that we don't use the _irqsave / _irqrestore suffixes here.
- * If we're called with interrupts enabled and we've got readers (or other
- * writers) in interrupt handlers someone fucked up and we'd dead-lock
- * sooner or later anyway. prumpf */
-
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-retry:
-	_raw_spin_lock(&rw->lock);
-
-	if(rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-
-		while(rw->counter != 0);
-
-		goto retry;
-	}
-
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
+	__raw_spin_lock(&rw->slock);
+	while (rw->lock == 0);
+	rw->lock--;
+	__raw_spin_unlock(&rw->slock);
 }
 
-/* write_unlock is absolutely trivial - we don't have to wait for anything */
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	rw->counter = 0;
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_lock(&rw->slock);
+	while (rw->lock != RW_LOCK_BIAS);
+	rw->lock = 0;
+	__raw_spin_unlock(&rw->slock);
 }
 
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
-	if (rw->counter != 0) {
-		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
-
-		return 0;
-	}
+	__raw_spin_lock(&rw->slock);
+	rw->lock++;
+	__raw_spin_unlock(&rw->slock);
+}
 
-	/* got it. now leave without unlocking */
-	rw->counter = -1; /* remember we are locked */
-	return 1;
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__raw_spin_lock(&rw->slock);
+	while (rw->lock != RW_LOCK_BIAS);
+	rw->lock = RW_LOCK_BIAS;
+	__raw_spin_unlock(&rw->slock);
 }
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return rw->counter > 0;
+	int ret = 0;
+	__raw_spin_lock(&rw->slock);
+	if (rw->lock != 0) {
+		rw->lock--;
+		ret = 1;
+	}
+	__raw_spin_unlock(&rw->slock);
+	return ret;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	return rw->counter < 0;
+	int ret = 0;
+	__raw_spin_lock(&rw->slock);
+	if (rw->lock == RW_LOCK_BIAS) {
+		rw->lock = 0;
+		ret = 1;
+	}
+	__raw_spin_unlock(&rw->slock);
+	return ret;
 }
 
+
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
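
The reworked rwlock above is a biased counter: lock starts at RW_LOCK_BIAS, each reader takes one unit, and a writer takes the entire bias, so lock == RW_LOCK_BIAS means unlocked, 0 < lock < RW_LOCK_BIAS means read-held, and lock == 0 means write-held, with every update serialized by the small spinlock slock. Below is a minimal user-space sketch of that counting scheme, assuming a pthread spinlock in place of the CRIS cris_spin_lock/cris_spin_unlock assembly helpers; the model_* names are illustrative only, not part of the kernel API.

#include <pthread.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct {
	pthread_spinlock_t slock;	/* guards 'lock', as raw_rwlock_t.slock does */
	volatile int lock;		/* RW_LOCK_BIAS = free, 0 = write-locked */
} model_rwlock_t;

static void model_rwlock_init(model_rwlock_t *rw)
{
	pthread_spin_init(&rw->slock, PTHREAD_PROCESS_PRIVATE);
	rw->lock = RW_LOCK_BIAS;
}

/* A reader takes one unit of the bias; 0 means a writer holds the lock. */
static int model_read_trylock(model_rwlock_t *rw)
{
	int ret = 0;
	pthread_spin_lock(&rw->slock);
	if (rw->lock != 0) {
		rw->lock--;
		ret = 1;
	}
	pthread_spin_unlock(&rw->slock);
	return ret;
}

static void model_read_unlock(model_rwlock_t *rw)
{
	pthread_spin_lock(&rw->slock);
	rw->lock++;
	pthread_spin_unlock(&rw->slock);
}

/* A writer needs the full bias: no readers and no other writer. */
static int model_write_trylock(model_rwlock_t *rw)
{
	int ret = 0;
	pthread_spin_lock(&rw->slock);
	if (rw->lock == RW_LOCK_BIAS) {
		rw->lock = 0;
		ret = 1;
	}
	pthread_spin_unlock(&rw->slock);
	return ret;
}

static void model_write_unlock(model_rwlock_t *rw)
{
	pthread_spin_lock(&rw->slock);
	rw->lock = RW_LOCK_BIAS;
	pthread_spin_unlock(&rw->slock);
}

int main(void)
{
	model_rwlock_t rw;

	model_rwlock_init(&rw);
	printf("reader 1: %d\n", model_read_trylock(&rw));	/* 1 */
	printf("reader 2: %d\n", model_read_trylock(&rw));	/* 1 */
	printf("writer:   %d\n", model_write_trylock(&rw));	/* 0: readers present */
	model_read_unlock(&rw);
	model_read_unlock(&rw);
	printf("writer:   %d\n", model_write_trylock(&rw));	/* 1: bias restored */
	model_write_unlock(&rw);
	return 0;
}

Built with gcc -pthread, the trylock calls print 1, 1, 0, 1: two readers may share the lock, a writer is refused while any reader holds a unit, and it succeeds once the bias is fully restored, which is exactly the check __raw_write_can_lock expresses with (x)->lock == RW_LOCK_BIAS.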