Diffstat (limited to 'include/asm-parisc/spinlock.h')

 -rw-r--r--   include/asm-parisc/spinlock.h | 194 ----------
 1 file changed, 0 insertions(+), 194 deletions(-)
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
deleted file mode 100644
index f3d2090a18dc..000000000000
--- a/include/asm-parisc/spinlock.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/spinlock_types.h>
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
-{
-        volatile unsigned int *a = __ldcw_align(x);
-        return *a == 0;
-}
-
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-                do { cpu_relax(); } while (__raw_spin_is_locked(x))
-
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
-                                         unsigned long flags)
-{
-        volatile unsigned int *a;
-
-        mb();
-        a = __ldcw_align(x);
-        while (__ldcw(a) == 0)
-                while (*a == 0)
-                        if (flags & PSW_SM_I) {
-                                local_irq_enable();
-                                cpu_relax();
-                                local_irq_disable();
-                        } else
-                                cpu_relax();
-        mb();
-}
-
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
-{
-        volatile unsigned int *a;
-        mb();
-        a = __ldcw_align(x);
-        *a = 1;
-        mb();
-}
-
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
-{
-        volatile unsigned int *a;
-        int ret;
-
-        mb();
-        a = __ldcw_align(x);
-        ret = __ldcw(a) != 0;
-        mb();
-
-        return ret;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
- *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
- */
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
-{
-        unsigned long flags;
-        local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
-        rw->counter++;
-        __raw_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
-{
-        unsigned long flags;
-        local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
-        rw->counter--;
-        __raw_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
-{
-        unsigned long flags;
-retry:
-        local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
-                rw->counter++;
-                __raw_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-                return 1;
-        }
-
-        local_irq_restore(flags);
-        /* If write-locked, we fail to acquire the lock */
-        if (rw->counter < 0)
-                return 0;
-
-        /* Wait until we have a realistic chance at the lock */
-        while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
-                cpu_relax();
-
-        goto retry;
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
-{
-        unsigned long flags;
-retry:
-        local_irq_save(flags);
-        __raw_spin_lock_flags(&rw->lock, flags);
-
-        if (rw->counter != 0) {
-                __raw_spin_unlock(&rw->lock);
-                local_irq_restore(flags);
-
-                while (rw->counter != 0)
-                        cpu_relax();
-
-                goto retry;
-        }
-
-        rw->counter = -1; /* mark as write-locked */
-        mb();
-        local_irq_restore(flags);
-}
-
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
-{
-        rw->counter = 0;
-        __raw_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
-{
-        unsigned long flags;
-        int result = 0;
-
-        local_irq_save(flags);
-        if (__raw_spin_trylock(&rw->lock)) {
-                if (rw->counter == 0) {
-                        rw->counter = -1;
-                        result = 1;
-                } else {
-                        /* Read-locked. Oh well. */
-                        __raw_spin_unlock(&rw->lock);
-                }
-        }
-        local_irq_restore(flags);
-
-        return result;
-}
-
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
-{
-        return rw->counter >= 0;
-}
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
-{
-        return !rw->counter;
-}
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
-
-#endif /* __ASM_SPINLOCK_H */
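
A note on the primitive underlying the spinlock half of the deleted file: ldcw (load and clear word) is PA-RISC's only atomic memory operation. __ldcw() atomically reads the lock word and writes zero to it, and the word must be 16-byte aligned, which is what __ldcw_align() provides. The polarity is therefore inverted relative to most architectures: 1 means free and 0 means held, which is why __raw_spin_is_locked() tests *a == 0 and __raw_spin_unlock() releases with a plain store of 1 between memory barriers. Below is a minimal, hypothetical user-space analogue of that acquire loop in C11, with atomic_exchange_explicit() standing in for ldcw; none of these names are kernel API, and this is a sketch of the idea rather than the kernel's implementation.

#include <stdatomic.h>

typedef struct { atomic_uint a; } ldcw_lock_t;

#define LDCW_LOCK_INIT { 1 }    /* 1 = free, 0 = held, as in the parisc code */

static void ldcw_lock(ldcw_lock_t *x)
{
        /* "ldcw": atomically fetch the word and leave 0 behind;
         * a nonzero return means the lock was free and is now ours. */
        while (atomic_exchange_explicit(&x->a, 0, memory_order_acquire) == 0)
                while (atomic_load_explicit(&x->a, memory_order_relaxed) == 0)
                        ;       /* spin on plain loads, like the *a == 0 loop */
}

static void ldcw_unlock(ldcw_lock_t *x)
{
        /* Release by storing 1, mirroring "*a = 1" in __raw_spin_unlock();
         * the release ordering plays the role of the surrounding mb() calls. */
        atomic_store_explicit(&x->a, 1, memory_order_release);
}

Spinning on plain relaxed loads between exchange attempts mirrors the kernel's inner while (*a == 0) loop: the exchange dirties the cache line even on failure, so it is cheaper to watch the line read-only until the lock looks free and only then retry the atomic.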
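The rwlock half follows the scheme in the comment block: readers take rw->lock only long enough to adjust the counter, while a writer keeps rw->lock held for its entire critical section and marks the counter -1, so rival readers and writers simply block on the inner spinlock. The repeated notes about disabling interrupts address a deadlock: an interrupt handler that tried to take the same lock while its own CPU held rw->lock would spin forever. Here is a user-space sketch of the same counter-plus-spinlock scheme, again in C11 with hypothetical names and busy-waiting in place of cpu_relax(); it illustrates the design, not the kernel API.

#include <stdatomic.h>

typedef struct {
        atomic_flag lock;       /* plays the role of rw->lock */
        atomic_int counter;     /* > 0: readers; 0: free; -1: write-locked */
} sketch_rwlock_t;

#define SKETCH_RWLOCK_INIT { ATOMIC_FLAG_INIT, 0 }

static void sketch_spin_lock(atomic_flag *l)
{
        while (atomic_flag_test_and_set_explicit(l, memory_order_acquire))
                ;               /* spin */
}

static void sketch_spin_unlock(atomic_flag *l)
{
        atomic_flag_clear_explicit(l, memory_order_release);
}

static void sketch_read_lock(sketch_rwlock_t *rw)
{
        sketch_spin_lock(&rw->lock);    /* blocks while a writer holds it */
        rw->counter++;
        sketch_spin_unlock(&rw->lock);
}

static void sketch_read_unlock(sketch_rwlock_t *rw)
{
        sketch_spin_lock(&rw->lock);
        rw->counter--;
        sketch_spin_unlock(&rw->lock);
}

static void sketch_write_lock(sketch_rwlock_t *rw)
{
        for (;;) {
                sketch_spin_lock(&rw->lock);
                if (atomic_load(&rw->counter) == 0)
                        break;          /* no readers; keep the inner lock */
                sketch_spin_unlock(&rw->lock);
                while (atomic_load_explicit(&rw->counter,
                                            memory_order_relaxed) != 0)
                        ;               /* wait for readers to drain */
        }
        atomic_store(&rw->counter, -1); /* write-locked; inner lock stays
                                           held until sketch_write_unlock() */
}

static void sketch_write_unlock(sketch_rwlock_t *rw)
{
        atomic_store(&rw->counter, 0);
        sketch_spin_unlock(&rw->lock);
}

As the header comment warns, this design is unfair to writers: sketch_write_lock() backs off whenever readers are present, so a steady stream of readers incrementing the counter can starve a writer indefinitely.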