author      Ingo Molnar <mingo@elte.hu>             2005-09-10 03:25:56 -0400
committer   Linus Torvalds <torvalds@g5.osdl.org>   2005-09-10 13:06:21 -0400
commit      fb1c8f93d869b34cacb8b8932e2b83d96a19d720
tree        a006d078aa02e421a7dc4793c335308204859d36 /include/asm-parisc
parent      4327edf6b8a7ac7dce144313947995538842d8fd
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions from Arjan van
de Ven) does a major cleanup of the spinlock code. It does the following
things:
- consolidates and enhances the spinlock/rwlock debugging code
- simplifies the asm/spinlock.h files
- encapsulates the raw spinlock type and moves generic spinlock
  features (such as ->break_lock) into the generic code (see the sketch
  after this list).
- cleans up the spinlock code hierarchy to get rid of the spaghetti.
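For illustration, the encapsulation mentioned above ends up looking roughly
like this (a simplified sketch of the generic wrapper in
linux/spinlock_types.h; the exact debug fields are an assumption based on the
owner/CPU tracking described below, not verbatim from the patch):

typedef struct {
	raw_spinlock_t raw_lock;	/* arch-supplied, from asm/spinlock_types.h */
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;	/* generic feature, no longer per-arch */
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;	/* spinlock-owner/CPU tracking */
	void *owner;
#endif
} spinlock_t;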
Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c (previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds).
Also, I've enhanced the rwlock debugging facility: it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds too.
All locks have lockup detection now, which will work for both soft and hard
spin/rwlock lockups.
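To give an idea of how the lockup detection works, here is a rough sketch in
the style of lib/spinlock_debug.c (the loop bound and message wording are
assumptions, not verbatim from the patch):

static void __spin_lock_debug(spinlock_t *lock)
{
	u64 i;

	for (;;) {
		/* spin on the underlying raw lock for a bounded time */
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			cpu_relax();
		}
		/* still not acquired: report a suspected lockup */
		printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
			smp_processor_id(), current->comm, current->pid, lock);
		dump_stack();
	}
}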
The arch-level include files now only contain the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:
include/asm-i386/spinlock_types.h   | 16
include/asm-x86_64/spinlock_types.h | 16
I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:
               SMP                |  UP
 ----------------------------|-----------------------------------
 asm/spinlock_types.h        |  linux/spinlock_types_up.h
 linux/spinlock_types.h      |  linux/spinlock_types.h
 asm/spinlock.h              |  linux/spinlock_up.h
 linux/spinlock_api_smp.h    |  linux/spinlock_api_up.h
 linux/spinlock.h            |  linux/spinlock.h
/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
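Concretely, on an SMP build a spin_lock() call threads through these layers
roughly like this (a simplified sketch; the _spin_lock() body shown is an
assumption consistent with the layering above, with kernel/spinlock.c
supplying the out-of-line definition):

/* linux/spinlock.h: the final API used by the rest of the kernel */
#define spin_lock(lock)		_spin_lock(lock)

/* prototype from linux/spinlock_api_smp.h, definition in kernel/spinlock.c */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);	/* resolves to __raw_spin_lock(&lock->raw_lock) */
}

/* asm/spinlock.h: the arch-level lowlevel implementation */
static inline void __raw_spin_lock(raw_spinlock_t *lock);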
All SMP and UP architectures are converted by this patch.
arm, i386, ia64, ppc, ppc64, s390/s390x and x64 were build-tested via
cross-compilers. m32r, mips, sh and sparc have not been tested yet, but
should be mostly fine.
From: Grant Grundler <grundler@parisc-linux.org>
Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
It builds a 32-bit SMP kernel (not booted or tested). I did not try to build
non-SMP kernels; that should be trivial to fix up later if necessary.
I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so avoids
some ugly nesting of linux/*.h and asm/*.h files. Those particular locks
are well tested and contained entirely inside arch-specific code. I do NOT
expect any new issues to arise with them.
If someone does ever need to use debug/metrics with them, then they will
need to unravel this hairball between spinlocks, atomic ops, and bit ops
that exists only because parisc has exactly one atomic instruction: LDCW
(load and clear word).
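For readers unfamiliar with PA-RISC: LDCW atomically loads a 16-byte-aligned
word and clears it to zero, which is why a free lock holds 1 and a held lock
holds 0. A rough sketch of the resulting lock/unlock pair, in the spirit of
the __raw_spin_lock()/__raw_spin_unlock() bodies in asm-parisc/spinlock.h
(simplified, not verbatim; the inner cpu_relax() is an assumption):

static inline void sketch_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);	/* 16-byte aligned word */

	while (__ldcw(a) == 0)		/* atomically fetch old value, store 0 */
		while (*a == 0)		/* lock held: spin with plain loads */
			cpu_relax();
	mb();				/* order the critical section */
}

static inline void sketch_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();
	*a = 1;				/* a plain store releases the lock */
}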
From: "Luck, Tony" <tony.luck@intel.com>
ia64 fix
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-parisc')
-rw-r--r--   include/asm-parisc/atomic.h         |  12
-rw-r--r--   include/asm-parisc/bitops.h         |   2
-rw-r--r--   include/asm-parisc/cacheflush.h     |   1
-rw-r--r--   include/asm-parisc/processor.h      |   1
-rw-r--r--   include/asm-parisc/spinlock.h       | 163
-rw-r--r--   include/asm-parisc/spinlock_types.h |  21
-rw-r--r--   include/asm-parisc/system.h         |  24
7 files changed, 59 insertions(+), 165 deletions(-)
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index e24f7579adb0..048a2c7fd0c0 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -24,19 +24,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
-/* Can't use _raw_spin_lock_irq because of #include problems, so
+/* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	spinlock_t *s = ATOMIC_HASH(l);		\
+	raw_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	_raw_spin_lock(s);			\
+	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	spinlock_t *s = ATOMIC_HASH(l);			\
-	_raw_spin_unlock(s);				\
+	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index 928e5ef850bd..af7db694b22d 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -2,7 +2,7 @@
 #define _PARISC_BITOPS_H
 
 #include <linux/compiler.h>
-#include <asm/system.h>
+#include <asm/spinlock.h>
 #include <asm/byteorder.h>
 #include <asm/atomic.h>
 
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 06732719d927..aa592d8c0e39 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -3,6 +3,7 @@
 
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index 0b61f51d8467..a9dfadd05658 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -11,6 +11,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 
 #include <asm/hardware.h>
 #include <asm/page.h>
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 679ea1c651ef..43eaa6e742e0 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -2,30 +2,25 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
 /* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
  * since it only has load-and-zero. Moreover, at least on some PA processors,
  * the semaphore address has to be 16-byte aligned.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 
@@ -36,7 +31,7 @@ static inline void _raw_spin_lock(spinlock_t *x)
 	mb();
 }
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -45,7 +40,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
 	mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
@@ -57,131 +52,38 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
 	return ret;
 }
-
-#define spin_lock_own(LOCK, LOCATION)	((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC	0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)							\
-	do {								\
-		if (unlikely((x)->magic != SPINLOCK_MAGIC)) {		\
-			printk(KERN_ERR "%s:%d: spin_is_locked"		\
-			" on uninitialized spinlock %p.\n",		\
-				__FILE__, __LINE__, (x));		\
-		}							\
-	} while(0)
-
-#define spin_is_locked(x)						\
-	({								\
-		CHECK_LOCK(x);						\
-		volatile unsigned int *a = __ldcw_align(x);		\
-		if (unlikely((*a == 0) && (x)->babble)) {		\
-			(x)->babble--;					\
-			printk("KERN_WARNING				\
-				%s:%d: spin_is_locked(%s/%p) already"	\
-				" locked by %s:%d in %s at %p(%d)\n",	\
-				__FILE__,__LINE__, (x)->module, (x),	\
-				(x)->bfile, (x)->bline, (x)->task->comm,\
-				(x)->previous, (x)->oncpu);		\
-		}							\
-		*a == 0;						\
-	})
-
-#define spin_unlock_wait(x)						\
-	do {								\
-		CHECK_LOCK(x);						\
-		volatile unsigned int *a = __ldcw_align(x);		\
-		if (unlikely((*a == 0) && (x)->babble)) {		\
-			(x)->babble--;					\
-			printk("KERN_WARNING				\
-				%s:%d: spin_unlock_wait(%s/%p)"		\
-				" owned by %s:%d in %s at %p(%d)\n",	\
-				__FILE__,__LINE__, (x)->module, (x),	\
-				(x)->bfile, (x)->bline, (x)->task->comm,\
-				(x)->previous, (x)->oncpu);		\
-		}							\
-		barrier();						\
-	} while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock)	_dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)					\
-do {									\
-	volatile unsigned int *a = __ldcw_align(LOCK);			\
-	if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))	\
-		printk("KERN_WARNING					\
-			%s: called on %d from %p but lock %s on %d\n",	\
-			LOCATION, smp_processor_id(),			\
-			__builtin_return_address(0),			\
-			(*a == 0) ? "taken" : "freed", (LOCK)->on_cpu);	\
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
 
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
  */
-typedef struct {
-	spinlock_t lock;
-	volatile int counter;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 /* read_lock, read_unlock are pretty straightforward.  Of course it somehow
  * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_read_lock(rwlock_t *rw)
+static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static  __inline__ void _raw_read_unlock(rwlock_t *rw)
+static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -194,20 +96,17 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
  * writers) in interrupt handlers someone fucked up and we'd dead-lock
  * sooner or later anyway.   prumpf */
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_write_lock(rwlock_t *rw)
+static  __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
 retry:
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	if(rw->counter != 0) {
 		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
+		__raw_spin_unlock(&rw->lock);
 
-		while(rw->counter != 0);
+		while (rw->counter != 0)
+			cpu_relax();
 
 		goto retry;
 	}
@@ -215,26 +114,21 @@ retry:
 	/* got it.  now leave without unlocking */
 	rw->counter = -1; /* remember we are locked */
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
 /* write_unlock is absolutely trivial - we don't have to wait for anything */
 
-static  __inline__ void _raw_write_unlock(rwlock_t *rw)
+static  __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	rw->counter = 0;
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ int _raw_write_trylock(rwlock_t *rw)
+static  __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 	if (rw->counter != 0) {
 		/* this basically never happens */
-		_raw_spin_unlock(&rw->lock);
+		__raw_spin_unlock(&rw->lock);
 
 		return 0;
 	}
@@ -243,14 +137,13 @@ static __inline__ int _raw_write_trylock(rwlock_t *rw)
 	rw->counter = -1; /* remember we are locked */
 	return 1;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
 {
 	return rw->counter > 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
 {
 	return rw->counter < 0;
 }
diff --git a/include/asm-parisc/spinlock_types.h b/include/asm-parisc/spinlock_types.h
new file mode 100644
index 000000000000..785bba822fbf
--- /dev/null
+++ b/include/asm-parisc/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned int lock[4];
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
+
+typedef struct {
+	raw_spinlock_t lock;
+	volatile int counter;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED		{ __RAW_SPIN_LOCK_UNLOCKED, 0 }
+
+#endif
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 81c543339036..26ff844a21c1 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -160,29 +160,7 @@ static inline void set_eiem(unsigned long val)
 })
 
 #ifdef CONFIG_SMP
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
-	volatile unsigned int lock[4];
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned long magic;
-	volatile unsigned int babble;
-	const char *module;
-	char *bfile;
-	int bline;
-	int oncpu;
-	void *previous;
-	struct task_struct * task;
-#endif
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
-
+# define __lock_aligned __attribute__((__section__(".data.lock_aligned")))
 #endif
 
 #define KERNEL_START	(0x10100000 - 0x1000)