Diffstat (limited to 'arch/parisc')

-rw-r--r--  arch/parisc/include/asm/atomic.h   |  4 ++--
-rw-r--r--  arch/parisc/include/asm/spinlock.h | 44 ++++++++++++++++++++++----------------------
2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 3a4ea778d4b6..716634d1f546 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 #define _atomic_spin_lock_irqsave(l,f) do {	\
 	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	__raw_spin_lock(s);			\
+	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
 	arch_spinlock_t *s = ATOMIC_HASH(l);		\
-	__raw_spin_unlock(s);				\
+	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
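For context on the hunk above: on parisc, atomic operations are protected by one of the spinlocks in __atomic_hash[], selected by ATOMIC_HASH() from the address being operated on, so unrelated atomic variables rarely contend for the same lock. Below is a minimal userspace sketch of that hashing idea using C11 atomics; HASH_SIZE, hash_lock() and hashed_add_return() are made-up names for illustration, and the real macros additionally disable interrupts, which has no direct userspace analogue here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16			/* illustrative; the kernel sizes this as ATOMIC_HASH_SIZE */

static atomic_bool lock_hash[HASH_SIZE];	/* false == unlocked; zero-initialized */

/* Map an address onto one lock in the hash, like ATOMIC_HASH(l). */
static atomic_bool *hash_lock(const void *addr)
{
	return &lock_hash[((uintptr_t)addr >> 4) % HASH_SIZE];
}

/* Emulate an "atomic add" by taking the per-address hashed lock. */
static long hashed_add_return(long *v, long i)
{
	atomic_bool *l = hash_lock(v);
	long ret;

	while (atomic_exchange_explicit(l, true, memory_order_acquire))
		;			/* spin: another op hashed to the same slot */
	ret = (*v += i);		/* critical section */
	atomic_store_explicit(l, false, memory_order_release);
	return ret;
}

int main(void)
{
	long counter = 0;

	printf("%ld\n", hashed_add_return(&counter, 5));	/* prints 5 */
	return 0;
}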
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 69e8dca26744..235e7e386e2a 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }
 
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (arch_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
 	mb();
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x)
 	mb();
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
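The primitives in the hunks above encode the PA-RISC ldcw convention: the lock word is nonzero when the lock is free and zero when it is held, which is why arch_spin_is_locked() tests *a == 0 (the word must also be specially aligned for ldcw, which is what __ldcw_align() handles). A rough userspace sketch of that inverted convention, assuming C11 atomics; the real code uses the ldcw instruction plus mb() barriers, and the names below are invented for illustration.

#include <stdatomic.h>

/* The lock word, ldcw-style: nonzero == free, zero == held.
 * Initialize free, e.g.:  atomic_uint lock_word = 1;  */

static int ldcw_is_locked(atomic_uint *a)
{
	return atomic_load_explicit(a, memory_order_relaxed) == 0;
}

static int ldcw_trylock(atomic_uint *a)
{
	/* Like ldcw: atomically read the word and write zero.  A nonzero
	 * old value means the lock was free and is now ours. */
	return atomic_exchange_explicit(a, 0, memory_order_acquire) != 0;
}

static void ldcw_lock(atomic_uint *a)
{
	while (!ldcw_trylock(a))
		;				/* spin while someone else holds it */
}

static void ldcw_unlock(atomic_uint *a)
{
	atomic_store_explicit(a, 1, memory_order_release);	/* nonzero == free again */
}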
@@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter++;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter--;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }
 
@@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		rw->counter++;
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);
 		return 1;
 	}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 	return 0;
 
 	/* Wait until we have a realistic chance at the lock */
-	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
 		cpu_relax();
 
 	goto retry;
@@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 
 	if (rw->counter != 0) {
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);
 
 		while (rw->counter != 0)
@@ -144,7 +144,7 @@ retry:
 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	rw->counter = 0;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 }
 
 /* Note that we have to ensure interrupts are disabled in case we're
@@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 	int result = 0;
 
 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		if (rw->counter == 0) {
 			rw->counter = -1;
 			result = 1;
 		} else {
 			/* Read-locked. Oh well. */
-			__raw_spin_unlock(&rw->lock);
+			arch_spin_unlock(&rw->lock);
 		}
 	}
 	local_irq_restore(flags);
@@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
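The read/write-lock hunks above all follow one pattern: the rwlock is a spinlock plus a signed counter. Readers take the spinlock only long enough to bump or drop the counter, while a writer keeps holding the spinlock for its whole critical section and marks the counter -1 (visible in the write_trylock and write_unlock hunks). Below is a minimal userspace sketch of that pattern with C11 atomics; the names (rw_sketch_t, read_lock_sketch, ...) are invented for illustration, and the interrupt disabling done by the real code is omitted.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool lock;	/* guards 'counter'; stays held by an active writer */
	int counter;		/* > 0: readers, 0: free, -1: write-locked */
} rw_sketch_t;			/* zero-initialize, e.g.  rw_sketch_t rw = { 0 };  */

static void spin_acquire(atomic_bool *l)
{
	while (atomic_exchange_explicit(l, true, memory_order_acquire))
		;			/* spin until we flip it from clear to set */
}

static void spin_release(atomic_bool *l)
{
	atomic_store_explicit(l, false, memory_order_release);
}

static void read_lock_sketch(rw_sketch_t *rw)
{
	spin_acquire(&rw->lock);	/* blocks while a writer holds the lock */
	rw->counter++;
	spin_release(&rw->lock);
}

static void read_unlock_sketch(rw_sketch_t *rw)
{
	spin_acquire(&rw->lock);
	rw->counter--;
	spin_release(&rw->lock);
}

static void write_lock_sketch(rw_sketch_t *rw)
{
	for (;;) {
		spin_acquire(&rw->lock);
		if (rw->counter == 0) {		/* no readers: keep holding the spinlock */
			rw->counter = -1;	/* mark write-locked */
			return;
		}
		spin_release(&rw->lock);	/* readers active: drop it and retry */
	}
}

static void write_unlock_sketch(rw_sketch_t *rw)
{
	rw->counter = 0;
	spin_release(&rw->lock);	/* counterpart of the write-unlock hunk above */
}

With this layout a writer excludes both readers and other writers simply by sitting on the inner spinlock, while the read path only needs two short lock/unlock pairs around the counter update.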