path: root/arch/ia64
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:02:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:02:01 -0500
commit     8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree       b907c35c79caadafff6ad46a91614e30afd2f967 /arch/ia64
parent     050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent     b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
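
The net effect of the series is a rename of the arch-level lock primitives: raw_spinlock_t/raw_rwlock_t and the __raw_spin_*/__raw_read_*/__raw_write_* helpers become arch_spinlock_t/arch_rwlock_t and arch_spin_*/arch_read_*/arch_write_*, freeing the raw_spinlock_t name for a new top-level lock type for code that must always truly spin. As a rough sketch of what that means for a caller (the lock and function below are made up for illustration; only DEFINE_RAW_SPINLOCK() and the raw_spin_*() calls are interfaces this series provides):

/* Illustrative only: code that needs a genuinely spinning lock uses the new
 * raw_spinlock_t wrappers, which in turn sit on the per-arch arch_spinlock_t
 * shown in the ia64 diff below. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);          /* hypothetical lock */

static void demo_critical_section(void)
{
        unsigned long flags;

        /* The irq_desc locks in the diff below make the same move, from
         * spin_lock_irqsave() to raw_spin_lock_irqsave(). */
        raw_spin_lock_irqsave(&demo_lock, flags);
        /* ... work that must stay non-preemptible ... */
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}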
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/bitops.h           2
-rw-r--r--  arch/ia64/include/asm/spinlock.h        76
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h   8
-rw-r--r--  arch/ia64/kernel/iosapic.c               6
-rw-r--r--  arch/ia64/kernel/irq.c                   4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c              4
6 files changed, 50 insertions, 50 deletions
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)	((x)->lock = 0)
+#define arch_spin_lock_init(x)	((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket;
 
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
 do { \
-	raw_rwlock_t *__read_lock_ptr = (rw); \
+	arch_rwlock_t *__read_lock_ptr = (rw); \
 	\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
 do { \
-	raw_rwlock_t *__read_lock_ptr = (rw); \
+	arch_rwlock_t *__read_lock_ptr = (rw); \
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
 } while (0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	register long result; \
 	\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0); \
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
 ({ \
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	} while (ia64_val); \
 })
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	__u64 ia64_val; \
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	(ia64_val == 0); \
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
-		raw_rwlock_t lock;
+		arch_rwlock_t lock;
 		__u32 word;
 	} old, new;
 	old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
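
As a side note on the ticket-lock scheme the header comment above describes (one field for the head of the queue, one for the next ticket to hand out), here is a minimal standalone sketch in plain C11 atomics. It is not the ia64 code: the real implementation packs both halves into the single lock word and uses ia64 fetchadd/cmpxchg intrinsics with acquire/release semantics, but the head/tail logic behind __ticket_spin_lock(), __ticket_spin_is_locked() and __ticket_spin_is_contended() is the same idea.

#include <stdatomic.h>
#include <stdbool.h>

struct ticket_lock {
        atomic_uint next;       /* tail: next ticket to hand out */
        atomic_uint owner;      /* head: ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
        /* Take a ticket, then wait until it is our turn. */
        unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
                                                    memory_order_relaxed);

        while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
                ;       /* spin; the real code relaxes the CPU here */
}

static void ticket_unlock(struct ticket_lock *l)
{
        /* Serve the next ticket; release pairs with the acquire above. */
        atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

static bool ticket_is_locked(struct ticket_lock *l)
{
        /* Mirrors __ticket_spin_is_locked(): locked iff head != tail. */
        return atomic_load(&l->owner) != atomic_load(&l->next);
}

static bool ticket_is_contended(struct ticket_lock *l)
{
        /* Mirrors __ticket_spin_is_contended(): more than one ticket out. */
        return atomic_load(&l->next) - atomic_load(&l->owner) > 1;
}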
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int read_counter : 31;
 	volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0, 0 }
 
 #endif
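
For illustration, the renamed types and initializers above would be used like this (hypothetical standalone lines; in practice the generic raw spinlock/rwlock wrappers embed and initialize the arch types for you):

arch_spinlock_t demo_arch_lock   = __ARCH_SPIN_LOCK_UNLOCKED;   /* { 0 } */
arch_rwlock_t   demo_arch_rwlock = __ARCH_RW_LOCK_UNLOCKED;     /* { 0, 0 } */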
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
 		goto unlock_iosapic_lock;
 	}
 
-	spin_lock(&irq_desc[irq].lock);
+	raw_spin_lock(&irq_desc[irq].lock);
 	dest = get_target_cpu(gsi, irq);
 	dmode = choose_dmode();
 	err = register_intr(gsi, irq, dmode, polarity, trigger);
 	if (err < 0) {
-		spin_unlock(&irq_desc[irq].lock);
+		raw_spin_unlock(&irq_desc[irq].lock);
 		irq = err;
 		goto unlock_iosapic_lock;
 	}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-	spin_unlock(&irq_desc[irq].lock);
+	raw_spin_unlock(&irq_desc[irq].lock);
  unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS)
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..70e4bad23432 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 		desc = irq_desc + irq;
 		cfg = irq_cfg + irq;
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
 	unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 	return IRQ_HANDLED;
 }