author     Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 12:02:01 -0500
commit     8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree       b907c35c79caadafff6ad46a91614e30afd2f967 /arch
parent     050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent     b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
clockevents: Convert to raw_spinlock
clockevents: Make tick_device_lock static
debugobjects: Convert to raw_spinlocks
perf_event: Convert to raw_spinlock
hrtimers: Convert to raw_spinlocks
genirq: Convert irq_desc.lock to raw_spinlock
smp: Convert smplocks to raw_spinlocks
rtmutes: Convert rtmutex.lock to raw_spinlock
sched: Convert pi_lock to raw_spinlock
sched: Convert cpupri lock to raw_spinlock
sched: Convert rt_runtime_lock to raw_spinlock
sched: Convert rq->lock to raw_spinlock
plist: Make plist debugging raw_spinlock aware
bkl: Fixup core_lock fallout
locking: Cleanup the name space completely
locking: Further name space cleanups
alpha: Fix fallout from locking changes
locking: Implement new raw_spinlock
locking: Convert raw_rwlock functions to arch_rwlock
locking: Convert raw_rwlock to arch_rwlock
...
Diffstat (limited to 'arch')
74 files changed, 701 insertions, 701 deletions
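
The change repeated across all 74 files is mechanical: locks that must keep busy-waiting even when ordinary spinlocks later become preemptible (the PREEMPT_RT groundwork this series serves) are retyped from spinlock_t to raw_spinlock_t and their operations gain a raw_ prefix, while the arch-level primitives move from the __raw_*/raw_* names into an arch_* namespace. A minimal caller-side sketch of the first rename, using a hypothetical my_hw_lock rather than any lock from this diff:

/* Before: spinlock_t, which an RT kernel may turn into a sleeping lock. */
static DEFINE_SPINLOCK(my_hw_lock_old);		/* hypothetical lock */

static void poke_hw_old(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_hw_lock_old, flags);
	/* low-level hardware access */
	spin_unlock_irqrestore(&my_hw_lock_old, flags);
}

/* After: raw_spinlock_t always spins, so it stays usable in contexts
 * that can never sleep. */
static DEFINE_RAW_SPINLOCK(my_hw_lock_new);	/* hypothetical lock */

static void poke_hw_new(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_hw_lock_new, flags);
	/* same critical section */
	raw_spin_unlock_irqrestore(&my_hw_lock_new, flags);
}

The t2_hae_lock conversion in the first file below is exactly this pattern applied to real code.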
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f670..471c07292e0b 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
 	set_hae(msb); \
 }

-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;

 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extbl(result, addr & 3);
 }

@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extwl(result, addr & 3);
 }

@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return result & 0xffffffffUL;
 }

@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long r0, r1, work, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	r0 = *(vuip)(work);
 	r1 = *(vuip)(work + (4 << 5));
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return r1 << 32 | r0;
 }

@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	w = __kernel_insbl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }

 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	w = __kernel_inswl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }

 /*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }

 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, work;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);

 	t2_set_hae;

 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	*(vuip)work = b;
 	*(vuip)(work + (4 << 5)) = b >> 32;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }

 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
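
Why every t2 accessor holds this lock: the HAE (High Address Extension) register is a single chip-wide window selector, so programming it and performing the sparse-space access must happen as one atomic unit with interrupts off, or a concurrent access could retarget the window mid-transfer. A distilled sketch of the shape all eight routines above share; set_hae() is the real Alpha macro, while sparse_read() is a hypothetical stand-in for the shifted sparse-space dereference:

static DEFINE_RAW_SPINLOCK(hae_lock);	/* stands in for t2_hae_lock */

static unsigned long locked_sparse_read(unsigned long addr, unsigned long msb)
{
	unsigned long result, flags;

	raw_spin_lock_irqsave(&hae_lock, flags);
	set_hae(msb);			/* select the address window */
	result = sparse_read(addr);	/* hypothetical sparse-space load */
	raw_spin_unlock_irqrestore(&hae_lock, flags);
	return result;
}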
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb335..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
  * We make no fairness assumptions. They have a cost.
  */

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x)	((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x)	((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)

-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }

-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;

@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }

 /***********************************************************/

-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (lock->lock & 1) == 0;
 }

-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return lock->lock == 0;
 }

-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;

@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }

-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
 	long regx;

@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }

-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
 	return success;
 }

-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
 	return success;
 }

-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
 	: "m" (*lock) : "memory");
 }

-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0d..54c2afce0a1d 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@

 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

 #endif
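
This spinlock_types.h rename is the type-level half of the series: each architecture now exports arch_spinlock_t/arch_rwlock_t, which frees the raw_spinlock_t name for the core kernel's new lock type that wraps the arch type. A sketch of how the layers stack after the series, simplified from the core headers with the lockdep/debug members omitted (raw_spin_lock_sketch is an illustrative name, not the real entry point):

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* the arch-provided lock word */
	/* lockdep/debug members elided */
} raw_spinlock_t;

static inline void raw_spin_lock_sketch(raw_spinlock_t *lock)
{
	preempt_disable();		 /* core-level policy */
	arch_spin_lock(&lock->raw_lock); /* arch-level mechanism */
}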
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index d9980d47ab81..e6d90568b65d 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,7 +74,7 @@
 # define DBG(args)
 #endif

-DEFINE_SPINLOCK(t2_hae_lock);
+DEFINE_RAW_SPINLOCK(t2_hae_lock);

 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c0de072b8305..5f2cf23c4648 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
 #endif

 	if (irq < ACTUAL_NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		action = irq_desc[irq].action;
 		if (!action)
 			goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 		seq_puts(p, "IPI: ");
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4ea..8920b2d6e3b8 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
  */
 #define do_bad_IRQ(irq,desc)			\
 do {						\
-	spin_lock(&desc->lock);			\
+	raw_spin_lock(&desc->lock);		\
 	handle_bad_irq(irq, desc);		\
-	spin_unlock(&desc->lock);		\
+	raw_spin_unlock(&desc->lock);		\
 } while(0)

 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ede..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
  * Locked value: 1
  */

-#define __raw_spin_is_locked(x)		((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)		((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;

@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_mb();
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;

@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	}
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();

@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * just write zero since the lock is exclusively held.
  */

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;

@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	smp_mb();
 }

-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;

@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	}
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();

@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 }

 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)		((x)->lock == 0)
+#define arch_write_can_lock(x)		((x)->lock == 0)

 /*
  * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;

@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	smp_mb();
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;

@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	: "cc");
 }

-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2 = 1;

@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 }

 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee5..d14d197ae04a 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@

 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f3856..b7cb45bb91e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 #ifdef CONFIG_FIQ
 		show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 	}

 	desc = irq_desc + irq;
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
 		desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 		desc->status &= ~IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
 		desc->status &= ~IRQ_NOAUTOEN;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }

 /*
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91de..038f24d47023 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);

 	BUG_ON(desc->status & IRQ_INPROGRESS);

@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_mask;

 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);

 	action_ret = handle_IRQ_event(irq, action);

@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	 * Maybe this function should go to kernel/irq/chip.c? */
 	note_interrupt(irq, desc, action_ret);

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;

 	if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
 	/* ack unconditionally to unmask lower prio irqs */
 	desc->chip->ack(irq);

-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 #define handle_irq handle_prio_irq
 #endif
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 09904d22309f..9604f7758f9a 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}

 	return 0;
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b03..1942ccfedbe0 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);

-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }

-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }

-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }

-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }

-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_read_lock_asm(&rw->lock);
+	arch_read_lock_asm(&rw->lock);
 }

-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return __raw_read_trylock_asm(&rw->lock);
+	return arch_read_trylock_asm(&rw->lock);
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_read_unlock_asm(&rw->lock);
+	arch_read_unlock_asm(&rw->lock);
 }

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_write_lock_asm(&rw->lock);
+	arch_write_lock_asm(&rw->lock);
 }

-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return __raw_write_trylock_asm(&rw->lock);
+	return arch_write_trylock_asm(&rw->lock);
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_write_unlock_asm(&rw->lock);
+	arch_write_unlock_asm(&rw->lock);
 }

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif

diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c0610..1a33608c958b 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,14 +15,14 @@

 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }

 #endif
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11f..64cff54a8a58 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)

 static struct irq_desc bad_irq_desc = {
 	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };

 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 78cb3d38f899..9636bace00e8 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
 	if (fp->ipend & ~0x3F) {
 		for (i = 0; i < (NR_IRQS - 1); i++) {
 			if (!in_atomic)
-				spin_lock_irqsave(&irq_desc[i].lock, flags);
+				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

 			action = irq_desc[i].action;
 			if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
 			verbose_printk("\n");
 unlock:
 			if (!in_atomic)
-				spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 		}
 	}

diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);

-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }

 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }

 /*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  *
  */

-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
 	return (int)(x)->lock > 0;
 }

-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
 	return (x)->lock == RW_LOCK_BIAS;
 }

-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
 	rw->lock--;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }

-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = 0;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }

-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	rw->lock++;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }

-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = RW_LOCK_BIAS;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }

-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock != 0) {
 		rw->lock--;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return ret;
 }

-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock == RW_LOCK_BIAS) {
 		rw->lock = 0;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return 1;
 }

 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif /* __ASM_ARCH_SPINLOCK_H */
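
The cris rwlock above keeps all of its state in one biased counter: unlocked equals RW_LOCK_BIAS, each reader subtracts one, and a writer claims the lock by dropping the count to zero, which is why arch_write_can_lock tests for the full bias while arch_read_can_lock only needs a positive value. A standalone demonstration of that accounting, outside the kernel:

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

int main(void)
{
	unsigned int lock = RW_LOCK_BIAS;	/* unlocked */

	lock--;					/* one reader enters: BIAS - 1 */
	printf("writer may lock? %s\n",
	       lock == RW_LOCK_BIAS ? "yes" : "no");	/* no */
	lock++;					/* reader leaves */

	lock = 0;				/* writer holds: count is 0 */
	printf("reader may lock? %s\n",
	       (int)lock > 0 ? "yes" : "no");		/* no */
	return 0;
}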
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc6..b5ce0724a88f 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b3..62d1aba615dc 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (action) {
 			seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_putc(p, '\n');
 		}

-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
 	}
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 5c913d472119..c25dc2c2b1da 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_puts(p, "       CPU0");

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, ", %s", action->name);
 	seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>

-#define __raw_spin_lock_init(x)	((x)->lock = 0)
+#define arch_spin_lock_init(x)	((x)->lock = 0)

 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)

-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket, serve;

@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }

-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);

@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }

-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;

@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }

-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket;

@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }

-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);

 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }

-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);

 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }

-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }

-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended

-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }

-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }

-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }

-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }

-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }

-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)

 #ifdef ASM_SUPPORTED

 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }

-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

 #else /* !ASM_SUPPORTED */

-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

-#define __raw_read_lock(rw)							\
+#define arch_read_lock(rw)							\
 do {										\
-	raw_rwlock_t *__read_lock_ptr = (rw);					\
+	arch_rwlock_t *__read_lock_ptr = (rw);					\
 										\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
@@ -188,16 +188,16 @@ do {										\

 #endif /* !ASM_SUPPORTED */

-#define __raw_read_unlock(rw)					\
+#define arch_read_unlock(rw)					\
 do {								\
-	raw_rwlock_t *__read_lock_ptr = (rw);			\
+	arch_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)

 #ifdef ASM_SUPPORTED

 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }

-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

-#define __raw_write_trylock(rw)			\
+#define arch_write_trylock(rw)			\
 ({						\
 	register long result;			\
 						\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0);				\
 })

-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)

 #else /* !ASM_SUPPORTED */

-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)

-#define __raw_write_lock(l)							\
+#define arch_write_lock(l)							\
 ({										\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);		\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);				\
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 | } while (ia64_val); \ | 257 | } while (ia64_val); \ |
258 | }) | 258 | }) |
259 | 259 | ||
260 | #define __raw_write_trylock(rw) \ | 260 | #define arch_write_trylock(rw) \ |
261 | ({ \ | 261 | ({ \ |
262 | __u64 ia64_val; \ | 262 | __u64 ia64_val; \ |
263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ | 263 | __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ |
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) | |||
265 | (ia64_val == 0); \ | 265 | (ia64_val == 0); \ |
266 | }) | 266 | }) |
267 | 267 | ||
268 | static inline void __raw_write_unlock(raw_rwlock_t *x) | 268 | static inline void arch_write_unlock(arch_rwlock_t *x) |
269 | { | 269 | { |
270 | barrier(); | 270 | barrier(); |
271 | x->write_lock = 0; | 271 | x->write_lock = 0; |
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) | |||
273 | 273 | ||
274 | #endif /* !ASM_SUPPORTED */ | 274 | #endif /* !ASM_SUPPORTED */ |
275 | 275 | ||
276 | static inline int __raw_read_trylock(raw_rwlock_t *x) | 276 | static inline int arch_read_trylock(arch_rwlock_t *x) |
277 | { | 277 | { |
278 | union { | 278 | union { |
279 | raw_rwlock_t lock; | 279 | arch_rwlock_t lock; |
280 | __u32 word; | 280 | __u32 word; |
281 | } old, new; | 281 | } old, new; |
282 | old.lock = new.lock = *x; | 282 | old.lock = new.lock = *x; |
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x) | |||
285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; | 285 | return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; |
286 | } | 286 | } |
287 | 287 | ||
288 | #define _raw_spin_relax(lock) cpu_relax() | 288 | #define arch_spin_relax(lock) cpu_relax() |
289 | #define _raw_read_relax(lock) cpu_relax() | 289 | #define arch_read_relax(lock) cpu_relax() |
290 | #define _raw_write_relax(lock) cpu_relax() | 290 | #define arch_write_relax(lock) cpu_relax() |
291 | 291 | ||
292 | #endif /* _ASM_IA64_SPINLOCK_H */ | 292 | #endif /* _ASM_IA64_SPINLOCK_H */ |
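The __ticket_spin_is_locked()/__ticket_spin_is_contended() pair at the top of this file compares the "next ticket" and "now serving" halves of a single lock word. A minimal user-space model of those two checks, assuming illustrative TICKET_SHIFT/TICKET_MASK values rather than ia64's real ones:

#include <stdio.h>

#define TICKET_SHIFT 16                /* illustrative, not ia64's real value */
#define TICKET_MASK  0x7fffu

/* low bits hold the next ticket to hand out, high bits the ticket now served */
static int model_is_locked(unsigned int tmp)
{
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static int model_is_contended(unsigned int tmp)
{
	/* a gap wider than one ticket means someone is queued behind the owner */
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

int main(void)
{
	unsigned int unlocked  = (5u << TICKET_SHIFT) | 5u;	/* head == tail */
	unsigned int held      = (5u << TICKET_SHIFT) | 6u;	/* one holder */
	unsigned int contended = (5u << TICKET_SHIFT) | 8u;	/* holder + waiters */

	printf("%d %d\n", model_is_locked(unlocked),  model_is_contended(unlocked));  /* 0 0 */
	printf("%d %d\n", model_is_locked(held),      model_is_contended(held));      /* 1 0 */
	printf("%d %d\n", model_is_locked(contended), model_is_contended(contended)); /* 1 1 */
	return 0;
}

Head equal to tail reads as unlocked; a gap of exactly one ticket is a held but uncontended lock; anything wider means waiters are queued.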
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4a..e2b42a52a6d3 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h | |||
@@ -7,15 +7,15 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int read_counter : 31; | 15 | volatile unsigned int read_counter : 31; |
16 | volatile unsigned int write_lock : 1; | 16 | volatile unsigned int write_lock : 1; |
17 | } raw_rwlock_t; | 17 | } arch_rwlock_t; |
18 | 18 | ||
19 | #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } |
20 | 20 | ||
21 | #endif | 21 | #endif |
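The arch_read_trylock() shown earlier overlays this 31-bit counter / 1-bit flag pair with a plain 32-bit word so a reader slot can be claimed in one compare-and-swap. A sketch of that union trick in portable C; the lines the hunk elides are reconstructed here as an assumption, and GCC's __sync builtin stands in for ia64_cmpxchg4_acq:

#include <stdint.h>

typedef struct {
	volatile unsigned int read_counter : 31;
	volatile unsigned int write_lock   : 1;
} model_rwlock_t;

static int model_read_trylock(model_rwlock_t *x)
{
	union {
		model_rwlock_t lock;
		uint32_t word;
	} old, new;

	old.lock = new.lock = *x;
	/* assumed reconstruction of the elided middle: only succeed if no
	 * writer was seen, and publish one more reader */
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;

	return __sync_val_compare_and_swap((uint32_t *)x, old.word, new.word)
		== old.word;
}

If a writer slipped in between the snapshot and the compare-and-swap, the stored word no longer matches old.word and the trylock fails without touching the lock.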
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index dab4d393908c..95ac77aeae9b 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi, | |||
793 | goto unlock_iosapic_lock; | 793 | goto unlock_iosapic_lock; |
794 | } | 794 | } |
795 | 795 | ||
796 | spin_lock(&irq_desc[irq].lock); | 796 | raw_spin_lock(&irq_desc[irq].lock); |
797 | dest = get_target_cpu(gsi, irq); | 797 | dest = get_target_cpu(gsi, irq); |
798 | dmode = choose_dmode(); | 798 | dmode = choose_dmode(); |
799 | err = register_intr(gsi, irq, dmode, polarity, trigger); | 799 | err = register_intr(gsi, irq, dmode, polarity, trigger); |
800 | if (err < 0) { | 800 | if (err < 0) { |
801 | spin_unlock(&irq_desc[irq].lock); | 801 | raw_spin_unlock(&irq_desc[irq].lock); |
802 | irq = err; | 802 | irq = err; |
803 | goto unlock_iosapic_lock; | 803 | goto unlock_iosapic_lock; |
804 | } | 804 | } |
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi, | |||
817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 817 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); | 818 | cpu_logical_id(dest), dest, irq_to_vector(irq)); |
819 | 819 | ||
820 | spin_unlock(&irq_desc[irq].lock); | 820 | raw_spin_unlock(&irq_desc[irq].lock); |
821 | unlock_iosapic_lock: | 821 | unlock_iosapic_lock: |
822 | spin_unlock_irqrestore(&iosapic_lock, flags); | 822 | spin_unlock_irqrestore(&iosapic_lock, flags); |
823 | return irq; | 823 | return irq; |
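Worth noting in this hunk is the nesting it leaves behind: iosapic_lock stays an ordinary spinlock_t while irq_desc[].lock has become raw, so the always-spinning lock sits innermost. A compile-stand-alone skeleton of that ordering, with stub types standing in for the kernel's real primitives (everything here is illustrative, not the kernel API):

typedef struct { int locked; } spinlock_t;       /* stub: may sleep on -rt */
typedef struct { int locked; } raw_spinlock_t;   /* stub: always spins */

static spinlock_t iosapic_lock;
static raw_spinlock_t desc_lock;

static void stub_spin_lock(spinlock_t *l)           { l->locked = 1; }
static void stub_spin_unlock(spinlock_t *l)         { l->locked = 0; }
static void stub_raw_spin_lock(raw_spinlock_t *l)   { l->locked = 1; }
static void stub_raw_spin_unlock(raw_spinlock_t *l) { l->locked = 0; }

static void register_intr_skeleton(void)
{
	stub_spin_lock(&iosapic_lock);     /* outer lock: ordinary spinlock_t */
	stub_raw_spin_lock(&desc_lock);    /* inner lock: raw, never sleeps */
	/* ... pick a target CPU and program the redirection entry ... */
	stub_raw_spin_unlock(&desc_lock);
	stub_spin_unlock(&iosapic_lock);
}

Raw innermost is the safe direction: a spinning section may nest inside one that can sleep, but a sleeping lock must never be taken while spinning.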
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d8951229e7c..94ee9d067cbd 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | if (i < NR_IRQS) { | 73 | if (i < NR_IRQS) { |
74 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 74 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
75 | action = irq_desc[i].action; | 75 | action = irq_desc[i].action; |
76 | if (!action) | 76 | if (!action) |
77 | goto skip; | 77 | goto skip; |
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
91 | 91 | ||
92 | seq_putc(p, '\n'); | 92 | seq_putc(p, '\n'); |
93 | skip: | 93 | skip: |
94 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 94 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
95 | } else if (i == NR_IRQS) | 95 | } else if (i == NR_IRQS) |
96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 96 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
97 | return 0; | 97 | return 0; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a1..70e4bad23432 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | |||
345 | 345 | ||
346 | desc = irq_desc + irq; | 346 | desc = irq_desc + irq; |
347 | cfg = irq_cfg + irq; | 347 | cfg = irq_cfg + irq; |
348 | spin_lock(&desc->lock); | 348 | raw_spin_lock(&desc->lock); |
349 | if (!cfg->move_cleanup_count) | 349 | if (!cfg->move_cleanup_count) |
350 | goto unlock; | 350 | goto unlock; |
351 | 351 | ||
@@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) | |||
358 | spin_unlock_irqrestore(&vector_lock, flags); | 358 | spin_unlock_irqrestore(&vector_lock, flags); |
359 | cfg->move_cleanup_count--; | 359 | cfg->move_cleanup_count--; |
360 | unlock: | 360 | unlock: |
361 | spin_unlock(&desc->lock); | 361 | raw_spin_unlock(&desc->lock); |
362 | } | 362 | } |
363 | return IRQ_HANDLED; | 363 | return IRQ_HANDLED; |
364 | } | 364 | } |
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923883b2..179a06489b10 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h | |||
@@ -24,19 +24,19 @@ | |||
24 | * We make no fairness assumptions. They have a cost. | 24 | * We make no fairness assumptions. They have a cost. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) | 27 | #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) |
28 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 28 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
29 | #define __raw_spin_unlock_wait(x) \ | 29 | #define arch_spin_unlock_wait(x) \ |
30 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 30 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
31 | 31 | ||
32 | /** | 32 | /** |
33 | * __raw_spin_trylock - Try spin lock and return a result | 33 | * arch_spin_trylock - Try spin lock and return a result |
34 | * @lock: Pointer to the lock variable | 34 | * @lock: Pointer to the lock variable |
35 | * | 35 | * |
36 | * __raw_spin_trylock() tries to get the lock and returns a result. | 36 | * arch_spin_trylock() tries to get the lock and returns a result. |
37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). | 37 | * On the m32r, the result value is 1 (= Success) or 0 (= Failure). |
38 | */ | 38 | */ |
39 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
40 | { | 40 | { |
41 | int oldval; | 41 | int oldval; |
42 | unsigned long tmp1, tmp2; | 42 | unsigned long tmp1, tmp2; |
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
50 | * } | 50 | * } |
51 | */ | 51 | */ |
52 | __asm__ __volatile__ ( | 52 | __asm__ __volatile__ ( |
53 | "# __raw_spin_trylock \n\t" | 53 | "# arch_spin_trylock \n\t" |
54 | "ldi %1, #0; \n\t" | 54 | "ldi %1, #0; \n\t" |
55 | "mvfc %2, psw; \n\t" | 55 | "mvfc %2, psw; \n\t" |
56 | "clrpsw #0x40 -> nop; \n\t" | 56 | "clrpsw #0x40 -> nop; \n\t" |
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
69 | return (oldval > 0); | 69 | return (oldval > 0); |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 72 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
73 | { | 73 | { |
74 | unsigned long tmp0, tmp1; | 74 | unsigned long tmp0, tmp1; |
75 | 75 | ||
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
84 | * } | 84 | * } |
85 | */ | 85 | */ |
86 | __asm__ __volatile__ ( | 86 | __asm__ __volatile__ ( |
87 | "# __raw_spin_lock \n\t" | 87 | "# arch_spin_lock \n\t" |
88 | ".fillinsn \n" | 88 | ".fillinsn \n" |
89 | "1: \n\t" | 89 | "1: \n\t" |
90 | "mvfc %1, psw; \n\t" | 90 | "mvfc %1, psw; \n\t" |
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
111 | ); | 111 | ); |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 114 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
115 | { | 115 | { |
116 | mb(); | 116 | mb(); |
117 | lock->slock = 1; | 117 | lock->slock = 1; |
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
140 | * read_can_lock - would read_trylock() succeed? | 140 | * read_can_lock - would read_trylock() succeed? |
141 | * @lock: the rwlock in question. | 141 | * @lock: the rwlock in question. |
142 | */ | 142 | */ |
143 | #define __raw_read_can_lock(x) ((int)(x)->lock > 0) | 143 | #define arch_read_can_lock(x) ((int)(x)->lock > 0) |
144 | 144 | ||
145 | /** | 145 | /** |
146 | * write_can_lock - would write_trylock() succeed? | 146 | * write_can_lock - would write_trylock() succeed? |
147 | * @lock: the rwlock in question. | 147 | * @lock: the rwlock in question. |
148 | */ | 148 | */ |
149 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 149 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
150 | 150 | ||
151 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 151 | static inline void arch_read_lock(arch_rwlock_t *rw) |
152 | { | 152 | { |
153 | unsigned long tmp0, tmp1; | 153 | unsigned long tmp0, tmp1; |
154 | 154 | ||
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
199 | ); | 199 | ); |
200 | } | 200 | } |
201 | 201 | ||
202 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 202 | static inline void arch_write_lock(arch_rwlock_t *rw) |
203 | { | 203 | { |
204 | unsigned long tmp0, tmp1, tmp2; | 204 | unsigned long tmp0, tmp1, tmp2; |
205 | 205 | ||
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
252 | ); | 252 | ); |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 255 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
256 | { | 256 | { |
257 | unsigned long tmp0, tmp1; | 257 | unsigned long tmp0, tmp1; |
258 | 258 | ||
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
274 | ); | 274 | ); |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 277 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
278 | { | 278 | { |
279 | unsigned long tmp0, tmp1, tmp2; | 279 | unsigned long tmp0, tmp1, tmp2; |
280 | 280 | ||
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
298 | ); | 298 | ); |
299 | } | 299 | } |
300 | 300 | ||
301 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 301 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
302 | { | 302 | { |
303 | atomic_t *count = (atomic_t*)lock; | 303 | atomic_t *count = (atomic_t*)lock; |
304 | if (atomic_dec_return(count) >= 0) | 304 | if (atomic_dec_return(count) >= 0) |
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) | |||
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | 310 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
311 | { | 311 | { |
312 | atomic_t *count = (atomic_t *)lock; | 312 | atomic_t *count = (atomic_t *)lock; |
313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | 313 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) |
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) | |||
316 | return 0; | 316 | return 0; |
317 | } | 317 | } |
318 | 318 | ||
319 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 319 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
320 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 320 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
321 | 321 | ||
322 | #define _raw_spin_relax(lock) cpu_relax() | 322 | #define arch_spin_relax(lock) cpu_relax() |
323 | #define _raw_read_relax(lock) cpu_relax() | 323 | #define arch_read_relax(lock) cpu_relax() |
324 | #define _raw_write_relax(lock) cpu_relax() | 324 | #define arch_write_relax(lock) cpu_relax() |
325 | 325 | ||
326 | #endif /* _ASM_M32R_SPINLOCK_H */ | 326 | #endif /* _ASM_M32R_SPINLOCK_H */ |
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f52105c0e4..92e27672661f 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h | |||
@@ -7,17 +7,17 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile int slock; | 9 | volatile int slock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile int lock; | 15 | volatile int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define RW_LOCK_BIAS 0x01000000 | 18 | #define RW_LOCK_BIAS 0x01000000 |
19 | #define RW_LOCK_BIAS_STR "0x01000000" | 19 | #define RW_LOCK_BIAS_STR "0x01000000" |
20 | 20 | ||
21 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 21 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
22 | 22 | ||
23 | #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ | 23 | #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ |
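RW_LOCK_BIAS defined here is what drives the trylocks in the previous file: each reader subtracts 1 from the counter and a writer subtracts the whole bias, so the writer succeeds only when the counter was untouched. A hedged C11 model of that arithmetic, with stdatomic calls standing in for the kernel's atomic_t helpers:

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

static atomic_int model_rwlock = RW_LOCK_BIAS;

static int model_read_trylock(void)
{
	if (atomic_fetch_sub(&model_rwlock, 1) > 0)
		return 1;                              /* no writer: slot taken */
	atomic_fetch_add(&model_rwlock, 1);            /* writer present: back out */
	return 0;
}

static int model_write_trylock(void)
{
	if (atomic_fetch_sub(&model_rwlock, RW_LOCK_BIAS) == RW_LOCK_BIAS)
		return 1;                              /* counter untouched: exclusive */
	atomic_fetch_add(&model_rwlock, RW_LOCK_BIAS); /* readers active: back out */
	return 0;
}

A failed attempt briefly drives the counter below its resting value, then restores it, which is exactly what the atomic_dec_return()/atomic_sub_and_test() sequences in the m32r code do.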
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e87c4c..3c71f776872c 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c | |||
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
40 | } | 40 | } |
41 | 41 | ||
42 | if (i < NR_IRQS) { | 42 | if (i < NR_IRQS) { |
43 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 43 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
44 | action = irq_desc[i].action; | 44 | action = irq_desc[i].action; |
45 | if (!action) | 45 | if (!action) |
46 | goto skip; | 46 | goto skip; |
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
59 | 59 | ||
60 | seq_putc(p, '\n'); | 60 | seq_putc(p, '\n'); |
61 | skip: | 61 | skip: |
62 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 62 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
63 | } | 63 | } |
64 | return 0; | 64 | return 0; |
65 | } | 65 | } |
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd62d4d2..0f06034d1fe0 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c | |||
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
68 | } | 68 | } |
69 | 69 | ||
70 | if (i < nr_irq) { | 70 | if (i < nr_irq) { |
71 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 71 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
72 | action = irq_desc[i].action; | 72 | action = irq_desc[i].action; |
73 | if (!action) | 73 | if (!action) |
74 | goto skip; | 74 | goto skip; |
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
89 | 89 | ||
90 | seq_putc(p, '\n'); | 90 | seq_putc(p, '\n'); |
91 | skip: | 91 | skip: |
92 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 92 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
93 | } | 93 | } |
94 | return 0; | 94 | return 0; |
95 | } | 95 | } |
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5b60a09a0f08..21ef9efbde43 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h | |||
@@ -34,33 +34,33 @@ | |||
34 | * becomes equal to the initial value of the tail. | 34 | * becomes equal to the initial value of the tail. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned int counters = ACCESS_ONCE(lock->lock); | 39 | unsigned int counters = ACCESS_ONCE(lock->lock); |
40 | 40 | ||
41 | return ((counters >> 14) ^ counters) & 0x1fff; | 41 | return ((counters >> 14) ^ counters) & 0x1fff; |
42 | } | 42 | } |
43 | 43 | ||
44 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 44 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
45 | #define __raw_spin_unlock_wait(x) \ | 45 | #define arch_spin_unlock_wait(x) \ |
46 | while (__raw_spin_is_locked(x)) { cpu_relax(); } | 46 | while (arch_spin_is_locked(x)) { cpu_relax(); } |
47 | 47 | ||
48 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 48 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
49 | { | 49 | { |
50 | unsigned int counters = ACCESS_ONCE(lock->lock); | 50 | unsigned int counters = ACCESS_ONCE(lock->lock); |
51 | 51 | ||
52 | return (((counters >> 14) - counters) & 0x1fff) > 1; | 52 | return (((counters >> 14) - counters) & 0x1fff) > 1; |
53 | } | 53 | } |
54 | #define __raw_spin_is_contended __raw_spin_is_contended | 54 | #define arch_spin_is_contended arch_spin_is_contended |
55 | 55 | ||
56 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 56 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
57 | { | 57 | { |
58 | int my_ticket; | 58 | int my_ticket; |
59 | int tmp; | 59 | int tmp; |
60 | 60 | ||
61 | if (R10000_LLSC_WAR) { | 61 | if (R10000_LLSC_WAR) { |
62 | __asm__ __volatile__ ( | 62 | __asm__ __volatile__ ( |
63 | " .set push # __raw_spin_lock \n" | 63 | " .set push # arch_spin_lock \n" |
64 | " .set noreorder \n" | 64 | " .set noreorder \n" |
65 | " \n" | 65 | " \n" |
66 | "1: ll %[ticket], %[ticket_ptr] \n" | 66 | "1: ll %[ticket], %[ticket_ptr] \n" |
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
94 | [my_ticket] "=&r" (my_ticket)); | 94 | [my_ticket] "=&r" (my_ticket)); |
95 | } else { | 95 | } else { |
96 | __asm__ __volatile__ ( | 96 | __asm__ __volatile__ ( |
97 | " .set push # __raw_spin_lock \n" | 97 | " .set push # arch_spin_lock \n" |
98 | " .set noreorder \n" | 98 | " .set noreorder \n" |
99 | " \n" | 99 | " \n" |
100 | " ll %[ticket], %[ticket_ptr] \n" | 100 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
134 | smp_llsc_mb(); | 134 | smp_llsc_mb(); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 137 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
138 | { | 138 | { |
139 | int tmp; | 139 | int tmp; |
140 | 140 | ||
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
142 | 142 | ||
143 | if (R10000_LLSC_WAR) { | 143 | if (R10000_LLSC_WAR) { |
144 | __asm__ __volatile__ ( | 144 | __asm__ __volatile__ ( |
145 | " # __raw_spin_unlock \n" | 145 | " # arch_spin_unlock \n" |
146 | "1: ll %[ticket], %[ticket_ptr] \n" | 146 | "1: ll %[ticket], %[ticket_ptr] \n" |
147 | " addiu %[ticket], %[ticket], 1 \n" | 147 | " addiu %[ticket], %[ticket], 1 \n" |
148 | " ori %[ticket], %[ticket], 0x2000 \n" | 148 | " ori %[ticket], %[ticket], 0x2000 \n" |
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
153 | [ticket] "=&r" (tmp)); | 153 | [ticket] "=&r" (tmp)); |
154 | } else { | 154 | } else { |
155 | __asm__ __volatile__ ( | 155 | __asm__ __volatile__ ( |
156 | " .set push # __raw_spin_unlock \n" | 156 | " .set push # arch_spin_unlock \n" |
157 | " .set noreorder \n" | 157 | " .set noreorder \n" |
158 | " \n" | 158 | " \n" |
159 | " ll %[ticket], %[ticket_ptr] \n" | 159 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
174 | } | 174 | } |
175 | } | 175 | } |
176 | 176 | ||
177 | static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | 177 | static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | int tmp, tmp2, tmp3; | 179 | int tmp, tmp2, tmp3; |
180 | 180 | ||
181 | if (R10000_LLSC_WAR) { | 181 | if (R10000_LLSC_WAR) { |
182 | __asm__ __volatile__ ( | 182 | __asm__ __volatile__ ( |
183 | " .set push # __raw_spin_trylock \n" | 183 | " .set push # arch_spin_trylock \n" |
184 | " .set noreorder \n" | 184 | " .set noreorder \n" |
185 | " \n" | 185 | " \n" |
186 | "1: ll %[ticket], %[ticket_ptr] \n" | 186 | "1: ll %[ticket], %[ticket_ptr] \n" |
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | |||
204 | [now_serving] "=&r" (tmp3)); | 204 | [now_serving] "=&r" (tmp3)); |
205 | } else { | 205 | } else { |
206 | __asm__ __volatile__ ( | 206 | __asm__ __volatile__ ( |
207 | " .set push # __raw_spin_trylock \n" | 207 | " .set push # arch_spin_trylock \n" |
208 | " .set noreorder \n" | 208 | " .set noreorder \n" |
209 | " \n" | 209 | " \n" |
210 | " ll %[ticket], %[ticket_ptr] \n" | 210 | " ll %[ticket], %[ticket_ptr] \n" |
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) | |||
248 | * read_can_lock - would read_trylock() succeed? | 248 | * read_can_lock - would read_trylock() succeed? |
249 | * @lock: the rwlock in question. | 249 | * @lock: the rwlock in question. |
250 | */ | 250 | */ |
251 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) | 251 | #define arch_read_can_lock(rw) ((rw)->lock >= 0) |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * write_can_lock - would write_trylock() succeed? | 254 | * write_can_lock - would write_trylock() succeed? |
255 | * @lock: the rwlock in question. | 255 | * @lock: the rwlock in question. |
256 | */ | 256 | */ |
257 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 257 | #define arch_write_can_lock(rw) (!(rw)->lock) |
258 | 258 | ||
259 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 259 | static inline void arch_read_lock(arch_rwlock_t *rw) |
260 | { | 260 | { |
261 | unsigned int tmp; | 261 | unsigned int tmp; |
262 | 262 | ||
263 | if (R10000_LLSC_WAR) { | 263 | if (R10000_LLSC_WAR) { |
264 | __asm__ __volatile__( | 264 | __asm__ __volatile__( |
265 | " .set noreorder # __raw_read_lock \n" | 265 | " .set noreorder # arch_read_lock \n" |
266 | "1: ll %1, %2 \n" | 266 | "1: ll %1, %2 \n" |
267 | " bltz %1, 1b \n" | 267 | " bltz %1, 1b \n" |
268 | " addu %1, 1 \n" | 268 | " addu %1, 1 \n" |
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
275 | : "memory"); | 275 | : "memory"); |
276 | } else { | 276 | } else { |
277 | __asm__ __volatile__( | 277 | __asm__ __volatile__( |
278 | " .set noreorder # __raw_read_lock \n" | 278 | " .set noreorder # arch_read_lock \n" |
279 | "1: ll %1, %2 \n" | 279 | "1: ll %1, %2 \n" |
280 | " bltz %1, 2f \n" | 280 | " bltz %1, 2f \n" |
281 | " addu %1, 1 \n" | 281 | " addu %1, 1 \n" |
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
301 | /* Note the use of sub, not subu, which will make the kernel die with an | 301 | /* Note the use of sub, not subu, which will make the kernel die with an |
302 | overflow exception if we ever try to unlock an rwlock that is already | 302 | overflow exception if we ever try to unlock an rwlock that is already |
303 | unlocked or is being held by a writer. */ | 303 | unlocked or is being held by a writer. */ |
304 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 304 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
305 | { | 305 | { |
306 | unsigned int tmp; | 306 | unsigned int tmp; |
307 | 307 | ||
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
309 | 309 | ||
310 | if (R10000_LLSC_WAR) { | 310 | if (R10000_LLSC_WAR) { |
311 | __asm__ __volatile__( | 311 | __asm__ __volatile__( |
312 | "1: ll %1, %2 # __raw_read_unlock \n" | 312 | "1: ll %1, %2 # arch_read_unlock \n" |
313 | " sub %1, 1 \n" | 313 | " sub %1, 1 \n" |
314 | " sc %1, %0 \n" | 314 | " sc %1, %0 \n" |
315 | " beqzl %1, 1b \n" | 315 | " beqzl %1, 1b \n" |
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
318 | : "memory"); | 318 | : "memory"); |
319 | } else { | 319 | } else { |
320 | __asm__ __volatile__( | 320 | __asm__ __volatile__( |
321 | " .set noreorder # __raw_read_unlock \n" | 321 | " .set noreorder # arch_read_unlock \n" |
322 | "1: ll %1, %2 \n" | 322 | "1: ll %1, %2 \n" |
323 | " sub %1, 1 \n" | 323 | " sub %1, 1 \n" |
324 | " sc %1, %0 \n" | 324 | " sc %1, %0 \n" |
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
338 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 338 | static inline void arch_write_lock(arch_rwlock_t *rw) |
339 | { | 339 | { |
340 | unsigned int tmp; | 340 | unsigned int tmp; |
341 | 341 | ||
342 | if (R10000_LLSC_WAR) { | 342 | if (R10000_LLSC_WAR) { |
343 | __asm__ __volatile__( | 343 | __asm__ __volatile__( |
344 | " .set noreorder # __raw_write_lock \n" | 344 | " .set noreorder # arch_write_lock \n" |
345 | "1: ll %1, %2 \n" | 345 | "1: ll %1, %2 \n" |
346 | " bnez %1, 1b \n" | 346 | " bnez %1, 1b \n" |
347 | " lui %1, 0x8000 \n" | 347 | " lui %1, 0x8000 \n" |
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
354 | : "memory"); | 354 | : "memory"); |
355 | } else { | 355 | } else { |
356 | __asm__ __volatile__( | 356 | __asm__ __volatile__( |
357 | " .set noreorder # __raw_write_lock \n" | 357 | " .set noreorder # arch_write_lock \n" |
358 | "1: ll %1, %2 \n" | 358 | "1: ll %1, %2 \n" |
359 | " bnez %1, 2f \n" | 359 | " bnez %1, 2f \n" |
360 | " lui %1, 0x8000 \n" | 360 | " lui %1, 0x8000 \n" |
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
377 | smp_llsc_mb(); | 377 | smp_llsc_mb(); |
378 | } | 378 | } |
379 | 379 | ||
380 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 380 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
381 | { | 381 | { |
382 | smp_mb(); | 382 | smp_mb(); |
383 | 383 | ||
384 | __asm__ __volatile__( | 384 | __asm__ __volatile__( |
385 | " # __raw_write_unlock \n" | 385 | " # arch_write_unlock \n" |
386 | " sw $0, %0 \n" | 386 | " sw $0, %0 \n" |
387 | : "=m" (rw->lock) | 387 | : "=m" (rw->lock) |
388 | : "m" (rw->lock) | 388 | : "m" (rw->lock) |
389 | : "memory"); | 389 | : "memory"); |
390 | } | 390 | } |
391 | 391 | ||
392 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 392 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
393 | { | 393 | { |
394 | unsigned int tmp; | 394 | unsigned int tmp; |
395 | int ret; | 395 | int ret; |
396 | 396 | ||
397 | if (R10000_LLSC_WAR) { | 397 | if (R10000_LLSC_WAR) { |
398 | __asm__ __volatile__( | 398 | __asm__ __volatile__( |
399 | " .set noreorder # __raw_read_trylock \n" | 399 | " .set noreorder # arch_read_trylock \n" |
400 | " li %2, 0 \n" | 400 | " li %2, 0 \n" |
401 | "1: ll %1, %3 \n" | 401 | "1: ll %1, %3 \n" |
402 | " bltz %1, 2f \n" | 402 | " bltz %1, 2f \n" |
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
413 | : "memory"); | 413 | : "memory"); |
414 | } else { | 414 | } else { |
415 | __asm__ __volatile__( | 415 | __asm__ __volatile__( |
416 | " .set noreorder # __raw_read_trylock \n" | 416 | " .set noreorder # arch_read_trylock \n" |
417 | " li %2, 0 \n" | 417 | " li %2, 0 \n" |
418 | "1: ll %1, %3 \n" | 418 | "1: ll %1, %3 \n" |
419 | " bltz %1, 2f \n" | 419 | " bltz %1, 2f \n" |
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
433 | return ret; | 433 | return ret; |
434 | } | 434 | } |
435 | 435 | ||
436 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 436 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
437 | { | 437 | { |
438 | unsigned int tmp; | 438 | unsigned int tmp; |
439 | int ret; | 439 | int ret; |
440 | 440 | ||
441 | if (R10000_LLSC_WAR) { | 441 | if (R10000_LLSC_WAR) { |
442 | __asm__ __volatile__( | 442 | __asm__ __volatile__( |
443 | " .set noreorder # __raw_write_trylock \n" | 443 | " .set noreorder # arch_write_trylock \n" |
444 | " li %2, 0 \n" | 444 | " li %2, 0 \n" |
445 | "1: ll %1, %3 \n" | 445 | "1: ll %1, %3 \n" |
446 | " bnez %1, 2f \n" | 446 | " bnez %1, 2f \n" |
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
457 | : "memory"); | 457 | : "memory"); |
458 | } else { | 458 | } else { |
459 | __asm__ __volatile__( | 459 | __asm__ __volatile__( |
460 | " .set noreorder # __raw_write_trylock \n" | 460 | " .set noreorder # arch_write_trylock \n" |
461 | " li %2, 0 \n" | 461 | " li %2, 0 \n" |
462 | "1: ll %1, %3 \n" | 462 | "1: ll %1, %3 \n" |
463 | " bnez %1, 2f \n" | 463 | " bnez %1, 2f \n" |
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
480 | return ret; | 480 | return ret; |
481 | } | 481 | } |
482 | 482 | ||
483 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 483 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
484 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 484 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
485 | 485 | ||
486 | #define _raw_spin_relax(lock) cpu_relax() | 486 | #define arch_spin_relax(lock) cpu_relax() |
487 | #define _raw_read_relax(lock) cpu_relax() | 487 | #define arch_read_relax(lock) cpu_relax() |
488 | #define _raw_write_relax(lock) cpu_relax() | 488 | #define arch_write_relax(lock) cpu_relax() |
489 | 489 | ||
490 | #endif /* _ASM_SPINLOCK_H */ | 490 | #endif /* _ASM_SPINLOCK_H */ |
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index adeedaa116c1..ee197c2f9c98 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h | |||
@@ -12,14 +12,14 @@ typedef struct { | |||
12 | * bits 15..28: ticket | 12 | * bits 15..28: ticket |
13 | */ | 13 | */ |
14 | unsigned int lock; | 14 | unsigned int lock; |
15 | } raw_spinlock_t; | 15 | } arch_spinlock_t; |
16 | 16 | ||
17 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 17 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
18 | 18 | ||
19 | typedef struct { | 19 | typedef struct { |
20 | volatile unsigned int lock; | 20 | volatile unsigned int lock; |
21 | } raw_rwlock_t; | 21 | } arch_rwlock_t; |
22 | 22 | ||
23 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | 23 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } |
24 | 24 | ||
25 | #endif | 25 | #endif |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba9dff4..8b0b4181219f 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | if (i < NR_IRQS) { | 101 | if (i < NR_IRQS) { |
102 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 102 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
103 | action = irq_desc[i].action; | 103 | action = irq_desc[i].action; |
104 | if (!action) | 104 | if (!action) |
105 | goto skip; | 105 | goto skip; |
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
118 | 118 | ||
119 | seq_putc(p, '\n'); | 119 | seq_putc(p, '\n'); |
120 | skip: | 120 | skip: |
121 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 121 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
122 | } else if (i == NR_IRQS) { | 122 | } else if (i == NR_IRQS) { |
123 | seq_putc(p, '\n'); | 123 | seq_putc(p, '\n'); |
124 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | 124 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); |
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 6d39e222b170..6153b6a05ccf 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c | |||
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask) | |||
159 | 159 | ||
160 | if (current_cpu_type() == CPU_VR4111 || | 160 | if (current_cpu_type() == CPU_VR4111 || |
161 | current_cpu_type() == CPU_VR4121) { | 161 | current_cpu_type() == CPU_VR4121) { |
162 | spin_lock_irqsave(&desc->lock, flags); | 162 | raw_spin_lock_irqsave(&desc->lock, flags); |
163 | icu1_set(MPIUINTREG, mask); | 163 | icu1_set(MPIUINTREG, mask); |
164 | spin_unlock_irqrestore(&desc->lock, flags); | 164 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
165 | } | 165 | } |
166 | } | 166 | } |
167 | 167 | ||
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask) | |||
174 | 174 | ||
175 | if (current_cpu_type() == CPU_VR4111 || | 175 | if (current_cpu_type() == CPU_VR4111 || |
176 | current_cpu_type() == CPU_VR4121) { | 176 | current_cpu_type() == CPU_VR4121) { |
177 | spin_lock_irqsave(&desc->lock, flags); | 177 | raw_spin_lock_irqsave(&desc->lock, flags); |
178 | icu1_clear(MPIUINTREG, mask); | 178 | icu1_clear(MPIUINTREG, mask); |
179 | spin_unlock_irqrestore(&desc->lock, flags); | 179 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
180 | } | 180 | } |
181 | } | 181 | } |
182 | 182 | ||
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask) | |||
189 | 189 | ||
190 | if (current_cpu_type() == CPU_VR4111 || | 190 | if (current_cpu_type() == CPU_VR4111 || |
191 | current_cpu_type() == CPU_VR4121) { | 191 | current_cpu_type() == CPU_VR4121) { |
192 | spin_lock_irqsave(&desc->lock, flags); | 192 | raw_spin_lock_irqsave(&desc->lock, flags); |
193 | icu1_set(MAIUINTREG, mask); | 193 | icu1_set(MAIUINTREG, mask); |
194 | spin_unlock_irqrestore(&desc->lock, flags); | 194 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
195 | } | 195 | } |
196 | } | 196 | } |
197 | 197 | ||
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask) | |||
204 | 204 | ||
205 | if (current_cpu_type() == CPU_VR4111 || | 205 | if (current_cpu_type() == CPU_VR4111 || |
206 | current_cpu_type() == CPU_VR4121) { | 206 | current_cpu_type() == CPU_VR4121) { |
207 | spin_lock_irqsave(&desc->lock, flags); | 207 | raw_spin_lock_irqsave(&desc->lock, flags); |
208 | icu1_clear(MAIUINTREG, mask); | 208 | icu1_clear(MAIUINTREG, mask); |
209 | spin_unlock_irqrestore(&desc->lock, flags); | 209 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
210 | } | 210 | } |
211 | } | 211 | } |
212 | 212 | ||
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask) | |||
219 | 219 | ||
220 | if (current_cpu_type() == CPU_VR4111 || | 220 | if (current_cpu_type() == CPU_VR4111 || |
221 | current_cpu_type() == CPU_VR4121) { | 221 | current_cpu_type() == CPU_VR4121) { |
222 | spin_lock_irqsave(&desc->lock, flags); | 222 | raw_spin_lock_irqsave(&desc->lock, flags); |
223 | icu1_set(MKIUINTREG, mask); | 223 | icu1_set(MKIUINTREG, mask); |
224 | spin_unlock_irqrestore(&desc->lock, flags); | 224 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
225 | } | 225 | } |
226 | } | 226 | } |
227 | 227 | ||
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask) | |||
234 | 234 | ||
235 | if (current_cpu_type() == CPU_VR4111 || | 235 | if (current_cpu_type() == CPU_VR4111 || |
236 | current_cpu_type() == CPU_VR4121) { | 236 | current_cpu_type() == CPU_VR4121) { |
237 | spin_lock_irqsave(&desc->lock, flags); | 237 | raw_spin_lock_irqsave(&desc->lock, flags); |
238 | icu1_clear(MKIUINTREG, mask); | 238 | icu1_clear(MKIUINTREG, mask); |
239 | spin_unlock_irqrestore(&desc->lock, flags); | 239 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
240 | } | 240 | } |
241 | } | 241 | } |
242 | 242 | ||
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask) | |||
247 | struct irq_desc *desc = irq_desc + ETHERNET_IRQ; | 247 | struct irq_desc *desc = irq_desc + ETHERNET_IRQ; |
248 | unsigned long flags; | 248 | unsigned long flags; |
249 | 249 | ||
250 | spin_lock_irqsave(&desc->lock, flags); | 250 | raw_spin_lock_irqsave(&desc->lock, flags); |
251 | icu1_set(MMACINTREG, mask); | 251 | icu1_set(MMACINTREG, mask); |
252 | spin_unlock_irqrestore(&desc->lock, flags); | 252 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
253 | } | 253 | } |
254 | 254 | ||
255 | EXPORT_SYMBOL(vr41xx_enable_macint); | 255 | EXPORT_SYMBOL(vr41xx_enable_macint); |
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask) | |||
259 | struct irq_desc *desc = irq_desc + ETHERNET_IRQ; | 259 | struct irq_desc *desc = irq_desc + ETHERNET_IRQ; |
260 | unsigned long flags; | 260 | unsigned long flags; |
261 | 261 | ||
262 | spin_lock_irqsave(&desc->lock, flags); | 262 | raw_spin_lock_irqsave(&desc->lock, flags); |
263 | icu1_clear(MMACINTREG, mask); | 263 | icu1_clear(MMACINTREG, mask); |
264 | spin_unlock_irqrestore(&desc->lock, flags); | 264 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
265 | } | 265 | } |
266 | 266 | ||
267 | EXPORT_SYMBOL(vr41xx_disable_macint); | 267 | EXPORT_SYMBOL(vr41xx_disable_macint); |
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask) | |||
271 | struct irq_desc *desc = irq_desc + DSIU_IRQ; | 271 | struct irq_desc *desc = irq_desc + DSIU_IRQ; |
272 | unsigned long flags; | 272 | unsigned long flags; |
273 | 273 | ||
274 | spin_lock_irqsave(&desc->lock, flags); | 274 | raw_spin_lock_irqsave(&desc->lock, flags); |
275 | icu1_set(MDSIUINTREG, mask); | 275 | icu1_set(MDSIUINTREG, mask); |
276 | spin_unlock_irqrestore(&desc->lock, flags); | 276 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
277 | } | 277 | } |
278 | 278 | ||
279 | EXPORT_SYMBOL(vr41xx_enable_dsiuint); | 279 | EXPORT_SYMBOL(vr41xx_enable_dsiuint); |
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask) | |||
283 | struct irq_desc *desc = irq_desc + DSIU_IRQ; | 283 | struct irq_desc *desc = irq_desc + DSIU_IRQ; |
284 | unsigned long flags; | 284 | unsigned long flags; |
285 | 285 | ||
286 | spin_lock_irqsave(&desc->lock, flags); | 286 | raw_spin_lock_irqsave(&desc->lock, flags); |
287 | icu1_clear(MDSIUINTREG, mask); | 287 | icu1_clear(MDSIUINTREG, mask); |
288 | spin_unlock_irqrestore(&desc->lock, flags); | 288 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
289 | } | 289 | } |
290 | 290 | ||
291 | EXPORT_SYMBOL(vr41xx_disable_dsiuint); | 291 | EXPORT_SYMBOL(vr41xx_disable_dsiuint); |
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask) | |||
295 | struct irq_desc *desc = irq_desc + FIR_IRQ; | 295 | struct irq_desc *desc = irq_desc + FIR_IRQ; |
296 | unsigned long flags; | 296 | unsigned long flags; |
297 | 297 | ||
298 | spin_lock_irqsave(&desc->lock, flags); | 298 | raw_spin_lock_irqsave(&desc->lock, flags); |
299 | icu2_set(MFIRINTREG, mask); | 299 | icu2_set(MFIRINTREG, mask); |
300 | spin_unlock_irqrestore(&desc->lock, flags); | 300 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
301 | } | 301 | } |
302 | 302 | ||
303 | EXPORT_SYMBOL(vr41xx_enable_firint); | 303 | EXPORT_SYMBOL(vr41xx_enable_firint); |
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask) | |||
307 | struct irq_desc *desc = irq_desc + FIR_IRQ; | 307 | struct irq_desc *desc = irq_desc + FIR_IRQ; |
308 | unsigned long flags; | 308 | unsigned long flags; |
309 | 309 | ||
310 | spin_lock_irqsave(&desc->lock, flags); | 310 | raw_spin_lock_irqsave(&desc->lock, flags); |
311 | icu2_clear(MFIRINTREG, mask); | 311 | icu2_clear(MFIRINTREG, mask); |
312 | spin_unlock_irqrestore(&desc->lock, flags); | 312 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
313 | } | 313 | } |
314 | 314 | ||
315 | EXPORT_SYMBOL(vr41xx_disable_firint); | 315 | EXPORT_SYMBOL(vr41xx_disable_firint); |
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void) | |||
322 | if (current_cpu_type() == CPU_VR4122 || | 322 | if (current_cpu_type() == CPU_VR4122 || |
323 | current_cpu_type() == CPU_VR4131 || | 323 | current_cpu_type() == CPU_VR4131 || |
324 | current_cpu_type() == CPU_VR4133) { | 324 | current_cpu_type() == CPU_VR4133) { |
325 | spin_lock_irqsave(&desc->lock, flags); | 325 | raw_spin_lock_irqsave(&desc->lock, flags); |
326 | icu2_write(MPCIINTREG, PCIINT0); | 326 | icu2_write(MPCIINTREG, PCIINT0); |
327 | spin_unlock_irqrestore(&desc->lock, flags); | 327 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
328 | } | 328 | } |
329 | } | 329 | } |
330 | 330 | ||
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void) | |||
338 | if (current_cpu_type() == CPU_VR4122 || | 338 | if (current_cpu_type() == CPU_VR4122 || |
339 | current_cpu_type() == CPU_VR4131 || | 339 | current_cpu_type() == CPU_VR4131 || |
340 | current_cpu_type() == CPU_VR4133) { | 340 | current_cpu_type() == CPU_VR4133) { |
341 | spin_lock_irqsave(&desc->lock, flags); | 341 | raw_spin_lock_irqsave(&desc->lock, flags); |
342 | icu2_write(MPCIINTREG, 0); | 342 | icu2_write(MPCIINTREG, 0); |
343 | spin_unlock_irqrestore(&desc->lock, flags); | 343 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
344 | } | 344 | } |
345 | } | 345 | } |
346 | 346 | ||
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void) | |||
354 | if (current_cpu_type() == CPU_VR4122 || | 354 | if (current_cpu_type() == CPU_VR4122 || |
355 | current_cpu_type() == CPU_VR4131 || | 355 | current_cpu_type() == CPU_VR4131 || |
356 | current_cpu_type() == CPU_VR4133) { | 356 | current_cpu_type() == CPU_VR4133) { |
357 | spin_lock_irqsave(&desc->lock, flags); | 357 | raw_spin_lock_irqsave(&desc->lock, flags); |
358 | icu2_write(MSCUINTREG, SCUINT0); | 358 | icu2_write(MSCUINTREG, SCUINT0); |
359 | spin_unlock_irqrestore(&desc->lock, flags); | 359 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
360 | } | 360 | } |
361 | } | 361 | } |
362 | 362 | ||
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void) | |||
370 | if (current_cpu_type() == CPU_VR4122 || | 370 | if (current_cpu_type() == CPU_VR4122 || |
371 | current_cpu_type() == CPU_VR4131 || | 371 | current_cpu_type() == CPU_VR4131 || |
372 | current_cpu_type() == CPU_VR4133) { | 372 | current_cpu_type() == CPU_VR4133) { |
373 | spin_lock_irqsave(&desc->lock, flags); | 373 | raw_spin_lock_irqsave(&desc->lock, flags); |
374 | icu2_write(MSCUINTREG, 0); | 374 | icu2_write(MSCUINTREG, 0); |
375 | spin_unlock_irqrestore(&desc->lock, flags); | 375 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask) | |||
386 | if (current_cpu_type() == CPU_VR4122 || | 386 | if (current_cpu_type() == CPU_VR4122 || |
387 | current_cpu_type() == CPU_VR4131 || | 387 | current_cpu_type() == CPU_VR4131 || |
388 | current_cpu_type() == CPU_VR4133) { | 388 | current_cpu_type() == CPU_VR4133) { |
389 | spin_lock_irqsave(&desc->lock, flags); | 389 | raw_spin_lock_irqsave(&desc->lock, flags); |
390 | icu2_set(MCSIINTREG, mask); | 390 | icu2_set(MCSIINTREG, mask); |
391 | spin_unlock_irqrestore(&desc->lock, flags); | 391 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
392 | } | 392 | } |
393 | } | 393 | } |
394 | 394 | ||
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask) | |||
402 | if (current_cpu_type() == CPU_VR4122 || | 402 | if (current_cpu_type() == CPU_VR4122 || |
403 | current_cpu_type() == CPU_VR4131 || | 403 | current_cpu_type() == CPU_VR4131 || |
404 | current_cpu_type() == CPU_VR4133) { | 404 | current_cpu_type() == CPU_VR4133) { |
405 | spin_lock_irqsave(&desc->lock, flags); | 405 | raw_spin_lock_irqsave(&desc->lock, flags); |
406 | icu2_clear(MCSIINTREG, mask); | 406 | icu2_clear(MCSIINTREG, mask); |
407 | spin_unlock_irqrestore(&desc->lock, flags); | 407 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
408 | } | 408 | } |
409 | } | 409 | } |
410 | 410 | ||
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void) | |||
418 | if (current_cpu_type() == CPU_VR4122 || | 418 | if (current_cpu_type() == CPU_VR4122 || |
419 | current_cpu_type() == CPU_VR4131 || | 419 | current_cpu_type() == CPU_VR4131 || |
420 | current_cpu_type() == CPU_VR4133) { | 420 | current_cpu_type() == CPU_VR4133) { |
421 | spin_lock_irqsave(&desc->lock, flags); | 421 | raw_spin_lock_irqsave(&desc->lock, flags); |
422 | icu2_write(MBCUINTREG, BCUINTR); | 422 | icu2_write(MBCUINTREG, BCUINTR); |
423 | spin_unlock_irqrestore(&desc->lock, flags); | 423 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
424 | } | 424 | } |
425 | } | 425 | } |
426 | 426 | ||
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void) | |||
434 | if (current_cpu_type() == CPU_VR4122 || | 434 | if (current_cpu_type() == CPU_VR4122 || |
435 | current_cpu_type() == CPU_VR4131 || | 435 | current_cpu_type() == CPU_VR4131 || |
436 | current_cpu_type() == CPU_VR4133) { | 436 | current_cpu_type() == CPU_VR4133) { |
437 | spin_lock_irqsave(&desc->lock, flags); | 437 | raw_spin_lock_irqsave(&desc->lock, flags); |
438 | icu2_write(MBCUINTREG, 0); | 438 | icu2_write(MBCUINTREG, 0); |
439 | spin_unlock_irqrestore(&desc->lock, flags); | 439 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
440 | } | 440 | } |
441 | } | 441 | } |
442 | 442 | ||
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) | |||
486 | 486 | ||
487 | pin = SYSINT1_IRQ_TO_PIN(irq); | 487 | pin = SYSINT1_IRQ_TO_PIN(irq); |
488 | 488 | ||
489 | spin_lock_irq(&desc->lock); | 489 | raw_spin_lock_irq(&desc->lock); |
490 | 490 | ||
491 | intassign0 = icu1_read(INTASSIGN0); | 491 | intassign0 = icu1_read(INTASSIGN0); |
492 | intassign1 = icu1_read(INTASSIGN1); | 492 | intassign1 = icu1_read(INTASSIGN1); |
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) | |||
525 | intassign1 |= (uint16_t)assign << 9; | 525 | intassign1 |= (uint16_t)assign << 9; |
526 | break; | 526 | break; |
527 | default: | 527 | default: |
528 | spin_unlock_irq(&desc->lock); | 528 | raw_spin_unlock_irq(&desc->lock); |
529 | return -EINVAL; | 529 | return -EINVAL; |
530 | } | 530 | } |
531 | 531 | ||
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) | |||
533 | icu1_write(INTASSIGN0, intassign0); | 533 | icu1_write(INTASSIGN0, intassign0); |
534 | icu1_write(INTASSIGN1, intassign1); | 534 | icu1_write(INTASSIGN1, intassign1); |
535 | 535 | ||
536 | spin_unlock_irq(&desc->lock); | 536 | raw_spin_unlock_irq(&desc->lock); |
537 | 537 | ||
538 | return 0; | 538 | return 0; |
539 | } | 539 | } |
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) | |||
546 | 546 | ||
547 | pin = SYSINT2_IRQ_TO_PIN(irq); | 547 | pin = SYSINT2_IRQ_TO_PIN(irq); |
548 | 548 | ||
549 | spin_lock_irq(&desc->lock); | 549 | raw_spin_lock_irq(&desc->lock); |
550 | 550 | ||
551 | intassign2 = icu1_read(INTASSIGN2); | 551 | intassign2 = icu1_read(INTASSIGN2); |
552 | intassign3 = icu1_read(INTASSIGN3); | 552 | intassign3 = icu1_read(INTASSIGN3); |
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) | |||
593 | intassign3 |= (uint16_t)assign << 12; | 593 | intassign3 |= (uint16_t)assign << 12; |
594 | break; | 594 | break; |
595 | default: | 595 | default: |
596 | spin_unlock_irq(&desc->lock); | 596 | raw_spin_unlock_irq(&desc->lock); |
597 | return -EINVAL; | 597 | return -EINVAL; |
598 | } | 598 | } |
599 | 599 | ||
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) | |||
601 | icu1_write(INTASSIGN2, intassign2); | 601 | icu1_write(INTASSIGN2, intassign2); |
602 | icu1_write(INTASSIGN3, intassign3); | 602 | icu1_write(INTASSIGN3, intassign3); |
603 | 603 | ||
604 | spin_unlock_irq(&desc->lock); | 604 | raw_spin_unlock_irq(&desc->lock); |
605 | 605 | ||
606 | return 0; | 606 | return 0; |
607 | } | 607 | } |
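Every helper in this file has the same shape: the ICU mask registers are read-modify-write targets shared with the irq-chip code, so each update is wrapped in the descriptor's now-raw spinlock. A stripped-down sketch of the pattern, with a plain variable standing in for the memory-mapped register:

static unsigned short model_icu_reg;	/* stand-in for an ICU mask register */

/* both helpers are read-modify-write, hence the raw_spin_lock around callers */
static void model_icu_set(unsigned short mask)
{
	model_icu_reg |= mask;
}

static void model_icu_clear(unsigned short mask)
{
	model_icu_reg &= (unsigned short)~mask;
}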
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 4c3c58ef5cda..e2d5ed891f37 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c | |||
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
215 | 215 | ||
216 | /* display information rows, one per active CPU */ | 216 | /* display information rows, one per active CPU */ |
217 | case 1 ... NR_IRQS - 1: | 217 | case 1 ... NR_IRQS - 1: |
218 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 218 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
219 | 219 | ||
220 | action = irq_desc[i].action; | 220 | action = irq_desc[i].action; |
221 | if (action) { | 221 | if (action) { |
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
235 | seq_putc(p, '\n'); | 235 | seq_putc(p, '\n'); |
236 | } | 236 | } |
237 | 237 | ||
238 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 238 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
239 | break; | 239 | break; |
240 | 240 | ||
241 | /* polish off with NMI and error counters */ | 241 | /* polish off with NMI and error counters */ |
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96699b2..716634d1f546 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h | |||
@@ -27,19 +27,19 @@ | |||
27 | # define ATOMIC_HASH_SIZE 4 | 27 | # define ATOMIC_HASH_SIZE 4 |
28 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | 28 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) |
29 | 29 | ||
30 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | 30 | extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; |
31 | 31 | ||
32 | /* Can't use raw_spin_lock_irq because of #include problems, so | 32 | /* Can't use raw_spin_lock_irq because of #include problems, so |
33 | * this is the substitute */ | 33 | * this is the substitute */ |
34 | #define _atomic_spin_lock_irqsave(l,f) do { \ | 34 | #define _atomic_spin_lock_irqsave(l,f) do { \ |
35 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 35 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
36 | local_irq_save(f); \ | 36 | local_irq_save(f); \ |
37 | __raw_spin_lock(s); \ | 37 | arch_spin_lock(s); \ |
38 | } while(0) | 38 | } while(0) |
39 | 39 | ||
40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | 40 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ |
41 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | 41 | arch_spinlock_t *s = ATOMIC_HASH(l); \ |
42 | __raw_spin_unlock(s); \ | 42 | arch_spin_unlock(s); \ |
43 | local_irq_restore(f); \ | 43 | local_irq_restore(f); \ |
44 | } while(0) | 44 | } while(0) |
45 | 45 | ||
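The hashed-lock scheme above is how parisc emulates atomic_t without an atomic read-modify-write instruction: the variable's address picks one of a small array of spinlocks. A user-space analogue under stated assumptions (pthread mutexes replace arch_spinlock_t, and the cache-line size is illustrative):

#include <pthread.h>

#define ATOMIC_HASH_SIZE 4
#define L1_CACHE_BYTES   64	/* illustrative line size */

static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* hash on the cache line, so neighbouring atomics share one lock */
#define ATOMIC_HASH(a) \
	(&atomic_hash[((unsigned long)(a) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

static int model_atomic_add_return(int i, int *v)
{
	pthread_mutex_t *s = ATOMIC_HASH(v);
	int ret;

	pthread_mutex_lock(s);
	ret = (*v += i);
	pthread_mutex_unlock(s);
	return ret;
}

Hashing by address bounds the lock array at a fixed size while keeping unrelated atomics mostly on different locks, which is why the conversion above only has to retype the array elements, not restructure the scheme.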
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e136fa8..74036f436a3b 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h | |||
@@ -5,17 +5,17 @@ | |||
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/spinlock_types.h> | 6 | #include <asm/spinlock_types.h> |
7 | 7 | ||
8 | static inline int __raw_spin_is_locked(raw_spinlock_t *x) | 8 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
9 | { | 9 | { |
10 | volatile unsigned int *a = __ldcw_align(x); | 10 | volatile unsigned int *a = __ldcw_align(x); |
11 | return *a == 0; | 11 | return *a == 0; |
12 | } | 12 | } |
13 | 13 | ||
14 | #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) | 14 | #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) |
15 | #define __raw_spin_unlock_wait(x) \ | 15 | #define arch_spin_unlock_wait(x) \ |
16 | do { cpu_relax(); } while (__raw_spin_is_locked(x)) | 16 | do { cpu_relax(); } while (arch_spin_is_locked(x)) |
17 | 17 | ||
18 | static inline void __raw_spin_lock_flags(raw_spinlock_t *x, | 18 | static inline void arch_spin_lock_flags(arch_spinlock_t *x, |
19 | unsigned long flags) | 19 | unsigned long flags) |
20 | { | 20 | { |
21 | volatile unsigned int *a; | 21 | volatile unsigned int *a; |
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, | |||
33 | mb(); | 33 | mb(); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void __raw_spin_unlock(raw_spinlock_t *x) | 36 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
37 | { | 37 | { |
38 | volatile unsigned int *a; | 38 | volatile unsigned int *a; |
39 | mb(); | 39 | mb(); |
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) | |||
42 | mb(); | 42 | mb(); |
43 | } | 43 | } |
44 | 44 | ||
45 | static inline int __raw_spin_trylock(raw_spinlock_t *x) | 45 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
46 | { | 46 | { |
47 | volatile unsigned int *a; | 47 | volatile unsigned int *a; |
48 | int ret; | 48 | int ret; |
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x) | |||
69 | 69 | ||
70 | /* Note that we have to ensure interrupts are disabled in case we're | 70 | /* Note that we have to ensure interrupts are disabled in case we're |
71 | * interrupted by some other code that wants to grab the same read lock */ | 71 | * interrupted by some other code that wants to grab the same read lock */ |
72 | static __inline__ void __raw_read_lock(raw_rwlock_t *rw) | 72 | static __inline__ void arch_read_lock(arch_rwlock_t *rw) |
73 | { | 73 | { |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | local_irq_save(flags); | 75 | local_irq_save(flags); |
76 | __raw_spin_lock_flags(&rw->lock, flags); | 76 | arch_spin_lock_flags(&rw->lock, flags); |
77 | rw->counter++; | 77 | rw->counter++; |
78 | __raw_spin_unlock(&rw->lock); | 78 | arch_spin_unlock(&rw->lock); |
79 | local_irq_restore(flags); | 79 | local_irq_restore(flags); |
80 | } | 80 | } |
81 | 81 | ||
82 | /* Note that we have to ensure interrupts are disabled in case we're | 82 | /* Note that we have to ensure interrupts are disabled in case we're |
83 | * interrupted by some other code that wants to grab the same read lock */ | 83 | * interrupted by some other code that wants to grab the same read lock */ |
84 | static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) | 84 | static __inline__ void arch_read_unlock(arch_rwlock_t *rw) |
85 | { | 85 | { |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | local_irq_save(flags); | 87 | local_irq_save(flags); |
88 | __raw_spin_lock_flags(&rw->lock, flags); | 88 | arch_spin_lock_flags(&rw->lock, flags); |
89 | rw->counter--; | 89 | rw->counter--; |
90 | __raw_spin_unlock(&rw->lock); | 90 | arch_spin_unlock(&rw->lock); |
91 | local_irq_restore(flags); | 91 | local_irq_restore(flags); |
92 | } | 92 | } |
93 | 93 | ||
94 | /* Note that we have to ensure interrupts are disabled in case we're | 94 | /* Note that we have to ensure interrupts are disabled in case we're |
95 | * interrupted by some other code that wants to grab the same read lock */ | 95 | * interrupted by some other code that wants to grab the same read lock */ |
96 | static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | 96 | static __inline__ int arch_read_trylock(arch_rwlock_t *rw) |
97 | { | 97 | { |
98 | unsigned long flags; | 98 | unsigned long flags; |
99 | retry: | 99 | retry: |
100 | local_irq_save(flags); | 100 | local_irq_save(flags); |
101 | if (__raw_spin_trylock(&rw->lock)) { | 101 | if (arch_spin_trylock(&rw->lock)) { |
102 | rw->counter++; | 102 | rw->counter++; |
103 | __raw_spin_unlock(&rw->lock); | 103 | arch_spin_unlock(&rw->lock); |
104 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
105 | return 1; | 105 | return 1; |
106 | } | 106 | } |
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | |||
111 | return 0; | 111 | return 0; |
112 | 112 | ||
113 | /* Wait until we have a realistic chance at the lock */ | 113 | /* Wait until we have a realistic chance at the lock */ |
114 | while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) | 114 | while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) |
115 | cpu_relax(); | 115 | cpu_relax(); |
116 | 116 | ||
117 | goto retry; | 117 | goto retry; |
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) | |||
119 | 119 | ||
120 | /* Note that we have to ensure interrupts are disabled in case we're | 120 | /* Note that we have to ensure interrupts are disabled in case we're |
121 | * interrupted by some other code that wants to read_trylock() this lock */ | 121 | * interrupted by some other code that wants to read_trylock() this lock */ |
122 | static __inline__ void __raw_write_lock(raw_rwlock_t *rw) | 122 | static __inline__ void arch_write_lock(arch_rwlock_t *rw) |
123 | { | 123 | { |
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | retry: | 125 | retry: |
126 | local_irq_save(flags); | 126 | local_irq_save(flags); |
127 | __raw_spin_lock_flags(&rw->lock, flags); | 127 | arch_spin_lock_flags(&rw->lock, flags); |
128 | 128 | ||
129 | if (rw->counter != 0) { | 129 | if (rw->counter != 0) { |
130 | __raw_spin_unlock(&rw->lock); | 130 | arch_spin_unlock(&rw->lock); |
131 | local_irq_restore(flags); | 131 | local_irq_restore(flags); |
132 | 132 | ||
133 | while (rw->counter != 0) | 133 | while (rw->counter != 0) |
@@ -141,27 +141,27 @@ retry: | |||
141 | local_irq_restore(flags); | 141 | local_irq_restore(flags); |
142 | } | 142 | } |
143 | 143 | ||
144 | static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) | 144 | static __inline__ void arch_write_unlock(arch_rwlock_t *rw) |
145 | { | 145 | { |
146 | rw->counter = 0; | 146 | rw->counter = 0; |
147 | __raw_spin_unlock(&rw->lock); | 147 | arch_spin_unlock(&rw->lock); |
148 | } | 148 | } |
149 | 149 | ||
150 | /* Note that we have to ensure interrupts are disabled in case we're | 150 | /* Note that we have to ensure interrupts are disabled in case we're |
151 | * interrupted by some other code that wants to read_trylock() this lock */ | 151 | * interrupted by some other code that wants to read_trylock() this lock */ |
152 | static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) | 152 | static __inline__ int arch_write_trylock(arch_rwlock_t *rw) |
153 | { | 153 | { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | int result = 0; | 155 | int result = 0; |
156 | 156 | ||
157 | local_irq_save(flags); | 157 | local_irq_save(flags); |
158 | if (__raw_spin_trylock(&rw->lock)) { | 158 | if (arch_spin_trylock(&rw->lock)) { |
159 | if (rw->counter == 0) { | 159 | if (rw->counter == 0) { |
160 | rw->counter = -1; | 160 | rw->counter = -1; |
161 | result = 1; | 161 | result = 1; |
162 | } else { | 162 | } else { |
163 | /* Read-locked. Oh well. */ | 163 | /* Read-locked. Oh well. */ |
164 | __raw_spin_unlock(&rw->lock); | 164 | arch_spin_unlock(&rw->lock); |
165 | } | 165 | } |
166 | } | 166 | } |
167 | local_irq_restore(flags); | 167 | local_irq_restore(flags); |
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) | |||
173 | * read_can_lock - would read_trylock() succeed? | 173 | * read_can_lock - would read_trylock() succeed? |
174 | * @lock: the rwlock in question. | 174 | * @lock: the rwlock in question. |
175 | */ | 175 | */ |
176 | static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) | 176 | static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) |
177 | { | 177 | { |
178 | return rw->counter >= 0; | 178 | return rw->counter >= 0; |
179 | } | 179 | } |
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) | |||
182 | * write_can_lock - would write_trylock() succeed? | 182 | * write_can_lock - would write_trylock() succeed? |
183 | * @lock: the rwlock in question. | 183 | * @lock: the rwlock in question. |
184 | */ | 184 | */ |
185 | static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) | 185 | static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) |
186 | { | 186 | { |
187 | return !rw->counter; | 187 | return !rw->counter; |
188 | } | 188 | } |
189 | 189 | ||
190 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 190 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
191 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 191 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
192 | 192 | ||
193 | #define _raw_spin_relax(lock) cpu_relax() | 193 | #define arch_spin_relax(lock) cpu_relax() |
194 | #define _raw_read_relax(lock) cpu_relax() | 194 | #define arch_read_relax(lock) cpu_relax() |
195 | #define _raw_write_relax(lock) cpu_relax() | 195 | #define arch_write_relax(lock) cpu_relax() |
196 | 196 | ||
197 | #endif /* __ASM_SPINLOCK_H */ | 197 | #endif /* __ASM_SPINLOCK_H */ |
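The parisc rwlock above is just an arch_spinlock_t guarding a signed reader count: positive means that many readers, zero means free, -1 means write-locked. An illustration of the convention using the definitions above (not part of the patch):

	arch_rwlock_t rw = __ARCH_RW_LOCK_UNLOCKED;

	arch_read_lock(&rw);		 /* rw.counter == 1: one reader   */
	BUG_ON(arch_write_trylock(&rw)); /* fails: counter != 0           */
	arch_read_unlock(&rw);		 /* rw.counter == 0: free         */
	BUG_ON(!arch_write_trylock(&rw));/* succeeds: counter becomes -1  */
	arch_write_unlock(&rw);		 /* counter back to 0             */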
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47cf4b2..8c373aa28a86 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h | |||
@@ -4,18 +4,18 @@ | |||
4 | typedef struct { | 4 | typedef struct { |
5 | #ifdef CONFIG_PA20 | 5 | #ifdef CONFIG_PA20 |
6 | volatile unsigned int slock; | 6 | volatile unsigned int slock; |
7 | # define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 7 | # define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
8 | #else | 8 | #else |
9 | volatile unsigned int lock[4]; | 9 | volatile unsigned int lock[4]; |
10 | # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } | 10 | # define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } |
11 | #endif | 11 | #endif |
12 | } raw_spinlock_t; | 12 | } arch_spinlock_t; |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | raw_spinlock_t lock; | 15 | arch_spinlock_t lock; |
16 | volatile int counter; | 16 | volatile int counter; |
17 | } raw_rwlock_t; | 17 | } arch_rwlock_t; |
18 | 18 | ||
19 | #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } |
20 | 20 | ||
21 | #endif | 21 | #endif |
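The lock[4] array in the pre-PA2.0 case exists because the ldcw instruction only operates on a 16-byte-aligned word; __ldcw_align(), defined elsewhere in the parisc headers, rounds into the array to find that word. A paraphrase of the macro from memory of the same-era source (illustrative, not quoted from this patch):

	#define __PA_LDCW_ALIGNMENT	16
	#define __ldcw_align(a) ({					\
		unsigned long __ret = (unsigned long) &(a)->lock[0];	\
		__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
			& ~(__PA_LDCW_ALIGNMENT - 1);			\
		(volatile unsigned int *) __ret; })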
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 2e7610cb33d5..f47465e8d040 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
180 | if (i < NR_IRQS) { | 180 | if (i < NR_IRQS) { |
181 | struct irqaction *action; | 181 | struct irqaction *action; |
182 | 182 | ||
183 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 183 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
184 | action = irq_desc[i].action; | 184 | action = irq_desc[i].action; |
185 | if (!action) | 185 | if (!action) |
186 | goto skip; | 186 | goto skip; |
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
224 | 224 | ||
225 | seq_putc(p, '\n'); | 225 | seq_putc(p, '\n'); |
226 | skip: | 226 | skip: |
227 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 227 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
228 | } | 228 | } |
229 | 229 | ||
230 | return 0; | 230 | return 0; |
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739fab19..353963d42059 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <asm/atomic.h> | 12 | #include <asm/atomic.h> |
13 | 13 | ||
14 | #ifdef CONFIG_SMP | 14 | #ifdef CONFIG_SMP |
15 | raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { | 15 | arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { |
16 | [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED | 16 | [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED |
17 | }; | 17 | }; |
18 | #endif | 18 | #endif |
19 | 19 | ||
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 168fce726201..20de73c36682 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h | |||
@@ -58,7 +58,7 @@ struct rtas_t { | |||
58 | unsigned long entry; /* physical address pointer */ | 58 | unsigned long entry; /* physical address pointer */ |
59 | unsigned long base; /* physical address pointer */ | 59 | unsigned long base; /* physical address pointer */ |
60 | unsigned long size; | 60 | unsigned long size; |
61 | raw_spinlock_t lock; | 61 | arch_spinlock_t lock; |
62 | struct rtas_args args; | 62 | struct rtas_args args; |
63 | struct device_node *dev; /* virtual address pointer */ | 63 | struct device_node *dev; /* virtual address pointer */ |
64 | }; | 64 | }; |
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 198266cf9e2d..764094cff681 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <asm/asm-compat.h> | 28 | #include <asm/asm-compat.h> |
29 | #include <asm/synch.h> | 29 | #include <asm/synch.h> |
30 | 30 | ||
31 | #define __raw_spin_is_locked(x) ((x)->slock != 0) | 31 | #define arch_spin_is_locked(x) ((x)->slock != 0) |
32 | 32 | ||
33 | #ifdef CONFIG_PPC64 | 33 | #ifdef CONFIG_PPC64 |
34 | /* use 0x800000yy when locked, where yy == CPU number */ | 34 | /* use 0x800000yy when locked, where yy == CPU number */ |
@@ -54,7 +54,7 @@ | |||
54 | * This returns the old value in the lock, so we succeeded | 54 | * This returns the old value in the lock, so we succeeded |
55 | * in getting the lock if the return value is 0. | 55 | * in getting the lock if the return value is 0. |
56 | */ | 56 | */ |
57 | static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) | 57 | static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp, token; | 59 | unsigned long tmp, token; |
60 | 60 | ||
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) | |||
73 | return tmp; | 73 | return tmp; |
74 | } | 74 | } |
75 | 75 | ||
76 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 76 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
77 | { | 77 | { |
78 | CLEAR_IO_SYNC; | 78 | CLEAR_IO_SYNC; |
79 | return arch_spin_trylock(lock) == 0; | 79 | return __arch_spin_trylock(lock) == 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | /* | 82 | /* |
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
96 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) | 96 | #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) |
97 | /* We only yield to the hypervisor if we are in shared processor mode */ | 97 | /* We only yield to the hypervisor if we are in shared processor mode */ |
98 | #define SHARED_PROCESSOR (get_lppaca()->shared_proc) | 98 | #define SHARED_PROCESSOR (get_lppaca()->shared_proc) |
99 | extern void __spin_yield(raw_spinlock_t *lock); | 99 | extern void __spin_yield(arch_spinlock_t *lock); |
100 | extern void __rw_yield(raw_rwlock_t *lock); | 100 | extern void __rw_yield(arch_rwlock_t *lock); |
101 | #else /* SPLPAR || ISERIES */ | 101 | #else /* SPLPAR || ISERIES */ |
102 | #define __spin_yield(x) barrier() | 102 | #define __spin_yield(x) barrier() |
103 | #define __rw_yield(x) barrier() | 103 | #define __rw_yield(x) barrier() |
104 | #define SHARED_PROCESSOR 0 | 104 | #define SHARED_PROCESSOR 0 |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 107 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
108 | { | 108 | { |
109 | CLEAR_IO_SYNC; | 109 | CLEAR_IO_SYNC; |
110 | while (1) { | 110 | while (1) { |
111 | if (likely(arch_spin_trylock(lock) == 0)) | 111 | if (likely(__arch_spin_trylock(lock) == 0)) |
112 | break; | 112 | break; |
113 | do { | 113 | do { |
114 | HMT_low(); | 114 | HMT_low(); |
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
120 | } | 120 | } |
121 | 121 | ||
122 | static inline | 122 | static inline |
123 | void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 123 | void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
124 | { | 124 | { |
125 | unsigned long flags_dis; | 125 | unsigned long flags_dis; |
126 | 126 | ||
127 | CLEAR_IO_SYNC; | 127 | CLEAR_IO_SYNC; |
128 | while (1) { | 128 | while (1) { |
129 | if (likely(arch_spin_trylock(lock) == 0)) | 129 | if (likely(__arch_spin_trylock(lock) == 0)) |
130 | break; | 130 | break; |
131 | local_save_flags(flags_dis); | 131 | local_save_flags(flags_dis); |
132 | local_irq_restore(flags); | 132 | local_irq_restore(flags); |
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | |||
140 | } | 140 | } |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 143 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
144 | { | 144 | { |
145 | SYNC_IO; | 145 | SYNC_IO; |
146 | __asm__ __volatile__("# __raw_spin_unlock\n\t" | 146 | __asm__ __volatile__("# arch_spin_unlock\n\t" |
147 | LWSYNC_ON_SMP: : :"memory"); | 147 | LWSYNC_ON_SMP: : :"memory"); |
148 | lock->slock = 0; | 148 | lock->slock = 0; |
149 | } | 149 | } |
150 | 150 | ||
151 | #ifdef CONFIG_PPC64 | 151 | #ifdef CONFIG_PPC64 |
152 | extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | 152 | extern void arch_spin_unlock_wait(arch_spinlock_t *lock); |
153 | #else | 153 | #else |
154 | #define __raw_spin_unlock_wait(lock) \ | 154 | #define arch_spin_unlock_wait(lock) \ |
155 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 155 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
156 | #endif | 156 | #endif |
157 | 157 | ||
158 | /* | 158 | /* |
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
166 | * read-locks. | 166 | * read-locks. |
167 | */ | 167 | */ |
168 | 168 | ||
169 | #define __raw_read_can_lock(rw) ((rw)->lock >= 0) | 169 | #define arch_read_can_lock(rw) ((rw)->lock >= 0) |
170 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 170 | #define arch_write_can_lock(rw) (!(rw)->lock) |
171 | 171 | ||
172 | #ifdef CONFIG_PPC64 | 172 | #ifdef CONFIG_PPC64 |
173 | #define __DO_SIGN_EXTEND "extsw %0,%0\n" | 173 | #define __DO_SIGN_EXTEND "extsw %0,%0\n" |
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); | |||
181 | * This returns the old value in the lock + 1, | 181 | * This returns the old value in the lock + 1, |
182 | * so we got a read lock if the return value is > 0. | 182 | * so we got a read lock if the return value is > 0. |
183 | */ | 183 | */ |
184 | static inline long arch_read_trylock(raw_rwlock_t *rw) | 184 | static inline long __arch_read_trylock(arch_rwlock_t *rw) |
185 | { | 185 | { |
186 | long tmp; | 186 | long tmp; |
187 | 187 | ||
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw) | |||
205 | * This returns the old value in the lock, | 205 | * This returns the old value in the lock, |
206 | * so we got the write lock if the return value is 0. | 206 | * so we got the write lock if the return value is 0. |
207 | */ | 207 | */ |
208 | static inline long arch_write_trylock(raw_rwlock_t *rw) | 208 | static inline long __arch_write_trylock(arch_rwlock_t *rw) |
209 | { | 209 | { |
210 | long tmp, token; | 210 | long tmp, token; |
211 | 211 | ||
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw) | |||
225 | return tmp; | 225 | return tmp; |
226 | } | 226 | } |
227 | 227 | ||
228 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 228 | static inline void arch_read_lock(arch_rwlock_t *rw) |
229 | { | 229 | { |
230 | while (1) { | 230 | while (1) { |
231 | if (likely(arch_read_trylock(rw) > 0)) | 231 | if (likely(__arch_read_trylock(rw) > 0)) |
232 | break; | 232 | break; |
233 | do { | 233 | do { |
234 | HMT_low(); | 234 | HMT_low(); |
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
239 | } | 239 | } |
240 | } | 240 | } |
241 | 241 | ||
242 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 242 | static inline void arch_write_lock(arch_rwlock_t *rw) |
243 | { | 243 | { |
244 | while (1) { | 244 | while (1) { |
245 | if (likely(arch_write_trylock(rw) == 0)) | 245 | if (likely(__arch_write_trylock(rw) == 0)) |
246 | break; | 246 | break; |
247 | do { | 247 | do { |
248 | HMT_low(); | 248 | HMT_low(); |
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
253 | } | 253 | } |
254 | } | 254 | } |
255 | 255 | ||
256 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 256 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
257 | { | 257 | { |
258 | return arch_read_trylock(rw) > 0; | 258 | return __arch_read_trylock(rw) > 0; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 261 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
262 | { | 262 | { |
263 | return arch_write_trylock(rw) == 0; | 263 | return __arch_write_trylock(rw) == 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 266 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
267 | { | 267 | { |
268 | long tmp; | 268 | long tmp; |
269 | 269 | ||
@@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
280 | : "cr0", "xer", "memory"); | 280 | : "cr0", "xer", "memory"); |
281 | } | 281 | } |
282 | 282 | ||
283 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 283 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
284 | { | 284 | { |
285 | __asm__ __volatile__("# write_unlock\n\t" | 285 | __asm__ __volatile__("# write_unlock\n\t" |
286 | LWSYNC_ON_SMP: : :"memory"); | 286 | LWSYNC_ON_SMP: : :"memory"); |
287 | rw->lock = 0; | 287 | rw->lock = 0; |
288 | } | 288 | } |
289 | 289 | ||
290 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 290 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
291 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 291 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
292 | 292 | ||
293 | #define _raw_spin_relax(lock) __spin_yield(lock) | 293 | #define arch_spin_relax(lock) __spin_yield(lock) |
294 | #define _raw_read_relax(lock) __rw_yield(lock) | 294 | #define arch_read_relax(lock) __rw_yield(lock) |
295 | #define _raw_write_relax(lock) __rw_yield(lock) | 295 | #define arch_write_relax(lock) __rw_yield(lock) |
296 | 296 | ||
297 | #endif /* __KERNEL__ */ | 297 | #endif /* __KERNEL__ */ |
298 | #endif /* __ASM_SPINLOCK_H */ | 298 | #endif /* __ASM_SPINLOCK_H */ |
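Note the extra twist on powerpc: the file already had private helpers named arch_spin_trylock(), arch_read_trylock() and arch_write_trylock(), which collide with the new public names. They therefore gain a leading double underscore while the old __raw_* entry points take over the arch_* names. Summarized (a gloss on the hunks above, not patch text):

	/*   before                 after                    role
	 *   ------                 -----                    ----
	 *   arch_spin_trylock()    __arch_spin_trylock()    returns old lock value
	 *   __raw_spin_trylock()   arch_spin_trylock()      returns 0/1 success
	 *   arch_read_trylock()    __arch_read_trylock()    returns old count + 1
	 *   __raw_read_trylock()   arch_read_trylock()      returns 0/1 success
	 *   arch_write_trylock()   __arch_write_trylock()   returns old lock value
	 *   __raw_write_trylock()  arch_write_trylock()     returns 0/1 success
	 */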
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 74236c9f05b1..2351adc4fdc4 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h | |||
@@ -7,14 +7,14 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int slock; | 9 | volatile unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile signed int lock; | 15 | volatile signed int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | 18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f6dca4f4b295..9040330b0530 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
210 | if (!desc) | 210 | if (!desc) |
211 | return 0; | 211 | return 0; |
212 | 212 | ||
213 | spin_lock_irqsave(&desc->lock, flags); | 213 | raw_spin_lock_irqsave(&desc->lock, flags); |
214 | 214 | ||
215 | action = desc->action; | 215 | action = desc->action; |
216 | if (!action || !action->handler) | 216 | if (!action || !action->handler) |
@@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
237 | seq_putc(p, '\n'); | 237 | seq_putc(p, '\n'); |
238 | 238 | ||
239 | skip: | 239 | skip: |
240 | spin_unlock_irqrestore(&desc->lock, flags); | 240 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
241 | 241 | ||
242 | return 0; | 242 | return 0; |
243 | } | 243 | } |
@@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1112 | if (!desc) | 1112 | if (!desc) |
1113 | continue; | 1113 | continue; |
1114 | 1114 | ||
1115 | spin_lock_irqsave(&desc->lock, flags); | 1115 | raw_spin_lock_irqsave(&desc->lock, flags); |
1116 | 1116 | ||
1117 | if (desc->action && desc->action->handler) { | 1117 | if (desc->action && desc->action->handler) { |
1118 | seq_printf(m, "%5d ", i); | 1118 | seq_printf(m, "%5d ", i); |
@@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1131 | seq_printf(m, "%s\n", p); | 1131 | seq_printf(m, "%s\n", p); |
1132 | } | 1132 | } |
1133 | 1133 | ||
1134 | spin_unlock_irqrestore(&desc->lock, flags); | 1134 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | return 0; | 1137 | return 0; |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf90361bb70f..fd0d29493fd6 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -42,7 +42,7 @@ | |||
42 | #include <asm/mmu.h> | 42 | #include <asm/mmu.h> |
43 | 43 | ||
44 | struct rtas_t rtas = { | 44 | struct rtas_t rtas = { |
45 | .lock = __RAW_SPIN_LOCK_UNLOCKED | 45 | .lock = __ARCH_SPIN_LOCK_UNLOCKED |
46 | }; | 46 | }; |
47 | EXPORT_SYMBOL(rtas); | 47 | EXPORT_SYMBOL(rtas); |
48 | 48 | ||
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void) | |||
80 | 80 | ||
81 | local_irq_save(flags); | 81 | local_irq_save(flags); |
82 | preempt_disable(); | 82 | preempt_disable(); |
83 | __raw_spin_lock_flags(&rtas.lock, flags); | 83 | arch_spin_lock_flags(&rtas.lock, flags); |
84 | return flags; | 84 | return flags; |
85 | } | 85 | } |
86 | 86 | ||
87 | static void unlock_rtas(unsigned long flags) | 87 | static void unlock_rtas(unsigned long flags) |
88 | { | 88 | { |
89 | __raw_spin_unlock(&rtas.lock); | 89 | arch_spin_unlock(&rtas.lock); |
90 | local_irq_restore(flags); | 90 | local_irq_restore(flags); |
91 | preempt_enable(); | 91 | preempt_enable(); |
92 | } | 92 | } |
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, | |||
978 | return 1; | 978 | return 1; |
979 | } | 979 | } |
980 | 980 | ||
981 | static raw_spinlock_t timebase_lock; | 981 | static arch_spinlock_t timebase_lock; |
982 | static u64 timebase = 0; | 982 | static u64 timebase = 0; |
983 | 983 | ||
984 | void __cpuinit rtas_give_timebase(void) | 984 | void __cpuinit rtas_give_timebase(void) |
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void) | |||
987 | 987 | ||
988 | local_irq_save(flags); | 988 | local_irq_save(flags); |
989 | hard_irq_disable(); | 989 | hard_irq_disable(); |
990 | __raw_spin_lock(&timebase_lock); | 990 | arch_spin_lock(&timebase_lock); |
991 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); | 991 | rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); |
992 | timebase = get_tb(); | 992 | timebase = get_tb(); |
993 | __raw_spin_unlock(&timebase_lock); | 993 | arch_spin_unlock(&timebase_lock); |
994 | 994 | ||
995 | while (timebase) | 995 | while (timebase) |
996 | barrier(); | 996 | barrier(); |
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void) | |||
1002 | { | 1002 | { |
1003 | while (!timebase) | 1003 | while (!timebase) |
1004 | barrier(); | 1004 | barrier(); |
1005 | __raw_spin_lock(&timebase_lock); | 1005 | arch_spin_lock(&timebase_lock); |
1006 | set_tb(timebase >> 32, timebase & 0xffffffff); | 1006 | set_tb(timebase >> 32, timebase & 0xffffffff); |
1007 | timebase = 0; | 1007 | timebase = 0; |
1008 | __raw_spin_unlock(&timebase_lock); | 1008 | arch_spin_unlock(&timebase_lock); |
1009 | } | 1009 | } |
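rtas.lock and timebase_lock stay at the arch_spinlock_t level because they are taken in contexts where the full spinlock machinery (lockdep, preempt accounting) must stay out of the way, e.g. while the timebase is frozen during CPU bring-up. rtas_give_timebase()/rtas_take_timebase() form a small hand-off protocol in which the shared `timebase` variable doubles as the ready flag. Traced out from the code above (the final thaw call is not shown in the hunk and is assumed from the surrounding source):

	/* giver (boot CPU)                    taker (new CPU)
	 * ----------------                    ---------------
	 * arch_spin_lock(&timebase_lock)
	 * freeze timebase via RTAS            while (!timebase)
	 * timebase = get_tb()                         barrier();
	 * arch_spin_unlock(&timebase_lock)
	 *                                     arch_spin_lock(&timebase_lock)
	 *                                     set_tb(timebase)
	 *                                     timebase = 0
	 *                                     arch_spin_unlock(&timebase_lock)
	 * while (timebase) barrier();
	 * thaw timebase via RTAS
	 */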
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3a470d..58e14fba11b1 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/smp.h> | 25 | #include <asm/smp.h> |
26 | #include <asm/firmware.h> | 26 | #include <asm/firmware.h> |
27 | 27 | ||
28 | void __spin_yield(raw_spinlock_t *lock) | 28 | void __spin_yield(arch_spinlock_t *lock) |
29 | { | 29 | { |
30 | unsigned int lock_value, holder_cpu, yield_count; | 30 | unsigned int lock_value, holder_cpu, yield_count; |
31 | 31 | ||
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock) | |||
55 | * This turns out to be the same for read and write locks, since | 55 | * This turns out to be the same for read and write locks, since |
56 | * we only know the holder if it is write-locked. | 56 | * we only know the holder if it is write-locked. |
57 | */ | 57 | */ |
58 | void __rw_yield(raw_rwlock_t *rw) | 58 | void __rw_yield(arch_rwlock_t *rw) |
59 | { | 59 | { |
60 | int lock_value; | 60 | int lock_value; |
61 | unsigned int holder_cpu, yield_count; | 61 | unsigned int holder_cpu, yield_count; |
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) | |||
82 | } | 82 | } |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 85 | void arch_spin_unlock_wait(arch_spinlock_t *lock) |
86 | { | 86 | { |
87 | while (lock->slock) { | 87 | while (lock->slock) { |
88 | HMT_low(); | 88 | HMT_low(); |
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock) | |||
92 | HMT_medium(); | 92 | HMT_medium(); |
93 | } | 93 | } |
94 | 94 | ||
95 | EXPORT_SYMBOL(__raw_spin_unlock_wait); | 95 | EXPORT_SYMBOL(arch_spin_unlock_wait); |
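__spin_yield(), converted above, is what arch_spin_relax() expands to on shared-processor LPARs: it decodes the holder from the lock word and confers the waiter's time slice only if the holder's virtual CPU is currently preempted. Abridged from the same-era source (illustrative):

	void __spin_yield(arch_spinlock_t *lock)
	{
		unsigned int lock_value, holder_cpu, yield_count;

		lock_value = lock->slock;
		if (lock_value == 0)
			return;				/* already free */
		holder_cpu = lock_value & 0xffff;	/* low bits = holder */
		yield_count = lppaca[holder_cpu].yield_count;
		if ((yield_count & 1) == 0)
			return;				/* holder is running */
		rmb();
		if (lock->slock != lock_value)
			return;				/* lock changed hands */
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
	}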
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index cc0c854291d7..0bac3a3dbecf 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
86 | u32 status, enable; | 86 | u32 status, enable; |
87 | 87 | ||
88 | /* Mask off the cascaded IRQ */ | 88 | /* Mask off the cascaded IRQ */ |
89 | spin_lock(&desc->lock); | 89 | raw_spin_lock(&desc->lock); |
90 | desc->chip->mask(virq); | 90 | desc->chip->mask(virq); |
91 | spin_unlock(&desc->lock); | 91 | raw_spin_unlock(&desc->lock); |
92 | 92 | ||
93 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs | 93 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs |
94 | * are pending. 'ffs()' is 1 based */ | 94 | * are pending. 'ffs()' is 1 based */ |
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | /* Processing done; can reenable the cascade now */ | 106 | /* Processing done; can reenable the cascade now */ |
107 | spin_lock(&desc->lock); | 107 | raw_spin_lock(&desc->lock); |
108 | desc->chip->ack(virq); | 108 | desc->chip->ack(virq); |
109 | if (!(desc->status & IRQ_DISABLED)) | 109 | if (!(desc->status & IRQ_DISABLED)) |
110 | desc->chip->unmask(virq); | 110 | desc->chip->unmask(virq); |
111 | spin_unlock(&desc->lock); | 111 | raw_spin_unlock(&desc->lock); |
112 | } | 112 | } |
113 | 113 | ||
114 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, | 114 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, |
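media5200_irq_cascade() shows the pattern that the cascade handlers in the cell, fsl_msi and uic hunks below follow as well: desc->lock, now a raw spinlock, is held only around mask/ack and unmask of the parent interrupt, never across the child dispatch. The generic shape (a sketch, not any one driver; decode_child_irq() is a hypothetical stand-in for the driver-specific demux):

	static void demo_cascade(unsigned int virq, struct irq_desc *desc)
	{
		unsigned int child;

		raw_spin_lock(&desc->lock);
		desc->chip->mask(virq);			/* silence the parent */
		raw_spin_unlock(&desc->lock);

		child = decode_child_irq();		/* hypothetical helper */
		if (child)
			generic_handle_irq(child);	/* lock NOT held here */

		raw_spin_lock(&desc->lock);
		desc->chip->ack(virq);
		if (!(desc->status & IRQ_DISABLED))
			desc->chip->unmask(virq);	/* re-arm the parent */
		raw_spin_unlock(&desc->lock);
	}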
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 7267effc8078..6829cf7e2bda 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -237,7 +237,7 @@ extern int noirqdebug; | |||
237 | 237 | ||
238 | static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) | 238 | static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) |
239 | { | 239 | { |
240 | spin_lock(&desc->lock); | 240 | raw_spin_lock(&desc->lock); |
241 | 241 | ||
242 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 242 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); |
243 | 243 | ||
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) | |||
265 | goto out_eoi; | 265 | goto out_eoi; |
266 | 266 | ||
267 | desc->status &= ~IRQ_PENDING; | 267 | desc->status &= ~IRQ_PENDING; |
268 | spin_unlock(&desc->lock); | 268 | raw_spin_unlock(&desc->lock); |
269 | action_ret = handle_IRQ_event(irq, action); | 269 | action_ret = handle_IRQ_event(irq, action); |
270 | if (!noirqdebug) | 270 | if (!noirqdebug) |
271 | note_interrupt(irq, desc, action_ret); | 271 | note_interrupt(irq, desc, action_ret); |
272 | spin_lock(&desc->lock); | 272 | raw_spin_lock(&desc->lock); |
273 | 273 | ||
274 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | 274 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); |
275 | 275 | ||
276 | desc->status &= ~IRQ_INPROGRESS; | 276 | desc->status &= ~IRQ_INPROGRESS; |
277 | out_eoi: | 277 | out_eoi: |
278 | desc->chip->eoi(irq); | 278 | desc->chip->eoi(irq); |
279 | spin_unlock(&desc->lock); | 279 | raw_spin_unlock(&desc->lock); |
280 | } | 280 | } |
281 | 281 | ||
282 | static int iic_host_map(struct irq_host *h, unsigned int virq, | 282 | static int iic_host_map(struct irq_host *h, unsigned int virq, |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 07762259c60a..86c4b29eea89 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs() | |||
217 | struct irq_desc *desc = irq_to_desc(irq); | 217 | struct irq_desc *desc = irq_to_desc(irq); |
218 | 218 | ||
219 | if (desc && desc->chip && desc->chip->startup) { | 219 | if (desc && desc->chip && desc->chip->startup) { |
220 | spin_lock_irqsave(&desc->lock, flags); | 220 | raw_spin_lock_irqsave(&desc->lock, flags); |
221 | desc->chip->startup(irq); | 221 | desc->chip->startup(irq); |
222 | spin_unlock_irqrestore(&desc->lock, flags); | 222 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
223 | } | 223 | } |
224 | } | 224 | } |
225 | } | 225 | } |
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a4619347aa7e..242f8095c2df 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | #ifdef CONFIG_SMP | 73 | #ifdef CONFIG_SMP |
74 | static raw_spinlock_t timebase_lock; | 74 | static arch_spinlock_t timebase_lock; |
75 | static unsigned long timebase; | 75 | static unsigned long timebase; |
76 | 76 | ||
77 | static void __devinit pas_give_timebase(void) | 77 | static void __devinit pas_give_timebase(void) |
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void) | |||
80 | 80 | ||
81 | local_irq_save(flags); | 81 | local_irq_save(flags); |
82 | hard_irq_disable(); | 82 | hard_irq_disable(); |
83 | __raw_spin_lock(&timebase_lock); | 83 | arch_spin_lock(&timebase_lock); |
84 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); | 84 | mtspr(SPRN_TBCTL, TBCTL_FREEZE); |
85 | isync(); | 85 | isync(); |
86 | timebase = get_tb(); | 86 | timebase = get_tb(); |
87 | __raw_spin_unlock(&timebase_lock); | 87 | arch_spin_unlock(&timebase_lock); |
88 | 88 | ||
89 | while (timebase) | 89 | while (timebase) |
90 | barrier(); | 90 | barrier(); |
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void) | |||
97 | while (!timebase) | 97 | while (!timebase) |
98 | smp_rmb(); | 98 | smp_rmb(); |
99 | 99 | ||
100 | __raw_spin_lock(&timebase_lock); | 100 | arch_spin_lock(&timebase_lock); |
101 | set_tb(timebase >> 32, timebase & 0xffffffff); | 101 | set_tb(timebase >> 32, timebase & 0xffffffff); |
102 | timebase = 0; | 102 | timebase = 0; |
103 | __raw_spin_unlock(&timebase_lock); | 103 | arch_spin_unlock(&timebase_lock); |
104 | } | 104 | } |
105 | 105 | ||
106 | struct smp_ops_t pas_smp_ops = { | 106 | struct smp_ops_t pas_smp_ops = { |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 7d01b58f3989..b9b9e11609ec 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void) | |||
906 | || desc->chip->set_affinity == NULL) | 906 | || desc->chip->set_affinity == NULL) |
907 | continue; | 907 | continue; |
908 | 908 | ||
909 | spin_lock_irqsave(&desc->lock, flags); | 909 | raw_spin_lock_irqsave(&desc->lock, flags); |
910 | 910 | ||
911 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | 911 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); |
912 | if (status) { | 912 | if (status) { |
@@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void) | |||
930 | cpumask_setall(irq_to_desc(virq)->affinity); | 930 | cpumask_setall(irq_to_desc(virq)->affinity); |
931 | desc->chip->set_affinity(virq, cpu_all_mask); | 931 | desc->chip->set_affinity(virq, cpu_all_mask); |
932 | unlock: | 932 | unlock: |
933 | spin_unlock_irqrestore(&desc->lock, flags); | 933 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
934 | } | 934 | } |
935 | } | 935 | } |
936 | #endif | 936 | #endif |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 62e50258cdef..c6e11b077108 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
173 | u32 intr_index; | 173 | u32 intr_index; |
174 | u32 have_shift = 0; | 174 | u32 have_shift = 0; |
175 | 175 | ||
176 | spin_lock(&desc->lock); | 176 | raw_spin_lock(&desc->lock); |
177 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { | 177 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { |
178 | if (desc->chip->mask_ack) | 178 | if (desc->chip->mask_ack) |
179 | desc->chip->mask_ack(irq); | 179 | desc->chip->mask_ack(irq); |
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | unlock: | 227 | unlock: |
228 | spin_unlock(&desc->lock); | 228 | raw_spin_unlock(&desc->lock); |
229 | } | 229 | } |
230 | 230 | ||
231 | static int __devinit fsl_of_msi_probe(struct of_device *dev, | 231 | static int __devinit fsl_of_msi_probe(struct of_device *dev, |
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 7d10074b3304..6f220a913e42 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
225 | int src; | 225 | int src; |
226 | int subvirq; | 226 | int subvirq; |
227 | 227 | ||
228 | spin_lock(&desc->lock); | 228 | raw_spin_lock(&desc->lock); |
229 | if (desc->status & IRQ_LEVEL) | 229 | if (desc->status & IRQ_LEVEL) |
230 | desc->chip->mask(virq); | 230 | desc->chip->mask(virq); |
231 | else | 231 | else |
232 | desc->chip->mask_ack(virq); | 232 | desc->chip->mask_ack(virq); |
233 | spin_unlock(&desc->lock); | 233 | raw_spin_unlock(&desc->lock); |
234 | 234 | ||
235 | msr = mfdcr(uic->dcrbase + UIC_MSR); | 235 | msr = mfdcr(uic->dcrbase + UIC_MSR); |
236 | if (!msr) /* spurious interrupt */ | 236 | if (!msr) /* spurious interrupt */ |
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
242 | generic_handle_irq(subvirq); | 242 | generic_handle_irq(subvirq); |
243 | 243 | ||
244 | uic_irq_ret: | 244 | uic_irq_ret: |
245 | spin_lock(&desc->lock); | 245 | raw_spin_lock(&desc->lock); |
246 | if (desc->status & IRQ_LEVEL) | 246 | if (desc->status & IRQ_LEVEL) |
247 | desc->chip->ack(virq); | 247 | desc->chip->ack(virq); |
248 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 248 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) |
249 | desc->chip->unmask(virq); | 249 | desc->chip->unmask(virq); |
250 | spin_unlock(&desc->lock); | 250 | raw_spin_unlock(&desc->lock); |
251 | } | 251 | } |
252 | 252 | ||
253 | static struct uic * __init uic_init_one(struct device_node *node) | 253 | static struct uic * __init uic_init_one(struct device_node *node) |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7ab..a587907d77f3 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock, | |||
52 | * (the type definitions are in asm/spinlock_types.h) | 52 | * (the type definitions are in asm/spinlock_types.h) |
53 | */ | 53 | */ |
54 | 54 | ||
55 | #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) | 55 | #define arch_spin_is_locked(x) ((x)->owner_cpu != 0) |
56 | #define __raw_spin_unlock_wait(lock) \ | 56 | #define arch_spin_unlock_wait(lock) \ |
57 | do { while (__raw_spin_is_locked(lock)) \ | 57 | do { while (arch_spin_is_locked(lock)) \ |
58 | _raw_spin_relax(lock); } while (0) | 58 | arch_spin_relax(lock); } while (0) |
59 | 59 | ||
60 | extern void _raw_spin_lock_wait(raw_spinlock_t *); | 60 | extern void arch_spin_lock_wait(arch_spinlock_t *); |
61 | extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); | 61 | extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); |
62 | extern int _raw_spin_trylock_retry(raw_spinlock_t *); | 62 | extern int arch_spin_trylock_retry(arch_spinlock_t *); |
63 | extern void _raw_spin_relax(raw_spinlock_t *lock); | 63 | extern void arch_spin_relax(arch_spinlock_t *lock); |
64 | 64 | ||
65 | static inline void __raw_spin_lock(raw_spinlock_t *lp) | 65 | static inline void arch_spin_lock(arch_spinlock_t *lp) |
66 | { | 66 | { |
67 | int old; | 67 | int old; |
68 | 68 | ||
69 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 69 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
70 | if (likely(old == 0)) | 70 | if (likely(old == 0)) |
71 | return; | 71 | return; |
72 | _raw_spin_lock_wait(lp); | 72 | arch_spin_lock_wait(lp); |
73 | } | 73 | } |
74 | 74 | ||
75 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, | 75 | static inline void arch_spin_lock_flags(arch_spinlock_t *lp, |
76 | unsigned long flags) | 76 | unsigned long flags) |
77 | { | 77 | { |
78 | int old; | 78 | int old; |
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, | |||
80 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 80 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
81 | if (likely(old == 0)) | 81 | if (likely(old == 0)) |
82 | return; | 82 | return; |
83 | _raw_spin_lock_wait_flags(lp, flags); | 83 | arch_spin_lock_wait_flags(lp, flags); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int __raw_spin_trylock(raw_spinlock_t *lp) | 86 | static inline int arch_spin_trylock(arch_spinlock_t *lp) |
87 | { | 87 | { |
88 | int old; | 88 | int old; |
89 | 89 | ||
90 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); | 90 | old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); |
91 | if (likely(old == 0)) | 91 | if (likely(old == 0)) |
92 | return 1; | 92 | return 1; |
93 | return _raw_spin_trylock_retry(lp); | 93 | return arch_spin_trylock_retry(lp); |
94 | } | 94 | } |
95 | 95 | ||
96 | static inline void __raw_spin_unlock(raw_spinlock_t *lp) | 96 | static inline void arch_spin_unlock(arch_spinlock_t *lp) |
97 | { | 97 | { |
98 | _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); | 98 | _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); |
99 | } | 99 | } |
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp) | |||
113 | * read_can_lock - would read_trylock() succeed? | 113 | * read_can_lock - would read_trylock() succeed? |
114 | * @lock: the rwlock in question. | 114 | * @lock: the rwlock in question. |
115 | */ | 115 | */ |
116 | #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) | 116 | #define arch_read_can_lock(x) ((int)(x)->lock >= 0) |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * write_can_lock - would write_trylock() succeed? | 119 | * write_can_lock - would write_trylock() succeed? |
120 | * @lock: the rwlock in question. | 120 | * @lock: the rwlock in question. |
121 | */ | 121 | */ |
122 | #define __raw_write_can_lock(x) ((x)->lock == 0) | 122 | #define arch_write_can_lock(x) ((x)->lock == 0) |
123 | 123 | ||
124 | extern void _raw_read_lock_wait(raw_rwlock_t *lp); | 124 | extern void _raw_read_lock_wait(arch_rwlock_t *lp); |
125 | extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); | 125 | extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); |
126 | extern int _raw_read_trylock_retry(raw_rwlock_t *lp); | 126 | extern int _raw_read_trylock_retry(arch_rwlock_t *lp); |
127 | extern void _raw_write_lock_wait(raw_rwlock_t *lp); | 127 | extern void _raw_write_lock_wait(arch_rwlock_t *lp); |
128 | extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); | 128 | extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); |
129 | extern int _raw_write_trylock_retry(raw_rwlock_t *lp); | 129 | extern int _raw_write_trylock_retry(arch_rwlock_t *lp); |
130 | 130 | ||
131 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 131 | static inline void arch_read_lock(arch_rwlock_t *rw) |
132 | { | 132 | { |
133 | unsigned int old; | 133 | unsigned int old; |
134 | old = rw->lock & 0x7fffffffU; | 134 | old = rw->lock & 0x7fffffffU; |
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
136 | _raw_read_lock_wait(rw); | 136 | _raw_read_lock_wait(rw); |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) | 139 | static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) |
140 | { | 140 | { |
141 | unsigned int old; | 141 | unsigned int old; |
142 | old = rw->lock & 0x7fffffffU; | 142 | old = rw->lock & 0x7fffffffU; |
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) | |||
144 | _raw_read_lock_wait_flags(rw, flags); | 144 | _raw_read_lock_wait_flags(rw, flags); |
145 | } | 145 | } |
146 | 146 | ||
147 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 147 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
148 | { | 148 | { |
149 | unsigned int old, cmp; | 149 | unsigned int old, cmp; |
150 | 150 | ||
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
155 | } while (cmp != old); | 155 | } while (cmp != old); |
156 | } | 156 | } |
157 | 157 | ||
158 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 158 | static inline void arch_write_lock(arch_rwlock_t *rw) |
159 | { | 159 | { |
160 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 160 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
161 | _raw_write_lock_wait(rw); | 161 | _raw_write_lock_wait(rw); |
162 | } | 162 | } |
163 | 163 | ||
164 | static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) | 164 | static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) |
165 | { | 165 | { |
166 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) | 166 | if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) |
167 | _raw_write_lock_wait_flags(rw, flags); | 167 | _raw_write_lock_wait_flags(rw, flags); |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 170 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
171 | { | 171 | { |
172 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); | 172 | _raw_compare_and_swap(&rw->lock, 0x80000000, 0); |
173 | } | 173 | } |
174 | 174 | ||
175 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 175 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
176 | { | 176 | { |
177 | unsigned int old; | 177 | unsigned int old; |
178 | old = rw->lock & 0x7fffffffU; | 178 | old = rw->lock & 0x7fffffffU; |
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
181 | return _raw_read_trylock_retry(rw); | 181 | return _raw_read_trylock_retry(rw); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 184 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
185 | { | 185 | { |
186 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) | 186 | if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) |
187 | return 1; | 187 | return 1; |
188 | return _raw_write_trylock_retry(rw); | 188 | return _raw_write_trylock_retry(rw); |
189 | } | 189 | } |
190 | 190 | ||
191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define arch_read_relax(lock) cpu_relax() |
192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define arch_write_relax(lock) cpu_relax() |
193 | 193 | ||
194 | #endif /* __ASM_SPINLOCK_H */ | 194 | #endif /* __ASM_SPINLOCK_H */ |
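The s390 lock word stores the bitwise complement of the holder's CPU number, so zero always means unlocked (even for CPU 0) and a contended waiter can tell the hypervisor exactly whom to yield to. An illustration using the definitions above:

	static inline void demo_lock(arch_spinlock_t *lp)
	{
		unsigned int me = ~smp_processor_id();	/* never 0 */
		int old;

		old = _raw_compare_and_swap(&lp->owner_cpu, 0, me);
		if (old == 0)
			return;			/* acquired */
		_raw_yield_cpu(~old);		/* ~old recovers holder's CPU */
	}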
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc40de04..9c76656a0af0 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h | |||
@@ -7,14 +7,14 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int owner_cpu; | 9 | volatile unsigned int owner_cpu; |
10 | } __attribute__ ((aligned (4))) raw_spinlock_t; | 10 | } __attribute__ ((aligned (4))) arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int lock; | 15 | volatile unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | 18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30250b7..10754a375668 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c | |||
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) | |||
39 | _raw_yield(); | 39 | _raw_yield(); |
40 | } | 40 | } |
41 | 41 | ||
42 | void _raw_spin_lock_wait(raw_spinlock_t *lp) | 42 | void arch_spin_lock_wait(arch_spinlock_t *lp) |
43 | { | 43 | { |
44 | int count = spin_retry; | 44 | int count = spin_retry; |
45 | unsigned int cpu = ~smp_processor_id(); | 45 | unsigned int cpu = ~smp_processor_id(); |
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) | |||
51 | _raw_yield_cpu(~owner); | 51 | _raw_yield_cpu(~owner); |
52 | count = spin_retry; | 52 | count = spin_retry; |
53 | } | 53 | } |
54 | if (__raw_spin_is_locked(lp)) | 54 | if (arch_spin_is_locked(lp)) |
55 | continue; | 55 | continue; |
56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 56 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
57 | return; | 57 | return; |
58 | } | 58 | } |
59 | } | 59 | } |
60 | EXPORT_SYMBOL(_raw_spin_lock_wait); | 60 | EXPORT_SYMBOL(arch_spin_lock_wait); |
61 | 61 | ||
62 | void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) | 62 | void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) |
63 | { | 63 | { |
64 | int count = spin_retry; | 64 | int count = spin_retry; |
65 | unsigned int cpu = ~smp_processor_id(); | 65 | unsigned int cpu = ~smp_processor_id(); |
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) | |||
72 | _raw_yield_cpu(~owner); | 72 | _raw_yield_cpu(~owner); |
73 | count = spin_retry; | 73 | count = spin_retry; |
74 | } | 74 | } |
75 | if (__raw_spin_is_locked(lp)) | 75 | if (arch_spin_is_locked(lp)) |
76 | continue; | 76 | continue; |
77 | local_irq_disable(); | 77 | local_irq_disable(); |
78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 78 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) | |||
80 | local_irq_restore(flags); | 80 | local_irq_restore(flags); |
81 | } | 81 | } |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(_raw_spin_lock_wait_flags); | 83 | EXPORT_SYMBOL(arch_spin_lock_wait_flags); |
84 | 84 | ||
85 | int _raw_spin_trylock_retry(raw_spinlock_t *lp) | 85 | int arch_spin_trylock_retry(arch_spinlock_t *lp) |
86 | { | 86 | { |
87 | unsigned int cpu = ~smp_processor_id(); | 87 | unsigned int cpu = ~smp_processor_id(); |
88 | int count; | 88 | int count; |
89 | 89 | ||
90 | for (count = spin_retry; count > 0; count--) { | 90 | for (count = spin_retry; count > 0; count--) { |
91 | if (__raw_spin_is_locked(lp)) | 91 | if (arch_spin_is_locked(lp)) |
92 | continue; | 92 | continue; |
93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) | 93 | if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) |
94 | return 1; | 94 | return 1; |
95 | } | 95 | } |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(_raw_spin_trylock_retry); | 98 | EXPORT_SYMBOL(arch_spin_trylock_retry); |
99 | 99 | ||
100 | void _raw_spin_relax(raw_spinlock_t *lock) | 100 | void arch_spin_relax(arch_spinlock_t *lock) |
101 | { | 101 | { |
102 | unsigned int cpu = lock->owner_cpu; | 102 | unsigned int cpu = lock->owner_cpu; |
103 | if (cpu != 0) | 103 | if (cpu != 0) |
104 | _raw_yield_cpu(~cpu); | 104 | _raw_yield_cpu(~cpu); |
105 | } | 105 | } |
106 | EXPORT_SYMBOL(_raw_spin_relax); | 106 | EXPORT_SYMBOL(arch_spin_relax); |
107 | 107 | ||
108 | void _raw_read_lock_wait(raw_rwlock_t *rw) | 108 | void _raw_read_lock_wait(arch_rwlock_t *rw) |
109 | { | 109 | { |
110 | unsigned int old; | 110 | unsigned int old; |
111 | int count = spin_retry; | 111 | int count = spin_retry; |
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) | |||
115 | _raw_yield(); | 115 | _raw_yield(); |
116 | count = spin_retry; | 116 | count = spin_retry; |
117 | } | 117 | } |
118 | if (!__raw_read_can_lock(rw)) | 118 | if (!arch_read_can_lock(rw)) |
119 | continue; | 119 | continue; |
120 | old = rw->lock & 0x7fffffffU; | 120 | old = rw->lock & 0x7fffffffU; |
121 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 121 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) |
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) | |||
124 | } | 124 | } |
125 | EXPORT_SYMBOL(_raw_read_lock_wait); | 125 | EXPORT_SYMBOL(_raw_read_lock_wait); |
126 | 126 | ||
127 | void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | 127 | void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) |
128 | { | 128 | { |
129 | unsigned int old; | 129 | unsigned int old; |
130 | int count = spin_retry; | 130 | int count = spin_retry; |
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | |||
135 | _raw_yield(); | 135 | _raw_yield(); |
136 | count = spin_retry; | 136 | count = spin_retry; |
137 | } | 137 | } |
138 | if (!__raw_read_can_lock(rw)) | 138 | if (!arch_read_can_lock(rw)) |
139 | continue; | 139 | continue; |
140 | old = rw->lock & 0x7fffffffU; | 140 | old = rw->lock & 0x7fffffffU; |
141 | local_irq_disable(); | 141 | local_irq_disable(); |
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | |||
145 | } | 145 | } |
146 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); | 146 | EXPORT_SYMBOL(_raw_read_lock_wait_flags); |
147 | 147 | ||
148 | int _raw_read_trylock_retry(raw_rwlock_t *rw) | 148 | int _raw_read_trylock_retry(arch_rwlock_t *rw) |
149 | { | 149 | { |
150 | unsigned int old; | 150 | unsigned int old; |
151 | int count = spin_retry; | 151 | int count = spin_retry; |
152 | 152 | ||
153 | while (count-- > 0) { | 153 | while (count-- > 0) { |
154 | if (!__raw_read_can_lock(rw)) | 154 | if (!arch_read_can_lock(rw)) |
155 | continue; | 155 | continue; |
156 | old = rw->lock & 0x7fffffffU; | 156 | old = rw->lock & 0x7fffffffU; |
157 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) | 157 | if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) |
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) | |||
161 | } | 161 | } |
162 | EXPORT_SYMBOL(_raw_read_trylock_retry); | 162 | EXPORT_SYMBOL(_raw_read_trylock_retry); |
163 | 163 | ||
164 | void _raw_write_lock_wait(raw_rwlock_t *rw) | 164 | void _raw_write_lock_wait(arch_rwlock_t *rw) |
165 | { | 165 | { |
166 | int count = spin_retry; | 166 | int count = spin_retry; |
167 | 167 | ||
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) | |||
170 | _raw_yield(); | 170 | _raw_yield(); |
171 | count = spin_retry; | 171 | count = spin_retry; |
172 | } | 172 | } |
173 | if (!__raw_write_can_lock(rw)) | 173 | if (!arch_write_can_lock(rw)) |
174 | continue; | 174 | continue; |
175 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 175 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
176 | return; | 176 | return; |
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) | |||
178 | } | 178 | } |
179 | EXPORT_SYMBOL(_raw_write_lock_wait); | 179 | EXPORT_SYMBOL(_raw_write_lock_wait); |
180 | 180 | ||
181 | void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | 181 | void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) |
182 | { | 182 | { |
183 | int count = spin_retry; | 183 | int count = spin_retry; |
184 | 184 | ||
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | |||
188 | _raw_yield(); | 188 | _raw_yield(); |
189 | count = spin_retry; | 189 | count = spin_retry; |
190 | } | 190 | } |
191 | if (!__raw_write_can_lock(rw)) | 191 | if (!arch_write_can_lock(rw)) |
192 | continue; | 192 | continue; |
193 | local_irq_disable(); | 193 | local_irq_disable(); |
194 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 194 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) | |||
197 | } | 197 | } |
198 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); | 198 | EXPORT_SYMBOL(_raw_write_lock_wait_flags); |
199 | 199 | ||
200 | int _raw_write_trylock_retry(raw_rwlock_t *rw) | 200 | int _raw_write_trylock_retry(arch_rwlock_t *rw) |
201 | { | 201 | { |
202 | int count = spin_retry; | 202 | int count = spin_retry; |
203 | 203 | ||
204 | while (count-- > 0) { | 204 | while (count-- > 0) { |
205 | if (!__raw_write_can_lock(rw)) | 205 | if (!arch_write_can_lock(rw)) |
206 | continue; | 206 | continue; |
207 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) | 207 | if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) |
208 | return 1; | 208 | return 1; |
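
The s390 conversion above is purely a rename: the rwlock word encoding is unchanged, with the low 31 bits (the 0x7fffffffU mask) counting readers and bit 31 (0x80000000) marking an exclusive writer. A minimal portable sketch of that encoding, using GCC's __sync_val_compare_and_swap where the kernel uses its own _raw_compare_and_swap (the demo_ names are invented for illustration):

    #include <stdint.h>

    typedef struct { volatile uint32_t lock; } demo_rwlock_t;

    /* Reader: bump the 31-bit count; the CAS fails if the writer bit is
     * set, because the expected value deliberately omits bit 31. */
    static int demo_read_trylock(demo_rwlock_t *rw)
    {
        uint32_t old = rw->lock & 0x7fffffffU;
        return __sync_val_compare_and_swap(&rw->lock, old, old + 1) == old;
    }

    static void demo_read_unlock(demo_rwlock_t *rw)
    {
        __sync_fetch_and_sub(&rw->lock, 1);
    }

    /* Writer: the whole word must be zero, i.e. no readers, no writer. */
    static int demo_write_trylock(demo_rwlock_t *rw)
    {
        return __sync_val_compare_and_swap(&rw->lock, 0, 0x80000000U) == 0;
    }

The *_wait and *_retry loops in the hunk are just this trylock step wrapped in spin_retry-bounded spinning, with a _raw_yield() between exhausted rounds.
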
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053fd..bdc0f3b6c56a 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h | |||
@@ -23,10 +23,10 @@ | |||
23 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 23 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #define __raw_spin_is_locked(x) ((x)->lock <= 0) | 26 | #define arch_spin_is_locked(x) ((x)->lock <= 0) |
27 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 27 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
28 | #define __raw_spin_unlock_wait(x) \ | 28 | #define arch_spin_unlock_wait(x) \ |
29 | do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) | 29 | do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Simple spin lock operations. There are two variants, one clears IRQs | 32 | * Simple spin lock operations. There are two variants, one clears IRQs |
@@ -34,14 +34,14 @@ | |||
34 | * | 34 | * |
35 | * We make no fairness assumptions. They have a cost. | 35 | * We make no fairness assumptions. They have a cost. |
36 | */ | 36 | */ |
37 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 37 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
38 | { | 38 | { |
39 | unsigned long tmp; | 39 | unsigned long tmp; |
40 | unsigned long oldval; | 40 | unsigned long oldval; |
41 | 41 | ||
42 | __asm__ __volatile__ ( | 42 | __asm__ __volatile__ ( |
43 | "1: \n\t" | 43 | "1: \n\t" |
44 | "movli.l @%2, %0 ! __raw_spin_lock \n\t" | 44 | "movli.l @%2, %0 ! arch_spin_lock \n\t" |
45 | "mov %0, %1 \n\t" | 45 | "mov %0, %1 \n\t" |
46 | "mov #0, %0 \n\t" | 46 | "mov #0, %0 \n\t" |
47 | "movco.l %0, @%2 \n\t" | 47 | "movco.l %0, @%2 \n\t" |
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
54 | ); | 54 | ); |
55 | } | 55 | } |
56 | 56 | ||
57 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 57 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
58 | { | 58 | { |
59 | unsigned long tmp; | 59 | unsigned long tmp; |
60 | 60 | ||
61 | __asm__ __volatile__ ( | 61 | __asm__ __volatile__ ( |
62 | "mov #1, %0 ! __raw_spin_unlock \n\t" | 62 | "mov #1, %0 ! arch_spin_unlock \n\t" |
63 | "mov.l %0, @%1 \n\t" | 63 | "mov.l %0, @%1 \n\t" |
64 | : "=&z" (tmp) | 64 | : "=&z" (tmp) |
65 | : "r" (&lock->lock) | 65 | : "r" (&lock->lock) |
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
67 | ); | 67 | ); |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 70 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
71 | { | 71 | { |
72 | unsigned long tmp, oldval; | 72 | unsigned long tmp, oldval; |
73 | 73 | ||
74 | __asm__ __volatile__ ( | 74 | __asm__ __volatile__ ( |
75 | "1: \n\t" | 75 | "1: \n\t" |
76 | "movli.l @%2, %0 ! __raw_spin_trylock \n\t" | 76 | "movli.l @%2, %0 ! arch_spin_trylock \n\t" |
77 | "mov %0, %1 \n\t" | 77 | "mov %0, %1 \n\t" |
78 | "mov #0, %0 \n\t" | 78 | "mov #0, %0 \n\t" |
79 | "movco.l %0, @%2 \n\t" | 79 | "movco.l %0, @%2 \n\t" |
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
100 | * read_can_lock - would read_trylock() succeed? | 100 | * read_can_lock - would read_trylock() succeed? |
101 | * @lock: the rwlock in question. | 101 | * @lock: the rwlock in question. |
102 | */ | 102 | */ |
103 | #define __raw_read_can_lock(x) ((x)->lock > 0) | 103 | #define arch_read_can_lock(x) ((x)->lock > 0) |
104 | 104 | ||
105 | /** | 105 | /** |
106 | * write_can_lock - would write_trylock() succeed? | 106 | * write_can_lock - would write_trylock() succeed? |
107 | * @lock: the rwlock in question. | 107 | * @lock: the rwlock in question. |
108 | */ | 108 | */ |
109 | #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) | 109 | #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) |
110 | 110 | ||
111 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 111 | static inline void arch_read_lock(arch_rwlock_t *rw) |
112 | { | 112 | { |
113 | unsigned long tmp; | 113 | unsigned long tmp; |
114 | 114 | ||
115 | __asm__ __volatile__ ( | 115 | __asm__ __volatile__ ( |
116 | "1: \n\t" | 116 | "1: \n\t" |
117 | "movli.l @%1, %0 ! __raw_read_lock \n\t" | 117 | "movli.l @%1, %0 ! arch_read_lock \n\t" |
118 | "cmp/pl %0 \n\t" | 118 | "cmp/pl %0 \n\t" |
119 | "bf 1b \n\t" | 119 | "bf 1b \n\t" |
120 | "add #-1, %0 \n\t" | 120 | "add #-1, %0 \n\t" |
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
126 | ); | 126 | ); |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 129 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
130 | { | 130 | { |
131 | unsigned long tmp; | 131 | unsigned long tmp; |
132 | 132 | ||
133 | __asm__ __volatile__ ( | 133 | __asm__ __volatile__ ( |
134 | "1: \n\t" | 134 | "1: \n\t" |
135 | "movli.l @%1, %0 ! __raw_read_unlock \n\t" | 135 | "movli.l @%1, %0 ! arch_read_unlock \n\t" |
136 | "add #1, %0 \n\t" | 136 | "add #1, %0 \n\t" |
137 | "movco.l %0, @%1 \n\t" | 137 | "movco.l %0, @%1 \n\t" |
138 | "bf 1b \n\t" | 138 | "bf 1b \n\t" |
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
142 | ); | 142 | ); |
143 | } | 143 | } |
144 | 144 | ||
145 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 145 | static inline void arch_write_lock(arch_rwlock_t *rw) |
146 | { | 146 | { |
147 | unsigned long tmp; | 147 | unsigned long tmp; |
148 | 148 | ||
149 | __asm__ __volatile__ ( | 149 | __asm__ __volatile__ ( |
150 | "1: \n\t" | 150 | "1: \n\t" |
151 | "movli.l @%1, %0 ! __raw_write_lock \n\t" | 151 | "movli.l @%1, %0 ! arch_write_lock \n\t" |
152 | "cmp/hs %2, %0 \n\t" | 152 | "cmp/hs %2, %0 \n\t" |
153 | "bf 1b \n\t" | 153 | "bf 1b \n\t" |
154 | "sub %2, %0 \n\t" | 154 | "sub %2, %0 \n\t" |
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
160 | ); | 160 | ); |
161 | } | 161 | } |
162 | 162 | ||
163 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 163 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
164 | { | 164 | { |
165 | __asm__ __volatile__ ( | 165 | __asm__ __volatile__ ( |
166 | "mov.l %1, @%0 ! __raw_write_unlock \n\t" | 166 | "mov.l %1, @%0 ! arch_write_unlock \n\t" |
167 | : | 167 | : |
168 | : "r" (&rw->lock), "r" (RW_LOCK_BIAS) | 168 | : "r" (&rw->lock), "r" (RW_LOCK_BIAS) |
169 | : "t", "memory" | 169 | : "t", "memory" |
170 | ); | 170 | ); |
171 | } | 171 | } |
172 | 172 | ||
173 | static inline int __raw_read_trylock(raw_rwlock_t *rw) | 173 | static inline int arch_read_trylock(arch_rwlock_t *rw) |
174 | { | 174 | { |
175 | unsigned long tmp, oldval; | 175 | unsigned long tmp, oldval; |
176 | 176 | ||
177 | __asm__ __volatile__ ( | 177 | __asm__ __volatile__ ( |
178 | "1: \n\t" | 178 | "1: \n\t" |
179 | "movli.l @%2, %0 ! __raw_read_trylock \n\t" | 179 | "movli.l @%2, %0 ! arch_read_trylock \n\t" |
180 | "mov %0, %1 \n\t" | 180 | "mov %0, %1 \n\t" |
181 | "cmp/pl %0 \n\t" | 181 | "cmp/pl %0 \n\t" |
182 | "bf 2f \n\t" | 182 | "bf 2f \n\t" |
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) | |||
193 | return (oldval > 0); | 193 | return (oldval > 0); |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 196 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
197 | { | 197 | { |
198 | unsigned long tmp, oldval; | 198 | unsigned long tmp, oldval; |
199 | 199 | ||
200 | __asm__ __volatile__ ( | 200 | __asm__ __volatile__ ( |
201 | "1: \n\t" | 201 | "1: \n\t" |
202 | "movli.l @%2, %0 ! __raw_write_trylock \n\t" | 202 | "movli.l @%2, %0 ! arch_write_trylock \n\t" |
203 | "mov %0, %1 \n\t" | 203 | "mov %0, %1 \n\t" |
204 | "cmp/hs %3, %0 \n\t" | 204 | "cmp/hs %3, %0 \n\t" |
205 | "bf 2f \n\t" | 205 | "bf 2f \n\t" |
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
216 | return (oldval > (RW_LOCK_BIAS - 1)); | 216 | return (oldval > (RW_LOCK_BIAS - 1)); |
217 | } | 217 | } |
218 | 218 | ||
219 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 219 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
220 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 220 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
221 | 221 | ||
222 | #define _raw_spin_relax(lock) cpu_relax() | 222 | #define arch_spin_relax(lock) cpu_relax() |
223 | #define _raw_read_relax(lock) cpu_relax() | 223 | #define arch_read_relax(lock) cpu_relax() |
224 | #define _raw_write_relax(lock) cpu_relax() | 224 | #define arch_write_relax(lock) cpu_relax() |
225 | 225 | ||
226 | #endif /* __ASM_SH_SPINLOCK_H */ | 226 | #endif /* __ASM_SH_SPINLOCK_H */ |
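
The sh rwlock above is a bias lock: the word starts at RW_LOCK_BIAS (0x01000000, defined in the spinlock_types.h hunk below), each reader takes one unit, and a writer claims the entire bias in one go, so lock == RW_LOCK_BIAS means fully unlocked and lock > 0 means another reader still fits. A hedged C sketch of the same arithmetic, with GCC atomics in place of the movli.l/movco.l LL/SC pairs (demo_ names invented):

    #include <stdint.h>

    #define DEMO_RW_LOCK_BIAS 0x01000000

    typedef struct { volatile int32_t lock; } demo_rwlock_t;

    static int demo_read_trylock(demo_rwlock_t *rw)
    {
        /* Take one unit; undo if a writer already drained the bias. */
        if (__sync_sub_and_fetch(&rw->lock, 1) >= 0)
            return 1;
        __sync_add_and_fetch(&rw->lock, 1);
        return 0;
    }

    static int demo_write_trylock(demo_rwlock_t *rw)
    {
        /* Claim the whole bias; undo unless the word held the full bias. */
        if (__sync_sub_and_fetch(&rw->lock, DEMO_RW_LOCK_BIAS) == 0)
            return 1;
        __sync_add_and_fetch(&rw->lock, DEMO_RW_LOCK_BIAS);
        return 0;
    }

Unlock is the mirror image, as in the hunk: readers add 1 back, writers add the full bias back.
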
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60c..9b7560db06ca 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h | |||
@@ -7,15 +7,15 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int lock; | 9 | volatile unsigned int lock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int lock; | 15 | volatile unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define RW_LOCK_BIAS 0x01000000 | 18 | #define RW_LOCK_BIAS 0x01000000 |
19 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 19 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
20 | 20 | ||
21 | #endif | 21 | #endif |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index e1913f28f418..d2d41d046657 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
76 | if (!desc) | 76 | if (!desc) |
77 | return 0; | 77 | return 0; |
78 | 78 | ||
79 | spin_lock_irqsave(&desc->lock, flags); | 79 | raw_spin_lock_irqsave(&desc->lock, flags); |
80 | for_each_online_cpu(j) | 80 | for_each_online_cpu(j) |
81 | any_count |= kstat_irqs_cpu(i, j); | 81 | any_count |= kstat_irqs_cpu(i, j); |
82 | action = desc->action; | 82 | action = desc->action; |
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
97 | 97 | ||
98 | seq_putc(p, '\n'); | 98 | seq_putc(p, '\n'); |
99 | out: | 99 | out: |
100 | spin_unlock_irqrestore(&desc->lock, flags); | 100 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
101 | return 0; | 101 | return 0; |
102 | } | 102 | } |
103 | #endif | 103 | #endif |
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630cff636..7f9b9dba38a6 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
@@ -10,12 +10,12 @@ | |||
10 | 10 | ||
11 | #include <asm/psr.h> | 11 | #include <asm/psr.h> |
12 | 12 | ||
13 | #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) | 13 | #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
14 | 14 | ||
15 | #define __raw_spin_unlock_wait(lock) \ | 15 | #define arch_spin_unlock_wait(lock) \ |
16 | do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) | 16 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
17 | 17 | ||
18 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 18 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
19 | { | 19 | { |
20 | __asm__ __volatile__( | 20 | __asm__ __volatile__( |
21 | "\n1:\n\t" | 21 | "\n1:\n\t" |
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
35 | : "g2", "memory", "cc"); | 35 | : "g2", "memory", "cc"); |
36 | } | 36 | } |
37 | 37 | ||
38 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 38 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
39 | { | 39 | { |
40 | unsigned int result; | 40 | unsigned int result; |
41 | __asm__ __volatile__("ldstub [%1], %0" | 41 | __asm__ __volatile__("ldstub [%1], %0" |
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
45 | return (result == 0); | 45 | return (result == 0); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 48 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
49 | { | 49 | { |
50 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | 50 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); |
51 | } | 51 | } |
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
65 | * Sort of like atomic_t's on Sparc, but even more clever. | 65 | * Sort of like atomic_t's on Sparc, but even more clever. |
66 | * | 66 | * |
67 | * ------------------------------------ | 67 | * ------------------------------------ |
68 | * | 24-bit counter | wlock | raw_rwlock_t | 68 | * | 24-bit counter | wlock | arch_rwlock_t |
69 | * ------------------------------------ | 69 | * ------------------------------------ |
70 | * 31 8 7 0 | 70 | * 31 8 7 0 |
71 | * | 71 | * |
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
76 | * | 76 | * |
77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
78 | */ | 78 | */ |
79 | static inline void arch_read_lock(raw_rwlock_t *rw) | 79 | static inline void __arch_read_lock(arch_rwlock_t *rw) |
80 | { | 80 | { |
81 | register raw_rwlock_t *lp asm("g1"); | 81 | register arch_rwlock_t *lp asm("g1"); |
82 | lp = rw; | 82 | lp = rw; |
83 | __asm__ __volatile__( | 83 | __asm__ __volatile__( |
84 | "mov %%o7, %%g4\n\t" | 84 | "mov %%o7, %%g4\n\t" |
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw) | |||
89 | : "g2", "g4", "memory", "cc"); | 89 | : "g2", "g4", "memory", "cc"); |
90 | } | 90 | } |
91 | 91 | ||
92 | #define __raw_read_lock(lock) \ | 92 | #define arch_read_lock(lock) \ |
93 | do { unsigned long flags; \ | 93 | do { unsigned long flags; \ |
94 | local_irq_save(flags); \ | 94 | local_irq_save(flags); \ |
95 | arch_read_lock(lock); \ | 95 | __arch_read_lock(lock); \ |
96 | local_irq_restore(flags); \ | 96 | local_irq_restore(flags); \ |
97 | } while(0) | 97 | } while(0) |
98 | 98 | ||
99 | static inline void arch_read_unlock(raw_rwlock_t *rw) | 99 | static inline void __arch_read_unlock(arch_rwlock_t *rw) |
100 | { | 100 | { |
101 | register raw_rwlock_t *lp asm("g1"); | 101 | register arch_rwlock_t *lp asm("g1"); |
102 | lp = rw; | 102 | lp = rw; |
103 | __asm__ __volatile__( | 103 | __asm__ __volatile__( |
104 | "mov %%o7, %%g4\n\t" | 104 | "mov %%o7, %%g4\n\t" |
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw) | |||
109 | : "g2", "g4", "memory", "cc"); | 109 | : "g2", "g4", "memory", "cc"); |
110 | } | 110 | } |
111 | 111 | ||
112 | #define __raw_read_unlock(lock) \ | 112 | #define arch_read_unlock(lock) \ |
113 | do { unsigned long flags; \ | 113 | do { unsigned long flags; \ |
114 | local_irq_save(flags); \ | 114 | local_irq_save(flags); \ |
115 | arch_read_unlock(lock); \ | 115 | __arch_read_unlock(lock); \ |
116 | local_irq_restore(flags); \ | 116 | local_irq_restore(flags); \ |
117 | } while(0) | 117 | } while(0) |
118 | 118 | ||
119 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 119 | static inline void arch_write_lock(arch_rwlock_t *rw) |
120 | { | 120 | { |
121 | register raw_rwlock_t *lp asm("g1"); | 121 | register arch_rwlock_t *lp asm("g1"); |
122 | lp = rw; | 122 | lp = rw; |
123 | __asm__ __volatile__( | 123 | __asm__ __volatile__( |
124 | "mov %%o7, %%g4\n\t" | 124 | "mov %%o7, %%g4\n\t" |
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
130 | *(volatile __u32 *)&lp->lock = ~0U; | 130 | *(volatile __u32 *)&lp->lock = ~0U; |
131 | } | 131 | } |
132 | 132 | ||
133 | static inline int __raw_write_trylock(raw_rwlock_t *rw) | 133 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
134 | { | 134 | { |
135 | unsigned int val; | 135 | unsigned int val; |
136 | 136 | ||
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
150 | return (val == 0); | 150 | return (val == 0); |
151 | } | 151 | } |
152 | 152 | ||
153 | static inline int arch_read_trylock(raw_rwlock_t *rw) | 153 | static inline int __arch_read_trylock(arch_rwlock_t *rw) |
154 | { | 154 | { |
155 | register raw_rwlock_t *lp asm("g1"); | 155 | register arch_rwlock_t *lp asm("g1"); |
156 | register int res asm("o0"); | 156 | register int res asm("o0"); |
157 | lp = rw; | 157 | lp = rw; |
158 | __asm__ __volatile__( | 158 | __asm__ __volatile__( |
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) | |||
165 | return res; | 165 | return res; |
166 | } | 166 | } |
167 | 167 | ||
168 | #define __raw_read_trylock(lock) \ | 168 | #define arch_read_trylock(lock) \ |
169 | ({ unsigned long flags; \ | 169 | ({ unsigned long flags; \ |
170 | int res; \ | 170 | int res; \ |
171 | local_irq_save(flags); \ | 171 | local_irq_save(flags); \ |
172 | res = arch_read_trylock(lock); \ | 172 | res = __arch_read_trylock(lock); \ |
173 | local_irq_restore(flags); \ | 173 | local_irq_restore(flags); \ |
174 | res; \ | 174 | res; \ |
175 | }) | 175 | }) |
176 | 176 | ||
177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 177 | #define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
178 | 178 | ||
179 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 179 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
180 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | 180 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
181 | #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) | 181 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) |
182 | 182 | ||
183 | #define _raw_spin_relax(lock) cpu_relax() | 183 | #define arch_spin_relax(lock) cpu_relax() |
184 | #define _raw_read_relax(lock) cpu_relax() | 184 | #define arch_read_relax(lock) cpu_relax() |
185 | #define _raw_write_relax(lock) cpu_relax() | 185 | #define arch_write_relax(lock) cpu_relax() |
186 | 186 | ||
187 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) | 187 | #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) |
188 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 188 | #define arch_write_can_lock(rw) (!(rw)->lock) |
189 | 189 | ||
190 | #endif /* !(__ASSEMBLY__) */ | 190 | #endif /* !(__ASSEMBLY__) */ |
191 | 191 | ||
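
The 32-bit sparc scheme keeps both halves of the rwlock in one word, exactly as the layout comment in the hunk shows: a 24-bit reader counter above an 8-bit wlock byte. The can-lock predicates at the bottom of the file fall straight out of that layout; a small sketch of the same tests (demo_ names invented):

    #include <stdint.h>

    /* | 24-bit counter | wlock |  -- bits 31..8 count readers,
     * bits 7..0 are the writer byte, per the comment in the hunk. */
    #define DEMO_WLOCK_MASK 0xffU

    static int demo_read_can_lock(uint32_t word)  { return (word & DEMO_WLOCK_MASK) == 0; }
    static int demo_write_can_lock(uint32_t word) { return word == 0; }
    static uint32_t demo_readers(uint32_t word)   { return word >> 8; }

The __arch_read_* helpers themselves are hand-written assembly with fixed register conventions (g1 for the lock, o0 for the result); the arch_read_lock()/arch_read_unlock() macros additionally disable local interrupts around the call, presumably so an interrupt handler cannot contend for the wlock byte that even readers touch briefly.
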
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e514783582..073936a8b275 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
@@ -21,13 +21,13 @@ | |||
21 | * the spinner sections must be pre-V9 branches. | 21 | * the spinner sections must be pre-V9 branches. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #define __raw_spin_is_locked(lp) ((lp)->lock != 0) | 24 | #define arch_spin_is_locked(lp) ((lp)->lock != 0) |
25 | 25 | ||
26 | #define __raw_spin_unlock_wait(lp) \ | 26 | #define arch_spin_unlock_wait(lp) \ |
27 | do { rmb(); \ | 27 | do { rmb(); \ |
28 | } while((lp)->lock) | 28 | } while((lp)->lock) |
29 | 29 | ||
30 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 30 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
31 | { | 31 | { |
32 | unsigned long tmp; | 32 | unsigned long tmp; |
33 | 33 | ||
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
46 | : "memory"); | 46 | : "memory"); |
47 | } | 47 | } |
48 | 48 | ||
49 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 49 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
50 | { | 50 | { |
51 | unsigned long result; | 51 | unsigned long result; |
52 | 52 | ||
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
59 | return (result == 0UL); | 59 | return (result == 0UL); |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 62 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
63 | { | 63 | { |
64 | __asm__ __volatile__( | 64 | __asm__ __volatile__( |
65 | " stb %%g0, [%0]" | 65 | " stb %%g0, [%0]" |
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
68 | : "memory"); | 68 | : "memory"); |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 71 | static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
72 | { | 72 | { |
73 | unsigned long tmp1, tmp2; | 73 | unsigned long tmp1, tmp2; |
74 | 74 | ||
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
92 | 92 | ||
93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ | 93 | /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ |
94 | 94 | ||
95 | static void inline arch_read_lock(raw_rwlock_t *lock) | 95 | static void inline arch_read_lock(arch_rwlock_t *lock) |
96 | { | 96 | { |
97 | unsigned long tmp1, tmp2; | 97 | unsigned long tmp1, tmp2; |
98 | 98 | ||
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) | |||
115 | : "memory"); | 115 | : "memory"); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int inline arch_read_trylock(raw_rwlock_t *lock) | 118 | static int inline arch_read_trylock(arch_rwlock_t *lock) |
119 | { | 119 | { |
120 | int tmp1, tmp2; | 120 | int tmp1, tmp2; |
121 | 121 | ||
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) | |||
136 | return tmp1; | 136 | return tmp1; |
137 | } | 137 | } |
138 | 138 | ||
139 | static void inline arch_read_unlock(raw_rwlock_t *lock) | 139 | static void inline arch_read_unlock(arch_rwlock_t *lock) |
140 | { | 140 | { |
141 | unsigned long tmp1, tmp2; | 141 | unsigned long tmp1, tmp2; |
142 | 142 | ||
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) | |||
152 | : "memory"); | 152 | : "memory"); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void inline arch_write_lock(raw_rwlock_t *lock) | 155 | static void inline arch_write_lock(arch_rwlock_t *lock) |
156 | { | 156 | { |
157 | unsigned long mask, tmp1, tmp2; | 157 | unsigned long mask, tmp1, tmp2; |
158 | 158 | ||
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) | |||
177 | : "memory"); | 177 | : "memory"); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void inline arch_write_unlock(raw_rwlock_t *lock) | 180 | static void inline arch_write_unlock(arch_rwlock_t *lock) |
181 | { | 181 | { |
182 | __asm__ __volatile__( | 182 | __asm__ __volatile__( |
183 | " stw %%g0, [%0]" | 183 | " stw %%g0, [%0]" |
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) | |||
186 | : "memory"); | 186 | : "memory"); |
187 | } | 187 | } |
188 | 188 | ||
189 | static int inline arch_write_trylock(raw_rwlock_t *lock) | 189 | static int inline arch_write_trylock(arch_rwlock_t *lock) |
190 | { | 190 | { |
191 | unsigned long mask, tmp1, tmp2, result; | 191 | unsigned long mask, tmp1, tmp2, result; |
192 | 192 | ||
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) | |||
210 | return result; | 210 | return result; |
211 | } | 211 | } |
212 | 212 | ||
213 | #define __raw_read_lock(p) arch_read_lock(p) | 213 | #define arch_read_lock(p) arch_read_lock(p) |
214 | #define __raw_read_lock_flags(p, f) arch_read_lock(p) | 214 | #define arch_read_lock_flags(p, f) arch_read_lock(p) |
215 | #define __raw_read_trylock(p) arch_read_trylock(p) | 215 | #define arch_read_trylock(p) arch_read_trylock(p) |
216 | #define __raw_read_unlock(p) arch_read_unlock(p) | 216 | #define arch_read_unlock(p) arch_read_unlock(p) |
217 | #define __raw_write_lock(p) arch_write_lock(p) | 217 | #define arch_write_lock(p) arch_write_lock(p) |
218 | #define __raw_write_lock_flags(p, f) arch_write_lock(p) | 218 | #define arch_write_lock_flags(p, f) arch_write_lock(p) |
219 | #define __raw_write_unlock(p) arch_write_unlock(p) | 219 | #define arch_write_unlock(p) arch_write_unlock(p) |
220 | #define __raw_write_trylock(p) arch_write_trylock(p) | 220 | #define arch_write_trylock(p) arch_write_trylock(p) |
221 | 221 | ||
222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define arch_write_can_lock(rw) (!(rw)->lock) |
224 | 224 | ||
225 | #define _raw_spin_relax(lock) cpu_relax() | 225 | #define arch_spin_relax(lock) cpu_relax() |
226 | #define _raw_read_relax(lock) cpu_relax() | 226 | #define arch_read_relax(lock) cpu_relax() |
227 | #define _raw_write_relax(lock) cpu_relax() | 227 | #define arch_write_relax(lock) cpu_relax() |
228 | 228 | ||
229 | #endif /* !(__ASSEMBLY__) */ | 229 | #endif /* !(__ASSEMBLY__) */ |
230 | 230 | ||
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01c585b..9c454fdeaad8 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h | |||
@@ -7,14 +7,14 @@ | |||
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned char lock; | 9 | volatile unsigned char lock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int lock; | 15 | volatile unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | 18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } |
19 | 19 | ||
20 | #endif | 20 | #endif |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index ce996f97855f..8d6882bb480a 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
176 | } | 176 | } |
177 | 177 | ||
178 | if (i < NR_IRQS) { | 178 | if (i < NR_IRQS) { |
179 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 179 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
180 | action = irq_desc[i].action; | 180 | action = irq_desc[i].action; |
181 | if (!action) | 181 | if (!action) |
182 | goto skip; | 182 | goto skip; |
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
195 | 195 | ||
196 | seq_putc(p, '\n'); | 196 | seq_putc(p, '\n'); |
197 | skip: | 197 | skip: |
198 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 198 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
199 | } else if (i == NR_IRQS) { | 199 | } else if (i == NR_IRQS) { |
200 | seq_printf(p, "NMI: "); | 200 | seq_printf(p, "NMI: "); |
201 | for_each_online_cpu(j) | 201 | for_each_online_cpu(j) |
@@ -785,14 +785,14 @@ void fixup_irqs(void) | |||
785 | for (irq = 0; irq < NR_IRQS; irq++) { | 785 | for (irq = 0; irq < NR_IRQS; irq++) { |
786 | unsigned long flags; | 786 | unsigned long flags; |
787 | 787 | ||
788 | spin_lock_irqsave(&irq_desc[irq].lock, flags); | 788 | raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); |
789 | if (irq_desc[irq].action && | 789 | if (irq_desc[irq].action && |
790 | !(irq_desc[irq].status & IRQ_PER_CPU)) { | 790 | !(irq_desc[irq].status & IRQ_PER_CPU)) { |
791 | if (irq_desc[irq].chip->set_affinity) | 791 | if (irq_desc[irq].chip->set_affinity) |
792 | irq_desc[irq].chip->set_affinity(irq, | 792 | irq_desc[irq].chip->set_affinity(irq, |
793 | irq_desc[irq].affinity); | 793 | irq_desc[irq].affinity); |
794 | } | 794 | } |
795 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | 795 | raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); |
796 | } | 796 | } |
797 | 797 | ||
798 | tick_ops->disable_irq(); | 798 | tick_ops->disable_irq(); |
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 039270b9b73b..89474ba0741e 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c | |||
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | if (i < NR_IRQS) { | 36 | if (i < NR_IRQS) { |
37 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 37 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
38 | action = irq_desc[i].action; | 38 | action = irq_desc[i].action; |
39 | if (!action) | 39 | if (!action) |
40 | goto skip; | 40 | goto skip; |
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
53 | 53 | ||
54 | seq_putc(p, '\n'); | 54 | seq_putc(p, '\n'); |
55 | skip: | 55 | skip: |
56 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 56 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
57 | } else if (i == NR_IRQS) | 57 | } else if (i == NR_IRQS) |
58 | seq_putc(p, '\n'); | 58 | seq_putc(p, '\n'); |
59 | 59 | ||
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb38994859c..dd59a85a918f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | |||
731 | 731 | ||
732 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) | 732 | #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) |
733 | 733 | ||
734 | static inline int __raw_spin_is_locked(struct raw_spinlock *lock) | 734 | static inline int arch_spin_is_locked(struct arch_spinlock *lock) |
735 | { | 735 | { |
736 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); | 736 | return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); |
737 | } | 737 | } |
738 | 738 | ||
739 | static inline int __raw_spin_is_contended(struct raw_spinlock *lock) | 739 | static inline int arch_spin_is_contended(struct arch_spinlock *lock) |
740 | { | 740 | { |
741 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); | 741 | return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); |
742 | } | 742 | } |
743 | #define __raw_spin_is_contended __raw_spin_is_contended | 743 | #define arch_spin_is_contended arch_spin_is_contended |
744 | 744 | ||
745 | static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) | 745 | static __always_inline void arch_spin_lock(struct arch_spinlock *lock) |
746 | { | 746 | { |
747 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); | 747 | PVOP_VCALL1(pv_lock_ops.spin_lock, lock); |
748 | } | 748 | } |
749 | 749 | ||
750 | static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, | 750 | static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, |
751 | unsigned long flags) | 751 | unsigned long flags) |
752 | { | 752 | { |
753 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); | 753 | PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); |
754 | } | 754 | } |
755 | 755 | ||
756 | static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) | 756 | static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) |
757 | { | 757 | { |
758 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); | 758 | return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); |
759 | } | 759 | } |
760 | 760 | ||
761 | static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) | 761 | static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) |
762 | { | 762 | { |
763 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); | 763 | PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); |
764 | } | 764 | } |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473c8da0..b1e70d51e40c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -318,14 +318,14 @@ struct pv_mmu_ops { | |||
318 | phys_addr_t phys, pgprot_t flags); | 318 | phys_addr_t phys, pgprot_t flags); |
319 | }; | 319 | }; |
320 | 320 | ||
321 | struct raw_spinlock; | 321 | struct arch_spinlock; |
322 | struct pv_lock_ops { | 322 | struct pv_lock_ops { |
323 | int (*spin_is_locked)(struct raw_spinlock *lock); | 323 | int (*spin_is_locked)(struct arch_spinlock *lock); |
324 | int (*spin_is_contended)(struct raw_spinlock *lock); | 324 | int (*spin_is_contended)(struct arch_spinlock *lock); |
325 | void (*spin_lock)(struct raw_spinlock *lock); | 325 | void (*spin_lock)(struct arch_spinlock *lock); |
326 | void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); | 326 | void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); |
327 | int (*spin_trylock)(struct raw_spinlock *lock); | 327 | int (*spin_trylock)(struct arch_spinlock *lock); |
328 | void (*spin_unlock)(struct raw_spinlock *lock); | 328 | void (*spin_unlock)(struct arch_spinlock *lock); |
329 | }; | 329 | }; |
330 | 330 | ||
331 | /* This contains all the paravirt structures: we get a convenient | 331 | /* This contains all the paravirt structures: we get a convenient |
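
struct pv_lock_ops is nothing more than a table of function pointers keyed on the renamed struct arch_spinlock: native boots leave the default ticket-lock entries in place, while a hypervisor backend such as the Xen code further down overwrites the slots. A stripped-down sketch of that indirection pattern (demo_ names invented; the real dispatch goes through the PVOP_CALL macros, not a plain indirect call):

    struct demo_lock;                  /* opaque, like struct arch_spinlock */

    struct demo_lock_ops {
        int  (*is_locked)(struct demo_lock *l);
        void (*lock)(struct demo_lock *l);
        int  (*trylock)(struct demo_lock *l);
        void (*unlock)(struct demo_lock *l);
    };

    /* One writable global table; a backend patches it at boot. */
    extern struct demo_lock_ops demo_lock_ops;

    static inline void demo_spin_lock(struct demo_lock *l)
    {
        demo_lock_ops.lock(l);   /* cf. PVOP_VCALL1(pv_lock_ops.spin_lock, lock) */
    }
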
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853321db..3089f70c0c52 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -58,7 +58,7 @@ | |||
58 | #if (NR_CPUS < 256) | 58 | #if (NR_CPUS < 256) |
59 | #define TICKET_SHIFT 8 | 59 | #define TICKET_SHIFT 8 |
60 | 60 | ||
61 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 61 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
62 | { | 62 | { |
63 | short inc = 0x0100; | 63 | short inc = 0x0100; |
64 | 64 | ||
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | |||
77 | : "memory", "cc"); | 77 | : "memory", "cc"); |
78 | } | 78 | } |
79 | 79 | ||
80 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 80 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
81 | { | 81 | { |
82 | int tmp, new; | 82 | int tmp, new; |
83 | 83 | ||
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | |||
96 | return tmp; | 96 | return tmp; |
97 | } | 97 | } |
98 | 98 | ||
99 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 99 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
100 | { | 100 | { |
101 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" | 101 | asm volatile(UNLOCK_LOCK_PREFIX "incb %0" |
102 | : "+m" (lock->slock) | 102 | : "+m" (lock->slock) |
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | |||
106 | #else | 106 | #else |
107 | #define TICKET_SHIFT 16 | 107 | #define TICKET_SHIFT 16 |
108 | 108 | ||
109 | static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | 109 | static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) |
110 | { | 110 | { |
111 | int inc = 0x00010000; | 111 | int inc = 0x00010000; |
112 | int tmp; | 112 | int tmp; |
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) | |||
127 | : "memory", "cc"); | 127 | : "memory", "cc"); |
128 | } | 128 | } |
129 | 129 | ||
130 | static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | 130 | static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) |
131 | { | 131 | { |
132 | int tmp; | 132 | int tmp; |
133 | int new; | 133 | int new; |
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) | |||
149 | return tmp; | 149 | return tmp; |
150 | } | 150 | } |
151 | 151 | ||
152 | static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | 152 | static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) |
153 | { | 153 | { |
154 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" | 154 | asm volatile(UNLOCK_LOCK_PREFIX "incw %0" |
155 | : "+m" (lock->slock) | 155 | : "+m" (lock->slock) |
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) | |||
158 | } | 158 | } |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) | 161 | static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) |
162 | { | 162 | { |
163 | int tmp = ACCESS_ONCE(lock->slock); | 163 | int tmp = ACCESS_ONCE(lock->slock); |
164 | 164 | ||
165 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); | 165 | return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); |
166 | } | 166 | } |
167 | 167 | ||
168 | static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | 168 | static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) |
169 | { | 169 | { |
170 | int tmp = ACCESS_ONCE(lock->slock); | 170 | int tmp = ACCESS_ONCE(lock->slock); |
171 | 171 | ||
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) | |||
174 | 174 | ||
175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS | 175 | #ifndef CONFIG_PARAVIRT_SPINLOCKS |
176 | 176 | ||
177 | static inline int __raw_spin_is_locked(raw_spinlock_t *lock) | 177 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
178 | { | 178 | { |
179 | return __ticket_spin_is_locked(lock); | 179 | return __ticket_spin_is_locked(lock); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline int __raw_spin_is_contended(raw_spinlock_t *lock) | 182 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) |
183 | { | 183 | { |
184 | return __ticket_spin_is_contended(lock); | 184 | return __ticket_spin_is_contended(lock); |
185 | } | 185 | } |
186 | #define __raw_spin_is_contended __raw_spin_is_contended | 186 | #define arch_spin_is_contended arch_spin_is_contended |
187 | 187 | ||
188 | static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) | 188 | static __always_inline void arch_spin_lock(arch_spinlock_t *lock) |
189 | { | 189 | { |
190 | __ticket_spin_lock(lock); | 190 | __ticket_spin_lock(lock); |
191 | } | 191 | } |
192 | 192 | ||
193 | static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) | 193 | static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) |
194 | { | 194 | { |
195 | return __ticket_spin_trylock(lock); | 195 | return __ticket_spin_trylock(lock); |
196 | } | 196 | } |
197 | 197 | ||
198 | static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) | 198 | static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) |
199 | { | 199 | { |
200 | __ticket_spin_unlock(lock); | 200 | __ticket_spin_unlock(lock); |
201 | } | 201 | } |
202 | 202 | ||
203 | static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, | 203 | static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, |
204 | unsigned long flags) | 204 | unsigned long flags) |
205 | { | 205 | { |
206 | __raw_spin_lock(lock); | 206 | arch_spin_lock(lock); |
207 | } | 207 | } |
208 | 208 | ||
209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ | 209 | #endif /* CONFIG_PARAVIRT_SPINLOCKS */ |
210 | 210 | ||
211 | static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | 211 | static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) |
212 | { | 212 | { |
213 | while (__raw_spin_is_locked(lock)) | 213 | while (arch_spin_is_locked(lock)) |
214 | cpu_relax(); | 214 | cpu_relax(); |
215 | } | 215 | } |
216 | 216 | ||
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) | |||
232 | * read_can_lock - would read_trylock() succeed? | 232 | * read_can_lock - would read_trylock() succeed? |
233 | * @lock: the rwlock in question. | 233 | * @lock: the rwlock in question. |
234 | */ | 234 | */ |
235 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) | 235 | static inline int arch_read_can_lock(arch_rwlock_t *lock) |
236 | { | 236 | { |
237 | return (int)(lock)->lock > 0; | 237 | return (int)(lock)->lock > 0; |
238 | } | 238 | } |
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) | |||
241 | * write_can_lock - would write_trylock() succeed? | 241 | * write_can_lock - would write_trylock() succeed? |
242 | * @lock: the rwlock in question. | 242 | * @lock: the rwlock in question. |
243 | */ | 243 | */ |
244 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | 244 | static inline int arch_write_can_lock(arch_rwlock_t *lock) |
245 | { | 245 | { |
246 | return (lock)->lock == RW_LOCK_BIAS; | 246 | return (lock)->lock == RW_LOCK_BIAS; |
247 | } | 247 | } |
248 | 248 | ||
249 | static inline void __raw_read_lock(raw_rwlock_t *rw) | 249 | static inline void arch_read_lock(arch_rwlock_t *rw) |
250 | { | 250 | { |
251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" | 251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" |
252 | "jns 1f\n" | 252 | "jns 1f\n" |
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) | |||
255 | ::LOCK_PTR_REG (rw) : "memory"); | 255 | ::LOCK_PTR_REG (rw) : "memory"); |
256 | } | 256 | } |
257 | 257 | ||
258 | static inline void __raw_write_lock(raw_rwlock_t *rw) | 258 | static inline void arch_write_lock(arch_rwlock_t *rw) |
259 | { | 259 | { |
260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" | 260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" |
261 | "jz 1f\n" | 261 | "jz 1f\n" |
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) | |||
264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); | 264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); |
265 | } | 265 | } |
266 | 266 | ||
267 | static inline int __raw_read_trylock(raw_rwlock_t *lock) | 267 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
268 | { | 268 | { |
269 | atomic_t *count = (atomic_t *)lock; | 269 | atomic_t *count = (atomic_t *)lock; |
270 | 270 | ||
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) | |||
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline int __raw_write_trylock(raw_rwlock_t *lock) | 277 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
278 | { | 278 | { |
279 | atomic_t *count = (atomic_t *)lock; | 279 | atomic_t *count = (atomic_t *)lock; |
280 | 280 | ||
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) | |||
284 | return 0; | 284 | return 0; |
285 | } | 285 | } |
286 | 286 | ||
287 | static inline void __raw_read_unlock(raw_rwlock_t *rw) | 287 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
288 | { | 288 | { |
289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); | 289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); |
290 | } | 290 | } |
291 | 291 | ||
292 | static inline void __raw_write_unlock(raw_rwlock_t *rw) | 292 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
293 | { | 293 | { |
294 | asm volatile(LOCK_PREFIX "addl %1, %0" | 294 | asm volatile(LOCK_PREFIX "addl %1, %0" |
295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); |
296 | } | 296 | } |
297 | 297 | ||
298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 298 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 299 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
300 | 300 | ||
301 | #define _raw_spin_relax(lock) cpu_relax() | 301 | #define arch_spin_relax(lock) cpu_relax() |
302 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define arch_read_relax(lock) cpu_relax() |
303 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define arch_write_relax(lock) cpu_relax() |
304 | 304 | ||
305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | 305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ |
306 | static inline void smp_mb__after_lock(void) { } | 306 | static inline void smp_mb__after_lock(void) { } |
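
The x86 __ticket_spin_* helpers that all of the arch_spin_* wrappers above reduce to implement a FIFO ticket lock: one word holds a "next ticket" half and a "now serving" half (split at TICKET_SHIFT, 8 bits when NR_CPUS < 256, 16 otherwise), lock takes a ticket and spins until served, unlock serves the next ticket, and the is_locked/is_contended tests just compare the two halves. A hedged C11 sketch of the same scheme without the inline assembly (demo_ names invented):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        _Atomic uint16_t next;   /* ticket dispenser: the high half in the asm */
        _Atomic uint16_t owner;  /* now serving: the low half in the asm */
    } demo_ticketlock_t;

    static void demo_ticket_lock(demo_ticketlock_t *l)
    {
        uint16_t me = atomic_fetch_add(&l->next, 1);   /* grab a ticket */
        while (atomic_load(&l->owner) != me)
            ;                                          /* spin until called */
    }

    static void demo_ticket_unlock(demo_ticketlock_t *l)
    {
        atomic_fetch_add(&l->owner, 1);                /* admit the next waiter */
    }

    static int demo_ticket_is_locked(demo_ticketlock_t *l)
    {
        return atomic_load(&l->owner) != atomic_load(&l->next);
    }
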
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c87091..dcb48b2edc11 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h | |||
@@ -5,16 +5,16 @@ | |||
5 | # error "please don't include this file directly" | 5 | # error "please don't include this file directly" |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct raw_spinlock { | 8 | typedef struct arch_spinlock { |
9 | unsigned int slock; | 9 | unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } arch_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | 12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | unsigned int lock; | 15 | unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } arch_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 18 | #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
19 | 19 | ||
20 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ | 20 | #endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d5d498fbee4b..11a5851f1f50 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2431 | continue; | 2431 | continue; |
2432 | 2432 | ||
2433 | cfg = irq_cfg(irq); | 2433 | cfg = irq_cfg(irq); |
2434 | spin_lock(&desc->lock); | 2434 | raw_spin_lock(&desc->lock); |
2435 | 2435 | ||
2436 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) | 2436 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2437 | goto unlock; | 2437 | goto unlock; |
@@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2450 | } | 2450 | } |
2451 | __get_cpu_var(vector_irq)[vector] = -1; | 2451 | __get_cpu_var(vector_irq)[vector] = -1; |
2452 | unlock: | 2452 | unlock: |
2453 | spin_unlock(&desc->lock); | 2453 | raw_spin_unlock(&desc->lock); |
2454 | } | 2454 | } |
2455 | 2455 | ||
2456 | irq_exit(); | 2456 | irq_exit(); |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b8ce165dde5d..0a0aa1cec8f1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -188,7 +188,7 @@ void dump_stack(void) | |||
188 | } | 188 | } |
189 | EXPORT_SYMBOL(dump_stack); | 189 | EXPORT_SYMBOL(dump_stack); |
190 | 190 | ||
191 | static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; | 191 | static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
192 | static int die_owner = -1; | 192 | static int die_owner = -1; |
193 | static unsigned int die_nest_count; | 193 | static unsigned int die_nest_count; |
194 | 194 | ||
@@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) | |||
207 | /* racy, but better than risking deadlock. */ | 207 | /* racy, but better than risking deadlock. */ |
208 | raw_local_irq_save(flags); | 208 | raw_local_irq_save(flags); |
209 | cpu = smp_processor_id(); | 209 | cpu = smp_processor_id(); |
210 | if (!__raw_spin_trylock(&die_lock)) { | 210 | if (!arch_spin_trylock(&die_lock)) { |
211 | if (cpu == die_owner) | 211 | if (cpu == die_owner) |
212 | /* nested oops. should stop eventually */; | 212 | /* nested oops. should stop eventually */; |
213 | else | 213 | else |
214 | __raw_spin_lock(&die_lock); | 214 | arch_spin_lock(&die_lock); |
215 | } | 215 | } |
216 | die_nest_count++; | 216 | die_nest_count++; |
217 | die_owner = cpu; | 217 | die_owner = cpu; |
@@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) | |||
231 | die_nest_count--; | 231 | die_nest_count--; |
232 | if (!die_nest_count) | 232 | if (!die_nest_count) |
233 | /* Nest count reaches zero, release the lock. */ | 233 | /* Nest count reaches zero, release the lock. */ |
234 | __raw_spin_unlock(&die_lock); | 234 | arch_spin_unlock(&die_lock); |
235 | raw_local_irq_restore(flags); | 235 | raw_local_irq_restore(flags); |
236 | oops_exit(); | 236 | oops_exit(); |
237 | 237 | ||
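
oops_begin() above cannot take die_lock unconditionally: a CPU that oopses again while already printing an oops would deadlock on its own lock. Hence the trylock-plus-owner dance in the hunk: trylock first, and only block if some other CPU is the current die_owner; a nest counter makes the matching oops_end() release the lock only at the outermost level. A self-contained sketch of that recursion-tolerant pattern (C11 atomic_flag standing in for the arch spinlock; the real code also keeps interrupts disabled throughout):

    #include <stdatomic.h>

    static atomic_flag die_lock = ATOMIC_FLAG_INIT;
    static int die_owner = -1;
    static unsigned int die_nest_count;

    void demo_oops_begin(int cpu)
    {
        if (atomic_flag_test_and_set(&die_lock) && cpu != die_owner)
            while (atomic_flag_test_and_set(&die_lock))
                ;                 /* another CPU owns it: wait our turn */
        /* if cpu == die_owner, this is a nested oops on the same CPU */
        die_nest_count++;
        die_owner = cpu;
    }

    void demo_oops_end(void)
    {
        die_owner = -1;
        if (--die_nest_count == 0)
            atomic_flag_clear(&die_lock);
    }
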
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 664bcb7384ac..91fd0c70a18a 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
149 | if (!desc) | 149 | if (!desc) |
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | spin_lock_irqsave(&desc->lock, flags); | 152 | raw_spin_lock_irqsave(&desc->lock, flags); |
153 | for_each_online_cpu(j) | 153 | for_each_online_cpu(j) |
154 | any_count |= kstat_irqs_cpu(i, j); | 154 | any_count |= kstat_irqs_cpu(i, j); |
155 | action = desc->action; | 155 | action = desc->action; |
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
170 | 170 | ||
171 | seq_putc(p, '\n'); | 171 | seq_putc(p, '\n'); |
172 | out: | 172 | out: |
173 | spin_unlock_irqrestore(&desc->lock, flags); | 173 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
174 | return 0; | 174 | return 0; |
175 | } | 175 | } |
176 | 176 | ||
@@ -294,12 +294,12 @@ void fixup_irqs(void) | |||
294 | continue; | 294 | continue; |
295 | 295 | ||
296 | /* interrupts are disabled at this point */ | 296 | /* interrupts are disabled at this point */ |
297 | spin_lock(&desc->lock); | 297 | raw_spin_lock(&desc->lock); |
298 | 298 | ||
299 | affinity = desc->affinity; | 299 | affinity = desc->affinity; |
300 | if (!irq_has_action(irq) || | 300 | if (!irq_has_action(irq) || |
301 | cpumask_equal(affinity, cpu_online_mask)) { | 301 | cpumask_equal(affinity, cpu_online_mask)) { |
302 | spin_unlock(&desc->lock); | 302 | raw_spin_unlock(&desc->lock); |
303 | continue; | 303 | continue; |
304 | } | 304 | } |
305 | 305 | ||
@@ -326,7 +326,7 @@ void fixup_irqs(void) | |||
326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | 326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) |
327 | desc->chip->unmask(irq); | 327 | desc->chip->unmask(irq); |
328 | 328 | ||
329 | spin_unlock(&desc->lock); | 329 | raw_spin_unlock(&desc->lock); |
330 | 330 | ||
331 | if (break_affinity && set_affinity) | 331 | if (break_affinity && set_affinity) |
332 | printk("Broke affinity for irq %i\n", irq); | 332 | printk("Broke affinity for irq %i\n", irq); |
@@ -356,10 +356,10 @@ void fixup_irqs(void) | |||
356 | irq = __get_cpu_var(vector_irq)[vector]; | 356 | irq = __get_cpu_var(vector_irq)[vector]; |
357 | 357 | ||
358 | desc = irq_to_desc(irq); | 358 | desc = irq_to_desc(irq); |
359 | spin_lock(&desc->lock); | 359 | raw_spin_lock(&desc->lock); |
360 | if (desc->chip->retrigger) | 360 | if (desc->chip->retrigger) |
361 | desc->chip->retrigger(irq); | 361 | desc->chip->retrigger(irq); |
362 | spin_unlock(&desc->lock); | 362 | raw_spin_unlock(&desc->lock); |
363 | } | 363 | } |
364 | } | 364 | } |
365 | } | 365 | } |
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 3a7c5a44082e..676b8c77a976 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c | |||
@@ -8,9 +8,9 @@ | |||
8 | #include <asm/paravirt.h> | 8 | #include <asm/paravirt.h> |
9 | 9 | ||
10 | static inline void | 10 | static inline void |
11 | default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 11 | default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
12 | { | 12 | { |
13 | __raw_spin_lock(lock); | 13 | arch_spin_lock(lock); |
14 | } | 14 | } |
15 | 15 | ||
16 | struct pv_lock_ops pv_lock_ops = { | 16 | struct pv_lock_ops pv_lock_ops = { |
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index eed156851f5d..0aa5fed8b9e6 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; | |||
33 | * we want to have the fastest, inlined, non-debug version | 33 | * we want to have the fastest, inlined, non-debug version |
34 | * of a critical section, to be able to prove TSC time-warps: | 34 | * of a critical section, to be able to prove TSC time-warps: |
35 | */ | 35 | */ |
36 | static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; | 36 | static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
37 | 37 | ||
38 | static __cpuinitdata cycles_t last_tsc; | 38 | static __cpuinitdata cycles_t last_tsc; |
39 | static __cpuinitdata cycles_t max_warp; | 39 | static __cpuinitdata cycles_t max_warp; |
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) | |||
62 | * previous TSC that was measured (possibly on | 62 | * previous TSC that was measured (possibly on |
63 | * another CPU) and update the previous TSC timestamp. | 63 | * another CPU) and update the previous TSC timestamp. |
64 | */ | 64 | */ |
65 | __raw_spin_lock(&sync_lock); | 65 | arch_spin_lock(&sync_lock); |
66 | prev = last_tsc; | 66 | prev = last_tsc; |
67 | rdtsc_barrier(); | 67 | rdtsc_barrier(); |
68 | now = get_cycles(); | 68 | now = get_cycles(); |
69 | rdtsc_barrier(); | 69 | rdtsc_barrier(); |
70 | last_tsc = now; | 70 | last_tsc = now; |
71 | __raw_spin_unlock(&sync_lock); | 71 | arch_spin_unlock(&sync_lock); |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * Be nice every now and then (and also check whether | 74 | * Be nice every now and then (and also check whether |
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) | |||
87 | * we saw a time-warp of the TSC going backwards: | 87 | * we saw a time-warp of the TSC going backwards: |
88 | */ | 88 | */ |
89 | if (unlikely(prev > now)) { | 89 | if (unlikely(prev > now)) { |
90 | __raw_spin_lock(&sync_lock); | 90 | arch_spin_lock(&sync_lock); |
91 | max_warp = max(max_warp, prev - now); | 91 | max_warp = max(max_warp, prev - now); |
92 | nr_warps++; | 92 | nr_warps++; |
93 | __raw_spin_unlock(&sync_lock); | 93 | arch_spin_unlock(&sync_lock); |
94 | } | 94 | } |
95 | } | 95 | } |
96 | WARN(!(now-start), | 96 | WARN(!(now-start), |
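
check_tsc_warp() is one of the few places that uses the arch-level lock directly, precisely because it must not carry any debug or lockdep overhead: the critical section is the measurement. Each CPU locks, reads the last globally recorded TSC, takes its own timestamp, publishes it, and unlocks; seeing prev > now afterwards means the TSC went backwards between CPUs. A schematic of that protocol (demo_ names invented; demo_read_tsc() stands in for the rdtsc_barrier()/get_cycles() sequence):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_flag sync_lock = ATOMIC_FLAG_INIT;
    static uint64_t last_tsc, max_warp;

    extern uint64_t demo_read_tsc(void);  /* assumed: a serialized cycle read */

    void demo_check_warp_once(void)
    {
        while (atomic_flag_test_and_set(&sync_lock))
            ;                                   /* arch_spin_lock(&sync_lock) */
        uint64_t prev = last_tsc;
        uint64_t now  = demo_read_tsc();
        last_tsc = now;
        atomic_flag_clear(&sync_lock);          /* arch_spin_unlock(&sync_lock) */

        if (prev > now) {                       /* time warped backwards */
            while (atomic_flag_test_and_set(&sync_lock))
                ;
            if (prev - now > max_warp)
                max_warp = prev - now;
            atomic_flag_clear(&sync_lock);
        }
    }
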
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141108df..24ded31b5aec 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -120,14 +120,14 @@ struct xen_spinlock { | |||
120 | unsigned short spinners; /* count of waiting cpus */ | 120 | unsigned short spinners; /* count of waiting cpus */ |
121 | }; | 121 | }; |
122 | 122 | ||
123 | static int xen_spin_is_locked(struct raw_spinlock *lock) | 123 | static int xen_spin_is_locked(struct arch_spinlock *lock) |
124 | { | 124 | { |
125 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 125 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
126 | 126 | ||
127 | return xl->lock != 0; | 127 | return xl->lock != 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xen_spin_is_contended(struct raw_spinlock *lock) | 130 | static int xen_spin_is_contended(struct arch_spinlock *lock) |
131 | { | 131 | { |
132 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 132 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
133 | 133 | ||
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock) | |||
136 | return xl->spinners != 0; | 136 | return xl->spinners != 0; |
137 | } | 137 | } |
138 | 138 | ||
139 | static int xen_spin_trylock(struct raw_spinlock *lock) | 139 | static int xen_spin_trylock(struct arch_spinlock *lock) |
140 | { | 140 | { |
141 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 141 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
142 | u8 old = 1; | 142 | u8 old = 1; |
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock | |||
181 | __get_cpu_var(lock_spinners) = prev; | 181 | __get_cpu_var(lock_spinners) = prev; |
182 | } | 182 | } |
183 | 183 | ||
184 | static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) | 184 | static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) |
185 | { | 185 | { |
186 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 186 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
187 | struct xen_spinlock *prev; | 187 | struct xen_spinlock *prev; |
@@ -254,7 +254,7 @@ out: | |||
254 | return ret; | 254 | return ret; |
255 | } | 255 | } |
256 | 256 | ||
257 | static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) | 257 | static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) |
258 | { | 258 | { |
259 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 259 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
260 | unsigned timeout; | 260 | unsigned timeout; |
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) | |||
291 | spin_time_accum_total(start_spin); | 291 | spin_time_accum_total(start_spin); |
292 | } | 292 | } |
293 | 293 | ||
294 | static void xen_spin_lock(struct raw_spinlock *lock) | 294 | static void xen_spin_lock(struct arch_spinlock *lock) |
295 | { | 295 | { |
296 | __xen_spin_lock(lock, false); | 296 | __xen_spin_lock(lock, false); |
297 | } | 297 | } |
298 | 298 | ||
299 | static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) | 299 | static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) |
300 | { | 300 | { |
301 | __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); | 301 | __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); |
302 | } | 302 | } |
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) | |||
317 | } | 317 | } |
318 | } | 318 | } |
319 | 319 | ||
320 | static void xen_spin_unlock(struct raw_spinlock *lock) | 320 | static void xen_spin_unlock(struct arch_spinlock *lock) |
321 | { | 321 | { |
322 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; | 322 | struct xen_spinlock *xl = (struct xen_spinlock *)lock; |
323 | 323 | ||
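The xen/spinlock.c changes are likewise signature-only: every paravirt spinlock callback switches from struct raw_spinlock * to struct arch_spinlock *, while the cast to the Xen-private struct xen_spinlock stays untouched. A hedged sketch of how these callbacks plug into the paravirt layer, assuming the pv_lock_ops layout of this kernel series; the BUILD_BUG_ON line is an illustrative sanity check that the xen_spinlock overlay fits inside the arch lock's storage, not a line from this patch.

    /* Sketch: register the Xen backend as the pv spinlock implementation. */
    void __init xen_init_spinlocks(void)
    {
            /* The casts above are only safe if the overlay fits. */
            BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));

            pv_lock_ops.spin_is_locked = xen_spin_is_locked;
            pv_lock_ops.spin_is_contended = xen_spin_is_contended;
            pv_lock_ops.spin_lock = xen_spin_lock;
            pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
            pv_lock_ops.spin_trylock = xen_spin_trylock;
            pv_lock_ops.spin_unlock = xen_spin_unlock;
    }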
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a1badb32fcda..8cd38484e130 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c | |||
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
90 | } | 90 | } |
91 | 91 | ||
92 | if (i < NR_IRQS) { | 92 | if (i < NR_IRQS) { |
93 | spin_lock_irqsave(&irq_desc[i].lock, flags); | 93 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); |
94 | action = irq_desc[i].action; | 94 | action = irq_desc[i].action; |
95 | if (!action) | 95 | if (!action) |
96 | goto skip; | 96 | goto skip; |
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
109 | 109 | ||
110 | seq_putc(p, '\n'); | 110 | seq_putc(p, '\n'); |
111 | skip: | 111 | skip: |
112 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 112 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
113 | } else if (i == NR_IRQS) { | 113 | } else if (i == NR_IRQS) { |
114 | seq_printf(p, "NMI: "); | 114 | seq_printf(p, "NMI: "); |
115 | for_each_online_cpu(j) | 115 | for_each_online_cpu(j) |
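The xtensa hunk is the arch-side fallout of irq_desc.lock becoming a raw_spinlock_t: every per-architecture show_interrupts() walker must now pair raw_spin_lock_irqsave() with raw_spin_unlock_irqrestore() on it. A minimal sketch of the converted access pattern, assuming the array-based irq_desc[] of this series; show_one_irq() is a hypothetical helper, not code from the patch.

    #include <linux/interrupt.h>    /* struct irqaction */
    #include <linux/irq.h>          /* irq_desc[], its raw_spinlock_t lock */
    #include <linux/seq_file.h>

    /* Hypothetical helper: print one IRQ line under the descriptor lock. */
    static void show_one_irq(struct seq_file *p, int i)
    {
            struct irqaction *action;
            unsigned long flags;

            raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
            action = irq_desc[i].action;
            if (action)
                    seq_printf(p, "%3d: %s\n", i, action->name);
            raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
    }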