author     Thomas Gleixner <tglx@linutronix.de>    2009-12-02 13:49:50 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2009-12-14 17:55:32 -0500
commit     445c89514be242b1b0080056d50bdc1b72adeb5c (patch)
tree       96ed062794ad0fb6a649713c83f009eea382e8b2 /arch/s390
parent     6b6b4792f89346e47437682c7ba3438e6681c0f9 (diff)
locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture
specific implementations. raw_spin_* would be the ideal name space for
the spinlocks which are not converted to sleeping locks in preempt-rt.
Linus suggested converting the raw_ locks to arch_ locks and cleaning up
the name space instead of using an artificial name like core_spin,
atomic_spin or whatever.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
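
For orientation before the diff: on s390 the conversion is a pure rename of the
lock type; the struct layout and the compare-and-swap locking protocol are
untouched. The sketch below is reconstructed from the hunks that follow. The
middle of __raw_spin_lock() (the compare-and-swap fast path and the likely()
check) lies outside the hunk context, so those lines are an assumption about
the surrounding, unchanged code, not something this patch shows.

/*
 * Sketch of the renamed s390 lock type and its lock fast path.
 * Reconstructed from the hunks below; lines marked "assumed" are not
 * visible in this diff and only approximate the unchanged code.
 */
typedef struct {
        volatile unsigned int owner_cpu;   /* 0 when unlocked (see unlock/relax below) */
} __attribute__ ((aligned (4))) arch_spinlock_t;

static inline void __raw_spin_lock(arch_spinlock_t *lp)
{
        int old;

        /* assumed: try to take the lock with one compare-and-swap */
        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return;
        /* contended: fall back to the out-of-line wait loop in
         * arch/s390/lib/spinlock.c, whose parameter type is renamed below */
        _raw_spin_lock_wait(lp);
}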
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/spinlock.h        16
-rw-r--r--  arch/s390/include/asm/spinlock_types.h   2
-rw-r--r--  arch/s390/lib/spinlock.c                  8
3 files changed, 13 insertions, 13 deletions
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..6121fa4b83d9 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock,
         do { while (__raw_spin_is_locked(lock)) \
                 _raw_spin_relax(lock); } while (0)

-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);

-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
 {
         int old;

@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp)
         _raw_spin_lock_wait(lp);
 }

-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
                                          unsigned long flags)
 {
         int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
         _raw_spin_lock_wait_flags(lp, flags);
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
 {
         int old;

@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
         return _raw_spin_trylock_retry(lp);
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
 {
         _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de04..a93638eee3f7 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,7 +7,7 @@

 typedef struct {
         volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;

 #define __RAW_SPIN_LOCK_UNLOCKED        { 0 }

diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b7..d4cbf71a6077 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
                 _raw_yield();
 }

-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
 {
         int count = spin_retry;
         unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);

-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
         int count = spin_retry;
         unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait_flags);

-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
 {
         unsigned int cpu = ~smp_processor_id();
         int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);

-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
 {
         unsigned int cpu = lock->owner_cpu;
         if (cpu != 0)
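
For wider context, not part of this arch/s390 diffstat: the point of moving the
architecture lock into the arch_ name space is that the generic spinlock_t can
keep wrapping it unchanged, while the raw_spin_* names become free for the
non-sleeping locks planned for preempt-rt. A rough sketch of that generic
wrapper follows; include/linux/spinlock_types.h is touched by a companion
patch, so the exact fields and config guards shown here are assumptions for
illustration, not something this diff contains.

/*
 * Approximate shape of the generic wrapper after the rename
 * (assumed layout of include/linux/spinlock_types.h; debug fields
 * and their config guards are illustrative only).
 */
typedef struct {
        arch_spinlock_t raw_lock;          /* the per-architecture lock word */
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned int magic, owner_cpu;
        void *owner;
#endif
} spinlock_t;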