Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--   arch/s390/lib/spinlock.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4cbf71a6077..f4596452f072 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(arch_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp)
 				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(arch_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(arch_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
 		_raw_yield_cpu(~cpu);
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
 void _raw_read_lock_wait(raw_rwlock_t *rw)
 {
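
For readers who want to see the locking pattern above outside the kernel tree, here is a minimal user-space sketch of what arch_spin_lock_wait() does: spin with a bounded retry budget, try to claim the lock with compare-and-swap, and yield once the budget runs out. C11 atomics and sched_yield() stand in for the s390-specific _raw_compare_and_swap() and diagnose-based _raw_yield_cpu() primitives, and the names sketch_spinlock, sketch_spin_lock_wait and SPIN_RETRY are invented for this illustration; this is not the kernel implementation.

/*
 * Sketch of the spin-with-retry-and-CAS pattern used by
 * arch_spin_lock_wait() above, under the assumptions stated in the text.
 */
#include <sched.h>
#include <stdatomic.h>

#define SPIN_RETRY 1000			/* stands in for the kernel's spin_retry knob */

struct sketch_spinlock {
	atomic_uint owner_cpu;		/* 0 == unlocked, else the holder's id */
};

static void sketch_spin_lock_wait(struct sketch_spinlock *lp, unsigned int cpu)
{
	int count = SPIN_RETRY;

	while (1) {
		if (count-- <= 0) {
			/* Retries exhausted: give the lock holder a chance to run. */
			sched_yield();
			count = SPIN_RETRY;
		}
		/* Cheap read first; skip the CAS while the lock is still held. */
		if (atomic_load_explicit(&lp->owner_cpu, memory_order_relaxed) != 0)
			continue;
		/* Claim the lock by swapping 0 -> cpu, like _raw_compare_and_swap(). */
		unsigned int expected = 0;
		if (atomic_compare_exchange_strong_explicit(&lp->owner_cpu,
							    &expected, cpu,
							    memory_order_acquire,
							    memory_order_relaxed))
			return;
	}
}

In the kernel code the caller passes cpu = ~smp_processor_id() as the owner value, so even CPU 0 stores a nonzero owner_cpu and 0 can keep meaning "unlocked"; the same convention is why arch_spin_relax() and the yield paths complement the value again with ~cpu before yielding to the owner.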
