| author | Thomas Gleixner <tglx@linutronix.de> | 2009-12-02 13:49:50 -0500 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2009-12-14 17:55:32 -0500 |
| commit | 445c89514be242b1b0080056d50bdc1b72adeb5c | |
| tree | 96ed062794ad0fb6a649713c83f009eea382e8b2 /arch/x86/include/asm/paravirt.h | |
| parent | 6b6b4792f89346e47437682c7ba3438e6681c0f9 | |
locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture-specific
implementations. raw_spin_* would be the ideal namespace for the spinlocks
which are not converted to sleeping locks in preempt-rt. Linus suggested
converting the raw_ locks to arch_ locks and cleaning up the namespace
instead of using an artificial name like core_spin, atomic_spin or whatever.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
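To make the resulting layering concrete, here is a minimal sketch of how the names stack once the series is complete; this is not the kernel source (the real definitions live in arch/x86/include/asm/spinlock_types.h and include/linux/spinlock_types.h and carry more fields), just an illustration of the intended naming: arch_spinlock_t is the architecture-level lock word, and the lockdep-aware raw_spinlock_t wraps it.

```c
/*
 * Sketch only: simplified post-conversion naming layers, with
 * debug/lockdep fields elided.
 */
typedef struct arch_spinlock {
	unsigned int slock;		/* x86 ticket-lock word of that era */
} arch_spinlock_t;

typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* operated on by the arch-level ops */
	/* lockdep bookkeeping elided */
} raw_spinlock_t;
```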
Diffstat (limited to 'arch/x86/include/asm/paravirt.h')

```
 arch/x86/include/asm/paravirt.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
```
```diff
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859c..5655f75f10b7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
 #define __raw_spin_is_contended	__raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
```
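For context, the PVOP_CALL1/PVOP_VCALL1 wrappers in the hunk above dispatch through the pv_lock_ops function table. A sketch of that table's shape after this rename, approximating the 2.6.33-era arch/x86/include/asm/paravirt_types.h (unrelated members omitted):

```c
/*
 * Sketch of the paravirt lock-ops table the wrappers dispatch
 * through; contents approximate the header of that era.
 */
struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock,
				unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};
```

A hypervisor such as Xen fills this table with its own lock implementations, while bare metal keeps the native ticket-lock ops; the patch therefore only changes the argument type flowing through the indirection, matching the "No functional change" note above.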