author     Kyle McMartin <kyle@redhat.com>                  2009-02-08 17:39:58 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-02-09 11:15:39 -0500
commit     a5ef7ca0e2636bad0ccd07b996d775348ae2b65e (patch)
tree       fcc1ef7e4bd95ce58d5bbb74ad129bdc248252ca
parent     d5b562330ec766292a3ac54ae5e0673610bd5b3d (diff)
x86: spinlocks: define dummy __raw_spin_is_contended
Architectures other than mips and x86 are not using ticket spinlocks.
Therefore, the contention on the lock is meaningless, since there is nobody
known to be waiting on it (arguably /fairly/ unfair locks).

Dummy it out to return 0 on other architectures.

Signed-off-by: Kyle McMartin <kyle@redhat.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
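The opt-in pattern the patch relies on is easiest to see in a condensed,
stand-alone sketch (an illustrative user-space program, not the kernel
headers themselves): an architecture that implements
__raw_spin_is_contended() also defines a same-named macro, and the generic
header falls back to a constant 0 when that macro is absent.

    /* Sketch of the opt-in pattern used by this patch.  The function body
     * is a stand-in; real architectures compare ticket counters. */
    #include <stdio.h>

    /* "Architecture" side: remove this block to exercise the fallback. */
    static inline int __raw_spin_is_contended(void *lock)
    {
            (void)lock;
            return 1;       /* pretend head and tail tickets differ */
    }
    #define __raw_spin_is_contended __raw_spin_is_contended

    /* "Generic" side, mirroring include/linux/spinlock.h after the patch. */
    #ifdef __raw_spin_is_contended
    #define spin_is_contended(lock) __raw_spin_is_contended(lock)
    #else
    #define spin_is_contended(lock) (((void)(lock), 0))
    #endif

    int main(void)
    {
            int dummy_lock = 0;
            printf("contended: %d\n", spin_is_contended(&dummy_lock));
            return 0;
    }

Self-referential macros do not recurse, so defining __raw_spin_is_contended
as itself simply marks the symbol as "provided by this architecture" while
leaving the function call intact.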
 arch/mips/include/asm/spinlock.h | 1 +
 arch/x86/include/asm/paravirt.h  | 1 +
 arch/x86/include/asm/spinlock.h  | 1 +
 include/linux/spinlock.h         | 5 +++++
 4 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 1a1f320c30d8..0884947ebe27 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -51,6 +51,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 
 	return (((counters >> 14) - counters) & 0x1fff) > 1;
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ba3e2ff6aedc..c09a14127584 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1402,6 +1402,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index d17c91981da2..8247e94ac6b1 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -245,6 +245,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fccced46..a0c66a2e00ad 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -124,7 +124,12 @@ do { \
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define spin_is_contended(lock) ((lock)->break_lock)
 #else
+
+#ifdef __raw_spin_is_contended
 #define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock)	(((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
 #endif
 
 /**
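As a hypothetical illustration of the effect (not code from this patch), a
caller that backs off under contention might look like the fragment below;
on architectures without ticket spinlocks the dummy definition makes
spin_is_contended() a constant 0, so the branch is never taken and the
compiler can discard it entirely.

    /* Hypothetical caller; "my_lock" is an illustrative spinlock_t. */
    if (spin_is_contended(&my_lock)) {
            spin_unlock(&my_lock);
            cpu_relax();            /* let the waiting CPU make progress */
            spin_lock(&my_lock);
    }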