author	Jeremy Fitzhardinge <jeremy@goop.org>	2013-08-09 10:21:51 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-08-09 10:53:14 -0400
commit	b798df09f919c52823110a74bd568c6a4e98e6b2 (patch)
tree	b784c1a97e891ade376bf68c8718608e5b9b4bf5 /arch
parent	8db732668a48e93d00d881517a08136e8fa71000 (diff)
x86, ticketlock: Collapse a layer of functions
Now that the paravirtualization layer doesn't exist at the spinlock
level any more, we can collapse the __ticket_ functions into the
arch_ functions.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-4-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
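For orientation, the head/tail ticket-lock scheme that these arch_ functions implement can be sketched in plain user-space C. The struct and function names below mirror the kernel's (__ticket_t, head_tail, tickets.head/tail), but the atomics are GCC __atomic builtins rather than the kernel's wide-xadd and cmpxchg wrappers; treat it as an illustrative sketch of the technique, not the kernel implementation.

	#include <stdint.h>
	#include <stdbool.h>

	typedef uint16_t __ticket_t;

	typedef struct {
		union {
			uint32_t head_tail;	/* both halves as one word */
			struct { __ticket_t head, tail; } tickets;
		};
	} arch_spinlock_t;

	static inline void arch_spin_lock(arch_spinlock_t *lock)
	{
		/* take the next ticket number (kernel: one wide xadd) */
		__ticket_t me = __atomic_fetch_add(&lock->tickets.tail, 1,
						   __ATOMIC_ACQUIRE);
		/* spin until head reaches our ticket */
		while (__atomic_load_n(&lock->tickets.head,
				       __ATOMIC_ACQUIRE) != me)
			;	/* cpu_relax() in the kernel */
	}

	static inline bool arch_spin_trylock(arch_spinlock_t *lock)
	{
		arch_spinlock_t old, new;

		old.head_tail = __atomic_load_n(&lock->head_tail,
						__ATOMIC_RELAXED);
		if (old.tickets.head != old.tickets.tail)
			return false;	/* held or contended */

		new.head_tail = old.head_tail;
		new.tickets.tail++;	/* claim the next ticket */

		/* cmpxchg on the whole word: succeeds only if nobody raced us */
		return __atomic_compare_exchange_n(&lock->head_tail,
						   &old.head_tail,
						   new.head_tail, false,
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}

	static inline void arch_spin_unlock(arch_spinlock_t *lock)
	{
		/* pass the lock to the next waiter in line */
		__atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
	}

The point of the patch is visible in this shape: lock/trylock/unlock are the arch_ entry points directly, with no __ticket_spin_* layer forwarding to them.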
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/spinlock.h	35
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4d542444bea3..744241048a11 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -76,7 +76,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	register struct __raw_tickets inc = { .tail = 1 };
 
@@ -96,7 +96,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
@@ -110,7 +110,7 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_t next = lock->tickets.head + 1;
 
@@ -118,46 +118,21 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	__ticket_unlock_kick(lock, next);
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return tmp.tail != tmp.head;
 }
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return (__ticket_t)(tmp.tail - tmp.head) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						 unsigned long flags)
 {
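The comment preserved at the top of the first hunk ("a wide xadd increment of the low part would carry up and contaminate the high part") is why the combined xadd in arch_spin_lock adds to tail, which sits in the high half of head_tail on x86. A small standalone illustration of that carry hazard; the 16-bit halves and shift are illustrative stand-ins, not the kernel's TICKET_SHIFT machinery:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		/* head = 0xffff in the low half, tail = 0x0001 in the high half */
		uint32_t head_tail = (0x0001u << 16) | 0xffffu;

		/* taking a ticket: add to the HIGH half -- head is untouched */
		uint32_t took = head_tail + (1u << 16);
		assert((took & 0xffffu) == 0xffffu);	/* head preserved */

		/* a wide add to the LOW half overflows head into tail */
		uint32_t bad = head_tail + 1u;
		assert((bad >> 16) == 0x0002u);		/* tail contaminated */
		return 0;
	}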