about summary refs log tree commit diff stats
path: root/arch/x86/include
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2010-07-13 18:14:26 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2011-08-29 16:46:34 -0400
commit229855d6f3b40d01a903120c433d75e483a0b06d (patch)
treec11667543b5c5953c71e986a8a98a2c4600dbdad /arch/x86/include
parent2994488fe5bb721de1ded53af1a2fc41f47f6ddc (diff)
x86, ticketlock: Make __ticket_spin_trylock common
Make trylock code common regardless of ticket size. (Also, rename arch_spinlock.slock to head_tail.) Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--arch/x86/include/asm/spinlock.h51
-rw-r--r--arch/x86/include/asm/spinlock_types.h6
2 files changed, 16 insertions, 41 deletions
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b69e0b473de6..f5695eeb83ff 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -69,60 +69,33 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
69 barrier(); /* make sure nothing creeps before the lock is taken */ 69 barrier(); /* make sure nothing creeps before the lock is taken */
70} 70}
71 71
72#if (NR_CPUS < 256)
73static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 72static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
74{ 73{
75 unsigned int tmp, new; 74 arch_spinlock_t old, new;
76 75
77 asm volatile("movzwl %2, %0\n\t" 76 old.tickets = ACCESS_ONCE(lock->tickets);
78 "cmpb %h0,%b0\n\t" 77 if (old.tickets.head != old.tickets.tail)
79 "leal 0x100(%" REG_PTR_MODE "0), %1\n\t" 78 return 0;
80 "jne 1f\n\t"
81 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
82 "1:"
83 "sete %b1\n\t"
84 "movzbl %b1,%0\n\t"
85 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
86 :
87 : "memory", "cc");
88 79
89 return tmp; 80 new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
81
82 /* cmpxchg is a full barrier, so nothing can move before it */
83 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
90} 84}
91 85
86#if (NR_CPUS < 256)
92static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 87static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
93{ 88{
94 asm volatile(UNLOCK_LOCK_PREFIX "incb %0" 89 asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
95 : "+m" (lock->slock) 90 : "+m" (lock->head_tail)
96 : 91 :
97 : "memory", "cc"); 92 : "memory", "cc");
98} 93}
99#else 94#else
100static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
101{
102 unsigned tmp;
103 unsigned new;
104
105 asm volatile("movl %2,%0\n\t"
106 "movl %0,%1\n\t"
107 "roll $16, %0\n\t"
108 "cmpl %0,%1\n\t"
109 "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
110 "jne 1f\n\t"
111 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
112 "1:"
113 "sete %b1\n\t"
114 "movzbl %b1,%0\n\t"
115 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
116 :
117 : "memory", "cc");
118
119 return tmp;
120}
121
122static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 95static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
123{ 96{
124 asm volatile(UNLOCK_LOCK_PREFIX "incw %0" 97 asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
125 : "+m" (lock->slock) 98 : "+m" (lock->head_tail)
126 : 99 :
127 : "memory", "cc"); 100 : "memory", "cc");
128} 101}
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 1c51bd231e49..8ebd5df7451e 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -9,8 +9,10 @@
9 9
10#if (CONFIG_NR_CPUS < 256) 10#if (CONFIG_NR_CPUS < 256)
11typedef u8 __ticket_t; 11typedef u8 __ticket_t;
12typedef u16 __ticketpair_t;
12#else 13#else
13typedef u16 __ticket_t; 14typedef u16 __ticket_t;
15typedef u32 __ticketpair_t;
14#endif 16#endif
15 17
16#define TICKET_SHIFT (sizeof(__ticket_t) * 8) 18#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
@@ -18,14 +20,14 @@ typedef u16 __ticket_t;
18 20
19typedef struct arch_spinlock { 21typedef struct arch_spinlock {
20 union { 22 union {
21 unsigned int slock; 23 __ticketpair_t head_tail;
22 struct __raw_tickets { 24 struct __raw_tickets {
23 __ticket_t head, tail; 25 __ticket_t head, tail;
24 } tickets; 26 } tickets;
25 }; 27 };
26} arch_spinlock_t; 28} arch_spinlock_t;
27 29
28#define __ARCH_SPIN_LOCK_UNLOCKED { { .slock = 0 } } 30#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
29 31
30#include <asm/rwlock.h> 32#include <asm/rwlock.h>
31 33