author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2010-07-02 18:26:36 -0400
committer H. Peter Anvin <hpa@linux.intel.com>                  2011-08-29 16:45:19 -0400
commit    84eb950db13ca40a0572ce9957e14723500943d6
tree      f2912697d8dbaade7fb1a948a6ba439a501fc03a
parent    8b8bc2f7311c3223213dbe346d9cc2e299fdb5eb
x86, ticketlock: Clean up types and accessors
A few cleanups to the way spinlocks are defined and accessed:

 - Define __ticket_t, which is the size of a spinlock ticket (i.e., enough
   bits to hold all the CPUs).
 - Define struct arch_spinlock as a union containing the plain slock word
   and the head and tail tickets.
 - Use head and tail to implement some of the spinlock predicates.
 - Make all ticket variables unsigned.
 - Use TICKET_SHIFT to form constants.

Most of this will be used in later patches.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
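For reference, a minimal userspace model of the layout this patch introduces. This is an illustrative sketch, not kernel code: <stdint.h> types stand in for the kernel's u8/u16, the anonymous union needs C11 (or the GNU extension), and NR_CPUS < 256 is assumed so a ticket fits in one byte.

  #include <stdint.h>

  typedef uint8_t __ticket_t;     /* assumes NR_CPUS < 256 */

  #define TICKET_SHIFT (sizeof(__ticket_t) * 8)
  #define TICKET_MASK  ((__ticket_t)((1 << TICKET_SHIFT) - 1))

  typedef struct arch_spinlock {
          union {
                  unsigned int slock;             /* whole-word view, as used by the asm */
                  struct __raw_tickets {
                          __ticket_t head, tail;  /* x86 is little-endian: head is the low byte */
                  } tickets;
          };
  } arch_spinlock_t;

  /* The lock is held while tickets are still waiting to be served,
   * i.e. while the next ticket to hand out (tail) is ahead of the
   * ticket now being served (head). */
  static inline int model_spin_is_locked(arch_spinlock_t *lock)
  {
          return lock->tickets.tail != lock->tickets.head;
  }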
 arch/x86/include/asm/spinlock.h       | 24 ++++++++++--------------
 arch/x86/include/asm/spinlock_types.h | 20 ++++++++++++++++++--
 2 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index ee67edf86fdd..ea2a04f69ca9 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -55,11 +55,9 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-#define TICKET_SHIFT 8
-
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	short inc = 0x0100;
+	unsigned short inc = 1 << TICKET_SHIFT;
 
 	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
@@ -78,7 +76,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp, new;
+	unsigned int tmp, new;
 
 	asm volatile("movzwl %2, %0\n\t"
 		     "cmpb %h0,%b0\n\t"
@@ -103,12 +101,10 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-#define TICKET_SHIFT 16
-
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	int inc = 0x00010000;
-	int tmp;
+	unsigned inc = 1 << TICKET_SHIFT;
+	unsigned tmp;
 
 	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
 		     "movzwl %w0, %2\n\t"
@@ -128,8 +124,8 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp;
-	int new;
+	unsigned tmp;
+	unsigned new;
 
 	asm volatile("movl %2,%0\n\t"
 		     "movl %0,%1\n\t"
@@ -159,16 +155,16 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+	return !!(tmp.tail ^ tmp.head);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
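As an aside, the xadd fast path that these constants feed is easier to follow in plain C. The sketch below is a rough userspace equivalent of the NR_CPUS < 256 path, with GCC's __atomic builtins standing in for the LOCK XADDW and reload loop in the asm above; ticket_lock/ticket_unlock are hypothetical names, and the 16-bit access to slock mirrors what xaddw does to the low two bytes of the word.

  /* Builds on the userspace model sketched after the commit message. */
  static void ticket_lock(arch_spinlock_t *lock)
  {
          /* Take a ticket: atomically add 1 << TICKET_SHIFT, i.e. tail++.
           * The old value returned carries our ticket in its high byte
           * (the old tail) and the then-current head in its low byte. */
          unsigned short old = __atomic_fetch_add((unsigned short *)&lock->slock,
                                                  1 << TICKET_SHIFT,
                                                  __ATOMIC_ACQUIRE);
          __ticket_t me = old >> TICKET_SHIFT;

          /* Spin until the unlocker hands the lock to our ticket number. */
          while (__atomic_load_n(&lock->tickets.head, __ATOMIC_ACQUIRE) != me)
                  ;       /* the kernel uses rep;nop (pause) here */
  }

  static void ticket_unlock(arch_spinlock_t *lock)
  {
          /* Serve the next waiter: head++. */
          __atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
  }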
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 7c7a486fcb68..1c51bd231e49 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,11 +5,27 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
+#if (CONFIG_NR_CPUS < 256)
+typedef u8  __ticket_t;
+#else
+typedef u16 __ticket_t;
+#endif
+
+#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
+#define TICKET_MASK	((__ticket_t)((1 << TICKET_SHIFT) - 1))
+
 typedef struct arch_spinlock {
-	unsigned int slock;
+	union {
+		unsigned int slock;
+		struct __raw_tickets {
+			__ticket_t head, tail;
+		} tickets;
+	};
 } arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ { .slock = 0 } }
 
 #include <asm/rwlock.h>
 
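The union is what lets the ticket accessors and the whole-word asm operate on the same memory. A quick hypothetical test of that aliasing, again building on the userspace model above (it assumes a little-endian machine such as x86, where head occupies the low byte of slock):

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          arch_spinlock_t lock = { { .slock = 0 } };      /* __ARCH_SPIN_LOCK_UNLOCKED */

          lock.slock += 1 << TICKET_SHIFT;        /* what xadd does: tail++ */
          assert(lock.tickets.head == 0 && lock.tickets.tail == 1);      /* locked */

          lock.tickets.head++;                    /* unlock: serve ticket 0 */
          assert(lock.tickets.head == lock.tickets.tail);                /* free again */

          printf("head=%d tail=%d\n", lock.tickets.head, lock.tickets.tail);
          return 0;
  }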