Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/spinlock.h | 60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index ea2a04f69ca9..5240cdefa683 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -57,21 +57,21 @@
 #if (NR_CPUS < 256)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned short inc = 1 << TICKET_SHIFT;
-
-	asm volatile (
-		LOCK_PREFIX "xaddw %w0, %1\n"
-		"1:\t"
-		"cmpb %h0, %b0\n\t"
-		"je 2f\n\t"
-		"rep ; nop\n\t"
-		"movb %1, %b0\n\t"
-		/* don't need lfence here, because loads are in-order */
-		"jmp 1b\n"
-		"2:"
-		: "+Q" (inc), "+m" (lock->slock)
-		:
-		: "memory", "cc");
+	register union {
+		struct __raw_tickets tickets;
+		unsigned short slock;
+	} inc = { .slock = 1 << TICKET_SHIFT };
+
+	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
+		      : "+Q" (inc), "+m" (lock->slock) : : "memory", "cc");
+
+	for (;;) {
+		if (inc.tickets.head == inc.tickets.tail)
+			break;
+		cpu_relax();
+		inc.tickets.head = ACCESS_ONCE(lock->tickets.head);
+	}
+	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
@@ -104,22 +104,22 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned inc = 1 << TICKET_SHIFT;
-	unsigned tmp;
+	__ticket_t tmp;
 
-	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
-		     "movzwl %w0, %2\n\t"
-		     "shrl $16, %0\n\t"
-		     "1:\t"
-		     "cmpl %0, %2\n\t"
-		     "je 2f\n\t"
-		     "rep ; nop\n\t"
-		     "movzwl %1, %2\n\t"
-		     /* don't need lfence here, because loads are in-order */
-		     "jmp 1b\n"
-		     "2:"
-		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
-		     :
-		     : "memory", "cc");
+	asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
+		     : "+r" (inc), "+m" (lock->slock)
+		     : : "memory", "cc");
+
+	tmp = inc;
+	inc >>= TICKET_SHIFT;
+
+	for (;;) {
+		if ((__ticket_t)inc == tmp)
+			break;
+		cpu_relax();
+		tmp = ACCESS_ONCE(lock->tickets.head);
+	}
+	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
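
Note: both hunks keep only the locked xadd in inline assembly and move the wait loop into C, spinning with cpu_relax() and re-reading lock->tickets.head through ACCESS_ONCE() until it matches the ticket handed out by the xadd. Below is a minimal userspace sketch of the same ticket-lock idea, not the kernel code: C11 atomics stand in for the LOCK_PREFIX xadd, and plain struct fields stand in for arch_spinlock_t, __raw_tickets, TICKET_SHIFT, ACCESS_ONCE(), cpu_relax() and barrier().

/*
 * Userspace ticket-lock sketch; illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ticket_lock {
        _Atomic uint16_t head;  /* ticket currently being served */
        _Atomic uint16_t tail;  /* next ticket to hand out */
};

static void ticket_lock(struct ticket_lock *lock)
{
        /* Grab the next ticket; this plays the role of the locked xadd. */
        uint16_t me = atomic_fetch_add_explicit(&lock->tail, 1,
                                                memory_order_relaxed);

        /* Spin until our ticket is served, like the new for (;;) loop. */
        while (atomic_load_explicit(&lock->head, memory_order_acquire) != me)
                ;       /* the kernel uses cpu_relax() ("rep; nop") here */
}

static void ticket_unlock(struct ticket_lock *lock)
{
        /* Serve the next waiter, as __ticket_spin_unlock() does. */
        atomic_fetch_add_explicit(&lock->head, 1, memory_order_release);
}

int main(void)
{
        struct ticket_lock lock = { 0, 0 };

        ticket_lock(&lock);
        puts("lock taken");
        ticket_unlock(&lock);
        puts("lock released");
        return 0;
}

One difference from the patched kernel code: the single xaddw/xaddl there returns head and tail together, so the uncontended path needs no separate load of head, whereas this sketch takes the ticket and then reads head in two distinct atomic operations.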