Diffstat (limited to 'arch/x86/include/asm/spinlock.h')
-rw-r--r--  arch/x86/include/asm/spinlock.h  |  35
1 file changed, 5 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4d542444bea3..744241048a11 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -76,7 +76,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	register struct __raw_tickets inc = { .tail = 1 };
 
@@ -96,7 +96,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
@@ -110,7 +110,7 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_t next = lock->tickets.head + 1;
 
@@ -118,46 +118,21 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	__ticket_unlock_kick(lock, next);
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return tmp.tail != tmp.head;
 }
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return (__ticket_t)(tmp.tail - tmp.head) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						 unsigned long flags)
 {
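For reference, the functions touched by this patch implement a ticket lock: lock takes a ticket by bumping the tail, unlock advances the head, the lock is held while head != tail, and it is contended when more than one ticket is outstanding. Below is a minimal userspace sketch of those semantics using C11 atomics; it keeps head and tail as separate fields and omits the kernel's packed head_tail word, xadd/cmpxchg helpers, and paravirt kick/wait hooks, and the tspin_* names are made up for illustration.

/*
 * Userspace sketch of the ticket-lock semantics shown in the diff above.
 * Not kernel code: separate atomic head/tail fields stand in for the
 * kernel's combined head_tail word, and ordinary spinning stands in for
 * cpu_relax()/paravirt blocking.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t head;	/* ticket currently being served */
	_Atomic uint16_t tail;	/* next ticket to hand out */
} tspin_t;

static inline void tspin_lock(tspin_t *lock)
{
	/* take a ticket (the kernel does this with an xadd on the tail half) */
	uint16_t me = atomic_fetch_add(&lock->tail, 1);

	/* spin until our ticket is being served */
	while (atomic_load(&lock->head) != me)
		;	/* the kernel would cpu_relax() or pv-block here */
}

static inline bool tspin_trylock(tspin_t *lock)
{
	uint16_t head = atomic_load(&lock->head);
	uint16_t expected = head;	/* lock is free iff tail == head */

	/* claim ticket 'head' only if nobody holds or waits for the lock;
	 * the kernel instead cmpxchg's the whole head_tail word at once */
	return atomic_compare_exchange_strong(&lock->tail, &expected,
					      (uint16_t)(head + 1));
}

static inline void tspin_unlock(tspin_t *lock)
{
	/* serve the next ticket (the kernel then calls __ticket_unlock_kick) */
	atomic_fetch_add(&lock->head, 1);
}

static inline bool tspin_is_locked(tspin_t *lock)
{
	/* mirrors arch_spin_is_locked(): held iff tail != head */
	return atomic_load(&lock->tail) != atomic_load(&lock->head);
}

static inline bool tspin_is_contended(tspin_t *lock)
{
	/* mirrors arch_spin_is_contended(): more than one ticket outstanding */
	return (uint16_t)(atomic_load(&lock->tail) -
			  atomic_load(&lock->head)) > 1;
}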