Diffstat (limited to 'kernel')
 kernel/locking/mcs_spinlock.c | 8 +++-----
 kernel/locking/mcs_spinlock.h | 4 ++--
 kernel/locking/mutex.c        | 4 ++--
 kernel/locking/qrwlock.c      | 9 ++++-----
 kernel/locking/rwsem-xadd.c   | 4 ++--
 5 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index be9ee1559fca..9887a905a762 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -1,6 +1,4 @@
-
 #include <linux/percpu.h>
-#include <linux/mutex.h>
 #include <linux/sched.h>
 #include "mcs_spinlock.h"
 
@@ -79,7 +77,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 				break;
 		}
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	return next;
@@ -120,7 +118,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		if (need_resched())
 			goto unqueue;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	return true;
 
@@ -146,7 +144,7 @@ unqueue:
 		if (smp_load_acquire(&node->locked))
 			return true;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 		/*
 		 * Or we race against a concurrent unqueue()'s step-B, in which
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 74356dc0ce29..23e89c5930e9 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -27,7 +27,7 @@ struct mcs_spinlock {
 #define arch_mcs_spin_lock_contended(l)					\
 do {									\
 	while (!(smp_load_acquire(l)))					\
-		arch_mutex_cpu_relax();					\
+		cpu_relax_lowlatency();					\
 } while (0)
 #endif
 
@@ -104,7 +104,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 			return;
 		/* Wait until the next pointer is set */
 		while (!(next = ACCESS_ONCE(node->next)))
-			arch_mutex_cpu_relax();
+			cpu_relax_lowlatency();
 	}
 
 	/* Pass lock to next waiter. */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index d3100521388c..ae712b25e492 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -146,7 +146,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 		if (need_resched())
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
 
@@ -464,7 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	osq_unlock(&lock->osq);
 slowpath:
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fb5b8ac411a5..f956ede7f90d 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 		cnts = smp_load_acquire((u32 *)&lock->cnts);
 	}
 }
@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
 	 * to make sure that the write lock isn't taken.
 	 */
 	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    cnts | _QW_WAITING) == cnts))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 unlock:
 	arch_spin_unlock(&lock->lock);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a2391ac135c8..d6203faf2eb1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -329,7 +329,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
 		if (need_resched())
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	rcu_read_unlock();
 
@@ -381,7 +381,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 	osq_unlock(&sem->osq);
 done:
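
Every hunk above makes the same substitution inside a spin-wait loop: an
acquire load (or atomic read) is polled until another CPU publishes a value,
with a relax hint between iterations. The sketch below is a minimal userspace
illustration of that pattern, not kernel code; the cpu_relax_lowlatency()
definition here is a hypothetical stand-in. In the kernel the helper is
per-architecture: on most arches it expands to cpu_relax(), while s390 gets a
barrier-only variant so these lock paths avoid a costly hypervisor yield.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical userspace stand-in for the kernel's cpu_relax_lowlatency(). */
static inline void cpu_relax_lowlatency(void)
{
#if defined(__x86_64__) || defined(__i386__)
	__builtin_ia32_pause();                    /* x86 PAUSE: throttle the spin */
#else
	atomic_signal_fence(memory_order_seq_cst); /* compiler barrier only */
#endif
}

/* The loop shape shared by the call sites in this patch: spin on an
 * acquire load until the flag is set, relaxing between iterations
 * instead of hammering the contended cache line. */
static void spin_until_set(atomic_int *flag)
{
	while (!atomic_load_explicit(flag, memory_order_acquire))
		cpu_relax_lowlatency();
}

int main(void)
{
	atomic_int flag = 1;	/* pre-set so the demo returns immediately */

	spin_until_set(&flag);
	puts("flag observed");
	return 0;
}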