author		Christian Borntraeger <borntraeger@de.ibm.com>	2016-10-25 05:03:14 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-11-16 04:15:10 -0500
commit		f2f09a4cee3507dba0e24b87ba2961a5c377d3a7 (patch)
tree		43a4dc4e093c4cb3efb9140ad6f76822a7055c33
parent		22b6430d36659b37ed139b7fd87fcc7237fb0cfd (diff)
locking/core: Remove cpu_relax_lowlatency() users
With the s390 special case of a yielding cpu_relax() implementation gone, we can
now remove all users of cpu_relax_lowlatency() and replace them with cpu_relax().

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1477386195-32736-5-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
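For context, a minimal kernel-style sketch of the spin-wait pattern every hunk below converts; the helper wait_for_flag() and its flag argument are hypothetical illustrations (only cpu_relax(), cpu_relax_lowlatency() and READ_ONCE() are real kernel interfaces), and the fragment assumes the usual kernel headers rather than being standalone code:

/*
 * Illustrative only: a hypothetical spin-wait helper showing the pattern
 * this patch standardizes on.  Poll a condition and call cpu_relax()
 * between iterations; with the yielding s390 variant gone, there is no
 * longer a separate cpu_relax_lowlatency() for the low-latency case.
 */
static inline void wait_for_flag(const int *flag)
{
	while (!READ_ONCE(*flag))
		cpu_relax();	/* previously: cpu_relax_lowlatency(); */
}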
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.c	2
-rw-r--r--	drivers/vhost/net.c	4
-rw-r--r--	kernel/locking/mcs_spinlock.h	4
-rw-r--r--	kernel/locking/mutex.c	4
-rw-r--r--	kernel/locking/osq_lock.c	6
-rw-r--r--	kernel/locking/qrwlock.c	6
-rw-r--r--	kernel/locking/rwsem-xadd.c	4
-rw-r--r--	lib/lockref.c	2
8 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..383d13416442 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -723,7 +723,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 		if (busywait_stop(timeout_us, cpu))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	} while (!need_resched());
 
 	return false;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc128a8da83..5dc34653274a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 		endtime = busy_clock() + vq->busyloop_timeout;
 		while (vhost_can_busy_poll(vq->dev, endtime) &&
 		       vhost_vq_avail_empty(vq->dev, vq))
-			cpu_relax_lowlatency();
+			cpu_relax();
 		preempt_enable();
 		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 				      out_num, in_num, NULL, NULL);
@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 		while (vhost_can_busy_poll(&net->dev, endtime) &&
 		       !sk_has_rx_data(sk) &&
 		       vhost_vq_avail_empty(&net->dev, vq))
-			cpu_relax_lowlatency();
+			cpu_relax();
 
 		preempt_enable();
 
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index c835270f0c2f..6a385aabcce7 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -28,7 +28,7 @@ struct mcs_spinlock {
 #define arch_mcs_spin_lock_contended(l)					\
 do {									\
 	while (!(smp_load_acquire(l)))					\
-		cpu_relax_lowlatency();					\
+		cpu_relax();						\
 } while (0)
 #endif
 
@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 			return;
 		/* Wait until the next pointer is set */
 		while (!(next = READ_ONCE(node->next)))
-			cpu_relax_lowlatency();
+			cpu_relax();
 	}
 
 	/* Pass lock to next waiter. */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 17a88e929e6a..a65e09a046ac 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -369,7 +369,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 			break;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	rcu_read_unlock();
 
@@ -492,7 +492,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	if (!waiter)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab55..4ea2710b9d6c 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -75,7 +75,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 			break;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	return next;
@@ -122,7 +122,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 		if (need_resched())
 			goto unqueue;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	return true;
 
@@ -148,7 +148,7 @@ unqueue:
 		if (smp_load_acquire(&node->locked))
 			return true;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 
 		/*
 		 * Or we race against a concurrent unqueue()'s step-B, in which
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 19248ddf37ce..cc3ed0ccdfa2 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -54,7 +54,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		cpu_relax_lowlatency();
+		cpu_relax();
 		cnts = atomic_read_acquire(&lock->cnts);
 	}
 }
@@ -130,7 +130,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -141,7 +141,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 					    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 unlock:
 	arch_spin_unlock(&lock->wait_lock);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2337b4bb2366..2fa2e2e64950 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -368,7 +368,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 			return false;
 		}
 
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	rcu_read_unlock();
 out:
@@ -423,7 +423,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax_lowlatency();
+		cpu_relax();
 	}
 	osq_unlock(&sem->osq);
 done:
diff --git a/lib/lockref.c b/lib/lockref.c
index 5a92189ad711..c4bfcb8836cd 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -20,7 +20,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {		\
 			SUCCESS;						\
 		}								\
-		cpu_relax_lowlatency();						\
+		cpu_relax();							\
 	}									\
 } while (0)
 