 arch/mips/kvm/mips.c         |  4 ++--
 arch/powerpc/kvm/book3s_hv.c |  6 +++---
 arch/s390/kvm/interrupt.c    |  2 +-
 arch/x86/kernel/kvm.c        |  4 ++--
 arch/x86/kvm/lapic.c         |  2 +-
 include/linux/swait.h        | 24 ++++++++++++------------
 kernel/power/suspend.c       |  4 ++--
 kernel/rcu/srcutiny.c        |  4 ++--
 kernel/rcu/tree.c            |  8 ++++----
 kernel/rcu/tree_exp.h        |  4 ++--
 kernel/rcu/tree_plugin.h     | 12 ++++++------
 kernel/sched/swait.c         | 10 +++++-----
 virt/kvm/arm/arm.c           |  4 ++--
 virt/kvm/arm/psci.c          |  2 +-
 virt/kvm/async_pf.c          |  2 +-
 virt/kvm/kvm_main.c          |  4 ++--
 16 files changed, 48 insertions(+), 48 deletions(-)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7cd76f93a438..f7ea8e21656b 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -515,7 +515,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
         dvcpu->arch.wait = 0;
 
         if (swq_has_sleeper(&dvcpu->wq))
-                swake_up(&dvcpu->wq);
+                swake_up_one(&dvcpu->wq);
 
         return 0;
 }
@@ -1204,7 +1204,7 @@ static void kvm_mips_comparecount_func(unsigned long data)
 
         vcpu->arch.wait = 0;
         if (swq_has_sleeper(&vcpu->wq))
-                swake_up(&vcpu->wq);
+                swake_up_one(&vcpu->wq);
 }
 
 /* low level hrtimer wake routine */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..ee4a8854985e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
         wqp = kvm_arch_vcpu_wq(vcpu);
         if (swq_has_sleeper(wqp)) {
-                swake_up(wqp);
+                swake_up_one(wqp);
                 ++vcpu->stat.halt_wakeup;
         }
 
@@ -3188,7 +3188,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                 }
         }
 
-        prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+        prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 
         if (kvmppc_vcore_check_block(vc)) {
                 finish_swait(&vc->wq, &wait);
@@ -3311,7 +3311,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                         kvmppc_start_thread(vcpu, vc);
                         trace_kvm_guest_enter(vcpu);
                 } else if (vc->vcore_state == VCORE_SLEEPING) {
-                        swake_up(&vc->wq);
+                        swake_up_one(&vc->wq);
                 }
 
         }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index daa09f89ca2d..fcb55b02990e 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1145,7 +1145,7 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
                  * yield-candidate.
                  */
                 vcpu->preempted = true;
-                swake_up(&vcpu->wq);
+                swake_up_one(&vcpu->wq);
                 vcpu->stat.halt_wakeup++;
         }
         /*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5b2300b818af..a37bda38d205 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -154,7 +154,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 
         for (;;) {
                 if (!n.halted)
-                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+                        prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                 if (hlist_unhashed(&n.link))
                         break;
 
@@ -188,7 +188,7 @@ static void apf_task_wake_one(struct kvm_task_sleep_node *n)
         if (n->halted)
                 smp_send_reschedule(n->cpu);
         else if (swq_has_sleeper(&n->wq))
-                swake_up(&n->wq);
+                swake_up_one(&n->wq);
 }
 
 static void apf_task_wake_all(void)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b5cd8465d44f..d536d457517b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1379,7 +1379,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
          * using swait_active() is safe.
          */
         if (swait_active(q))
-                swake_up(q);
+                swake_up_one(q);
 
         if (apic_lvtt_tscdeadline(apic))
                 ktimer->expired_tscdeadline = ktimer->tscdeadline;
diff --git a/include/linux/swait.h b/include/linux/swait.h
index dd032061112d..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
  * wait-queues, but the semantics are actually completely different, and
  * every single user we have ever had has been buggy (or pointless).
  *
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  * "wake_up()" does, and has led to problems. In other cases, it has
  * been fine, because there's only ever one waiter (kvm), but in that
  * case gthe whole "simple" wait-queue is just pointless to begin with,
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
  *    CPU0 - waker                    CPU1 - waiter
  *
  *                                    for (;;) {
- *    @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
+ *    @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
  *    smp_mb();                         // smp_mb() from set_current_state()
  *    if (swait_active(wq_head))        if (@cond)
  *      wake_up(wq_head);                 break;
@@ -157,11 +157,11 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
         return swait_active(wq);
 }
 
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);
 
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
 
 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
@@ -196,7 +196,7 @@ __out: __ret; \
         (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,   \
                             schedule())
 
-#define swait_event(wq, condition)                                     \
+#define swait_event_exclusive(wq, condition)                           \
 do {                                                                   \
         if (condition)                                                 \
                 break;                                                 \
@@ -208,7 +208,7 @@ do { \
                             TASK_UNINTERRUPTIBLE, timeout,             \
                             __ret = schedule_timeout(__ret))
 
-#define swait_event_timeout(wq, condition, timeout)                    \
+#define swait_event_timeout_exclusive(wq, condition, timeout)          \
 ({                                                                     \
         long __ret = timeout;                                          \
         if (!___wait_cond_timeout(condition))                          \
@@ -220,7 +220,7 @@ do { \
         ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,           \
                       schedule())
 
-#define swait_event_interruptible(wq, condition)                       \
+#define swait_event_interruptible_exclusive(wq, condition)             \
 ({                                                                     \
         int __ret = 0;                                                 \
         if (!(condition))                                              \
@@ -233,7 +233,7 @@ do { \
                       TASK_INTERRUPTIBLE, timeout,                     \
                       __ret = schedule_timeout(__ret))
 
-#define swait_event_interruptible_timeout(wq, condition, timeout)      \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({                                                                     \
         long __ret = timeout;                                          \
         if (!___wait_cond_timeout(condition))                          \
@@ -246,7 +246,7 @@ do { \
         (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
 
 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -257,7 +257,7 @@ do { \
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)                                \
+#define swait_event_idle_exclusive(wq, condition)                      \
 do {                                                                   \
         if (condition)                                                 \
                 break;                                                 \
@@ -270,7 +270,7 @@ do { \
                             __ret = schedule_timeout(__ret))
 
 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)               \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)     \
 ({                                                                     \
         long __ret = timeout;                                          \
         if (!___wait_cond_timeout(condition))                          \
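
For reference, a minimal sketch of the renamed header API in use, following the waker/waiter diagram in the comment above. Only the swait primitives themselves come from the header; the queue, flag, and function names (wake_wq, wake_flag, waiter, waker) are hypothetical names invented for illustration:

static DECLARE_SWAIT_QUEUE_HEAD(wake_wq);
static bool wake_flag;

static void waiter(void)
{
        DECLARE_SWAITQUEUE(wait);

        for (;;) {
                /* Enqueue as an *exclusive* waiter and set the task state. */
                prepare_to_swait_exclusive(&wake_wq, &wait, TASK_UNINTERRUPTIBLE);
                if (READ_ONCE(wake_flag))
                        break;
                schedule();
        }
        finish_swait(&wake_wq, &wait);
}

static void waker(void)
{
        WRITE_ONCE(wake_flag, true);
        /*
         * swq_has_sleeper() contains the full barrier that pairs with
         * set_current_state() inside prepare_to_swait_exclusive().
         */
        if (swq_has_sleeper(&wake_wq))
                swake_up_one(&wake_wq); /* wakes at most one waiter */
}

The rename makes that last point explicit at every call site: swake_up_one(), unlike wake_up(), never wakes more than one task.
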
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 87331565e505..70178f6ffdc4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
         /* Push all the CPUs into the idle loop. */
         wake_up_all_idle_cpus();
         /* Make the current CPU wait so it can enter the idle loop too. */
-        swait_event(s2idle_wait_head,
+        swait_event_exclusive(s2idle_wait_head,
                     s2idle_state == S2IDLE_STATE_WAKE);
 
         cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
         raw_spin_lock_irqsave(&s2idle_lock, flags);
         if (s2idle_state > S2IDLE_STATE_NONE) {
                 s2idle_state = S2IDLE_STATE_WAKE;
-                swake_up(&s2idle_wait_head);
+                swake_up_one(&s2idle_wait_head);
         }
         raw_spin_unlock_irqrestore(&s2idle_lock, flags);
 }
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 622792abe41a..04fc2ed71af8 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 
         WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
         if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-                swake_up(&sp->srcu_wq);
+                swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
         idx = sp->srcu_idx;
         WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
         WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-        swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+        swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
         WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 
         /* Invoke the callbacks we removed above. */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index aa7cade1b9f3..91f888d3b23a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1727,7 +1727,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
             !READ_ONCE(rsp->gp_flags) ||
             !rsp->gp_kthread)
                 return;
-        swake_up(&rsp->gp_wq);
+        swake_up_one(&rsp->gp_wq);
 }
 
 /*
@@ -2002,7 +2002,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2144,7 +2144,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                READ_ONCE(rsp->gpnum),
                                                TPS("reqwait"));
                         rsp->gp_state = RCU_GP_WAIT_GPS;
-                        swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+                        swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
                                                      RCU_GP_FLAG_INIT);
                         rsp->gp_state = RCU_GP_DONE_GPS;
                         /* Locking provides needed memory barrier. */
@@ -2176,7 +2176,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                READ_ONCE(rsp->gpnum),
                                                TPS("fqswait"));
                         rsp->gp_state = RCU_GP_WAIT_FQS;
-                        ret = swait_event_idle_timeout(rsp->gp_wq,
+                        ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
                                         rcu_gp_fqs_check_wake(rsp, &gf), j);
                         rsp->gp_state = RCU_GP_DOING_FQS;
                         /* Locking provides needed memory barriers. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d40708e8c5d6..d428cc1064c8 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                         if (wake) {
                                 smp_mb(); /* EGP done before wake_up(). */
-                                swake_up(&rsp->expedited_wq);
+                                swake_up_one(&rsp->expedited_wq);
                         }
                         break;
                 }
@@ -518,7 +518,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
         jiffies_start = jiffies;
 
         for (;;) {
-                ret = swait_event_timeout(
+                ret = swait_event_timeout_exclusive(
                                 rsp->expedited_wq,
                                 sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                 jiffies_stall);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 7fd12039e512..ad53d133f709 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1854,8 +1854,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
                 WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
                 del_timer(&rdp->nocb_timer);
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-                smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-                swake_up(&rdp_leader->nocb_wq);
+                smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+                swake_up_one(&rdp_leader->nocb_wq);
         } else {
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
         }
@@ -2082,7 +2082,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
          */
         trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
         for (;;) {
-                swait_event_interruptible(
+                swait_event_interruptible_exclusive(
                         rnp->nocb_gp_wq[c & 0x1],
                         (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
                 if (likely(d))
@@ -2111,7 +2111,7 @@ wait_again:
         /* Wait for callbacks to appear. */
         if (!rcu_nocb_poll) {
                 trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-                swait_event_interruptible(my_rdp->nocb_wq,
+                swait_event_interruptible_exclusive(my_rdp->nocb_wq,
                                 !READ_ONCE(my_rdp->nocb_leader_sleep));
                 raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
                 my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2176,7 @@ wait_again:
                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
                 if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                         /* List was empty, so wake up the follower.  */
-                        swake_up(&rdp->nocb_wq);
+                        swake_up_one(&rdp->nocb_wq);
                 }
         }
 
@@ -2193,7 +2193,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 {
         for (;;) {
                 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-                swait_event_interruptible(rdp->nocb_wq,
+                swait_event_interruptible_exclusive(rdp->nocb_wq,
                                 READ_ONCE(rdp->nocb_follower_head));
                 if (smp_load_acquire(&rdp->nocb_follower_head)) {
                         /* ^^^ Ensure CB invocation follows _head test. */
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 66890de93ee5..66b59ac77c22 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
 }
 EXPORT_SYMBOL(swake_up_locked);
 
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
 {
         unsigned long flags;
 
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
         swake_up_locked(q);
         raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
 
 /*
  * Does not allow usage from IRQ disabled, since we must be able to
@@ -76,7 +76,7 @@ static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *w
         list_add_tail(&wait->task_list, &q->task_list);
 }
 
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
         unsigned long flags;
 
@@ -85,7 +85,7 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
         set_current_state(state);
         raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
 
 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
@@ -95,7 +95,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
         raw_spin_lock_irqsave(&q->lock, flags);
         if (unlikely(signal_pending_state(state, current))) {
                 /*
-                 * See prepare_to_wait_event(). TL;DR, subsequent swake_up()
+                 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
                  * must not see us.
                  */
                 list_del_init(&wait->task_list);
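
The event-style wrappers renamed above follow the same discipline. A hedged sketch of swait_event_exclusive() and swake_up_one() together; gp_wq and gp_done are made-up illustration names, not symbols from this patch (the real callers are the RCU and s2idle sites shown elsewhere in the diff):

static DECLARE_SWAIT_QUEUE_HEAD(gp_wq);
static bool gp_done;

static void wait_side(void)
{
        /*
         * Blocks in TASK_UNINTERRUPTIBLE until the condition is true.
         * The waiter is queued exclusively, so each sleeper needs its
         * own swake_up_one().
         */
        swait_event_exclusive(gp_wq, READ_ONCE(gp_done));
}

static void wake_side(void)
{
        WRITE_ONCE(gp_done, true);
        /*
         * No explicit barrier needed here: swake_up_one() takes the
         * queue lock, which orders the store above against the
         * waiter's condition re-check in prepare_to_swait_event().
         */
        swake_up_one(&gp_wq);
}
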
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 04e554cae3a2..108250e4d376 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -604,7 +604,7 @@ void kvm_arm_resume_guest(struct kvm *kvm)
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 vcpu->arch.pause = false;
-                swake_up(kvm_arch_vcpu_wq(vcpu));
+                swake_up_one(kvm_arch_vcpu_wq(vcpu));
         }
 }
 
@@ -612,7 +612,7 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
 {
         struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-        swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+        swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
                                        (!vcpu->arch.pause)));
 
         if (vcpu->arch.power_off || vcpu->arch.pause) {
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index c95ab4c5a475..9b73d3ad918a 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -155,7 +155,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
         smp_mb();               /* Make sure the above is visible */
 
         wq = kvm_arch_vcpu_wq(vcpu);
-        swake_up(wq);
+        swake_up_one(wq);
 
         return PSCI_RET_SUCCESS;
 }
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 57bcb27dcf30..23c2519c5b32 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -107,7 +107,7 @@ static void async_pf_execute(struct work_struct *work)
         trace_kvm_async_pf_completed(addr, gva);
 
         if (swq_has_sleeper(&vcpu->wq))
-                swake_up(&vcpu->wq);
+                swake_up_one(&vcpu->wq);
 
         mmput(mm);
         kvm_put_kvm(vcpu->kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ada21f47f22b..940a4aed5b2d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2167,7 +2167,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
         kvm_arch_vcpu_blocking(vcpu);
 
         for (;;) {
-                prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+                prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
                 if (kvm_vcpu_check_block(vcpu) < 0)
                         break;
@@ -2209,7 +2209,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 
         wqp = kvm_arch_vcpu_wq(vcpu);
         if (swq_has_sleeper(wqp)) {
-                swake_up(wqp);
+                swake_up_one(wqp);
                 ++vcpu->stat.halt_wakeup;
                 return true;
         }