about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-07-02 12:17:57 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2018-08-30 19:02:43 -0400
commit2bbfc25b09dff6335acf4103c6c7c4591e62988b (patch)
tree7e5a725c18fe63ef88acffd7e99c818079925d7d /kernel
parent82fcecfa81855924cc69f3078113cf63dd6c2964 (diff)
rcu: Drop "wake" parameter from rcu_report_exp_rdp()
The rcu_report_exp_rdp() function is always invoked with its "wake" argument set to "true", so this commit drops this parameter. The only potential call site that would use "false" is in the code driving the expedited grace period, and that code uses rcu_report_exp_cpu_mult() instead, which therefore retains its "wake" parameter. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcu/tree.c9
-rw-r--r--kernel/rcu/tree_exp.h9
-rw-r--r--kernel/rcu/tree_plugin.h6
3 files changed, 10 insertions, 14 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 158c58d47b07..e1927147a4a5 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -165,8 +165,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
165static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 165static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
166static void invoke_rcu_core(void); 166static void invoke_rcu_core(void);
167static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 167static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
168static void rcu_report_exp_rdp(struct rcu_state *rsp, 168static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
169 struct rcu_data *rdp, bool wake);
170static void sync_sched_exp_online_cleanup(int cpu); 169static void sync_sched_exp_online_cleanup(int cpu);
171 170
172/* rcuc/rcub kthread realtime priority */ 171/* rcuc/rcub kthread realtime priority */
@@ -239,8 +238,7 @@ void rcu_sched_qs(void)
239 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) 238 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
240 return; 239 return;
241 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); 240 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
242 rcu_report_exp_rdp(&rcu_sched_state, 241 rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
243 this_cpu_ptr(&rcu_sched_data), true);
244} 242}
245 243
246void rcu_softirq_qs(void) 244void rcu_softirq_qs(void)
@@ -3758,8 +3756,7 @@ void rcu_report_dead(unsigned int cpu)
3758 3756
3759 /* QS for any half-done expedited RCU-sched GP. */ 3757 /* QS for any half-done expedited RCU-sched GP. */
3760 preempt_disable(); 3758 preempt_disable();
3761 rcu_report_exp_rdp(&rcu_sched_state, 3759 rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda));
3762 this_cpu_ptr(rcu_sched_state.rda), true);
3763 preempt_enable(); 3760 preempt_enable();
3764 rcu_preempt_deferred_qs(current); 3761 rcu_preempt_deferred_qs(current);
3765 for_each_rcu_flavor(rsp) 3762 for_each_rcu_flavor(rsp)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index f9d5bbd8adce..0f8f225c1b46 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -259,11 +259,10 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
259/* 259/*
260 * Report expedited quiescent state for specified rcu_data (CPU). 260 * Report expedited quiescent state for specified rcu_data (CPU).
261 */ 261 */
262static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp, 262static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
263 bool wake)
264{ 263{
265 WRITE_ONCE(rdp->deferred_qs, false); 264 WRITE_ONCE(rdp->deferred_qs, false);
266 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake); 265 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
267} 266}
268 267
269/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */ 268/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
@@ -352,7 +351,7 @@ static void sync_sched_exp_handler(void *data)
352 return; 351 return;
353 if (rcu_is_cpu_rrupt_from_idle()) { 352 if (rcu_is_cpu_rrupt_from_idle()) {
354 rcu_report_exp_rdp(&rcu_sched_state, 353 rcu_report_exp_rdp(&rcu_sched_state,
355 this_cpu_ptr(&rcu_sched_data), true); 354 this_cpu_ptr(&rcu_sched_data));
356 return; 355 return;
357 } 356 }
358 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true); 357 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
@@ -750,7 +749,7 @@ static void sync_rcu_exp_handler(void *info)
750 if (!t->rcu_read_lock_nesting) { 749 if (!t->rcu_read_lock_nesting) {
751 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) || 750 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
752 rcu_dynticks_curr_cpu_in_eqs()) { 751 rcu_dynticks_curr_cpu_in_eqs()) {
753 rcu_report_exp_rdp(rsp, rdp, true); 752 rcu_report_exp_rdp(rsp, rdp);
754 } else { 753 } else {
755 rdp->deferred_qs = true; 754 rdp->deferred_qs = true;
756 resched_cpu(rdp->cpu); 755 resched_cpu(rdp->cpu);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1ff742a3c8d1..9f0d054e6c20 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -285,7 +285,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
285 * still in a quiescent state in any case.) 285 * still in a quiescent state in any case.)
286 */ 286 */
287 if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs) 287 if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
288 rcu_report_exp_rdp(rdp->rsp, rdp, true); 288 rcu_report_exp_rdp(rdp->rsp, rdp);
289 else 289 else
290 WARN_ON_ONCE(rdp->deferred_qs); 290 WARN_ON_ONCE(rdp->deferred_qs);
291} 291}
@@ -383,7 +383,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
383 */ 383 */
384 rcu_preempt_qs(); 384 rcu_preempt_qs();
385 if (rdp->deferred_qs) 385 if (rdp->deferred_qs)
386 rcu_report_exp_rdp(rcu_state_p, rdp, true); 386 rcu_report_exp_rdp(rcu_state_p, rdp);
387} 387}
388 388
389/* 389/*
@@ -508,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
508 * blocked-tasks list below. 508 * blocked-tasks list below.
509 */ 509 */
510 if (rdp->deferred_qs) { 510 if (rdp->deferred_qs) {
511 rcu_report_exp_rdp(rcu_state_p, rdp, true); 511 rcu_report_exp_rdp(rcu_state_p, rdp);
512 if (!t->rcu_read_unlock_special.s) { 512 if (!t->rcu_read_unlock_special.s) {
513 local_irq_restore(flags); 513 local_irq_restore(flags);
514 return; 514 return;