author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-03-03 17:49:26 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2015-05-27 15:57:59 -0400
commit		727b705baf7d091a9bc5494d7f1d9699b6932531
tree		c89b9bb4fa1e684c43bcad4c79bb6b878c57976b
parent		e63c887cfed2077b2db29f27024d0a9f88151c40
rcu: Eliminate a few RCU_BOOST #ifdefs in favor of IS_ENABLED()
This commit removes a few RCU_BOOST #ifdefs, replacing them with
IS_ENABLED()-protected statements and relying on the optimizer to
remove any resulting dead code. There are several other RCU_BOOST
#ifdefs; however, these rely on per-CPU variables that are available
only under RCU_BOOST, so they might be converted later, if the
simplification proves to outweigh the increase in memory footprint.
One hoped-for advantage is more easily locating compiler errors
triggered by obscure combinations of Kconfig parameters.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <linux-rt-users@vger.kernel.org>
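
The pattern at the heart of this commit: IS_ENABLED(option), from
<linux/kconfig.h>, expands to the constant 1 or 0 depending on whether the
Kconfig option is set, so an ordinary if statement can replace an #ifdef
block. The guarded code is then parsed and type-checked in every
configuration, and constant folding removes it from the object code when
the option is off. Below is a self-contained userspace sketch of the idea;
the macros mirror the flavor of the kernel's implementation (simplified,
omitting the =m module case), and CONFIG_MYFEATURE / do_feature_work()
are illustrative names, not kernel symbols.

	#include <stdio.h>

	/* Kconfig-style switch: defined as 1 when set, undefined otherwise. */
	#define CONFIG_MYFEATURE 1

	/*
	 * Simplified take on the kernel's IS_ENABLED(): evaluates to 1 if
	 * the option is #defined as 1, and to 0 if it is not defined at all.
	 * If the option is defined, __ARG_PLACEHOLDER_1 inserts an extra
	 * argument, shifting the constant 1 into the "second arg" slot.
	 */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define IS_ENABLED(option) ___is_defined(option)

	static void do_feature_work(void)
	{
		printf("doing feature work\n");
	}

	int main(void)
	{
		/*
		 * With #ifdef, this call site would vanish entirely in
		 * disabled configurations, so the compiler would never
		 * check it there.  With IS_ENABLED(), it is always compiled
		 * and type-checked; when the option is off, the condition
		 * is the constant 0 and the optimizer drops the branch,
		 * matching the #ifdef version's object code.
		 */
		if (IS_ENABLED(CONFIG_MYFEATURE))
			do_feature_work();
		return 0;
	}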
 kernel/rcu/tree.h        |  2 --
 kernel/rcu/tree_plugin.h | 35 ++++++++++++++++++++---------------
 2 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a69d3dab2ec4..dd5ce4034875 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -170,7 +170,6 @@ struct rcu_node {
 				/*  if there is no such task.  If there */
 				/*  is no current expedited grace period, */
 				/*  then there can cannot be any such task. */
-#ifdef CONFIG_RCU_BOOST
 	struct list_head *boost_tasks;
 				/* Pointer to first task that needs to be */
 				/*  priority boosted, or NULL if no priority */
@@ -208,7 +207,6 @@ struct rcu_node {
 	unsigned long n_balk_nos;
 				/* Refused to boost: not sure why, though. */
 				/*  This can happen due to race conditions. */
-#endif /* #ifdef CONFIG_RCU_BOOST */
 #ifdef CONFIG_RCU_NOCB_CPU
 	wait_queue_head_t nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 9a04764dd239..8f8142778684 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -43,7 +43,17 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
-#endif /* #ifdef CONFIG_RCU_BOOST */
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
+ * all uses are in dead code.  Provide a definition to keep the compiler
+ * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
+ * This probably needs to be excluded from -rt builds.
+ */
+#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
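
The #else branch added above is the companion trick: with RCU_BOOST
disabled, every remaining rt_mutex_owner() caller sits inside an
IS_ENABLED()-dead branch, but the compiler still demands a visible
definition in order to type-check that dead code. The stub keeps the build
honest and screams via WARN_ON_ONCE() if it is ever reached. A userspace
sketch of the same pattern follows; feature_owner() and the fprintf()
stand-in for WARN_ON_ONCE() are illustrative, and the ({ ... })
statement-expression syntax is a GCC/Clang extension, as in the kernel.

	#include <stdio.h>

	#define FEATURE_ENABLED 0	/* flip to 1 to "build the feature in" */

	#if FEATURE_ENABLED
	static void *feature_owner(void)
	{
		return (void *)0x1;	/* a real owner lookup would live here */
	}
	#else
	/*
	 * Stub for featureless builds: the only callers are behind
	 * if (FEATURE_ENABLED), hence unreachable, but a definition is
	 * still needed so the dead code type-checks.  Complain loudly if
	 * it ever actually runs.
	 */
	#define feature_owner() \
		({ fprintf(stderr, "feature_owner() called!\n"); (void *)NULL; })
	#endif

	int main(void)
	{
		/* Dead when FEATURE_ENABLED == 0, yet parsed and checked. */
		if (FEATURE_ENABLED && feature_owner() != NULL)
			printf("owner found\n");
		return 0;
	}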
@@ -180,10 +190,9 @@ static void rcu_preempt_note_context_switch(void)
 		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
 			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
 			rnp->gp_tasks = &t->rcu_node_entry;
-#ifdef CONFIG_RCU_BOOST
-			if (rnp->boost_tasks != NULL)
-				rnp->boost_tasks = rnp->gp_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
+			if (IS_ENABLED(CONFIG_RCU_BOOST) &&
+			    rnp->boost_tasks != NULL)
+				rnp->boost_tasks = rnp->gp_tasks;
 		} else {
 			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
 			if (rnp->qsmask & rdp->grpmask)
@@ -263,9 +272,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	bool empty_exp_now;
 	unsigned long flags;
 	struct list_head *np;
-#ifdef CONFIG_RCU_BOOST
 	bool drop_boost_mutex = false;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	union rcu_special special;
 
@@ -331,12 +338,12 @@ void rcu_read_unlock_special(struct task_struct *t)
 			rnp->gp_tasks = np;
 		if (&t->rcu_node_entry == rnp->exp_tasks)
 			rnp->exp_tasks = np;
-#ifdef CONFIG_RCU_BOOST
-		if (&t->rcu_node_entry == rnp->boost_tasks)
-			rnp->boost_tasks = np;
-		/* Snapshot ->boost_mtx ownership with rcu_node lock held. */
-		drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
-#endif /* #ifdef CONFIG_RCU_BOOST */
+		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
+			if (&t->rcu_node_entry == rnp->boost_tasks)
+				rnp->boost_tasks = np;
+			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
+			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+		}
 
 		/*
 		 * If this was the last task on the current list, and if
@@ -358,11 +365,9 @@ void rcu_read_unlock_special(struct task_struct *t)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
-#ifdef CONFIG_RCU_BOOST
 	/* Unboost if we were boosted. */
-	if (drop_boost_mutex)
+	if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
 		rt_mutex_unlock(&rnp->boost_mtx);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/*
 	 * If this was the last task on the expedited lists,
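
Taken together, the last three hunks show how a configuration-dependent
local is handled without #ifdef: drop_boost_mutex is declared
unconditionally, written only inside an IS_ENABLED(CONFIG_RCU_BOOST)
block, and read behind another IS_ENABLED() check, so a !RCU_BOOST build
constant-folds both sites away and the variable itself is optimized out.
A compressed, self-contained sketch of that flow, with BOOST_ENABLED and
boost_lock()/boost_unlock() as illustrative stand-ins for the IS_ENABLED()
check and the rt-mutex operations:

	#include <stdbool.h>
	#include <stdio.h>

	#define BOOST_ENABLED 1	/* stand-in for IS_ENABLED(CONFIG_RCU_BOOST) */

	static bool boost_locked;

	static void boost_lock(void)   { boost_locked = true; }
	static void boost_unlock(void) { boost_locked = false; }

	static void unlock_special(void)
	{
		bool drop_boost_mutex = false;	/* unconditional, as in the patch */

		if (BOOST_ENABLED) {
			/* Snapshot ownership while "holding the lock". */
			drop_boost_mutex = boost_locked;
		}

		/* ... other unlock-time processing would go here ... */

		/* Unboost if we were boosted; dead when BOOST_ENABLED == 0. */
		if (BOOST_ENABLED && drop_boost_mutex)
			boost_unlock();
	}

	int main(void)
	{
		boost_lock();
		unlock_special();
		printf("boost_locked = %d\n", boost_locked);
		return 0;
	}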