author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-06-15 18:47:09 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-06-16 02:07:21 -0400
commit	a46e0899eec7a3069bcadd45dfba7bf67c6ed016 (patch)
tree	78158b4056fe1365d5086f66769abdc3ef2643c3 /kernel/rcutree_plugin.h
parent	09223371deac67d08ca0b70bd18787920284c967 (diff)
rcu: use softirq instead of kthreads except when RCU_BOOST=y
This patch #ifdefs RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has
not been configured.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
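
The diffstat below is limited to kernel/rcutree_plugin.h; the dispatch side of this change lands in kernel/rcutree.c, outside these hunks. As a rough sketch of the pattern the commit message describes (reconstructed for illustration, not quoted from the patch), callback invocation branches on whether boosting is configured:

/*
 * Sketch of the rcutree.c side: with RCU_BOOST=n, rsp->boost stays 0
 * and callbacks run directly in softirq context; with RCU_BOOST=y,
 * invocation is handed off to the per-CPU kthread.
 */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);		/* no context switch */
		return;
	}
	invoke_rcu_callbacks_kthread();		/* wake the per-CPU kthread */
}

This is also why the hunk at old line 1297 sets rsp->boost = 1 when a boost kthread is spawned: that flag is what routes invocation onto the kthread path.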
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 38d09c5f2b41..2772386c0421 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,11 +602,15 @@ static void rcu_preempt_process_callbacks(void)
 				&__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void rcu_preempt_do_callbacks(void)
 {
 	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ -1002,10 +1006,6 @@ static void rcu_preempt_process_callbacks(void)
 {
 }
 
-static void rcu_preempt_do_callbacks(void)
-{
-}
-
 /*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
  * But because preemptible RCU does not exist, map to rcu-sched.
@@ -1258,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 }
 
 /*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+		local_irq_restore(flags);
+		return;
+	}
+	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	local_irq_restore(flags);
+}
+
+/*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
  * kthread.
@@ -1297,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
 	if (&rcu_preempt_state != rsp)
 		return 0;
+	rsp->boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1319,22 +1337,15 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm)
+static void invoke_rcu_callbacks_kthread(void)
 {
+	WARN_ON_ONCE(1);
 }
 
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index)
-{
-	return 0;
-}
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
@@ -1509,7 +1520,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
  */
 int rcu_needs_cpu(int cpu)
@@ -1560,7 +1571,7 @@ int rcu_needs_cpu(int cpu)
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
 	if (c)
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 	return c;
 }
 
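
The final two hunks are the comment and caller side of renaming invoke_rcu_cpu_kthread() to invoke_rcu_core(). With kthreads compiled out of the common case, kicking RCU core processing plausibly reduces to raising the RCU softirq; a minimal sketch of the renamed helper (again on the rcutree.c side, not shown in this diffstat):

/* Sketch: request rcu_process_callbacks() via softirq on this CPU. */
static void invoke_rcu_core(void)
{
	raise_softirq(RCU_SOFTIRQ);
}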