author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2011-06-15 18:47:09 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2011-06-16 02:07:21 -0400
commit     a46e0899eec7a3069bcadd45dfba7bf67c6ed016
tree       78158b4056fe1365d5086f66769abdc3ef2643c3 /kernel/rcutree.c
parent     09223371deac67d08ca0b70bd18787920284c967
rcu: use softirq instead of kthreads except when RCU_BOOST=y
This patch #ifdefs RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has
not been configured.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  59
1 file changed, 44 insertions(+), 15 deletions(-)
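The shape of the change is easiest to see in the new invoke_rcu_callbacks()/invoke_rcu_core() pair in the diff below: invoke_rcu_core() simply raises RCU_SOFTIRQ, and the softirq handler runs callbacks directly unless the rcu_state's boost flag says a priority-boost kthread should do the work. The stand-alone C sketch that follows models only that dispatch decision; the struct definitions and printf() bodies are illustrative stand-ins, not kernel code.

#include <stdio.h>

struct rcu_state { int boost; };   /* stand-in; the real structure is far larger   */
struct rcu_data  { int qlen; };    /* stand-in for the per-CPU callback lists      */

static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
        printf("invoking %d callbacks directly from softirq context\n", rdp->qlen);
}

static void invoke_rcu_callbacks_kthread(void)
{
        printf("deferring callbacks to the per-CPU kthread (boost case)\n");
}

/* Mirrors the new invoke_rcu_callbacks(): run in softirq unless boosting. */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
        if (!rsp->boost) {
                rcu_do_batch(rsp, rdp);
                return;
        }
        invoke_rcu_callbacks_kthread();
}

int main(void)
{
        struct rcu_state sp = { .boost = 0 };
        struct rcu_data  dp = { .qlen  = 3 };

        invoke_rcu_callbacks(&sp, &dp);   /* RCU_BOOST=n: softirq path  */
        sp.boost = 1;
        invoke_rcu_callbacks(&sp, &dp);   /* RCU_BOOST=y: kthread path  */
        return 0;
}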
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ae5c9ea68662..429d4949f0eb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads. These
  * handle all flavors of RCU.
@@ -98,9 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
-static void __invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1	/* RT priority for per-CPU kthreads. */
 
@@ -1089,6 +1093,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
+#ifdef CONFIG_RCU_BOOST
 	struct task_struct *t;
 
 	/* Stop the CPU's kthread. */
@@ -1097,6 +1102,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
 		kthread_stop(t);
 	}
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1232,7 +1238,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 /*
@@ -1278,7 +1284,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1444,9 +1450,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* If there are callbacks ready, invoke them. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		__invoke_rcu_cpu_kthread();
+		invoke_rcu_callbacks(rsp, rdp);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1454,6 +1462,8 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Do softirq processing for the current CPU.
  */
@@ -1474,25 +1484,22 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void __invoke_rcu_cpu_kthread(void)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-		local_irq_restore(flags);
+	if (likely(!rsp->boost)) {
+		rcu_do_batch(rsp, rdp);
 		return;
 	}
-	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-	local_irq_restore(flags);
+	invoke_rcu_callbacks_kthread();
 }
 
-static void invoke_rcu_cpu_kthread(void)
+static void invoke_rcu_core(void)
 {
 	raise_softirq(RCU_SOFTIRQ);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Wake up the specified per-rcu_node-structure kthread.
  * Because the per-rcu_node kthreads are immortal, we don't need
@@ -1818,6 +1825,18 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -2224,6 +2243,8 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
+#ifdef CONFIG_RCU_BOOST
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
@@ -2237,6 +2258,14 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	}
 }
 
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 /*
  * Handle CPU online/offline notification events.
 */
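For the RCU_BOOST=n build, the tail hunks above replace the kthread setup and affinity helpers with empty stubs, so their call sites need no #ifdefs of their own. A minimal sketch of that stub pattern follows, with CONFIG_RCU_BOOST treated as an ordinary compile-time define purely for illustration:

#include <stdio.h>

/* Toggle at build time, e.g. cc -DCONFIG_RCU_BOOST stub_demo.c */
#ifdef CONFIG_RCU_BOOST
static void rcu_prepare_kthreads(int cpu)
{
        printf("RCU_BOOST=y: spawning per-CPU kthread for CPU %d\n", cpu);
}
#else /* #ifdef CONFIG_RCU_BOOST */
static void rcu_prepare_kthreads(int cpu)
{
        (void)cpu;      /* nothing to do: callbacks run from RCU_SOFTIRQ */
}
#endif /* #else #ifdef CONFIG_RCU_BOOST */

int main(void)
{
        rcu_prepare_kthreads(0);        /* caller carries no #ifdef of its own */
        return 0;
}

Built without the define, the stub compiles to a no-op, which is how the patch removes the kthread context-switch overhead without touching the callers.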