diff options
| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-08-04 10:24:21 -0400 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2014-09-07 19:27:29 -0400 |
| commit | 84a8f446ffd70c2799a96268aaa4d47c22a83ff0 (patch) | |
| tree | 61e32afc24cfa6b7c33bcef6148851035b4149e6 /kernel | |
| parent | 37fe5f0e2713608573c5df5e529e13a135625629 (diff) | |
rcu: Defer rcu_tasks_kthread() creation till first call_rcu_tasks()
It is expected that many sites will have CONFIG_TASKS_RCU=y, but
will never actually invoke call_rcu_tasks(). For such sites, creating
rcu_tasks_kthread() at boot is wasteful. This commit therefore defers
creation of this kthread until the time of the first call_rcu_tasks().
This of course means that the first call_rcu_tasks() must be invoked
from process context after the scheduler is fully operational.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/rcu/update.c | 33 |
1 file changed, 26 insertions, 7 deletions
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 444c8a303963..e1d71741958f 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
| @@ -375,7 +375,12 @@ DEFINE_SRCU(tasks_rcu_exit_srcu); | |||
| 375 | static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10; | 375 | static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10; |
| 376 | module_param(rcu_task_stall_timeout, int, 0644); | 376 | module_param(rcu_task_stall_timeout, int, 0644); |
| 377 | 377 | ||
| 378 | /* Post an RCU-tasks callback. */ | 378 | static void rcu_spawn_tasks_kthread(void); |
| 379 | |||
| 380 | /* | ||
| 381 | * Post an RCU-tasks callback. First call must be from process context | ||
| 382 | * after the scheduler is fully operational. | ||
| 383 | */ | ||
| 379 | void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) | 384 | void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) |
| 380 | { | 385 | { |
| 381 | unsigned long flags; | 386 | unsigned long flags; |
| @@ -388,8 +393,10 @@ void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) | |||
| 388 | *rcu_tasks_cbs_tail = rhp; | 393 | *rcu_tasks_cbs_tail = rhp; |
| 389 | rcu_tasks_cbs_tail = &rhp->next; | 394 | rcu_tasks_cbs_tail = &rhp->next; |
| 390 | raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags); | 395 | raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags); |
| 391 | if (needwake) | 396 | if (needwake) { |
| 397 | rcu_spawn_tasks_kthread(); | ||
| 392 | wake_up(&rcu_tasks_cbs_wq); | 398 | wake_up(&rcu_tasks_cbs_wq); |
| 399 | } | ||
| 393 | } | 400 | } |
| 394 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | 401 | EXPORT_SYMBOL_GPL(call_rcu_tasks); |
| 395 | 402 | ||
| @@ -615,15 +622,27 @@ static int __noreturn rcu_tasks_kthread(void *arg) | |||
| 615 | } | 622 | } |
| 616 | } | 623 | } |
| 617 | 624 | ||
| 618 | /* Spawn rcu_tasks_kthread() at boot time. */ | 625 | /* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */ |
| 619 | static int __init rcu_spawn_tasks_kthread(void) | 626 | static void rcu_spawn_tasks_kthread(void) |
| 620 | { | 627 | { |
| 621 | struct task_struct __maybe_unused *t; | 628 | static DEFINE_MUTEX(rcu_tasks_kthread_mutex); |
| 629 | static struct task_struct *rcu_tasks_kthread_ptr; | ||
| 630 | struct task_struct *t; | ||
| 622 | 631 | ||
| 632 | if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) { | ||
| 633 | smp_mb(); /* Ensure caller sees full kthread. */ | ||
| 634 | return; | ||
| 635 | } | ||
| 636 | mutex_lock(&rcu_tasks_kthread_mutex); | ||
| 637 | if (rcu_tasks_kthread_ptr) { | ||
| 638 | mutex_unlock(&rcu_tasks_kthread_mutex); | ||
| 639 | return; | ||
| 640 | } | ||
| 623 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); | 641 | t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); |
| 624 | BUG_ON(IS_ERR(t)); | 642 | BUG_ON(IS_ERR(t)); |
| 625 | return 0; | 643 | smp_mb(); /* Ensure others see full kthread. */ |
| 644 | ACCESS_ONCE(rcu_tasks_kthread_ptr) = t; | ||
| 645 | mutex_unlock(&rcu_tasks_kthread_mutex); | ||
| 626 | } | 646 | } |
| 627 | early_initcall(rcu_spawn_tasks_kthread); | ||
| 628 | 647 | ||
| 629 | #endif /* #ifdef CONFIG_TASKS_RCU */ | 648 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
