author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-05-02 14:58:56 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-06-15 18:45:00 -0400
commit		4929c913bda505dbe44bb42c00da06011fee6c9d
tree		aa11d64d780bba090f5e964308ab88c397340754 /kernel/rcu
parent		570dd3c7424179b831decb655ea9dd1ecea38adc
rcu: Make call_rcu_tasks() tolerate first call with irqs disabled
Currently, if the very first call to call_rcu_tasks() is made with irqs
disabled, it will create the rcu_tasks_kthread with irqs disabled. This
results in a splat from the memory allocator, which kthread_run() invokes
with the expectation that irqs are enabled.
This commit fixes this problem by deferring kthread creation if called
with irqs disabled. The first call to call_rcu_tasks() that has irqs
enabled will create the kthread.
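For illustration, here is a minimal, condensed sketch of the deferred-spawn
pattern this commit adopts. It is not the real RCU-tasks code; all demo_*
names are hypothetical stand-ins for the symbols visible in the diff below.
The key points are that havetask snapshots whether the kthread already
exists, and that flags (saved by raw_spin_lock_irqsave()) records the
interrupt state on entry, so irqs_disabled_flags(flags) tests the caller's
state rather than the always-disabled state inside the critical section:

/*
 * Hypothetical, condensed sketch of the deferred-spawn pattern used by
 * this commit.  All demo_* names are placeholders, not the real
 * RCU-tasks symbols; the control flow mirrors the patched code.
 */
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/err.h>

static struct task_struct *demo_kthread_ptr;	/* NULL until first spawn */
static struct rcu_head *demo_cbs_head;
static struct rcu_head **demo_cbs_tail = &demo_cbs_head;
static DEFINE_RAW_SPINLOCK(demo_cbs_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_cbs_wq);

static int demo_kthread(void *arg)
{
	/* Drain demo_cbs_head and invoke the queued callbacks. */
	return 0;
}

/* Spawn the kthread exactly once; safe against concurrent callers. */
static void demo_spawn_kthread(void)
{
	static DEFINE_MUTEX(demo_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(demo_kthread_ptr))
		return;				/* already running */
	mutex_lock(&demo_kthread_mutex);
	if (!demo_kthread_ptr) {		/* recheck under the mutex */
		t = kthread_run(demo_kthread, NULL, "demo_kthread");
		if (!IS_ERR(t))
			WRITE_ONCE(demo_kthread_ptr, t);
	}
	mutex_unlock(&demo_kthread_mutex);
}

void demo_call(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(demo_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&demo_cbs_lock, flags);
	needwake = !demo_cbs_head;	/* wake only on empty->nonempty */
	*demo_cbs_tail = rhp;
	demo_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&demo_cbs_lock, flags);

	/*
	 * kthread_run() allocates memory and may sleep, so it must not
	 * run with irqs disabled.  flags captured the interrupt state
	 * from before the lock was taken, so test it rather than the
	 * current state.  If irqs were off, spawning is simply deferred
	 * to a later call made with irqs enabled.
	 */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		demo_spawn_kthread();
		wake_up(&demo_cbs_wq);
	}
}

Note also that the patch moves rcu_tasks_kthread_ptr from function scope in
rcu_spawn_tasks_kthread() to file scope, since call_rcu_tasks() now needs to
read it; the spawn function still rechecks the pointer under its mutex, so
concurrent callers cannot create two kthreads.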
This bug was detected by rcutorture changes that were motivated by
Iftekhar Ahmed's mutation-testing efforts.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/update.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 3e888cd5a594..f0d8322bc3ec 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -528,6 +528,7 @@ static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
 module_param(rcu_task_stall_timeout, int, 0644);
 
 static void rcu_spawn_tasks_kthread(void);
+static struct task_struct *rcu_tasks_kthread_ptr;
 
 /*
  * Post an RCU-tasks callback. First call must be from process context
@@ -537,6 +538,7 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
 	unsigned long flags;
 	bool needwake;
+	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);
 
 	rhp->next = NULL;
 	rhp->func = func;
@@ -545,7 +547,9 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 	*rcu_tasks_cbs_tail = rhp;
 	rcu_tasks_cbs_tail = &rhp->next;
 	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
-	if (needwake) {
+	/* We can't create the thread unless interrupts are enabled. */
+	if ((needwake && havetask) ||
+	    (!havetask && !irqs_disabled_flags(flags))) {
 		rcu_spawn_tasks_kthread();
 		wake_up(&rcu_tasks_cbs_wq);
 	}
@@ -790,7 +794,6 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 static void rcu_spawn_tasks_kthread(void)
 {
 	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
-	static struct task_struct *rcu_tasks_kthread_ptr;
 	struct task_struct *t;
 
 	if (READ_ONCE(rcu_tasks_kthread_ptr)) {