diff options
commit     c7b24d2b9a0f2ce19fdf631d3148c80a8f6010b1 (patch)
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-07-28 17:39:25 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-07 19:27:27 -0400
tree       0845fff0ca580e0394746e6545b9f038bc4b8a44 /kernel
parent     52db30ab23b6d00cf80b22a510c4ea4be4458031 (diff)
rcu: Improve RCU-tasks energy efficiency
The current RCU-tasks implementation uses strict polling to detect
callback arrivals. This works quite well, but is not so good for
energy efficiency. This commit therefore replaces the strict polling
with a wait queue.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rcu/update.c | 14 |
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index bad7dbd4c2e3..444c8a303963 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -365,6 +365,7 @@ early_initcall(check_cpu_stall_init); | |||
365 | /* Global list of callbacks and associated lock. */ | 365 | /* Global list of callbacks and associated lock. */ |
366 | static struct rcu_head *rcu_tasks_cbs_head; | 366 | static struct rcu_head *rcu_tasks_cbs_head; |
367 | static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head; | 367 | static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head; |
368 | static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq); | ||
368 | static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock); | 369 | static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock); |
369 | 370 | ||
370 | /* Track exiting tasks in order to allow them to be waited for. */ | 371 | /* Track exiting tasks in order to allow them to be waited for. */ |
@@ -378,13 +379,17 @@ module_param(rcu_task_stall_timeout, int, 0644); | |||
378 | void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) | 379 | void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp)) |
379 | { | 380 | { |
380 | unsigned long flags; | 381 | unsigned long flags; |
382 | bool needwake; | ||
381 | 383 | ||
382 | rhp->next = NULL; | 384 | rhp->next = NULL; |
383 | rhp->func = func; | 385 | rhp->func = func; |
384 | raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags); | 386 | raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags); |
387 | needwake = !rcu_tasks_cbs_head; | ||
385 | *rcu_tasks_cbs_tail = rhp; | 388 | *rcu_tasks_cbs_tail = rhp; |
386 | rcu_tasks_cbs_tail = &rhp->next; | 389 | rcu_tasks_cbs_tail = &rhp->next; |
387 | raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags); | 390 | raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags); |
391 | if (needwake) | ||
392 | wake_up(&rcu_tasks_cbs_wq); | ||
388 | } | 393 | } |
389 | EXPORT_SYMBOL_GPL(call_rcu_tasks); | 394 | EXPORT_SYMBOL_GPL(call_rcu_tasks); |
390 | 395 | ||
@@ -495,8 +500,12 @@ static int __noreturn rcu_tasks_kthread(void *arg) | |||
495 | 500 | ||
496 | /* If there were none, wait a bit and start over. */ | 501 | /* If there were none, wait a bit and start over. */ |
497 | if (!list) { | 502 | if (!list) { |
498 | schedule_timeout_interruptible(HZ); | 503 | wait_event_interruptible(rcu_tasks_cbs_wq, |
499 | WARN_ON(signal_pending(current)); | 504 | rcu_tasks_cbs_head); |
505 | if (!rcu_tasks_cbs_head) { | ||
506 | WARN_ON(signal_pending(current)); | ||
507 | schedule_timeout_interruptible(HZ/10); | ||
508 | } | ||
500 | continue; | 509 | continue; |
501 | } | 510 | } |
502 | 511 | ||
@@ -602,6 +611,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) | |||
602 | list = next; | 611 | list = next; |
603 | cond_resched(); | 612 | cond_resched(); |
604 | } | 613 | } |
614 | schedule_timeout_uninterruptible(HZ/10); | ||
605 | } | 615 | } |
606 | } | 616 | } |
607 | 617 | ||