author		Frederic Weisbecker <fweisbec@gmail.com>	2012-07-11 14:26:31 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2012-09-26 09:46:55 -0400
commit		c5d900bf676b1e2a61c44483932c8088651bbb4e (patch)
tree		881ee6420ba291d68d451986d5ed9832ee95661f /kernel/rcutree.c
parent		2b1d5024e17be459aa6385763ca3faa8f01c52d9 (diff)
rcu: Allow rcu_user_enter()/exit() to nest
Allow calls to rcu_user_enter() even if RCU already considers the CPU to be in userspace, and allow calls to rcu_user_exit() even if RCU already considers it to be in the kernel.

This makes the APIs more flexible for architecture code: an exception entry, for example, no longer needs to know whether it interrupted userspace before calling rcu_user_exit().

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
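For illustration only (this sketch is not part of the patch; the handler and helper names are hypothetical), an architecture exception path could now report the kernel entry to RCU unconditionally, and only consult the saved register state when deciding whether to resume the userspace extended quiescent state on return:

	/* Hypothetical arch exception path, sketched against the new nesting semantics. */
	void do_hypothetical_exception(struct pt_regs *regs)
	{
		/* Safe even if RCU already saw this CPU in the kernel. */
		rcu_user_exit();

		handle_hypothetical_exception(regs);	/* placeholder for the real handler */

		/* Re-enter the userspace extended quiescent state only if we came from it. */
		if (user_mode(regs))
			rcu_user_enter();
	}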
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	41
1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 79fa2db1595b..d62c04482228 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -366,11 +366,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
  */
 static void rcu_eqs_enter(bool user)
 {
-	unsigned long flags;
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
@@ -379,7 +377,6 @@ static void rcu_eqs_enter(bool user)
 	else
 		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 	rcu_eqs_enter_common(rdtp, oldval, user);
-	local_irq_restore(flags);
 }
 
 /**
@@ -396,7 +393,11 @@ static void rcu_eqs_enter(bool user)
  */
 void rcu_idle_enter(void)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	rcu_eqs_enter(0);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
@@ -411,6 +412,9 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_user_enter(void)
 {
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
 	/*
 	 * Some contexts may involve an exception occuring in an irq,
 	 * leading to that nesting:
@@ -422,7 +426,15 @@ void rcu_user_enter(void)
 	if (in_interrupt())
 		return;
 
-	rcu_eqs_enter(1);
+	WARN_ON_ONCE(!current->mm);
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	if (!rdtp->in_user) {
+		rdtp->in_user = true;
+		rcu_eqs_enter(1);
+	}
+	local_irq_restore(flags);
 }
 
 /**
@@ -516,11 +528,9 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
  */
 static void rcu_eqs_exit(bool user)
 {
-	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
@@ -529,7 +539,6 @@ static void rcu_eqs_exit(bool user)
 	else
 		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 	rcu_eqs_exit_common(rdtp, oldval, user);
-	local_irq_restore(flags);
 }
 
 /**
@@ -545,7 +554,11 @@ static void rcu_eqs_exit(bool user)
  */
 void rcu_idle_exit(void)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	rcu_eqs_exit(0);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
@@ -558,6 +571,9 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_user_exit(void)
 {
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
 	/*
 	 * Some contexts may involve an exception occuring in an irq,
 	 * leading to that nesting:
@@ -569,7 +585,13 @@ void rcu_user_exit(void)
 	if (in_interrupt())
 		return;
 
-	rcu_eqs_exit(1);
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	if (rdtp->in_user) {
+		rdtp->in_user = false;
+		rcu_eqs_exit(1);
+	}
+	local_irq_restore(flags);
 }
 
 /**
@@ -2586,6 +2608,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+	WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);