aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcu/tree.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2014-09-02 17:13:44 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2014-11-03 22:19:26 -0500
commit28ced795cbb43f01146feb96d03a72facdee9911 (patch)
tree19766802a6044c9310aa7d9b6a73ee93346a12bd /kernel/rcu/tree.c
parentd7e29933969e5ca7c112ce1368a07911f4485dc2 (diff)
rcu: Remove rcu_dynticks * parameters when they are always this_cpu_ptr(&rcu_dynticks)
For some functions in kernel/rcu/tree* the rdtp parameter is always this_cpu_ptr(&rcu_dynticks). Remove the parameter if constant and calculate the pointer in the function. This will have the advantage that it is obvious that the addresses are all per-CPU offsets, and thus it will enable the use of this_cpu_ops in the future. Signed-off-by: Christoph Lameter <cl@linux.com> [ paulmck: Forward-ported to rcu/dev, whitespace adjustment. ] Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--kernel/rcu/tree.c25
1 files changed, 13 insertions, 12 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9815447d22e0..c0673c56fb1a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -510,11 +510,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
510 * we really have entered idle, and must do the appropriate accounting. 510 * we really have entered idle, and must do the appropriate accounting.
511 * The caller must have disabled interrupts. 511 * The caller must have disabled interrupts.
512 */ 512 */
513static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, 513static void rcu_eqs_enter_common(long long oldval, bool user)
514 bool user)
515{ 514{
516 struct rcu_state *rsp; 515 struct rcu_state *rsp;
517 struct rcu_data *rdp; 516 struct rcu_data *rdp;
517 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
518 518
519 trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); 519 trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
520 if (!user && !is_idle_task(current)) { 520 if (!user && !is_idle_task(current)) {
@@ -565,7 +565,7 @@ static void rcu_eqs_enter(bool user)
565 WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); 565 WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
566 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) { 566 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
567 rdtp->dynticks_nesting = 0; 567 rdtp->dynticks_nesting = 0;
568 rcu_eqs_enter_common(rdtp, oldval, user); 568 rcu_eqs_enter_common(oldval, user);
569 } else { 569 } else {
570 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; 570 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
571 } 571 }
@@ -589,7 +589,7 @@ void rcu_idle_enter(void)
589 589
590 local_irq_save(flags); 590 local_irq_save(flags);
591 rcu_eqs_enter(false); 591 rcu_eqs_enter(false);
592 rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0); 592 rcu_sysidle_enter(0);
593 local_irq_restore(flags); 593 local_irq_restore(flags);
594} 594}
595EXPORT_SYMBOL_GPL(rcu_idle_enter); 595EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +639,8 @@ void rcu_irq_exit(void)
639 if (rdtp->dynticks_nesting) 639 if (rdtp->dynticks_nesting)
640 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting); 640 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
641 else 641 else
642 rcu_eqs_enter_common(rdtp, oldval, true); 642 rcu_eqs_enter_common(oldval, true);
643 rcu_sysidle_enter(rdtp, 1); 643 rcu_sysidle_enter(1);
644 local_irq_restore(flags); 644 local_irq_restore(flags);
645} 645}
646 646
@@ -651,9 +651,10 @@ void rcu_irq_exit(void)
651 * we really have exited idle, and must do the appropriate accounting. 651 * we really have exited idle, and must do the appropriate accounting.
652 * The caller must have disabled interrupts. 652 * The caller must have disabled interrupts.
653 */ 653 */
654static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, 654static void rcu_eqs_exit_common(long long oldval, int user)
655 int user)
656{ 655{
656 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
657
657 rcu_dynticks_task_exit(); 658 rcu_dynticks_task_exit();
658 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */ 659 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
659 atomic_inc(&rdtp->dynticks); 660 atomic_inc(&rdtp->dynticks);
@@ -691,7 +692,7 @@ static void rcu_eqs_exit(bool user)
691 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; 692 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
692 } else { 693 } else {
693 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; 694 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
694 rcu_eqs_exit_common(rdtp, oldval, user); 695 rcu_eqs_exit_common(oldval, user);
695 } 696 }
696} 697}
697 698
@@ -712,7 +713,7 @@ void rcu_idle_exit(void)
712 713
713 local_irq_save(flags); 714 local_irq_save(flags);
714 rcu_eqs_exit(false); 715 rcu_eqs_exit(false);
715 rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0); 716 rcu_sysidle_exit(0);
716 local_irq_restore(flags); 717 local_irq_restore(flags);
717} 718}
718EXPORT_SYMBOL_GPL(rcu_idle_exit); 719EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +764,8 @@ void rcu_irq_enter(void)
763 if (oldval) 764 if (oldval)
764 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting); 765 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
765 else 766 else
766 rcu_eqs_exit_common(rdtp, oldval, true); 767 rcu_eqs_exit_common(oldval, true);
767 rcu_sysidle_exit(rdtp, 1); 768 rcu_sysidle_exit(1);
768 local_irq_restore(flags); 769 local_irq_restore(flags);
769} 770}
770 771