diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-10-22 10:12:34 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2011-12-11 13:31:28 -0500 |
commit | b40d293eb36ba40cd428b6d178db911174689702 (patch) | |
tree | 78109a13c0bf86608f3caaea547fd9e948aee743 /kernel/rcutree_plugin.h | |
parent | 34240697d619c439c55f21989680024dcb604aab (diff) |
rcu: Omit self-awaken when setting up expedited grace period
When setting up an expedited grace period, if there are no readers, the
initiating task will awaken itself. This commit removes this useless self-awakening.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 16 |
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 708dc579634d..0f095d1cc16d 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -410,7 +410,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t) | |||
410 | * then we need to report up the rcu_node hierarchy. | 410 | * then we need to report up the rcu_node hierarchy. |
411 | */ | 411 | */ |
412 | if (!empty_exp && empty_exp_now) | 412 | if (!empty_exp && empty_exp_now) |
413 | rcu_report_exp_rnp(&rcu_preempt_state, rnp); | 413 | rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); |
414 | } else { | 414 | } else { |
415 | local_irq_restore(flags); | 415 | local_irq_restore(flags); |
416 | } | 416 | } |
@@ -732,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | |||
732 | * recursively up the tree. (Calm down, calm down, we do the recursion | 732 | * recursively up the tree. (Calm down, calm down, we do the recursion |
733 | * iteratively!) | 733 | * iteratively!) |
734 | * | 734 | * |
735 | * Most callers will set the "wake" flag, but the task initiating the | ||
736 | * expedited grace period need not wake itself. | ||
737 | * | ||
735 | * Caller must hold sync_rcu_preempt_exp_mutex. | 738 | * Caller must hold sync_rcu_preempt_exp_mutex. |
736 | */ | 739 | */ |
737 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | 740 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, |
741 | bool wake) | ||
738 | { | 742 | { |
739 | unsigned long flags; | 743 | unsigned long flags; |
740 | unsigned long mask; | 744 | unsigned long mask; |
@@ -747,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | |||
747 | } | 751 | } |
748 | if (rnp->parent == NULL) { | 752 | if (rnp->parent == NULL) { |
749 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 753 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
750 | wake_up(&sync_rcu_preempt_exp_wq); | 754 | if (wake) |
755 | wake_up(&sync_rcu_preempt_exp_wq); | ||
751 | break; | 756 | break; |
752 | } | 757 | } |
753 | mask = rnp->grpmask; | 758 | mask = rnp->grpmask; |
@@ -780,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | |||
780 | must_wait = 1; | 785 | must_wait = 1; |
781 | } | 786 | } |
782 | if (!must_wait) | 787 | if (!must_wait) |
783 | rcu_report_exp_rnp(rsp, rnp); | 788 | rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */ |
784 | } | 789 | } |
785 | 790 | ||
786 | /* | 791 | /* |
@@ -1072,7 +1077,8 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | |||
1072 | * report on tasks preempted in RCU read-side critical sections during | 1077 | * report on tasks preempted in RCU read-side critical sections during |
1073 | * expedited RCU grace periods. | 1078 | * expedited RCU grace periods. |
1074 | */ | 1079 | */ |
1075 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | 1080 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, |
1081 | bool wake) | ||
1076 | { | 1082 | { |
1077 | return; | 1083 | return; |
1078 | } | 1084 | } |