author      Thomas Gleixner <tglx@linutronix.de>            2011-10-22 10:12:34 -0400
committer   Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2011-12-11 13:31:28 -0500
commit      b40d293eb36ba40cd428b6d178db911174689702 (patch)
tree        78109a13c0bf86608f3caaea547fd9e948aee743 /kernel
parent      34240697d619c439c55f21989680024dcb604aab (diff)
rcu: Omit self-awaken when setting up expedited grace period
When setting up an expedited grace period, if there are no readers, the
task will awaken itself. This commit removes this useless self-awakening.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/rcutree.c        |  2 +-
-rw-r--r--   kernel/rcutree.h        |  3 ++-
-rw-r--r--   kernel/rcutree_plugin.h | 16 +++++++++++-----
3 files changed, 14 insertions(+), 7 deletions(-)
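
The patch adds a "wake" flag so the completion reporter can skip the wakeup
when the caller is the very task that would be awakened: that task re-checks
the completion condition before sleeping anyway, so the self-wakeup is pure
overhead. As a rough illustration only, here is a minimal userspace sketch of
the same pattern using POSIX condition variables; it is an analogue under
assumed names (report_done, wait_for_done), not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool done;

/* Report completion.  Pass wake=false when the caller is the task that
 * will itself re-check "done" before sleeping; signalling it would be a
 * useless self-awakening, which is the case the patch eliminates. */
static void report_done(bool wake)
{
        pthread_mutex_lock(&lock);
        done = true;
        pthread_mutex_unlock(&lock);
        if (wake)
                pthread_cond_signal(&done_cv);
}

/* Wait for completion, re-checking the condition before each sleep. */
static void wait_for_done(void)
{
        pthread_mutex_lock(&lock);
        while (!done)
                pthread_cond_wait(&done_cv, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        /* The initiator found nothing outstanding: report completion,
         * but skip the wakeup -- it is about to test "done" anyway. */
        report_done(false);
        wait_for_done();        /* falls straight through, never sleeps */
        printf("completed without a self-wakeup\n");
        return 0;
}

This mirrors the two call sites below: rcu_read_unlock_special() and CPU
offlining pass true because they may be completing the grace period on
behalf of some other waiting task, while sync_rcu_preempt_exp_init() passes
false because the initiating task is the only possible waiter.
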
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 69b6cdd4f944..8afb2e89745b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1320,7 +1320,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	else
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
-		rcu_report_exp_rnp(rsp, rnp);
+		rcu_report_exp_rnp(rsp, rnp, true);
 	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 0963fa1541ac..fd2f87db2ab1 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -444,7 +444,8 @@ static void rcu_preempt_check_callbacks(int cpu);
 static void rcu_preempt_process_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake);
 #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
 static int rcu_preempt_pending(int cpu);
 static int rcu_preempt_needs_cpu(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 708dc579634d..0f095d1cc16d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -410,7 +410,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
-			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -732,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -747,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 	}
 	if (rnp->parent == NULL) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		wake_up(&sync_rcu_preempt_exp_wq);
+		if (wake)
+			wake_up(&sync_rcu_preempt_exp_wq);
 		break;
 	}
 	mask = rnp->grpmask;
@@ -780,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 		must_wait = 1;
 	}
 	if (!must_wait)
-		rcu_report_exp_rnp(rsp, rnp);
+		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
 /*
@@ -1072,7 +1077,8 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
 	return;
 }