author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-09-13 12:15:10 -0400
committer  Ingo Molnar <mingo@elte.hu>	2009-09-17 18:06:33 -0400
commit     c3422bea5f09b0e85704f51f2b01271630b8940b
tree       fc4a18241b0f7f5d71211f6787428197769cb9f5	/kernel/rcutree.c
parent     b0e165c035b13e1074fa0b555318bd9cb7102558
rcu: Simplify rcu_read_unlock_special() quiescent-state accounting
The earlier approach required two scheduling-clock ticks to note a
preemptable-RCU quiescent state when the scheduling-clock interrupt
was unlucky enough to always interrupt an RCU read-side critical
section. With this change, the quiescent state is instead noted by
the outermost rcu_read_unlock() immediately following the first
scheduling-clock tick, or, alternatively, by the first subsequent
context switch. Therefore, this change also speeds up grace periods.

Suggested-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12528585111945-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
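[Editor's note] The patch below drops the local_irq_save()/restore()
around the quiescent-state accounting in rcu_sched_qs() and rcu_bh_qs(),
so these stores can now race with an interrupt on the same CPU. The new
ordering writes the passed_quiesc_completed stamp first, then sets the
passed_quiesc flag, with barrier() between them: an interrupt handler
that observes the flag is then guaranteed to observe a valid stamp.
Because the consumers run on the same CPU, a compiler barrier (rather
than an SMP memory barrier) is sufficient. Below is a minimal standalone
sketch of this idiom; the structure and field names mirror
kernel/rcutree.c, but barrier(), rcu_sched_qs_sketch(), and the
userspace scaffolding are stand-ins, not the kernel's code.

/*
 * Standalone sketch of the stamp-then-flag store ordering used in
 * the patch below (userspace stand-in, not kernel code).
 */
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct rcu_data {
	long completed;			/* last completed grace-period number */
	long passed_quiesc_completed;	/* ->completed when quiescent state seen */
	int passed_quiesc;		/* quiescent state passed? */
};

static struct rcu_data rcu_sched_data;

static void rcu_sched_qs_sketch(void)
{
	struct rcu_data *rdp = &rcu_sched_data;

	rdp->passed_quiesc_completed = rdp->completed;
	barrier();		/* stamp first, flag second: an interrupt */
	rdp->passed_quiesc = 1;	/* that sees the flag sees a valid stamp */
}

int main(void)
{
	rcu_sched_data.completed = 42;
	rcu_sched_qs_sketch();
	printf("passed_quiesc=%d, passed_quiesc_completed=%ld\n",
	       rcu_sched_data.passed_quiesc,
	       rcu_sched_data.passed_quiesc_completed);
	return 0;
}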
Diffstat (limited to 'kernel/rcutree.c')
 -rw-r--r--	kernel/rcutree.c	15
 1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e9a4ae94647f..6c99553e9f15 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -107,27 +107,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
  */
 void rcu_sched_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_sched_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	rcu_preempt_qs(cpu);
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
+	rcu_preempt_note_context_switch(cpu);
 }
 
 void rcu_bh_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
 }
 
 #ifdef CONFIG_NO_HZ
@@ -615,6 +611,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
+	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
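[Editor's note] The second hunk adds a sanity check: if rcu_start_gp()
runs while ->signaled still reads RCU_GP_INIT, a previous grace-period
initialization is somehow still in flight, and WARN_ON_ONCE() complains
exactly once. For readers unfamiliar with the macro, here is a rough
userspace stand-in for its warn-once behavior; the real kernel macro
additionally prints a stack trace and other diagnostics.

#include <stdio.h>

/*
 * Rough userspace stand-in for the kernel's WARN_ON_ONCE(): evaluate
 * the condition, warn the first time it is ever true, and return the
 * condition's value so it can be used inside an if ().
 */
#define WARN_ON_ONCE(cond)						\
	({								\
		static int __warned;					\
		int __ret = !!(cond);					\
		if (__ret && !__warned) {				\
			__warned = 1;					\
			fprintf(stderr, "WARNING at %s:%d: %s\n",	\
				__FILE__, __LINE__, #cond);		\
		}							\
		__ret;							\
	})

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		WARN_ON_ONCE(i > 0);	/* warns only once, at i == 1 */
	return 0;
}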