about summary refs log tree commit diff stats
path: root/kernel/rcu/tree_trace.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2015-06-29 20:06:39 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2015-07-17 17:59:00 -0400
commit2cd6ffafec066118365f6d7eb7a42ea16c1f032c (patch)
tree39656499f5a78c4b61528904e3464c2403a0b83b /kernel/rcu/tree_trace.c
parent704dd435ac7eaefa89fcd82fd2876b8330e00ff3 (diff)
rcu: Extend expedited funnel locking to rcu_data structure
The strictly rcu_node based funnel-locking scheme works well in many cases, but systems with CONFIG_RCU_FANOUT_LEAF=64 won't necessarily get all that much concurrency. This commit therefore extends the funnel locking into the per-CPU rcu_data structure, providing concurrency equal to the number of CPUs. Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcu/tree_trace.c')
-rw-r--r--kernel/rcu/tree_trace.c3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index d9982a2ce305..ec62369f1b02 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -185,11 +185,12 @@ static int show_rcuexp(struct seq_file *m, void *v)
185{ 185{
186 struct rcu_state *rsp = (struct rcu_state *)m->private; 186 struct rcu_state *rsp = (struct rcu_state *)m->private;
187 187
188 seq_printf(m, "t=%lu tf=%lu wd1=%lu wd2=%lu n=%lu enq=%d sc=%lu\n", 188 seq_printf(m, "t=%lu tf=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
189 rsp->expedited_sequence, 189 rsp->expedited_sequence,
190 atomic_long_read(&rsp->expedited_tryfail), 190 atomic_long_read(&rsp->expedited_tryfail),
191 atomic_long_read(&rsp->expedited_workdone1), 191 atomic_long_read(&rsp->expedited_workdone1),
192 atomic_long_read(&rsp->expedited_workdone2), 192 atomic_long_read(&rsp->expedited_workdone2),
193 atomic_long_read(&rsp->expedited_workdone3),
193 atomic_long_read(&rsp->expedited_normal), 194 atomic_long_read(&rsp->expedited_normal),
194 atomic_read(&rsp->expedited_need_qs), 195 atomic_read(&rsp->expedited_need_qs),
195 rsp->expedited_sequence / 2); 196 rsp->expedited_sequence / 2);