author     Paul E. McKenney <paul.mckenney@linaro.org>      2010-10-01 00:26:52 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2010-11-30 01:01:55 -0500
commit     9e571a82f0cb205a65a0ea41657f19f22b7fabb8 (patch)
tree       64a83a06e18d0fd82af91560a2bf0947e0e3d071
parent     24278d148316d2180be6df40e06db013d8b232b8 (diff)
rcu: add tracing for TINY_RCU and TINY_PREEMPT_RCU
Add tracing for the tiny RCU implementations, including statistics on
boosting in the case of TINY_PREEMPT_RCU and RCU_BOOST.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  init/Kconfig              1
-rw-r--r--  kernel/rcutiny.c          4
-rw-r--r--  kernel/rcutiny_plugin.h   232
3 files changed, 226 insertions(+), 11 deletions(-)
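The heart of the patch is the RCU_TRACE() wrapper added to kernel/rcutiny_plugin.h: every statistic (the per-ctrlblk qlen, the callback and boost counters) is updated through this macro, so the bookkeeping exists only when CONFIG_RCU_TRACE=y and compiles away completely otherwise. The standalone sketch below mirrors that pattern in ordinary user-space C so it can be built and run outside the kernel; the ctrlblk, enqueue() and invoke_all() names are hypothetical stand-ins, and only the macro shape follows the patch.

/*
 * Standalone illustration of the RCU_TRACE() pattern this patch introduces.
 * Plain user-space C, not kernel code.
 */
#include <stdio.h>

#define CONFIG_RCU_TRACE        /* comment out: every RCU_TRACE() vanishes */

#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt    /* tracing on: keep the statement */
#else
#define RCU_TRACE(stmt)         /* tracing off: zero code, zero storage */
#endif

struct ctrlblk {
        int pending;                    /* stand-in for the callback list */
        RCU_TRACE(long qlen);           /* counter exists only when tracing */
};

static struct ctrlblk cb;

static void enqueue(void)               /* cf. __call_rcu() in the patch */
{
        cb.pending++;
        RCU_TRACE(cb.qlen++);
}

static void invoke_all(void)            /* cf. rcu_process_callbacks() */
{
        RCU_TRACE(int n = 0);

        while (cb.pending) {
                cb.pending--;
                RCU_TRACE(n++);
        }
        RCU_TRACE(cb.qlen -= n);        /* cf. rcu_trace_sub_qlen() */
}

int main(void)
{
        enqueue();
        enqueue();
        invoke_all();
        RCU_TRACE(printf("qlen=%ld\n", cb.qlen));       /* prints qlen=0 */
        return 0;
}

Building this sketch with and without the CONFIG_RCU_TRACE line shows the effect the patch relies on: when tracing is configured out, the counters and their updates simply disappear from the object code.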
diff --git a/init/Kconfig b/init/Kconfig
index 48efefcac12a..929adf6cb6b4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -384,7 +384,6 @@ config PREEMPT_RCU
 
 config RCU_TRACE
         bool "Enable tracing for RCU"
-        depends on TREE_RCU || TREE_PREEMPT_RCU
         help
           This option provides tracing in RCU which presents stats
           in debugfs for debugging RCU implementation.
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 93d166582cbb..034493724749 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -144,6 +144,7 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
         struct rcu_head *next, *list;
         unsigned long flags;
+        RCU_TRACE(int cb_count = 0);
 
         /* If no RCU callbacks ready to invoke, just return. */
         if (&rcp->rcucblist == rcp->donetail)
@@ -169,7 +170,9 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
                 list->func(list);
                 local_bh_enable();
                 list = next;
+                RCU_TRACE(cb_count++);
         }
+        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
 }
 
 /*
@@ -252,6 +255,7 @@ static void __call_rcu(struct rcu_head *head,
         local_irq_save(flags);
         *rcp->curtail = head;
         rcp->curtail = &head->next;
+        RCU_TRACE(rcp->qlen++);
         local_irq_restore(flags);
 }
 
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 24f43165f222..f4e0df082d3c 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -23,12 +23,21 @@
  */
 
 #include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#ifdef CONFIG_RCU_TRACE
+#define RCU_TRACE(stmt) stmt
+#else /* #ifdef CONFIG_RCU_TRACE */
+#define RCU_TRACE(stmt)
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
         struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
         struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
         struct rcu_head **curtail;      /* ->next pointer of last CB. */
+        RCU_TRACE(long qlen);           /* Number of pending CBs. */
 };
 
 /* Definition for rcupdate control block. */
@@ -90,8 +99,26 @@ struct rcu_preempt_ctrlblk {
         u8 gpcpu;               /* Last grace period blocked by the CPU. */
         u8 completed;           /* Last grace period completed. */
                                 /* If all three are equal, RCU is idle. */
+#ifdef CONFIG_RCU_BOOST
         s8 boosted_this_gp;     /* Has boosting already happened? */
         unsigned long boost_time; /* When to start boosting (jiffies) */
+#endif /* #ifdef CONFIG_RCU_BOOST */
+#ifdef CONFIG_RCU_TRACE
+        unsigned long n_grace_periods;
+#ifdef CONFIG_RCU_BOOST
+        unsigned long n_tasks_boosted;
+        unsigned long n_exp_boosts;
+        unsigned long n_normal_boosts;
+        unsigned long n_normal_balk_blkd_tasks;
+        unsigned long n_normal_balk_gp_tasks;
+        unsigned long n_normal_balk_boost_tasks;
+        unsigned long n_normal_balk_boosted;
+        unsigned long n_normal_balk_notyet;
+        unsigned long n_normal_balk_nos;
+        unsigned long n_exp_balk_blkd_tasks;
+        unsigned long n_exp_balk_nos;
+#endif /* #ifdef CONFIG_RCU_BOOST */
+#endif /* #ifdef CONFIG_RCU_TRACE */
 };
 
 static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
@@ -170,6 +197,65 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t)
         return np;
 }
 
+#ifdef CONFIG_RCU_TRACE
+
+#ifdef CONFIG_RCU_BOOST
+static void rcu_initiate_boost_trace(void);
+static void rcu_initiate_exp_boost_trace(void);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Dump additional statistics for TINY_PREEMPT_RCU.
+ */
+static void show_tiny_preempt_stats(struct seq_file *m)
+{
+        seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
+                   rcu_preempt_ctrlblk.rcb.qlen,
+                   rcu_preempt_ctrlblk.n_grace_periods,
+                   rcu_preempt_ctrlblk.gpnum,
+                   rcu_preempt_ctrlblk.gpcpu,
+                   rcu_preempt_ctrlblk.completed,
+                   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
+                   "N."[!rcu_preempt_ctrlblk.gp_tasks],
+                   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
+#ifdef CONFIG_RCU_BOOST
+        seq_printf(m, " ttb=%c btg=",
+                   "B."[!rcu_preempt_ctrlblk.boost_tasks]);
+        switch (rcu_preempt_ctrlblk.boosted_this_gp) {
+        case -1:
+                seq_puts(m, "exp");
+                break;
+        case 0:
+                seq_puts(m, "no");
+                break;
+        case 1:
+                seq_puts(m, "done");
+                break;
+        default:
+                seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp);
+        }
+        seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
+                   rcu_preempt_ctrlblk.n_tasks_boosted,
+                   rcu_preempt_ctrlblk.n_exp_boosts,
+                   rcu_preempt_ctrlblk.n_normal_boosts,
+                   (int)(jiffies & 0xffff),
+                   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
+        seq_printf(m, " %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n",
+                   "normal balk",
+                   rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks,
+                   rcu_preempt_ctrlblk.n_normal_balk_gp_tasks,
+                   rcu_preempt_ctrlblk.n_normal_balk_boost_tasks,
+                   rcu_preempt_ctrlblk.n_normal_balk_boosted,
+                   rcu_preempt_ctrlblk.n_normal_balk_notyet,
+                   rcu_preempt_ctrlblk.n_normal_balk_nos);
+        seq_printf(m, " exp balk: bt=%lu nos=%lu\n",
+                   rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks,
+                   rcu_preempt_ctrlblk.n_exp_balk_nos);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
 #ifdef CONFIG_RCU_BOOST
 
 #include "rtmutex_common.h"
@@ -197,6 +283,7 @@ static int rcu_boost(void)
         t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
         raw_local_irq_restore(flags);
         rt_mutex_lock(&mtx);
+        RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);
         rt_mutex_unlock(&mtx);
         return rcu_preempt_ctrlblk.boost_tasks != NULL;
 }
@@ -206,16 +293,27 @@ static int rcu_boost(void)
  * the current grace period, and, if so, tell the rcu_kthread_task to
  * start boosting them.  If there is an expedited boost in progress,
  * we wait for it to complete.
+ *
+ * If there are no blocked readers blocking the current grace period,
+ * return 0 to let the caller know, otherwise return 1.  Note that this
+ * return value is independent of whether or not boosting was done.
  */
-static void rcu_initiate_boost(void)
+static int rcu_initiate_boost(void)
 {
+        if (!rcu_preempt_blocked_readers_cgp()) {
+                RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++);
+                return 0;
+        }
         if (rcu_preempt_ctrlblk.gp_tasks != NULL &&
             rcu_preempt_ctrlblk.boost_tasks == NULL &&
             rcu_preempt_ctrlblk.boosted_this_gp == 0 &&
             ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) {
                 rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks;
                 invoke_rcu_kthread();
-        }
+                RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
+        } else
+                RCU_TRACE(rcu_initiate_boost_trace());
+        return 1;
 }
 
 /*
@@ -231,7 +329,9 @@ static void rcu_initiate_expedited_boost(void)
                         rcu_preempt_ctrlblk.blkd_tasks.next;
                 rcu_preempt_ctrlblk.boosted_this_gp = -1;
                 invoke_rcu_kthread();
-        }
+                RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
+        } else
+                RCU_TRACE(rcu_initiate_exp_boost_trace());
         raw_local_irq_restore(flags);
 }
 
@@ -258,10 +358,13 @@ static int rcu_boost(void)
 }
 
 /*
- * If there is no RCU priority boosting, we don't initiate boosting.
+ * If there is no RCU priority boosting, we don't initiate boosting,
+ * but we do indicate whether there are blocked readers blocking the
+ * current grace period.
  */
-static void rcu_initiate_boost(void)
+static int rcu_initiate_boost(void)
 {
+        return rcu_preempt_blocked_readers_cgp();
 }
 
 /*
@@ -308,13 +411,14 @@ static void rcu_preempt_cpu_qs(void)
         current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 
         /* If there is no GP then there is nothing more to do.  */
-        if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+        if (!rcu_preempt_gp_in_progress())
                 return;
-        /* If there are blocked readers, go check up on boosting. */
-        if (rcu_preempt_blocked_readers_cgp()) {
-                rcu_initiate_boost();
+        /*
+         * Check up on boosting.  If there are no readers blocking the
+         * current grace period, leave.
+         */
+        if (rcu_initiate_boost())
                 return;
-        }
 
         /* Advance callbacks. */
         rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
@@ -339,6 +443,7 @@ static void rcu_preempt_start_gp(void)
 
         /* Official start of GP. */
         rcu_preempt_ctrlblk.gpnum++;
+        RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
 
         /* Any blocked RCU readers block new GP. */
         if (rcu_preempt_blocked_readers_any())
@@ -591,6 +696,7 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
         local_irq_save(flags);
         *rcu_preempt_ctrlblk.nexttail = head;
         rcu_preempt_ctrlblk.nexttail = &head->next;
+        RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
         rcu_preempt_start_gp();  /* checks to see if GP needed. */
         local_irq_restore(flags);
 }
@@ -747,6 +853,18 @@ void exit_rcu(void)
 
 #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
 
+#ifdef CONFIG_RCU_TRACE
+
+/*
+ * Because preemptible RCU does not exist, it is not necessary to
+ * dump out its statistics.
+ */
+static void show_tiny_preempt_stats(struct seq_file *m)
+{
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
 /*
  * Because preemptible RCU does not exist, it is never necessary to
  * boost preempted RCU readers.
@@ -802,3 +920,97 @@ void __init rcu_scheduler_starting(void)
 #else /* #ifdef CONFIG_RCU_BOOST */
 #define RCU_BOOST_PRIO 1
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+#ifdef CONFIG_RCU_TRACE
+
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_initiate_boost_trace(void)
+{
+        if (rcu_preempt_ctrlblk.gp_tasks == NULL)
+                rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++;
+        else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
+                rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++;
+        else if (rcu_preempt_ctrlblk.boosted_this_gp != 0)
+                rcu_preempt_ctrlblk.n_normal_balk_boosted++;
+        else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
+                rcu_preempt_ctrlblk.n_normal_balk_notyet++;
+        else
+                rcu_preempt_ctrlblk.n_normal_balk_nos++;
+}
+
+static void rcu_initiate_exp_boost_trace(void)
+{
+        if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
+                rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++;
+        else
+                rcu_preempt_ctrlblk.n_exp_balk_nos++;
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
+static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
+{
+        unsigned long flags;
+
+        raw_local_irq_save(flags);
+        rcp->qlen -= n;
+        raw_local_irq_restore(flags);
+}
+
+/*
+ * Dump statistics for TINY_RCU, such as they are.
+ */
+static int show_tiny_stats(struct seq_file *m, void *unused)
+{
+        show_tiny_preempt_stats(m);
+        seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
+        seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
+        return 0;
+}
+
+static int show_tiny_stats_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, show_tiny_stats, NULL);
+}
+
+static const struct file_operations show_tiny_stats_fops = {
+        .owner = THIS_MODULE,
+        .open = show_tiny_stats_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = single_release,
+};
+
+static struct dentry *rcudir;
+
+static int __init rcutiny_trace_init(void)
+{
+        struct dentry *retval;
+
+        rcudir = debugfs_create_dir("rcu", NULL);
+        if (!rcudir)
+                goto free_out;
+        retval = debugfs_create_file("rcudata", 0444, rcudir,
+                                     NULL, &show_tiny_stats_fops);
+        if (!retval)
+                goto free_out;
+        return 0;
+free_out:
+        debugfs_remove_recursive(rcudir);
+        return 1;
+}
+
+static void __exit rcutiny_trace_cleanup(void)
+{
+        debugfs_remove_recursive(rcudir);
+}
+
+module_init(rcutiny_trace_init);
+module_exit(rcutiny_trace_cleanup);
+
+MODULE_AUTHOR("Paul E. McKenney");
+MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
+MODULE_LICENSE("GPL");
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
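With CONFIG_RCU_TRACE=y, rcutiny_trace_init() above publishes all of these counters through a single debugfs file, rcu/rcudata. A minimal user-space reader might look like the following sketch; it assumes debugfs is mounted at the conventional /sys/kernel/debug and simply echoes whatever show_tiny_stats() (and, on TINY_PREEMPT_RCU kernels, show_tiny_preempt_stats()) formatted into the seq_file.

/*
 * Minimal reader for the debugfs file created by rcutiny_trace_init().
 * Assumes debugfs is mounted at /sys/kernel/debug (e.g. via
 * "mount -t debugfs none /sys/kernel/debug") and CONFIG_RCU_TRACE=y.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/rcu/rcudata", "r");
        char line[256];

        if (!f) {
                perror("/sys/kernel/debug/rcu/rcudata");
                return 1;
        }
        /*
         * Expect one "rcu_sched: qlen: ..." and one "rcu_bh: qlen: ..."
         * line, preceded by an "rcu_preempt: ..." block on
         * TINY_PREEMPT_RCU kernels.
         */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}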