summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYafang Shao <laoar.shao@gmail.com>2019-03-26 08:13:11 -0400
committerSteven Rostedt (VMware) <rostedt@goodmis.org>2019-04-08 09:22:51 -0400
commit4f5fbd78a7b40bab538ae0d316363530da751e42 (patch)
treec52bf4c503efed8061343460556bb8aa2bfe8c4f
parent2a09b5de235a6b5f76193a2ed46546a2944f98bf (diff)
rcu: validate arguments for rcu tracepoints
When CONFIG_RCU_TRACE is not set, all these tracepoints are defined as do-nothing macros. We'd better make them inline functions that take proper arguments. Because RCU_TRACE() is likewise defined as a do-nothing macro when CONFIG_RCU_TRACE is not set, we can clean it up as well. Link: http://lkml.kernel.org/r/1553602391-11926-4-git-send-email-laoar.shao@gmail.com Reviewed-by: Paul E. McKenney <paulmck@linux.ibm.com> Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
-rw-r--r--include/trace/events/rcu.h81
-rw-r--r--kernel/rcu/rcu.h9
-rw-r--r--kernel/rcu/tree.c8
3 files changed, 31 insertions, 67 deletions
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index f0c4d10e614b..e3f357b89432 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -7,6 +7,12 @@
7 7
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10#ifdef CONFIG_RCU_TRACE
11#define TRACE_EVENT_RCU TRACE_EVENT
12#else
13#define TRACE_EVENT_RCU TRACE_EVENT_NOP
14#endif
15
10/* 16/*
11 * Tracepoint for start/end markers used for utilization calculations. 17 * Tracepoint for start/end markers used for utilization calculations.
12 * By convention, the string is of the following forms: 18 * By convention, the string is of the following forms:
@@ -35,8 +41,6 @@ TRACE_EVENT(rcu_utilization,
35 TP_printk("%s", __entry->s) 41 TP_printk("%s", __entry->s)
36); 42);
37 43
38#ifdef CONFIG_RCU_TRACE
39
40#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 44#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
41 45
42/* 46/*
@@ -62,7 +66,7 @@ TRACE_EVENT(rcu_utilization,
62 * "end": End a grace period. 66 * "end": End a grace period.
63 * "cpuend": CPU first notices a grace-period end. 67 * "cpuend": CPU first notices a grace-period end.
64 */ 68 */
65TRACE_EVENT(rcu_grace_period, 69TRACE_EVENT_RCU(rcu_grace_period,
66 70
67 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent), 71 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
68 72
@@ -101,7 +105,7 @@ TRACE_EVENT(rcu_grace_period,
101 * "Cleanup": Clean up rcu_node structure after previous GP. 105 * "Cleanup": Clean up rcu_node structure after previous GP.
102 * "CleanupMore": Clean up, and another GP is needed. 106 * "CleanupMore": Clean up, and another GP is needed.
103 */ 107 */
104TRACE_EVENT(rcu_future_grace_period, 108TRACE_EVENT_RCU(rcu_future_grace_period,
105 109
106 TP_PROTO(const char *rcuname, unsigned long gp_seq, 110 TP_PROTO(const char *rcuname, unsigned long gp_seq,
107 unsigned long gp_seq_req, u8 level, int grplo, int grphi, 111 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
@@ -141,7 +145,7 @@ TRACE_EVENT(rcu_future_grace_period,
141 * rcu_node structure, and the mask of CPUs that will be waited for. 145 * rcu_node structure, and the mask of CPUs that will be waited for.
142 * All but the type of RCU are extracted from the rcu_node structure. 146 * All but the type of RCU are extracted from the rcu_node structure.
143 */ 147 */
144TRACE_EVENT(rcu_grace_period_init, 148TRACE_EVENT_RCU(rcu_grace_period_init,
145 149
146 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level, 150 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
147 int grplo, int grphi, unsigned long qsmask), 151 int grplo, int grphi, unsigned long qsmask),
@@ -186,7 +190,7 @@ TRACE_EVENT(rcu_grace_period_init,
186 * "endwake": Woke piggybackers up. 190 * "endwake": Woke piggybackers up.
187 * "done": Someone else did the expedited grace period for us. 191 * "done": Someone else did the expedited grace period for us.
188 */ 192 */
189TRACE_EVENT(rcu_exp_grace_period, 193TRACE_EVENT_RCU(rcu_exp_grace_period,
190 194
191 TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent), 195 TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
192 196
@@ -218,7 +222,7 @@ TRACE_EVENT(rcu_exp_grace_period,
218 * "nxtlvl": Advance to next level of rcu_node funnel 222 * "nxtlvl": Advance to next level of rcu_node funnel
219 * "wait": Wait for someone else to do expedited GP 223 * "wait": Wait for someone else to do expedited GP
220 */ 224 */
221TRACE_EVENT(rcu_exp_funnel_lock, 225TRACE_EVENT_RCU(rcu_exp_funnel_lock,
222 226
223 TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi, 227 TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
224 const char *gpevent), 228 const char *gpevent),
@@ -269,7 +273,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
269 * "WaitQueue": Enqueue partially done, timed wait for it to complete. 273 * "WaitQueue": Enqueue partially done, timed wait for it to complete.
270 * "WokeQueue": Partial enqueue now complete. 274 * "WokeQueue": Partial enqueue now complete.
271 */ 275 */
272TRACE_EVENT(rcu_nocb_wake, 276TRACE_EVENT_RCU(rcu_nocb_wake,
273 277
274 TP_PROTO(const char *rcuname, int cpu, const char *reason), 278 TP_PROTO(const char *rcuname, int cpu, const char *reason),
275 279
@@ -297,7 +301,7 @@ TRACE_EVENT(rcu_nocb_wake,
297 * include SRCU), the grace-period number that the task is blocking 301 * include SRCU), the grace-period number that the task is blocking
298 * (the current or the next), and the task's PID. 302 * (the current or the next), and the task's PID.
299 */ 303 */
300TRACE_EVENT(rcu_preempt_task, 304TRACE_EVENT_RCU(rcu_preempt_task,
301 305
302 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq), 306 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
303 307
@@ -324,7 +328,7 @@ TRACE_EVENT(rcu_preempt_task,
324 * read-side critical section exiting that critical section. Track the 328 * read-side critical section exiting that critical section. Track the
325 * type of RCU (which one day might include SRCU) and the task's PID. 329 * type of RCU (which one day might include SRCU) and the task's PID.
326 */ 330 */
327TRACE_EVENT(rcu_unlock_preempted_task, 331TRACE_EVENT_RCU(rcu_unlock_preempted_task,
328 332
329 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid), 333 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
330 334
@@ -353,7 +357,7 @@ TRACE_EVENT(rcu_unlock_preempted_task,
353 * whether there are any blocked tasks blocking the current grace period. 357 * whether there are any blocked tasks blocking the current grace period.
354 * All but the type of RCU are extracted from the rcu_node structure. 358 * All but the type of RCU are extracted from the rcu_node structure.
355 */ 359 */
356TRACE_EVENT(rcu_quiescent_state_report, 360TRACE_EVENT_RCU(rcu_quiescent_state_report,
357 361
358 TP_PROTO(const char *rcuname, unsigned long gp_seq, 362 TP_PROTO(const char *rcuname, unsigned long gp_seq,
359 unsigned long mask, unsigned long qsmask, 363 unsigned long mask, unsigned long qsmask,
@@ -396,7 +400,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
396 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking 400 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
397 * a CPU that has been in dyntick-idle mode for too long. 401 * a CPU that has been in dyntick-idle mode for too long.
398 */ 402 */
399TRACE_EVENT(rcu_fqs, 403TRACE_EVENT_RCU(rcu_fqs,
400 404
401 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent), 405 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
402 406
@@ -436,7 +440,7 @@ TRACE_EVENT(rcu_fqs,
436 * events use two separate counters, and that the "++=" and "--=" events 440 * events use two separate counters, and that the "++=" and "--=" events
437 * for irq/NMI will change the counter by two, otherwise by one. 441 * for irq/NMI will change the counter by two, otherwise by one.
438 */ 442 */
439TRACE_EVENT(rcu_dyntick, 443TRACE_EVENT_RCU(rcu_dyntick,
440 444
441 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks), 445 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
442 446
@@ -468,7 +472,7 @@ TRACE_EVENT(rcu_dyntick,
468 * number of lazy callbacks queued, and the fourth element is the 472 * number of lazy callbacks queued, and the fourth element is the
469 * total number of callbacks queued. 473 * total number of callbacks queued.
470 */ 474 */
471TRACE_EVENT(rcu_callback, 475TRACE_EVENT_RCU(rcu_callback,
472 476
473 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, 477 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
474 long qlen), 478 long qlen),
@@ -504,7 +508,7 @@ TRACE_EVENT(rcu_callback,
504 * the fourth argument is the number of lazy callbacks queued, and the 508 * the fourth argument is the number of lazy callbacks queued, and the
505 * fifth argument is the total number of callbacks queued. 509 * fifth argument is the total number of callbacks queued.
506 */ 510 */
507TRACE_EVENT(rcu_kfree_callback, 511TRACE_EVENT_RCU(rcu_kfree_callback,
508 512
509 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, 513 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
510 long qlen_lazy, long qlen), 514 long qlen_lazy, long qlen),
@@ -539,7 +543,7 @@ TRACE_EVENT(rcu_kfree_callback,
539 * the total number of callbacks queued, and the fourth argument is 543 * the total number of callbacks queued, and the fourth argument is
540 * the current RCU-callback batch limit. 544 * the current RCU-callback batch limit.
541 */ 545 */
542TRACE_EVENT(rcu_batch_start, 546TRACE_EVENT_RCU(rcu_batch_start,
543 547
544 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), 548 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
545 549
@@ -569,7 +573,7 @@ TRACE_EVENT(rcu_batch_start,
569 * The first argument is the type of RCU, and the second argument is 573 * The first argument is the type of RCU, and the second argument is
570 * a pointer to the RCU callback itself. 574 * a pointer to the RCU callback itself.
571 */ 575 */
572TRACE_EVENT(rcu_invoke_callback, 576TRACE_EVENT_RCU(rcu_invoke_callback,
573 577
574 TP_PROTO(const char *rcuname, struct rcu_head *rhp), 578 TP_PROTO(const char *rcuname, struct rcu_head *rhp),
575 579
@@ -598,7 +602,7 @@ TRACE_EVENT(rcu_invoke_callback,
598 * is the offset of the callback within the enclosing RCU-protected 602 * is the offset of the callback within the enclosing RCU-protected
599 * data structure. 603 * data structure.
600 */ 604 */
601TRACE_EVENT(rcu_invoke_kfree_callback, 605TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
602 606
603 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), 607 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
604 608
@@ -631,7 +635,7 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
631 * and the sixth argument (risk) is the return value from 635 * and the sixth argument (risk) is the return value from
632 * rcu_is_callbacks_kthread(). 636 * rcu_is_callbacks_kthread().
633 */ 637 */
634TRACE_EVENT(rcu_batch_end, 638TRACE_EVENT_RCU(rcu_batch_end,
635 639
636 TP_PROTO(const char *rcuname, int callbacks_invoked, 640 TP_PROTO(const char *rcuname, int callbacks_invoked,
637 char cb, char nr, char iit, char risk), 641 char cb, char nr, char iit, char risk),
@@ -673,7 +677,7 @@ TRACE_EVENT(rcu_batch_end,
673 * callback address can be NULL. 677 * callback address can be NULL.
674 */ 678 */
675#define RCUTORTURENAME_LEN 8 679#define RCUTORTURENAME_LEN 8
676TRACE_EVENT(rcu_torture_read, 680TRACE_EVENT_RCU(rcu_torture_read,
677 681
678 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, 682 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
679 unsigned long secs, unsigned long c_old, unsigned long c), 683 unsigned long secs, unsigned long c_old, unsigned long c),
@@ -721,7 +725,7 @@ TRACE_EVENT(rcu_torture_read,
721 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument 725 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
722 * is the count of remaining callbacks, and "done" is the piggybacking count. 726 * is the count of remaining callbacks, and "done" is the piggybacking count.
723 */ 727 */
724TRACE_EVENT(rcu_barrier, 728TRACE_EVENT_RCU(rcu_barrier,
725 729
726 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), 730 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
727 731
@@ -748,41 +752,6 @@ TRACE_EVENT(rcu_barrier,
748 __entry->done) 752 __entry->done)
749); 753);
750 754
751#else /* #ifdef CONFIG_RCU_TRACE */
752
753#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
754#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
755 level, grplo, grphi, event) \
756 do { } while (0)
757#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
758 qsmask) do { } while (0)
759#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
760 do { } while (0)
761#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
762 do { } while (0)
763#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
764#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
765#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
766#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
767 grplo, grphi, gp_tasks) do { } \
768 while (0)
769#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
770#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
771#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
772#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
773 do { } while (0)
774#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
775 do { } while (0)
776#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
777#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
778#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
779 do { } while (0)
780#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
781 do { } while (0)
782#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
783
784#endif /* #else #ifdef CONFIG_RCU_TRACE */
785
786#endif /* _TRACE_RCU_H */ 755#endif /* _TRACE_RCU_H */
787 756
788/* This part must be outside protection */ 757/* This part must be outside protection */
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index acee72c0b24b..442ace406ac9 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -11,11 +11,6 @@
11#define __LINUX_RCU_H 11#define __LINUX_RCU_H
12 12
13#include <trace/events/rcu.h> 13#include <trace/events/rcu.h>
14#ifdef CONFIG_RCU_TRACE
15#define RCU_TRACE(stmt) stmt
16#else /* #ifdef CONFIG_RCU_TRACE */
17#define RCU_TRACE(stmt)
18#endif /* #else #ifdef CONFIG_RCU_TRACE */
19 14
20/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */ 15/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
21#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) 16#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
@@ -216,12 +211,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
216 211
217 rcu_lock_acquire(&rcu_callback_map); 212 rcu_lock_acquire(&rcu_callback_map);
218 if (__is_kfree_rcu_offset(offset)) { 213 if (__is_kfree_rcu_offset(offset)) {
219 RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);) 214 trace_rcu_invoke_kfree_callback(rn, head, offset);
220 kfree((void *)head - offset); 215 kfree((void *)head - offset);
221 rcu_lock_release(&rcu_callback_map); 216 rcu_lock_release(&rcu_callback_map);
222 return true; 217 return true;
223 } else { 218 } else {
224 RCU_TRACE(trace_rcu_invoke_callback(rn, head);) 219 trace_rcu_invoke_callback(rn, head);
225 f = head->func; 220 f = head->func;
226 WRITE_ONCE(head->func, (rcu_callback_t)0L); 221 WRITE_ONCE(head->func, (rcu_callback_t)0L);
227 f(head); 222 f(head);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index acd6ccf56faf..906563a1cdea 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2352,14 +2352,14 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
2352 */ 2352 */
2353int rcutree_dying_cpu(unsigned int cpu) 2353int rcutree_dying_cpu(unsigned int cpu)
2354{ 2354{
2355 RCU_TRACE(bool blkd;) 2355 bool blkd;
2356 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);) 2356 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2357 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) 2357 struct rcu_node *rnp = rdp->mynode;
2358 2358
2359 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2359 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2360 return 0; 2360 return 0;
2361 2361
2362 RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) 2362 blkd = !!(rnp->qsmask & rdp->grpmask);
2363 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, 2363 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
2364 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 2364 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2365 return 0; 2365 return 0;