Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/power.h      |   3
-rw-r--r--  include/trace/events/printk.h     |  41
-rw-r--r--  include/trace/events/rcu.h        |  63
-rw-r--r--  include/trace/events/regmap.h     |  36
-rw-r--r--  include/trace/events/sched.h      |  77
-rw-r--r--  include/trace/events/signal.h     |  85
-rw-r--r--  include/trace/events/sunrpc.h     | 177
-rw-r--r--  include/trace/events/writeback.h  |   7
8 files changed, 349 insertions, 140 deletions
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 1bcc2a8c00e2..cae9a94f025d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -65,7 +65,6 @@ TRACE_EVENT(machine_suspend,
 	TP_printk("state=%lu", (unsigned long)__entry->state)
 );
 
-/* This code will be removed after deprecation time exceeded (2.6.41) */
 #ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
 
 /*
@@ -151,6 +150,8 @@ enum {
    events get removed */
 static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
 static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
 static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
 #endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
 
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
new file mode 100644
index 000000000000..94ec79cc011a
--- /dev/null
+++ b/include/trace/events/printk.h
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(console,
+	TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+		 unsigned log_buf_len),
+
+	TP_ARGS(log_buf, start, end, log_buf_len),
+
+	TP_CONDITION(start != end),
+
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, end - start + 1)
+	),
+
+	TP_fast_assign(
+		if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+			memcpy(__get_dynamic_array(msg),
+			       log_buf + (start & (log_buf_len - 1)),
+			       log_buf_len - (start & (log_buf_len - 1)));
+			memcpy((char *)__get_dynamic_array(msg) +
+			       log_buf_len - (start & (log_buf_len - 1)),
+			       log_buf, end & (log_buf_len - 1));
+		} else
+			memcpy(__get_dynamic_array(msg),
+			       log_buf + (start & (log_buf_len - 1)),
+			       end - start);
+		((char *)__get_dynamic_array(msg))[end - start] = 0;
+	),
+
+	TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
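
Not part of the patch: a minimal sketch of how this conditional tracepoint is consumed, assuming kernel context. Defining CREATE_TRACE_POINTS before the include (in exactly one .c file) emits the event body, and the generated trace_console() records data only when TP_CONDITION (start != end) holds; the emit_console() wrapper name is purely illustrative.

#define CREATE_TRACE_POINTS	/* expand the event definitions here, once */
#include <trace/events/printk.h>

/* Hypothetical call site: forwards a log_buf window to the console event. */
static void emit_console(const char *log_buf, unsigned start, unsigned end,
			 unsigned log_buf_len)
{
	/* Compiles to a no-op unless the tracepoint is enabled and start != end. */
	trace_console(log_buf, start, end, log_buf_len);
}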
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d2d88bed891b..337099783f37 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
 /*
  * Tracepoint for the registration of a single RCU callback function.
  * The first argument is the type of RCU, the second argument is
- * a pointer to the RCU callback itself, and the third element is the
- * new RCU callback queue length for the current CPU.
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
  */
 TRACE_EVENT(rcu_callback,
 
-	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+		 long qlen),
 
-	TP_ARGS(rcuname, rhp, qlen),
+	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
 
 	TP_STRUCT__entry(
 		__field(char *, rcuname)
 		__field(void *, rhp)
 		__field(void *, func)
+		__field(long, qlen_lazy)
 		__field(long, qlen)
 	),
 
@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
 		__entry->rcuname = rcuname;
 		__entry->rhp = rhp;
 		__entry->func = rhp->func;
+		__entry->qlen_lazy = qlen_lazy;
 		__entry->qlen = qlen;
 	),
 
-	TP_printk("%s rhp=%p func=%pf %ld",
-		  __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+	TP_printk("%s rhp=%p func=%pf %ld/%ld",
+		  __entry->rcuname, __entry->rhp, __entry->func,
+		  __entry->qlen_lazy, __entry->qlen)
 );
 
 /*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
  * kfree() form. The first argument is the RCU type, the second argument
  * is a pointer to the RCU callback, the third argument is the offset
  * of the callback within the enclosing RCU-protected data structure,
- * and the fourth argument is the new RCU callback queue length for the
- * current CPU.
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
  */
 TRACE_EVENT(rcu_kfree_callback,
 
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
-		 long qlen),
+		 long qlen_lazy, long qlen),
 
-	TP_ARGS(rcuname, rhp, offset, qlen),
+	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
 
 	TP_STRUCT__entry(
 		__field(char *, rcuname)
 		__field(void *, rhp)
 		__field(unsigned long, offset)
+		__field(long, qlen_lazy)
 		__field(long, qlen)
 	),
 
@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
 		__entry->rcuname = rcuname;
 		__entry->rhp = rhp;
 		__entry->offset = offset;
+		__entry->qlen_lazy = qlen_lazy;
 		__entry->qlen = qlen;
 	),
 
-	TP_printk("%s rhp=%p func=%ld %ld",
+	TP_printk("%s rhp=%p func=%ld %ld/%ld",
 		  __entry->rcuname, __entry->rhp, __entry->offset,
-		  __entry->qlen)
+		  __entry->qlen_lazy, __entry->qlen)
 );
 
 /*
  * Tracepoint for marking the beginning rcu_do_batch, performed to start
  * RCU callback invocation. The first argument is the RCU flavor,
- * the second is the total number of callbacks (including those that
- * are not yet ready to be invoked), and the third argument is the
- * current RCU-callback batch limit.
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
  */
 TRACE_EVENT(rcu_batch_start,
 
-	TP_PROTO(char *rcuname, long qlen, int blimit),
+	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
 
-	TP_ARGS(rcuname, qlen, blimit),
+	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
 
 	TP_STRUCT__entry(
 		__field(char *, rcuname)
+		__field(long, qlen_lazy)
 		__field(long, qlen)
 		__field(int, blimit)
 	),
 
 	TP_fast_assign(
 		__entry->rcuname = rcuname;
+		__entry->qlen_lazy = qlen_lazy;
 		__entry->qlen = qlen;
 		__entry->blimit = blimit;
 	),
 
-	TP_printk("%s CBs=%ld bl=%d",
-		  __entry->rcuname, __entry->qlen, __entry->blimit)
+	TP_printk("%s CBs=%ld/%ld bl=%d",
+		  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+		  __entry->blimit)
 );
 
 /*
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+				    qsmask) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+					 grplo, grphi, gp_tasks) do { } \
+	while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
 #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
 #define trace_rcu_prep_idle(reason) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+	do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+	do { } while (0)
 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
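
Not part of the patch: an illustrative caller exercising the widened signatures; the flavor string and the way the counts are obtained are placeholders, not the real rcutree bookkeeping.

#include <trace/events/rcu.h>

static char flavor[] = "rcu_sched";	/* stand-in for the real flavor name */

/* Hypothetical helpers showing the new qlen_lazy/qlen argument pairs. */
static void example_enqueue(struct rcu_head *rhp, long qlen_lazy, long qlen)
{
	trace_rcu_callback(flavor, rhp, qlen_lazy, qlen);
}

static void example_batch(long qlen_lazy, long qlen, int blimit)
{
	trace_rcu_batch_start(flavor, qlen_lazy, qlen, blimit);
}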
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 50fe9ba61442..41a7dbd570e2 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -139,6 +139,42 @@ TRACE_EVENT(regcache_sync,
 		__get_str(type), __get_str(status))
 );
 
+DECLARE_EVENT_CLASS(regmap_bool,
+
+	TP_PROTO(struct device *dev, bool flag),
+
+	TP_ARGS(dev, flag),
+
+	TP_STRUCT__entry(
+		__string(	name,		dev_name(dev)	)
+		__field(	int,		flag		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		__entry->flag = flag;
+	),
+
+	TP_printk("%s flag=%d", __get_str(name),
+		  (int)__entry->flag)
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_only,
+
+	TP_PROTO(struct device *dev, bool flag),
+
+	TP_ARGS(dev, flag)
+
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+
+	TP_PROTO(struct device *dev, bool flag),
+
+	TP_ARGS(dev, flag)
+
+);
+
 #endif /* _TRACE_REGMAP_H */
 
 /* This part must be outside protection */
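
Not part of the patch: a hedged sketch of emitting the new boolean events. The helper names are invented and the regmap internals are reduced to a bare struct device, matching the prototypes shown above.

#include <linux/device.h>
#include <trace/events/regmap.h>

/* Hypothetical wrappers around the generated trace_regmap_cache_*() calls. */
static void note_cache_only(struct device *dev, bool enable)
{
	trace_regmap_cache_only(dev, enable);
}

static void note_cache_bypass(struct device *dev, bool enable)
{
	trace_regmap_cache_bypass(dev, enable);
}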
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6ba596b07a72..fbc7b1ad929b 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -6,6 +6,7 @@
 
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
+#include <linux/binfmts.h>
 
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
@@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork,
 );
 
 /*
+ * Tracepoint for exec:
+ */
+TRACE_EVENT(sched_process_exec,
+
+	TP_PROTO(struct task_struct *p, pid_t old_pid,
+		 struct linux_binprm *bprm),
+
+	TP_ARGS(p, old_pid, bprm),
+
+	TP_STRUCT__entry(
+		__string(	filename,	bprm->filename	)
+		__field(	pid_t,		pid		)
+		__field(	pid_t,		old_pid		)
+	),
+
+	TP_fast_assign(
+		__assign_str(filename, bprm->filename);
+		__entry->pid = p->pid;
+		__entry->old_pid = p->pid;
+	),
+
+	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
+		  __entry->pid, __entry->old_pid)
+);
+
+/*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
@@ -370,56 +397,6 @@ TRACE_EVENT(sched_stat_runtime,
 			(unsigned long long)__entry->vruntime)
 );
 
-#ifdef CREATE_TRACE_POINTS
-static inline u64 trace_get_sleeptime(struct task_struct *tsk)
-{
-#ifdef CONFIG_SCHEDSTATS
-	u64 block, sleep;
-
-	block = tsk->se.statistics.block_start;
-	sleep = tsk->se.statistics.sleep_start;
-	tsk->se.statistics.block_start = 0;
-	tsk->se.statistics.sleep_start = 0;
-
-	return block ? block : sleep ? sleep : 0;
-#else
-	return 0;
-#endif
-}
-#endif
-
-/*
- * Tracepoint for accounting sleeptime (time the task is sleeping
- * or waiting for I/O).
- */
-TRACE_EVENT(sched_stat_sleeptime,
-
-	TP_PROTO(struct task_struct *tsk, u64 now),
-
-	TP_ARGS(tsk, now),
-
-	TP_STRUCT__entry(
-		__array( char,	comm,	TASK_COMM_LEN	)
-		__field( pid_t,	pid			)
-		__field( u64,	sleeptime		)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-		__entry->pid = tsk->pid;
-		__entry->sleeptime = trace_get_sleeptime(tsk);
-		__entry->sleeptime = __entry->sleeptime ?
-		  now - __entry->sleeptime : 0;
-	)
-	TP_perf_assign(
-		__perf_count(__entry->sleeptime);
-	),
-
-	TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
-			__entry->comm, __entry->pid,
-			(unsigned long long)__entry->sleeptime)
-);
-
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
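
Not part of the patch: a hedged sketch of firing the new exec tracepoint; the wrapper is hypothetical and stands in for the real call in the exec path.

#include <trace/events/sched.h>

/* Hypothetical wrapper: report that task p completed an exec, keeping its
 * pre-exec pid (old_pid) for correlation with earlier events. */
static void report_exec(struct task_struct *p, pid_t old_pid,
			struct linux_binprm *bprm)
{
	trace_sched_process_exec(p, old_pid, bprm);
}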
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 17df43464df0..39a8a430d90f 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -23,11 +23,23 @@
 		}					\
 	} while (0)
 
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+	TRACE_SIGNAL_DELIVERED,
+	TRACE_SIGNAL_IGNORED,
+	TRACE_SIGNAL_ALREADY_PENDING,
+	TRACE_SIGNAL_OVERFLOW_FAIL,
+	TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
 /**
  * signal_generate - called when a signal is generated
  * @sig: signal number
  * @info: pointer to struct siginfo
  * @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
  *
  * Current process sends a 'sig' signal to 'task' process with
  * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
@@ -37,9 +49,10 @@
  */
 TRACE_EVENT(signal_generate,
 
-	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+	TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+		int group, int result),
 
-	TP_ARGS(sig, info, task),
+	TP_ARGS(sig, info, task, group, result),
 
 	TP_STRUCT__entry(
 		__field(	int,	sig			)
@@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,
 		__field(	int,	code			)
 		__array(	char,	comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	pid			)
+		__field(	int,	group			)
+		__field(	int,	result			)
 	),
 
 	TP_fast_assign(
@@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,
 		TP_STORE_SIGINFO(__entry, info);
 		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
 		__entry->pid = task->pid;
+		__entry->group = group;
+		__entry->result = result;
 	),
 
-	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+	TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
 		  __entry->sig, __entry->errno, __entry->code,
-		  __entry->comm, __entry->pid)
+		  __entry->comm, __entry->pid, __entry->group,
+		  __entry->result)
 );
 
 /**
@@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,
 		__entry->sa_handler, __entry->sa_flags)
 );
 
-DECLARE_EVENT_CLASS(signal_queue_overflow,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info),
-
-	TP_STRUCT__entry(
-		__field(	int,	sig	)
-		__field(	int,	group	)
-		__field(	int,	errno	)
-		__field(	int,	code	)
-	),
-
-	TP_fast_assign(
-		__entry->sig	= sig;
-		__entry->group	= group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		  __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-
-	TP_PROTO(int sig, int group, struct siginfo *info),
-
-	TP_ARGS(sig, group, info)
-);
-
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */
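
Not part of the patch: an illustrative call with the two new arguments. The helper is hypothetical; group=0 marks a private (non-group) signal, and TRACE_SIGNAL_DELIVERED is used here as the result code for a signal that was queued normally.

#include <trace/events/signal.h>

/* Hypothetical reporting helper around the widened signal_generate event. */
static void report_signal(int sig, struct siginfo *info, struct task_struct *t)
{
	trace_signal_generate(sig, info, t, 0, TRACE_SIGNAL_DELIVERED);
}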
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
new file mode 100644
index 000000000000..43be87d5dd58
--- /dev/null
+++ b/include/trace/events/sunrpc.h
@@ -0,0 +1,177 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunrpc
+
+#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNRPC_H
+
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rpc_task_status,
+
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(const struct rpc_task *, task)
+		__field(const struct rpc_clnt *, clnt)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__entry->clnt = task->tk_client;
+		__entry->status = task->tk_status;
+	),
+
+	TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_call_status,
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task)
+);
+
+DEFINE_EVENT(rpc_task_status, rpc_bind_status,
+	TP_PROTO(struct rpc_task *task),
+
+	TP_ARGS(task)
+);
+
+TRACE_EVENT(rpc_connect_status,
+	TP_PROTO(struct rpc_task *task, int status),
+
+	TP_ARGS(task, status),
+
+	TP_STRUCT__entry(
+		__field(const struct rpc_task *, task)
+		__field(const struct rpc_clnt *, clnt)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__entry->clnt = task->tk_client;
+		__entry->status = status;
+	),
+
+	TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
+);
+
+DECLARE_EVENT_CLASS(rpc_task_running,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action),
+
+	TP_STRUCT__entry(
+		__field(const struct rpc_clnt *, clnt)
+		__field(const struct rpc_task *, task)
+		__field(const void *, action)
+		__field(unsigned long, runstate)
+		__field(int, status)
+		__field(unsigned short, flags)
+	),
+
+	TP_fast_assign(
+		__entry->clnt = clnt;
+		__entry->task = task;
+		__entry->action = action;
+		__entry->runstate = task->tk_runstate;
+		__entry->status = task->tk_status;
+		__entry->flags = task->tk_flags;
+	),
+
+	TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d action=%pf",
+		__entry->task,
+		__entry->clnt,
+		__entry->flags,
+		__entry->runstate,
+		__entry->status,
+		__entry->action
+		)
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_begin,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DEFINE_EVENT(rpc_task_running, rpc_task_complete,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
+
+	TP_ARGS(clnt, task, action)
+
+);
+
+DECLARE_EVENT_CLASS(rpc_task_queued,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q),
+
+	TP_STRUCT__entry(
+		__field(const struct rpc_clnt *, clnt)
+		__field(const struct rpc_task *, task)
+		__field(unsigned long, timeout)
+		__field(unsigned long, runstate)
+		__field(int, status)
+		__field(unsigned short, flags)
+		__string(q_name, rpc_qname(q))
+	),
+
+	TP_fast_assign(
+		__entry->clnt = clnt;
+		__entry->task = task;
+		__entry->timeout = task->tk_timeout;
+		__entry->runstate = task->tk_runstate;
+		__entry->status = task->tk_status;
+		__entry->flags = task->tk_flags;
+		__assign_str(q_name, rpc_qname(q));
+	),
+
+	TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+		__entry->task,
+		__entry->clnt,
+		__entry->flags,
+		__entry->runstate,
+		__entry->status,
+		__entry->timeout,
+		__get_str(q_name)
+		)
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q)
+
+);
+
+DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
+
+	TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
+
+	TP_ARGS(clnt, task, q)
+
+);
+
+#endif /* _TRACE_SUNRPC_H */
+
+#include <trace/define_trace.h>
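
Not part of the patch: a hedged sketch of how the RPC scheduler could fire these events. The function is invented and the NULL action argument merely stands in for whatever callback pointer a real caller would report.

#include <linux/sunrpc/sched.h>
#include <trace/events/sunrpc.h>

/* Hypothetical instrumentation points around queueing an RPC task. */
static void example_rpc_trace(struct rpc_clnt *clnt, struct rpc_task *task,
			      struct rpc_wait_queue *q)
{
	trace_rpc_task_begin(clnt, task, NULL);	/* NULL stands in for tk_action */
	trace_rpc_task_sleep(clnt, task, q);
	trace_rpc_task_wakeup(clnt, task, q);
}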
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2d0dd3e03e41..7b81887b023f 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -46,7 +46,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, reason)
 	),
 	TP_fast_assign(
-		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		struct device *dev = bdi->dev;
+		if (!dev)
+			dev = default_backing_dev_info.dev;
+		strncpy(__entry->name, dev_name(dev), 32);
 		__entry->nr_pages = work->nr_pages;
 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 		__entry->sync_mode = work->sync_mode;
@@ -425,7 +428,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
 
 	TP_fast_assign(
 		strncpy(__entry->name,
-			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+			dev_name(inode_to_bdi(inode)->dev), 32);
 		__entry->ino = inode->i_ino;
 		__entry->state = inode->i_state;
 		__entry->dirtied_when = inode->dirtied_when;