about summary refs log tree commit diff stats
path: root/include/trace/events
diff options
context:
space:
mode:
Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/irq.h      |  57
-rw-r--r--  include/trace/events/kmem.h     | 194
-rw-r--r--  include/trace/events/lockdep.h  |  60
-rw-r--r--  include/trace/events/sched.h    | 339
-rw-r--r--  include/trace/events/skb.h      |  40
5 files changed, 690 insertions, 0 deletions
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 000000000000..75e3468e4493
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,57 @@
1#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_IRQ_H
3
4#include <linux/tracepoint.h>
5#include <linux/interrupt.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM irq
9
10/*
11 * Tracepoint for entry of interrupt handler:
12 */
13TRACE_FORMAT(irq_handler_entry,
14 TP_PROTO(int irq, struct irqaction *action),
15 TP_ARGS(irq, action),
16 TP_FMT("irq=%d handler=%s", irq, action->name)
17 );
18
19/*
20 * Tracepoint for return of an interrupt handler:
21 */
22TRACE_EVENT(irq_handler_exit,
23
24 TP_PROTO(int irq, struct irqaction *action, int ret),
25
26 TP_ARGS(irq, action, ret),
27
28 TP_STRUCT__entry(
29 __field( int, irq )
30 __field( int, ret )
31 ),
32
33 TP_fast_assign(
34 __entry->irq = irq;
35 __entry->ret = ret;
36 ),
37
38 TP_printk("irq=%d return=%s",
39 __entry->irq, __entry->ret ? "handled" : "unhandled")
40);
41
42TRACE_FORMAT(softirq_entry,
43 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
44 TP_ARGS(h, vec),
45 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
46 );
47
48TRACE_FORMAT(softirq_exit,
49 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
50 TP_ARGS(h, vec),
51 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
52 );
53
54#endif /* _TRACE_IRQ_H */
55
56/* This part must be outside protection */
57#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 000000000000..c22c42f980b5
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,194 @@
1#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KMEM_H
3
4#include <linux/types.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kmem
9
10TRACE_EVENT(kmalloc,
11
12 TP_PROTO(unsigned long call_site,
13 const void *ptr,
14 size_t bytes_req,
15 size_t bytes_alloc,
16 gfp_t gfp_flags),
17
18 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
19
20 TP_STRUCT__entry(
21 __field( unsigned long, call_site )
22 __field( const void *, ptr )
23 __field( size_t, bytes_req )
24 __field( size_t, bytes_alloc )
25 __field( gfp_t, gfp_flags )
26 ),
27
28 TP_fast_assign(
29 __entry->call_site = call_site;
30 __entry->ptr = ptr;
31 __entry->bytes_req = bytes_req;
32 __entry->bytes_alloc = bytes_alloc;
33 __entry->gfp_flags = gfp_flags;
34 ),
35
36 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x",
37 __entry->call_site,
38 __entry->ptr,
39 __entry->bytes_req,
40 __entry->bytes_alloc,
41 __entry->gfp_flags)
42);
43
44TRACE_EVENT(kmem_cache_alloc,
45
46 TP_PROTO(unsigned long call_site,
47 const void *ptr,
48 size_t bytes_req,
49 size_t bytes_alloc,
50 gfp_t gfp_flags),
51
52 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
53
54 TP_STRUCT__entry(
55 __field( unsigned long, call_site )
56 __field( const void *, ptr )
57 __field( size_t, bytes_req )
58 __field( size_t, bytes_alloc )
59 __field( gfp_t, gfp_flags )
60 ),
61
62 TP_fast_assign(
63 __entry->call_site = call_site;
64 __entry->ptr = ptr;
65 __entry->bytes_req = bytes_req;
66 __entry->bytes_alloc = bytes_alloc;
67 __entry->gfp_flags = gfp_flags;
68 ),
69
70 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x",
71 __entry->call_site,
72 __entry->ptr,
73 __entry->bytes_req,
74 __entry->bytes_alloc,
75 __entry->gfp_flags)
76);
77
78TRACE_EVENT(kmalloc_node,
79
80 TP_PROTO(unsigned long call_site,
81 const void *ptr,
82 size_t bytes_req,
83 size_t bytes_alloc,
84 gfp_t gfp_flags,
85 int node),
86
87 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
88
89 TP_STRUCT__entry(
90 __field( unsigned long, call_site )
91 __field( const void *, ptr )
92 __field( size_t, bytes_req )
93 __field( size_t, bytes_alloc )
94 __field( gfp_t, gfp_flags )
95 __field( int, node )
96 ),
97
98 TP_fast_assign(
99 __entry->call_site = call_site;
100 __entry->ptr = ptr;
101 __entry->bytes_req = bytes_req;
102 __entry->bytes_alloc = bytes_alloc;
103 __entry->gfp_flags = gfp_flags;
104 __entry->node = node;
105 ),
106
107 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x node=%d",
108 __entry->call_site,
109 __entry->ptr,
110 __entry->bytes_req,
111 __entry->bytes_alloc,
112 __entry->gfp_flags,
113 __entry->node)
114);
115
116TRACE_EVENT(kmem_cache_alloc_node,
117
118 TP_PROTO(unsigned long call_site,
119 const void *ptr,
120 size_t bytes_req,
121 size_t bytes_alloc,
122 gfp_t gfp_flags,
123 int node),
124
125 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
126
127 TP_STRUCT__entry(
128 __field( unsigned long, call_site )
129 __field( const void *, ptr )
130 __field( size_t, bytes_req )
131 __field( size_t, bytes_alloc )
132 __field( gfp_t, gfp_flags )
133 __field( int, node )
134 ),
135
136 TP_fast_assign(
137 __entry->call_site = call_site;
138 __entry->ptr = ptr;
139 __entry->bytes_req = bytes_req;
140 __entry->bytes_alloc = bytes_alloc;
141 __entry->gfp_flags = gfp_flags;
142 __entry->node = node;
143 ),
144
145 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%08x node=%d",
146 __entry->call_site,
147 __entry->ptr,
148 __entry->bytes_req,
149 __entry->bytes_alloc,
150 __entry->gfp_flags,
151 __entry->node)
152);
153
154TRACE_EVENT(kfree,
155
156 TP_PROTO(unsigned long call_site, const void *ptr),
157
158 TP_ARGS(call_site, ptr),
159
160 TP_STRUCT__entry(
161 __field( unsigned long, call_site )
162 __field( const void *, ptr )
163 ),
164
165 TP_fast_assign(
166 __entry->call_site = call_site;
167 __entry->ptr = ptr;
168 ),
169
170 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
171);
172
173TRACE_EVENT(kmem_cache_free,
174
175 TP_PROTO(unsigned long call_site, const void *ptr),
176
177 TP_ARGS(call_site, ptr),
178
179 TP_STRUCT__entry(
180 __field( unsigned long, call_site )
181 __field( const void *, ptr )
182 ),
183
184 TP_fast_assign(
185 __entry->call_site = call_site;
186 __entry->ptr = ptr;
187 ),
188
189 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
190);
191#endif /* _TRACE_KMEM_H */
192
193/* This part must be outside protection */
194#include <trace/define_trace.h>
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
new file mode 100644
index 000000000000..45e326b5c7f3
--- /dev/null
+++ b/include/trace/events/lockdep.h
@@ -0,0 +1,60 @@
1#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lockdep
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TP_FMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TP_ARGS(lock, nested, ip),
24 TP_FMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
31 TP_ARGS(lock, ip),
32 TP_FMT("%s", lock->name)
33 );
34
35TRACE_EVENT(lock_acquired,
36 TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
37
38 TP_ARGS(lock, ip, waittime),
39
40 TP_STRUCT__entry(
41 __field(const char *, name)
42 __field(unsigned long, wait_usec)
43 __field(unsigned long, wait_nsec_rem)
44 ),
45 TP_fast_assign(
46 __entry->name = lock->name;
47 __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
48 __entry->wait_usec = (unsigned long) waittime;
49 ),
50 TP_printk("%s (%lu.%03lu us)", __entry->name, __entry->wait_usec,
51 __entry->wait_nsec_rem)
52);
53
54#endif
55#endif
56
57#endif /* _TRACE_LOCKDEP_H */
58
59/* This part must be outside protection */
60#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
new file mode 100644
index 000000000000..ffa1cab586b9
--- /dev/null
+++ b/include/trace/events/sched.h
@@ -0,0 +1,339 @@
1#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_SCHED_H
3
4#include <linux/sched.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM sched
9
10/*
11 * Tracepoint for calling kthread_stop, performed to end a kthread:
12 */
13TRACE_EVENT(sched_kthread_stop,
14
15 TP_PROTO(struct task_struct *t),
16
17 TP_ARGS(t),
18
19 TP_STRUCT__entry(
20 __array( char, comm, TASK_COMM_LEN )
21 __field( pid_t, pid )
22 ),
23
24 TP_fast_assign(
25 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
26 __entry->pid = t->pid;
27 ),
28
29 TP_printk("task %s:%d", __entry->comm, __entry->pid)
30);
31
32/*
33 * Tracepoint for the return value of the kthread stopping:
34 */
35TRACE_EVENT(sched_kthread_stop_ret,
36
37 TP_PROTO(int ret),
38
39 TP_ARGS(ret),
40
41 TP_STRUCT__entry(
42 __field( int, ret )
43 ),
44
45 TP_fast_assign(
46 __entry->ret = ret;
47 ),
48
49 TP_printk("ret %d", __entry->ret)
50);
51
52/*
53 * Tracepoint for waiting on task to unschedule:
54 *
55 * (NOTE: the 'rq' argument is not used by generic trace events,
56 * but used by the latency tracer plugin. )
57 */
58TRACE_EVENT(sched_wait_task,
59
60 TP_PROTO(struct rq *rq, struct task_struct *p),
61
62 TP_ARGS(rq, p),
63
64 TP_STRUCT__entry(
65 __array( char, comm, TASK_COMM_LEN )
66 __field( pid_t, pid )
67 __field( int, prio )
68 ),
69
70 TP_fast_assign(
71 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
72 __entry->pid = p->pid;
73 __entry->prio = p->prio;
74 ),
75
76 TP_printk("task %s:%d [%d]",
77 __entry->comm, __entry->pid, __entry->prio)
78);
79
80/*
81 * Tracepoint for waking up a task:
82 *
83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. )
85 */
86TRACE_EVENT(sched_wakeup,
87
88 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
89
90 TP_ARGS(rq, p, success),
91
92 TP_STRUCT__entry(
93 __array( char, comm, TASK_COMM_LEN )
94 __field( pid_t, pid )
95 __field( int, prio )
96 __field( int, success )
97 ),
98
99 TP_fast_assign(
100 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
101 __entry->pid = p->pid;
102 __entry->prio = p->prio;
103 __entry->success = success;
104 ),
105
106 TP_printk("task %s:%d [%d] success=%d",
107 __entry->comm, __entry->pid, __entry->prio,
108 __entry->success)
109);
110
111/*
112 * Tracepoint for waking up a new task:
113 *
114 * (NOTE: the 'rq' argument is not used by generic trace events,
115 * but used by the latency tracer plugin. )
116 */
117TRACE_EVENT(sched_wakeup_new,
118
119 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
120
121 TP_ARGS(rq, p, success),
122
123 TP_STRUCT__entry(
124 __array( char, comm, TASK_COMM_LEN )
125 __field( pid_t, pid )
126 __field( int, prio )
127 __field( int, success )
128 ),
129
130 TP_fast_assign(
131 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
132 __entry->pid = p->pid;
133 __entry->prio = p->prio;
134 __entry->success = success;
135 ),
136
137 TP_printk("task %s:%d [%d] success=%d",
138 __entry->comm, __entry->pid, __entry->prio,
139 __entry->success)
140);
141
142/*
143 * Tracepoint for task switches, performed by the scheduler:
144 *
145 * (NOTE: the 'rq' argument is not used by generic trace events,
146 * but used by the latency tracer plugin. )
147 */
148TRACE_EVENT(sched_switch,
149
150 TP_PROTO(struct rq *rq, struct task_struct *prev,
151 struct task_struct *next),
152
153 TP_ARGS(rq, prev, next),
154
155 TP_STRUCT__entry(
156 __array( char, prev_comm, TASK_COMM_LEN )
157 __field( pid_t, prev_pid )
158 __field( int, prev_prio )
159 __array( char, next_comm, TASK_COMM_LEN )
160 __field( pid_t, next_pid )
161 __field( int, next_prio )
162 ),
163
164 TP_fast_assign(
165 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
166 __entry->prev_pid = prev->pid;
167 __entry->prev_prio = prev->prio;
168 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
169 __entry->next_pid = next->pid;
170 __entry->next_prio = next->prio;
171 ),
172
173 TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
174 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
175 __entry->next_comm, __entry->next_pid, __entry->next_prio)
176);
177
178/*
179 * Tracepoint for a task being migrated:
180 */
181TRACE_EVENT(sched_migrate_task,
182
183 TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
184
185 TP_ARGS(p, orig_cpu, dest_cpu),
186
187 TP_STRUCT__entry(
188 __array( char, comm, TASK_COMM_LEN )
189 __field( pid_t, pid )
190 __field( int, prio )
191 __field( int, orig_cpu )
192 __field( int, dest_cpu )
193 ),
194
195 TP_fast_assign(
196 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
197 __entry->pid = p->pid;
198 __entry->prio = p->prio;
199 __entry->orig_cpu = orig_cpu;
200 __entry->dest_cpu = dest_cpu;
201 ),
202
203 TP_printk("task %s:%d [%d] from: %d to: %d",
204 __entry->comm, __entry->pid, __entry->prio,
205 __entry->orig_cpu, __entry->dest_cpu)
206);
207
208/*
209 * Tracepoint for freeing a task:
210 */
211TRACE_EVENT(sched_process_free,
212
213 TP_PROTO(struct task_struct *p),
214
215 TP_ARGS(p),
216
217 TP_STRUCT__entry(
218 __array( char, comm, TASK_COMM_LEN )
219 __field( pid_t, pid )
220 __field( int, prio )
221 ),
222
223 TP_fast_assign(
224 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
225 __entry->pid = p->pid;
226 __entry->prio = p->prio;
227 ),
228
229 TP_printk("task %s:%d [%d]",
230 __entry->comm, __entry->pid, __entry->prio)
231);
232
233/*
234 * Tracepoint for a task exiting:
235 */
236TRACE_EVENT(sched_process_exit,
237
238 TP_PROTO(struct task_struct *p),
239
240 TP_ARGS(p),
241
242 TP_STRUCT__entry(
243 __array( char, comm, TASK_COMM_LEN )
244 __field( pid_t, pid )
245 __field( int, prio )
246 ),
247
248 TP_fast_assign(
249 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
250 __entry->pid = p->pid;
251 __entry->prio = p->prio;
252 ),
253
254 TP_printk("task %s:%d [%d]",
255 __entry->comm, __entry->pid, __entry->prio)
256);
257
258/*
259 * Tracepoint for a waiting task:
260 */
261TRACE_EVENT(sched_process_wait,
262
263 TP_PROTO(struct pid *pid),
264
265 TP_ARGS(pid),
266
267 TP_STRUCT__entry(
268 __array( char, comm, TASK_COMM_LEN )
269 __field( pid_t, pid )
270 __field( int, prio )
271 ),
272
273 TP_fast_assign(
274 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
275 __entry->pid = pid_nr(pid);
276 __entry->prio = current->prio;
277 ),
278
279 TP_printk("task %s:%d [%d]",
280 __entry->comm, __entry->pid, __entry->prio)
281);
282
283/*
284 * Tracepoint for do_fork:
285 */
286TRACE_EVENT(sched_process_fork,
287
288 TP_PROTO(struct task_struct *parent, struct task_struct *child),
289
290 TP_ARGS(parent, child),
291
292 TP_STRUCT__entry(
293 __array( char, parent_comm, TASK_COMM_LEN )
294 __field( pid_t, parent_pid )
295 __array( char, child_comm, TASK_COMM_LEN )
296 __field( pid_t, child_pid )
297 ),
298
299 TP_fast_assign(
300 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
301 __entry->parent_pid = parent->pid;
302 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
303 __entry->child_pid = child->pid;
304 ),
305
306 TP_printk("parent %s:%d child %s:%d",
307 __entry->parent_comm, __entry->parent_pid,
308 __entry->child_comm, __entry->child_pid)
309);
310
311/*
312 * Tracepoint for sending a signal:
313 */
314TRACE_EVENT(sched_signal_send,
315
316 TP_PROTO(int sig, struct task_struct *p),
317
318 TP_ARGS(sig, p),
319
320 TP_STRUCT__entry(
321 __field( int, sig )
322 __array( char, comm, TASK_COMM_LEN )
323 __field( pid_t, pid )
324 ),
325
326 TP_fast_assign(
327 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
328 __entry->pid = p->pid;
329 __entry->sig = sig;
330 ),
331
332 TP_printk("sig: %d task %s:%d",
333 __entry->sig, __entry->comm, __entry->pid)
334);
335
336#endif /* _TRACE_SCHED_H */
337
338/* This part must be outside protection */
339#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 000000000000..1e8fabb57c06
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,40 @@
1#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_SKB_H
3
4#include <linux/skbuff.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM skb
9
10/*
11 * Tracepoint for free an sk_buff:
12 */
13TRACE_EVENT(kfree_skb,
14
15 TP_PROTO(struct sk_buff *skb, void *location),
16
17 TP_ARGS(skb, location),
18
19 TP_STRUCT__entry(
20 __field( void *, skbaddr )
21 __field( unsigned short, protocol )
22 __field( void *, location )
23 ),
24
25 TP_fast_assign(
26 __entry->skbaddr = skb;
27 if (skb) {
28 __entry->protocol = ntohs(skb->protocol);
29 }
30 __entry->location = location;
31 ),
32
33 TP_printk("skbaddr=%p protocol=%u location=%p",
34 __entry->skbaddr, __entry->protocol, __entry->location)
35);
36
37#endif /* _TRACE_SKB_H */
38
39/* This part must be outside protection */
40#include <trace/define_trace.h>