aboutsummaryrefslogtreecommitdiffstats
path: root/include/trace/events
diff options
context:
space:
mode:
Diffstat (limited to 'include/trace/events')
-rw-r--r--include/trace/events/block.h493
-rw-r--r--include/trace/events/irq.h145
-rw-r--r--include/trace/events/kmem.h231
-rw-r--r--include/trace/events/lockdep.h96
-rw-r--r--include/trace/events/napi.h11
-rw-r--r--include/trace/events/sched.h346
-rw-r--r--include/trace/events/skb.h40
-rw-r--r--include/trace/events/workqueue.h100
8 files changed, 1462 insertions, 0 deletions
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 000000000000..d6b05f42dd44
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,493 @@
1#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_BLOCK_H
3
4#include <linux/blktrace_api.h>
5#include <linux/blkdev.h>
6#include <linux/tracepoint.h>
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM block
10
11TRACE_EVENT(block_rq_abort,
12
13 TP_PROTO(struct request_queue *q, struct request *rq),
14
15 TP_ARGS(q, rq),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( sector_t, sector )
20 __field( unsigned int, nr_sector )
21 __field( int, errors )
22 __array( char, rwbs, 6 )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
29 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
30 __entry->errors = rq->errors;
31
32 blk_fill_rwbs_rq(__entry->rwbs, rq);
33 blk_dump_cmd(__get_str(cmd), rq);
34 ),
35
36 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
37 MAJOR(__entry->dev), MINOR(__entry->dev),
38 __entry->rwbs, __get_str(cmd),
39 (unsigned long long)__entry->sector,
40 __entry->nr_sector, __entry->errors)
41);
42
43TRACE_EVENT(block_rq_insert,
44
45 TP_PROTO(struct request_queue *q, struct request *rq),
46
47 TP_ARGS(q, rq),
48
49 TP_STRUCT__entry(
50 __field( dev_t, dev )
51 __field( sector_t, sector )
52 __field( unsigned int, nr_sector )
53 __field( unsigned int, bytes )
54 __array( char, rwbs, 6 )
55 __array( char, comm, TASK_COMM_LEN )
56 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
57 ),
58
59 TP_fast_assign(
60 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
61 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
62 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
63 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
64
65 blk_fill_rwbs_rq(__entry->rwbs, rq);
66 blk_dump_cmd(__get_str(cmd), rq);
67 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
68 ),
69
70 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
71 MAJOR(__entry->dev), MINOR(__entry->dev),
72 __entry->rwbs, __entry->bytes, __get_str(cmd),
73 (unsigned long long)__entry->sector,
74 __entry->nr_sector, __entry->comm)
75);
76
77TRACE_EVENT(block_rq_issue,
78
79 TP_PROTO(struct request_queue *q, struct request *rq),
80
81 TP_ARGS(q, rq),
82
83 TP_STRUCT__entry(
84 __field( dev_t, dev )
85 __field( sector_t, sector )
86 __field( unsigned int, nr_sector )
87 __field( unsigned int, bytes )
88 __array( char, rwbs, 6 )
89 __array( char, comm, TASK_COMM_LEN )
90 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
91 ),
92
93 TP_fast_assign(
94 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
95 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
96 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
97 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
98
99 blk_fill_rwbs_rq(__entry->rwbs, rq);
100 blk_dump_cmd(__get_str(cmd), rq);
101 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
102 ),
103
104 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
105 MAJOR(__entry->dev), MINOR(__entry->dev),
106 __entry->rwbs, __entry->bytes, __get_str(cmd),
107 (unsigned long long)__entry->sector,
108 __entry->nr_sector, __entry->comm)
109);
110
111TRACE_EVENT(block_rq_requeue,
112
113 TP_PROTO(struct request_queue *q, struct request *rq),
114
115 TP_ARGS(q, rq),
116
117 TP_STRUCT__entry(
118 __field( dev_t, dev )
119 __field( sector_t, sector )
120 __field( unsigned int, nr_sector )
121 __field( int, errors )
122 __array( char, rwbs, 6 )
123 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
124 ),
125
126 TP_fast_assign(
127 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
128 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
129 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
130 __entry->errors = rq->errors;
131
132 blk_fill_rwbs_rq(__entry->rwbs, rq);
133 blk_dump_cmd(__get_str(cmd), rq);
134 ),
135
136 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
137 MAJOR(__entry->dev), MINOR(__entry->dev),
138 __entry->rwbs, __get_str(cmd),
139 (unsigned long long)__entry->sector,
140 __entry->nr_sector, __entry->errors)
141);
142
143TRACE_EVENT(block_rq_complete,
144
145 TP_PROTO(struct request_queue *q, struct request *rq),
146
147 TP_ARGS(q, rq),
148
149 TP_STRUCT__entry(
150 __field( dev_t, dev )
151 __field( sector_t, sector )
152 __field( unsigned int, nr_sector )
153 __field( int, errors )
154 __array( char, rwbs, 6 )
155 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
160 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
161 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
162 __entry->errors = rq->errors;
163
164 blk_fill_rwbs_rq(__entry->rwbs, rq);
165 blk_dump_cmd(__get_str(cmd), rq);
166 ),
167
168 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
169 MAJOR(__entry->dev), MINOR(__entry->dev),
170 __entry->rwbs, __get_str(cmd),
171 (unsigned long long)__entry->sector,
172 __entry->nr_sector, __entry->errors)
173);
174TRACE_EVENT(block_bio_bounce,
175
176 TP_PROTO(struct request_queue *q, struct bio *bio),
177
178 TP_ARGS(q, bio),
179
180 TP_STRUCT__entry(
181 __field( dev_t, dev )
182 __field( sector_t, sector )
183 __field( unsigned int, nr_sector )
184 __array( char, rwbs, 6 )
185 __array( char, comm, TASK_COMM_LEN )
186 ),
187
188 TP_fast_assign(
189 __entry->dev = bio->bi_bdev->bd_dev;
190 __entry->sector = bio->bi_sector;
191 __entry->nr_sector = bio->bi_size >> 9;
192 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
193 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
194 ),
195
196 TP_printk("%d,%d %s %llu + %u [%s]",
197 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
198 (unsigned long long)__entry->sector,
199 __entry->nr_sector, __entry->comm)
200);
201
202TRACE_EVENT(block_bio_complete,
203
204 TP_PROTO(struct request_queue *q, struct bio *bio),
205
206 TP_ARGS(q, bio),
207
208 TP_STRUCT__entry(
209 __field( dev_t, dev )
210 __field( sector_t, sector )
211 __field( unsigned, nr_sector )
212 __field( int, error )
213 __array( char, rwbs, 6 )
214 ),
215
216 TP_fast_assign(
217 __entry->dev = bio->bi_bdev->bd_dev;
218 __entry->sector = bio->bi_sector;
219 __entry->nr_sector = bio->bi_size >> 9;
220 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
221 ),
222
223 TP_printk("%d,%d %s %llu + %u [%d]",
224 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
225 (unsigned long long)__entry->sector,
226 __entry->nr_sector, __entry->error)
227);
228
229TRACE_EVENT(block_bio_backmerge,
230
231 TP_PROTO(struct request_queue *q, struct bio *bio),
232
233 TP_ARGS(q, bio),
234
235 TP_STRUCT__entry(
236 __field( dev_t, dev )
237 __field( sector_t, sector )
238 __field( unsigned int, nr_sector )
239 __array( char, rwbs, 6 )
240 __array( char, comm, TASK_COMM_LEN )
241 ),
242
243 TP_fast_assign(
244 __entry->dev = bio->bi_bdev->bd_dev;
245 __entry->sector = bio->bi_sector;
246 __entry->nr_sector = bio->bi_size >> 9;
247 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
248 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
249 ),
250
251 TP_printk("%d,%d %s %llu + %u [%s]",
252 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
253 (unsigned long long)__entry->sector,
254 __entry->nr_sector, __entry->comm)
255);
256
257TRACE_EVENT(block_bio_frontmerge,
258
259 TP_PROTO(struct request_queue *q, struct bio *bio),
260
261 TP_ARGS(q, bio),
262
263 TP_STRUCT__entry(
264 __field( dev_t, dev )
265 __field( sector_t, sector )
266 __field( unsigned, nr_sector )
267 __array( char, rwbs, 6 )
268 __array( char, comm, TASK_COMM_LEN )
269 ),
270
271 TP_fast_assign(
272 __entry->dev = bio->bi_bdev->bd_dev;
273 __entry->sector = bio->bi_sector;
274 __entry->nr_sector = bio->bi_size >> 9;
275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
276 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
277 ),
278
279 TP_printk("%d,%d %s %llu + %u [%s]",
280 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
281 (unsigned long long)__entry->sector,
282 __entry->nr_sector, __entry->comm)
283);
284
285TRACE_EVENT(block_bio_queue,
286
287 TP_PROTO(struct request_queue *q, struct bio *bio),
288
289 TP_ARGS(q, bio),
290
291 TP_STRUCT__entry(
292 __field( dev_t, dev )
293 __field( sector_t, sector )
294 __field( unsigned int, nr_sector )
295 __array( char, rwbs, 6 )
296 __array( char, comm, TASK_COMM_LEN )
297 ),
298
299 TP_fast_assign(
300 __entry->dev = bio->bi_bdev->bd_dev;
301 __entry->sector = bio->bi_sector;
302 __entry->nr_sector = bio->bi_size >> 9;
303 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
304 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
305 ),
306
307 TP_printk("%d,%d %s %llu + %u [%s]",
308 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
309 (unsigned long long)__entry->sector,
310 __entry->nr_sector, __entry->comm)
311);
312
313TRACE_EVENT(block_getrq,
314
315 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
316
317 TP_ARGS(q, bio, rw),
318
319 TP_STRUCT__entry(
320 __field( dev_t, dev )
321 __field( sector_t, sector )
322 __field( unsigned int, nr_sector )
323 __array( char, rwbs, 6 )
324 __array( char, comm, TASK_COMM_LEN )
325 ),
326
327 TP_fast_assign(
328 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
329 __entry->sector = bio ? bio->bi_sector : 0;
330 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
331 blk_fill_rwbs(__entry->rwbs,
332 bio ? bio->bi_rw : 0, __entry->nr_sector);
333 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
334 ),
335
336 TP_printk("%d,%d %s %llu + %u [%s]",
337 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
338 (unsigned long long)__entry->sector,
339 __entry->nr_sector, __entry->comm)
340);
341
342TRACE_EVENT(block_sleeprq,
343
344 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
345
346 TP_ARGS(q, bio, rw),
347
348 TP_STRUCT__entry(
349 __field( dev_t, dev )
350 __field( sector_t, sector )
351 __field( unsigned int, nr_sector )
352 __array( char, rwbs, 6 )
353 __array( char, comm, TASK_COMM_LEN )
354 ),
355
356 TP_fast_assign(
357 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
358 __entry->sector = bio ? bio->bi_sector : 0;
359 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
360 blk_fill_rwbs(__entry->rwbs,
361 bio ? bio->bi_rw : 0, __entry->nr_sector);
362 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
363 ),
364
365 TP_printk("%d,%d %s %llu + %u [%s]",
366 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
367 (unsigned long long)__entry->sector,
368 __entry->nr_sector, __entry->comm)
369);
370
371TRACE_EVENT(block_plug,
372
373 TP_PROTO(struct request_queue *q),
374
375 TP_ARGS(q),
376
377 TP_STRUCT__entry(
378 __array( char, comm, TASK_COMM_LEN )
379 ),
380
381 TP_fast_assign(
382 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
383 ),
384
385 TP_printk("[%s]", __entry->comm)
386);
387
388TRACE_EVENT(block_unplug_timer,
389
390 TP_PROTO(struct request_queue *q),
391
392 TP_ARGS(q),
393
394 TP_STRUCT__entry(
395 __field( int, nr_rq )
396 __array( char, comm, TASK_COMM_LEN )
397 ),
398
399 TP_fast_assign(
400 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
401 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
402 ),
403
404 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
405);
406
407TRACE_EVENT(block_unplug_io,
408
409 TP_PROTO(struct request_queue *q),
410
411 TP_ARGS(q),
412
413 TP_STRUCT__entry(
414 __field( int, nr_rq )
415 __array( char, comm, TASK_COMM_LEN )
416 ),
417
418 TP_fast_assign(
419 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
420 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
421 ),
422
423 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
424);
425
426TRACE_EVENT(block_split,
427
428 TP_PROTO(struct request_queue *q, struct bio *bio,
429 unsigned int new_sector),
430
431 TP_ARGS(q, bio, new_sector),
432
433 TP_STRUCT__entry(
434 __field( dev_t, dev )
435 __field( sector_t, sector )
436 __field( sector_t, new_sector )
437 __array( char, rwbs, 6 )
438 __array( char, comm, TASK_COMM_LEN )
439 ),
440
441 TP_fast_assign(
442 __entry->dev = bio->bi_bdev->bd_dev;
443 __entry->sector = bio->bi_sector;
444 __entry->new_sector = new_sector;
445 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
446 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
447 ),
448
449 TP_printk("%d,%d %s %llu / %llu [%s]",
450 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
451 (unsigned long long)__entry->sector,
452 (unsigned long long)__entry->new_sector,
453 __entry->comm)
454);
455
456TRACE_EVENT(block_remap,
457
458 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
459 sector_t from),
460
461 TP_ARGS(q, bio, dev, from),
462
463 TP_STRUCT__entry(
464 __field( dev_t, dev )
465 __field( sector_t, sector )
466 __field( unsigned int, nr_sector )
467 __field( dev_t, old_dev )
468 __field( sector_t, old_sector )
469 __array( char, rwbs, 6 )
470 ),
471
472 TP_fast_assign(
473 __entry->dev = bio->bi_bdev->bd_dev;
474 __entry->sector = bio->bi_sector;
475 __entry->nr_sector = bio->bi_size >> 9;
476 __entry->old_dev = dev;
477 __entry->old_sector = from;
478 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
479 ),
480
481 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
482 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
483 (unsigned long long)__entry->sector,
484 __entry->nr_sector,
485 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
486 (unsigned long long)__entry->old_sector)
487);
488
489#endif /* _TRACE_BLOCK_H */
490
491/* This part must be outside protection */
492#include <trace/define_trace.h>
493
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 000000000000..b0c7ede55eb1
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,145 @@
1#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_IRQ_H
3
4#include <linux/tracepoint.h>
5#include <linux/interrupt.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM irq
9
10#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
11#define show_softirq_name(val) \
12 __print_symbolic(val, \
13 softirq_name(HI), \
14 softirq_name(TIMER), \
15 softirq_name(NET_TX), \
16 softirq_name(NET_RX), \
17 softirq_name(BLOCK), \
18 softirq_name(TASKLET), \
19 softirq_name(SCHED), \
20 softirq_name(HRTIMER), \
21 softirq_name(RCU))
22
23/**
24 * irq_handler_entry - called immediately before the irq action handler
25 * @irq: irq number
26 * @action: pointer to struct irqaction
27 *
28 * The struct irqaction pointed to by @action contains various
29 * information about the handler, including the device name,
30 * @action->name, and the device id, @action->dev_id. When used in
31 * conjunction with the irq_handler_exit tracepoint, we can figure
32 * out irq handler latencies.
33 */
34TRACE_EVENT(irq_handler_entry,
35
36 TP_PROTO(int irq, struct irqaction *action),
37
38 TP_ARGS(irq, action),
39
40 TP_STRUCT__entry(
41 __field( int, irq )
42 __string( name, action->name )
43 ),
44
45 TP_fast_assign(
46 __entry->irq = irq;
47 __assign_str(name, action->name);
48 ),
49
50 TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
51);
52
53/**
54 * irq_handler_exit - called immediately after the irq action handler returns
55 * @irq: irq number
56 * @action: pointer to struct irqaction
57 * @ret: return value
58 *
59 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
 60 * @action->handler successfully handled this irq. Otherwise, the irq might be
61 * a shared irq line, or the irq was not handled successfully. Can be used in
62 * conjunction with the irq_handler_entry to understand irq handler latencies.
63 */
64TRACE_EVENT(irq_handler_exit,
65
66 TP_PROTO(int irq, struct irqaction *action, int ret),
67
68 TP_ARGS(irq, action, ret),
69
70 TP_STRUCT__entry(
71 __field( int, irq )
72 __field( int, ret )
73 ),
74
75 TP_fast_assign(
76 __entry->irq = irq;
77 __entry->ret = ret;
78 ),
79
80 TP_printk("irq=%d return=%s",
81 __entry->irq, __entry->ret ? "handled" : "unhandled")
82);
83
84/**
85 * softirq_entry - called immediately before the softirq handler
86 * @h: pointer to struct softirq_action
87 * @vec: pointer to first struct softirq_action in softirq_vec array
88 *
 89 * The @h parameter contains a pointer to the struct softirq_action
90 * which has a pointer to the action handler that is called. By subtracting
91 * the @vec pointer from the @h pointer, we can determine the softirq
92 * number. Also, when used in combination with the softirq_exit tracepoint
93 * we can determine the softirq latency.
94 */
95TRACE_EVENT(softirq_entry,
96
97 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
98
99 TP_ARGS(h, vec),
100
101 TP_STRUCT__entry(
102 __field( int, vec )
103 ),
104
105 TP_fast_assign(
106 __entry->vec = (int)(h - vec);
107 ),
108
109 TP_printk("softirq=%d action=%s", __entry->vec,
110 show_softirq_name(__entry->vec))
111);
112
113/**
114 * softirq_exit - called immediately after the softirq handler returns
115 * @h: pointer to struct softirq_action
116 * @vec: pointer to first struct softirq_action in softirq_vec array
117 *
118 * The @h parameter contains a pointer to the struct softirq_action
119 * that has handled the softirq. By subtracting the @vec pointer from
120 * the @h pointer, we can determine the softirq number. Also, when used in
121 * combination with the softirq_entry tracepoint we can determine the softirq
122 * latency.
123 */
124TRACE_EVENT(softirq_exit,
125
126 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
127
128 TP_ARGS(h, vec),
129
130 TP_STRUCT__entry(
131 __field( int, vec )
132 ),
133
134 TP_fast_assign(
135 __entry->vec = (int)(h - vec);
136 ),
137
138 TP_printk("softirq=%d action=%s", __entry->vec,
139 show_softirq_name(__entry->vec))
140);
141
142#endif /* _TRACE_IRQ_H */
143
144/* This part must be outside protection */
145#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 000000000000..9baba50d6512
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,231 @@
1#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KMEM_H
3
4#include <linux/types.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kmem
9
10/*
11 * The order of these masks is important. Matching masks will be seen
12 * first and the left over flags will end up showing by themselves.
13 *
 14 * For example, if we have GFP_KERNEL before GFP_USER we will get:
15 *
16 * GFP_KERNEL|GFP_HARDWALL
17 *
18 * Thus most bits set go first.
19 */
20#define show_gfp_flags(flags) \
21 (flags) ? __print_flags(flags, "|", \
22 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
23 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
24 {(unsigned long)GFP_USER, "GFP_USER"}, \
25 {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
26 {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
27 {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
28 {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
29 {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
30 {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
31 {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
32 {(unsigned long)__GFP_IO, "GFP_IO"}, \
33 {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
34 {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
35 {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
36 {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
37 {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
38 {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
39 {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
40 {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
41 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
42 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
43 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
44 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
45 ) : "GFP_NOWAIT"
46
47TRACE_EVENT(kmalloc,
48
49 TP_PROTO(unsigned long call_site,
50 const void *ptr,
51 size_t bytes_req,
52 size_t bytes_alloc,
53 gfp_t gfp_flags),
54
55 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
56
57 TP_STRUCT__entry(
58 __field( unsigned long, call_site )
59 __field( const void *, ptr )
60 __field( size_t, bytes_req )
61 __field( size_t, bytes_alloc )
62 __field( gfp_t, gfp_flags )
63 ),
64
65 TP_fast_assign(
66 __entry->call_site = call_site;
67 __entry->ptr = ptr;
68 __entry->bytes_req = bytes_req;
69 __entry->bytes_alloc = bytes_alloc;
70 __entry->gfp_flags = gfp_flags;
71 ),
72
73 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
74 __entry->call_site,
75 __entry->ptr,
76 __entry->bytes_req,
77 __entry->bytes_alloc,
78 show_gfp_flags(__entry->gfp_flags))
79);
80
81TRACE_EVENT(kmem_cache_alloc,
82
83 TP_PROTO(unsigned long call_site,
84 const void *ptr,
85 size_t bytes_req,
86 size_t bytes_alloc,
87 gfp_t gfp_flags),
88
89 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
90
91 TP_STRUCT__entry(
92 __field( unsigned long, call_site )
93 __field( const void *, ptr )
94 __field( size_t, bytes_req )
95 __field( size_t, bytes_alloc )
96 __field( gfp_t, gfp_flags )
97 ),
98
99 TP_fast_assign(
100 __entry->call_site = call_site;
101 __entry->ptr = ptr;
102 __entry->bytes_req = bytes_req;
103 __entry->bytes_alloc = bytes_alloc;
104 __entry->gfp_flags = gfp_flags;
105 ),
106
107 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
108 __entry->call_site,
109 __entry->ptr,
110 __entry->bytes_req,
111 __entry->bytes_alloc,
112 show_gfp_flags(__entry->gfp_flags))
113);
114
115TRACE_EVENT(kmalloc_node,
116
117 TP_PROTO(unsigned long call_site,
118 const void *ptr,
119 size_t bytes_req,
120 size_t bytes_alloc,
121 gfp_t gfp_flags,
122 int node),
123
124 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
125
126 TP_STRUCT__entry(
127 __field( unsigned long, call_site )
128 __field( const void *, ptr )
129 __field( size_t, bytes_req )
130 __field( size_t, bytes_alloc )
131 __field( gfp_t, gfp_flags )
132 __field( int, node )
133 ),
134
135 TP_fast_assign(
136 __entry->call_site = call_site;
137 __entry->ptr = ptr;
138 __entry->bytes_req = bytes_req;
139 __entry->bytes_alloc = bytes_alloc;
140 __entry->gfp_flags = gfp_flags;
141 __entry->node = node;
142 ),
143
144 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
145 __entry->call_site,
146 __entry->ptr,
147 __entry->bytes_req,
148 __entry->bytes_alloc,
149 show_gfp_flags(__entry->gfp_flags),
150 __entry->node)
151);
152
153TRACE_EVENT(kmem_cache_alloc_node,
154
155 TP_PROTO(unsigned long call_site,
156 const void *ptr,
157 size_t bytes_req,
158 size_t bytes_alloc,
159 gfp_t gfp_flags,
160 int node),
161
162 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
163
164 TP_STRUCT__entry(
165 __field( unsigned long, call_site )
166 __field( const void *, ptr )
167 __field( size_t, bytes_req )
168 __field( size_t, bytes_alloc )
169 __field( gfp_t, gfp_flags )
170 __field( int, node )
171 ),
172
173 TP_fast_assign(
174 __entry->call_site = call_site;
175 __entry->ptr = ptr;
176 __entry->bytes_req = bytes_req;
177 __entry->bytes_alloc = bytes_alloc;
178 __entry->gfp_flags = gfp_flags;
179 __entry->node = node;
180 ),
181
182 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
183 __entry->call_site,
184 __entry->ptr,
185 __entry->bytes_req,
186 __entry->bytes_alloc,
187 show_gfp_flags(__entry->gfp_flags),
188 __entry->node)
189);
190
191TRACE_EVENT(kfree,
192
193 TP_PROTO(unsigned long call_site, const void *ptr),
194
195 TP_ARGS(call_site, ptr),
196
197 TP_STRUCT__entry(
198 __field( unsigned long, call_site )
199 __field( const void *, ptr )
200 ),
201
202 TP_fast_assign(
203 __entry->call_site = call_site;
204 __entry->ptr = ptr;
205 ),
206
207 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
208);
209
210TRACE_EVENT(kmem_cache_free,
211
212 TP_PROTO(unsigned long call_site, const void *ptr),
213
214 TP_ARGS(call_site, ptr),
215
216 TP_STRUCT__entry(
217 __field( unsigned long, call_site )
218 __field( const void *, ptr )
219 ),
220
221 TP_fast_assign(
222 __entry->call_site = call_site;
223 __entry->ptr = ptr;
224 ),
225
226 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
227);
228#endif /* _TRACE_KMEM_H */
229
230/* This part must be outside protection */
231#include <trace/define_trace.h>
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
new file mode 100644
index 000000000000..0e956c9dfd7e
--- /dev/null
+++ b/include/trace/events/lockdep.h
@@ -0,0 +1,96 @@
1#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lockdep
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_EVENT(lock_acquire,
13
14 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
15 int trylock, int read, int check,
16 struct lockdep_map *next_lock, unsigned long ip),
17
18 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
19
20 TP_STRUCT__entry(
21 __field(unsigned int, flags)
22 __string(name, lock->name)
23 ),
24
25 TP_fast_assign(
26 __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
27 __assign_str(name, lock->name);
28 ),
29
30 TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
31 (__entry->flags & 2) ? "read " : "",
32 __get_str(name))
33);
34
35TRACE_EVENT(lock_release,
36
37 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
38
39 TP_ARGS(lock, nested, ip),
40
41 TP_STRUCT__entry(
42 __string(name, lock->name)
43 ),
44
45 TP_fast_assign(
46 __assign_str(name, lock->name);
47 ),
48
49 TP_printk("%s", __get_str(name))
50);
51
52#ifdef CONFIG_LOCK_STAT
53
54TRACE_EVENT(lock_contended,
55
56 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
57
58 TP_ARGS(lock, ip),
59
60 TP_STRUCT__entry(
61 __string(name, lock->name)
62 ),
63
64 TP_fast_assign(
65 __assign_str(name, lock->name);
66 ),
67
68 TP_printk("%s", __get_str(name))
69);
70
71TRACE_EVENT(lock_acquired,
72 TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
73
74 TP_ARGS(lock, ip, waittime),
75
76 TP_STRUCT__entry(
77 __string(name, lock->name)
78 __field(unsigned long, wait_usec)
79 __field(unsigned long, wait_nsec_rem)
80 ),
81 TP_fast_assign(
82 __assign_str(name, lock->name);
83 __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
84 __entry->wait_usec = (unsigned long) waittime;
85 ),
86 TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
87 __entry->wait_nsec_rem)
88);
89
90#endif
91#endif
92
93#endif /* _TRACE_LOCKDEP_H */
94
95/* This part must be outside protection */
96#include <trace/define_trace.h>
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
new file mode 100644
index 000000000000..a8989c4547e7
--- /dev/null
+++ b/include/trace/events/napi.h
@@ -0,0 +1,11 @@
1#ifndef _TRACE_NAPI_H_
2#define _TRACE_NAPI_H_
3
4#include <linux/netdevice.h>
5#include <linux/tracepoint.h>
6
7DECLARE_TRACE(napi_poll,
8 TP_PROTO(struct napi_struct *napi),
9 TP_ARGS(napi));
10
11#endif
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
new file mode 100644
index 000000000000..24ab5bcff7b2
--- /dev/null
+++ b/include/trace/events/sched.h
@@ -0,0 +1,346 @@
1#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_SCHED_H
3
4#include <linux/sched.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM sched
9
10/*
11 * Tracepoint for calling kthread_stop, performed to end a kthread:
12 */
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread.
 * Records the comm and pid of the kthread being stopped.
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("task %s:%d", __entry->comm, __entry->pid)
);
31
32/*
33 * Tracepoint for the return value of the kthread stopping:
34 */
/*
 * Tracepoint for the return value of kthread_stop():
 * fired when the stopped kthread's exit code is collected.
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret %d", __entry->ret)
);
51
52/*
53 * Tracepoint for waiting on task to unschedule:
54 *
55 * (NOTE: the 'rq' argument is not used by generic trace events,
56 * but used by the latency tracer plugin. )
57 */
/*
 * Tracepoint for waiting on a task to unschedule.
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *  but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wait_task,

	TP_PROTO(struct rq *rq, struct task_struct *p),

	TP_ARGS(rq, p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);
79
80/*
81 * Tracepoint for waking up a task:
82 *
83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. )
85 */
/*
 * Tracepoint for waking up a task; 'success' is non-zero when the task
 * was actually moved to the runnable state by this wakeup.
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *  but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wakeup,

	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
	),

	TP_printk("task %s:%d [%d] success=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success)
);
110
111/*
112 * Tracepoint for waking up a new task:
113 *
114 * (NOTE: the 'rq' argument is not used by generic trace events,
115 * but used by the latency tracer plugin. )
116 */
/*
 * Tracepoint for waking up a newly created task for the first time.
 * Same payload as sched_wakeup; kept as a distinct event so the two
 * wakeup paths can be filtered independently.
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *  but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_wakeup_new,

	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
	),

	TP_printk("task %s:%d [%d] success=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success)
);
141
142/*
143 * Tracepoint for task switches, performed by the scheduler:
144 *
145 * (NOTE: the 'rq' argument is not used by generic trace events,
146 * but used by the latency tracer plugin. )
147 */
/*
 * Tracepoint for task switches, performed by the scheduler.
 * Records comm/pid/prio of both the outgoing (prev) and incoming (next)
 * task, plus prev's state, printed as ps(1)-style letters:
 * S=interruptible, D=uninterruptible, T=stopped, t=traced,
 * Z=zombie, X=dead, x=task-dead, W=wakekill; "R" when runnable (state 0).
 *
 * (NOTE: the 'rq' argument is not used by generic trace events,
 *  but used by the latency tracer plugin.)
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= prev->state;
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
184
185/*
186 * Tracepoint for a task being migrated:
187 */
/*
 * Tracepoint for a task being migrated to another CPU.
 * orig_cpu is read from the task at trace time via task_cpu(p).
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("task %s:%d [%d] from: %d to: %d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
214
215/*
216 * Tracepoint for freeing a task:
217 */
/*
 * Tracepoint for freeing a task (its task_struct being released).
 */
TRACE_EVENT(sched_process_free,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);
239
240/*
241 * Tracepoint for a task exiting:
242 */
/*
 * Tracepoint for a task exiting.
 */
TRACE_EVENT(sched_process_exit,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);
264
265/*
266 * Tracepoint for a waiting task:
267 */
/*
 * Tracepoint for a task waiting on a child:
 * comm and prio are those of the *waiting* task (current), while pid is
 * the pid being waited for (0 when waiting on any child).
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid	= pid_nr(pid);
		__entry->prio	= current->prio;
	),

	TP_printk("task %s:%d [%d]",
		  __entry->comm, __entry->pid, __entry->prio)
);
289
290/*
291 * Tracepoint for do_fork:
292 */
/*
 * Tracepoint for do_fork: records comm/pid of both the parent and the
 * newly created child.
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("parent %s:%d child %s:%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
317
318/*
319 * Tracepoint for sending a signal:
320 */
/*
 * Tracepoint for sending a signal: records the signal number and the
 * comm/pid of the *receiving* task.
 */
TRACE_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_STRUCT__entry(
		__field(	int,	sig			)
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->sig	= sig;
	),

	TP_printk("sig: %d task %s:%d",
		__entry->sig, __entry->comm, __entry->pid)
);
342
343#endif /* _TRACE_SCHED_H */
344
345/* This part must be outside protection */
346#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 000000000000..1e8fabb57c06
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,40 @@
1#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_SKB_H
3
4#include <linux/skbuff.h>
5#include <linux/tracepoint.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM skb
9
10/*
11 * Tracepoint for free an sk_buff:
12 */
13TRACE_EVENT(kfree_skb,
14
15 TP_PROTO(struct sk_buff *skb, void *location),
16
17 TP_ARGS(skb, location),
18
19 TP_STRUCT__entry(
20 __field( void *, skbaddr )
21 __field( unsigned short, protocol )
22 __field( void *, location )
23 ),
24
25 TP_fast_assign(
26 __entry->skbaddr = skb;
27 if (skb) {
28 __entry->protocol = ntohs(skb->protocol);
29 }
30 __entry->location = location;
31 ),
32
33 TP_printk("skbaddr=%p protocol=%u location=%p",
34 __entry->skbaddr, __entry->protocol, __entry->location)
35);
36
37#endif /* _TRACE_SKB_H */
38
39/* This part must be outside protection */
40#include <trace/define_trace.h>
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 000000000000..035f1bff288e
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,100 @@
1#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_WORKQUEUE_H
3
4#include <linux/workqueue.h>
5#include <linux/sched.h>
6#include <linux/tracepoint.h>
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM workqueue
10
/*
 * Tracepoint fired when a work item is inserted into a workqueue:
 * records the worker thread's comm/pid and the work function pointer.
 */
TRACE_EVENT(workqueue_insertion,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work),

	TP_STRUCT__entry(
		__array(char,		thread_comm,	TASK_COMM_LEN)
		__field(pid_t,		thread_pid)
		__field(work_func_t,	func)
	),

	TP_fast_assign(
		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
		__entry->thread_pid	= wq_thread->pid;
		__entry->func		= work->func;
	),

	/* %pF resolves the function pointer to a symbol name */
	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
		__entry->thread_pid, __entry->func)
);
32
/*
 * Tracepoint fired when a worker thread starts executing a work item;
 * same payload as workqueue_insertion so insertion-to-execution latency
 * can be derived by pairing the two events.
 */
TRACE_EVENT(workqueue_execution,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work),

	TP_STRUCT__entry(
		__array(char,		thread_comm,	TASK_COMM_LEN)
		__field(pid_t,		thread_pid)
		__field(work_func_t,	func)
	),

	TP_fast_assign(
		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
		__entry->thread_pid	= wq_thread->pid;
		__entry->func		= work->func;
	),

	/* %pF resolves the function pointer to a symbol name */
	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
		__entry->thread_pid, __entry->func)
);
54
55/* Trace the creation of one workqueue thread on a cpu */
/* Trace the creation of one workqueue thread on a cpu */
TRACE_EVENT(workqueue_creation,

	TP_PROTO(struct task_struct *wq_thread, int cpu),

	TP_ARGS(wq_thread, cpu),

	TP_STRUCT__entry(
		__array(char,	thread_comm,	TASK_COMM_LEN)
		__field(pid_t,	thread_pid)
		__field(int,	cpu)
	),

	TP_fast_assign(
		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
		__entry->thread_pid	= wq_thread->pid;
		__entry->cpu		= cpu;
	),

	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
		__entry->thread_pid, __entry->cpu)
);
77
/*
 * Tracepoint fired when a workqueue worker thread is torn down.
 */
TRACE_EVENT(workqueue_destruction,

	TP_PROTO(struct task_struct *wq_thread),

	TP_ARGS(wq_thread),

	TP_STRUCT__entry(
		__array(char,	thread_comm,	TASK_COMM_LEN)
		__field(pid_t,	thread_pid)
	),

	TP_fast_assign(
		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
		__entry->thread_pid	= wq_thread->pid;
	),

	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
);
96
97#endif /* _TRACE_WORKQUEUE_H */
98
99/* This part must be outside protection */
100#include <trace/define_trace.h>