about summary refs log tree commit diff stats
path: root/include/trace/events
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2009-07-23 00:47:29 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2009-07-23 00:47:29 -0400
commitbd072111e7319d90a7b8127f91c2806b9a6f279e (patch)
tree1686978814a2387ebfc16f9f5778a7f0caaf319b /include/trace/events
parent24d01c0681bfbc10a99304c48a89ad213d2d7a4b (diff)
parent4be3bd7849165e7efa6b0b35a23d6a3598d97465 (diff)
Merge commit 'v2.6.31-rc4' into next
Diffstat (limited to 'include/trace/events')
-rw-r--r--include/trace/events/block.h493
-rw-r--r--include/trace/events/ext4.h677
-rw-r--r--include/trace/events/irq.h145
-rw-r--r--include/trace/events/jbd2.h168
-rw-r--r--include/trace/events/kmem.h231
-rw-r--r--include/trace/events/lockdep.h96
-rw-r--r--include/trace/events/napi.h11
-rw-r--r--include/trace/events/sched.h346
-rw-r--r--include/trace/events/skb.h40
-rw-r--r--include/trace/events/workqueue.h100
10 files changed, 2307 insertions, 0 deletions
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 000000000000..9a74b468a229
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,493 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM block
3
4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BLOCK_H
6
7#include <linux/blktrace_api.h>
8#include <linux/blkdev.h>
9#include <linux/tracepoint.h>
10
11TRACE_EVENT(block_rq_abort,
12
13 TP_PROTO(struct request_queue *q, struct request *rq),
14
15 TP_ARGS(q, rq),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( sector_t, sector )
20 __field( unsigned int, nr_sector )
21 __field( int, errors )
22 __array( char, rwbs, 6 )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
29 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
30 __entry->errors = rq->errors;
31
32 blk_fill_rwbs_rq(__entry->rwbs, rq);
33 blk_dump_cmd(__get_str(cmd), rq);
34 ),
35
36 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
37 MAJOR(__entry->dev), MINOR(__entry->dev),
38 __entry->rwbs, __get_str(cmd),
39 (unsigned long long)__entry->sector,
40 __entry->nr_sector, __entry->errors)
41);
42
43TRACE_EVENT(block_rq_insert,
44
45 TP_PROTO(struct request_queue *q, struct request *rq),
46
47 TP_ARGS(q, rq),
48
49 TP_STRUCT__entry(
50 __field( dev_t, dev )
51 __field( sector_t, sector )
52 __field( unsigned int, nr_sector )
53 __field( unsigned int, bytes )
54 __array( char, rwbs, 6 )
55 __array( char, comm, TASK_COMM_LEN )
56 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
57 ),
58
59 TP_fast_assign(
60 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
61 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
62 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
63 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
64
65 blk_fill_rwbs_rq(__entry->rwbs, rq);
66 blk_dump_cmd(__get_str(cmd), rq);
67 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
68 ),
69
70 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
71 MAJOR(__entry->dev), MINOR(__entry->dev),
72 __entry->rwbs, __entry->bytes, __get_str(cmd),
73 (unsigned long long)__entry->sector,
74 __entry->nr_sector, __entry->comm)
75);
76
77TRACE_EVENT(block_rq_issue,
78
79 TP_PROTO(struct request_queue *q, struct request *rq),
80
81 TP_ARGS(q, rq),
82
83 TP_STRUCT__entry(
84 __field( dev_t, dev )
85 __field( sector_t, sector )
86 __field( unsigned int, nr_sector )
87 __field( unsigned int, bytes )
88 __array( char, rwbs, 6 )
89 __array( char, comm, TASK_COMM_LEN )
90 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
91 ),
92
93 TP_fast_assign(
94 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
95 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
96 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
97 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
98
99 blk_fill_rwbs_rq(__entry->rwbs, rq);
100 blk_dump_cmd(__get_str(cmd), rq);
101 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
102 ),
103
104 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
105 MAJOR(__entry->dev), MINOR(__entry->dev),
106 __entry->rwbs, __entry->bytes, __get_str(cmd),
107 (unsigned long long)__entry->sector,
108 __entry->nr_sector, __entry->comm)
109);
110
111TRACE_EVENT(block_rq_requeue,
112
113 TP_PROTO(struct request_queue *q, struct request *rq),
114
115 TP_ARGS(q, rq),
116
117 TP_STRUCT__entry(
118 __field( dev_t, dev )
119 __field( sector_t, sector )
120 __field( unsigned int, nr_sector )
121 __field( int, errors )
122 __array( char, rwbs, 6 )
123 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
124 ),
125
126 TP_fast_assign(
127 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
128 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
129 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
130 __entry->errors = rq->errors;
131
132 blk_fill_rwbs_rq(__entry->rwbs, rq);
133 blk_dump_cmd(__get_str(cmd), rq);
134 ),
135
136 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
137 MAJOR(__entry->dev), MINOR(__entry->dev),
138 __entry->rwbs, __get_str(cmd),
139 (unsigned long long)__entry->sector,
140 __entry->nr_sector, __entry->errors)
141);
142
143TRACE_EVENT(block_rq_complete,
144
145 TP_PROTO(struct request_queue *q, struct request *rq),
146
147 TP_ARGS(q, rq),
148
149 TP_STRUCT__entry(
150 __field( dev_t, dev )
151 __field( sector_t, sector )
152 __field( unsigned int, nr_sector )
153 __field( int, errors )
154 __array( char, rwbs, 6 )
155 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
160 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
161 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
162 __entry->errors = rq->errors;
163
164 blk_fill_rwbs_rq(__entry->rwbs, rq);
165 blk_dump_cmd(__get_str(cmd), rq);
166 ),
167
168 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
169 MAJOR(__entry->dev), MINOR(__entry->dev),
170 __entry->rwbs, __get_str(cmd),
171 (unsigned long long)__entry->sector,
172 __entry->nr_sector, __entry->errors)
173);
174TRACE_EVENT(block_bio_bounce,
175
176 TP_PROTO(struct request_queue *q, struct bio *bio),
177
178 TP_ARGS(q, bio),
179
180 TP_STRUCT__entry(
181 __field( dev_t, dev )
182 __field( sector_t, sector )
183 __field( unsigned int, nr_sector )
184 __array( char, rwbs, 6 )
185 __array( char, comm, TASK_COMM_LEN )
186 ),
187
188 TP_fast_assign(
189 __entry->dev = bio->bi_bdev->bd_dev;
190 __entry->sector = bio->bi_sector;
191 __entry->nr_sector = bio->bi_size >> 9;
192 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
193 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
194 ),
195
196 TP_printk("%d,%d %s %llu + %u [%s]",
197 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
198 (unsigned long long)__entry->sector,
199 __entry->nr_sector, __entry->comm)
200);
201
202TRACE_EVENT(block_bio_complete,
203
204 TP_PROTO(struct request_queue *q, struct bio *bio),
205
206 TP_ARGS(q, bio),
207
208 TP_STRUCT__entry(
209 __field( dev_t, dev )
210 __field( sector_t, sector )
211 __field( unsigned, nr_sector )
212 __field( int, error )
213 __array( char, rwbs, 6 )
214 ),
215
216 TP_fast_assign(
217 __entry->dev = bio->bi_bdev->bd_dev;
218 __entry->sector = bio->bi_sector;
219 __entry->nr_sector = bio->bi_size >> 9;
220 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
221 ),
222
223 TP_printk("%d,%d %s %llu + %u [%d]",
224 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
225 (unsigned long long)__entry->sector,
226 __entry->nr_sector, __entry->error)
227);
228
229TRACE_EVENT(block_bio_backmerge,
230
231 TP_PROTO(struct request_queue *q, struct bio *bio),
232
233 TP_ARGS(q, bio),
234
235 TP_STRUCT__entry(
236 __field( dev_t, dev )
237 __field( sector_t, sector )
238 __field( unsigned int, nr_sector )
239 __array( char, rwbs, 6 )
240 __array( char, comm, TASK_COMM_LEN )
241 ),
242
243 TP_fast_assign(
244 __entry->dev = bio->bi_bdev->bd_dev;
245 __entry->sector = bio->bi_sector;
246 __entry->nr_sector = bio->bi_size >> 9;
247 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
248 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
249 ),
250
251 TP_printk("%d,%d %s %llu + %u [%s]",
252 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
253 (unsigned long long)__entry->sector,
254 __entry->nr_sector, __entry->comm)
255);
256
257TRACE_EVENT(block_bio_frontmerge,
258
259 TP_PROTO(struct request_queue *q, struct bio *bio),
260
261 TP_ARGS(q, bio),
262
263 TP_STRUCT__entry(
264 __field( dev_t, dev )
265 __field( sector_t, sector )
266 __field( unsigned, nr_sector )
267 __array( char, rwbs, 6 )
268 __array( char, comm, TASK_COMM_LEN )
269 ),
270
271 TP_fast_assign(
272 __entry->dev = bio->bi_bdev->bd_dev;
273 __entry->sector = bio->bi_sector;
274 __entry->nr_sector = bio->bi_size >> 9;
275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
276 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
277 ),
278
279 TP_printk("%d,%d %s %llu + %u [%s]",
280 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
281 (unsigned long long)__entry->sector,
282 __entry->nr_sector, __entry->comm)
283);
284
285TRACE_EVENT(block_bio_queue,
286
287 TP_PROTO(struct request_queue *q, struct bio *bio),
288
289 TP_ARGS(q, bio),
290
291 TP_STRUCT__entry(
292 __field( dev_t, dev )
293 __field( sector_t, sector )
294 __field( unsigned int, nr_sector )
295 __array( char, rwbs, 6 )
296 __array( char, comm, TASK_COMM_LEN )
297 ),
298
299 TP_fast_assign(
300 __entry->dev = bio->bi_bdev->bd_dev;
301 __entry->sector = bio->bi_sector;
302 __entry->nr_sector = bio->bi_size >> 9;
303 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
304 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
305 ),
306
307 TP_printk("%d,%d %s %llu + %u [%s]",
308 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
309 (unsigned long long)__entry->sector,
310 __entry->nr_sector, __entry->comm)
311);
312
313TRACE_EVENT(block_getrq,
314
315 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
316
317 TP_ARGS(q, bio, rw),
318
319 TP_STRUCT__entry(
320 __field( dev_t, dev )
321 __field( sector_t, sector )
322 __field( unsigned int, nr_sector )
323 __array( char, rwbs, 6 )
324 __array( char, comm, TASK_COMM_LEN )
325 ),
326
327 TP_fast_assign(
328 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
329 __entry->sector = bio ? bio->bi_sector : 0;
330 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
331 blk_fill_rwbs(__entry->rwbs,
332 bio ? bio->bi_rw : 0, __entry->nr_sector);
333 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
334 ),
335
336 TP_printk("%d,%d %s %llu + %u [%s]",
337 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
338 (unsigned long long)__entry->sector,
339 __entry->nr_sector, __entry->comm)
340);
341
342TRACE_EVENT(block_sleeprq,
343
344 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
345
346 TP_ARGS(q, bio, rw),
347
348 TP_STRUCT__entry(
349 __field( dev_t, dev )
350 __field( sector_t, sector )
351 __field( unsigned int, nr_sector )
352 __array( char, rwbs, 6 )
353 __array( char, comm, TASK_COMM_LEN )
354 ),
355
356 TP_fast_assign(
357 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
358 __entry->sector = bio ? bio->bi_sector : 0;
359 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
360 blk_fill_rwbs(__entry->rwbs,
361 bio ? bio->bi_rw : 0, __entry->nr_sector);
362 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
363 ),
364
365 TP_printk("%d,%d %s %llu + %u [%s]",
366 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
367 (unsigned long long)__entry->sector,
368 __entry->nr_sector, __entry->comm)
369);
370
371TRACE_EVENT(block_plug,
372
373 TP_PROTO(struct request_queue *q),
374
375 TP_ARGS(q),
376
377 TP_STRUCT__entry(
378 __array( char, comm, TASK_COMM_LEN )
379 ),
380
381 TP_fast_assign(
382 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
383 ),
384
385 TP_printk("[%s]", __entry->comm)
386);
387
388TRACE_EVENT(block_unplug_timer,
389
390 TP_PROTO(struct request_queue *q),
391
392 TP_ARGS(q),
393
394 TP_STRUCT__entry(
395 __field( int, nr_rq )
396 __array( char, comm, TASK_COMM_LEN )
397 ),
398
399 TP_fast_assign(
400 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
401 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
402 ),
403
404 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
405);
406
407TRACE_EVENT(block_unplug_io,
408
409 TP_PROTO(struct request_queue *q),
410
411 TP_ARGS(q),
412
413 TP_STRUCT__entry(
414 __field( int, nr_rq )
415 __array( char, comm, TASK_COMM_LEN )
416 ),
417
418 TP_fast_assign(
419 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
420 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
421 ),
422
423 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
424);
425
426TRACE_EVENT(block_split,
427
428 TP_PROTO(struct request_queue *q, struct bio *bio,
429 unsigned int new_sector),
430
431 TP_ARGS(q, bio, new_sector),
432
433 TP_STRUCT__entry(
434 __field( dev_t, dev )
435 __field( sector_t, sector )
436 __field( sector_t, new_sector )
437 __array( char, rwbs, 6 )
438 __array( char, comm, TASK_COMM_LEN )
439 ),
440
441 TP_fast_assign(
442 __entry->dev = bio->bi_bdev->bd_dev;
443 __entry->sector = bio->bi_sector;
444 __entry->new_sector = new_sector;
445 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
446 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
447 ),
448
449 TP_printk("%d,%d %s %llu / %llu [%s]",
450 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
451 (unsigned long long)__entry->sector,
452 (unsigned long long)__entry->new_sector,
453 __entry->comm)
454);
455
456TRACE_EVENT(block_remap,
457
458 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
459 sector_t from),
460
461 TP_ARGS(q, bio, dev, from),
462
463 TP_STRUCT__entry(
464 __field( dev_t, dev )
465 __field( sector_t, sector )
466 __field( unsigned int, nr_sector )
467 __field( dev_t, old_dev )
468 __field( sector_t, old_sector )
469 __array( char, rwbs, 6 )
470 ),
471
472 TP_fast_assign(
473 __entry->dev = bio->bi_bdev->bd_dev;
474 __entry->sector = bio->bi_sector;
475 __entry->nr_sector = bio->bi_size >> 9;
476 __entry->old_dev = dev;
477 __entry->old_sector = from;
478 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
479 ),
480
481 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
482 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
483 (unsigned long long)__entry->sector,
484 __entry->nr_sector,
485 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
486 (unsigned long long)__entry->old_sector)
487);
488
489#endif /* _TRACE_BLOCK_H */
490
491/* This part must be outside protection */
492#include <trace/define_trace.h>
493
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
new file mode 100644
index 000000000000..7d8b5bc74185
--- /dev/null
+++ b/include/trace/events/ext4.h
@@ -0,0 +1,677 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM ext4
3
4#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EXT4_H
6
7#include <linux/writeback.h>
8#include "../../../fs/ext4/ext4.h"
9#include "../../../fs/ext4/mballoc.h"
10#include <linux/tracepoint.h>
11
12TRACE_EVENT(ext4_free_inode,
13 TP_PROTO(struct inode *inode),
14
15 TP_ARGS(inode),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( ino_t, ino )
20 __field( umode_t, mode )
21 __field( uid_t, uid )
22 __field( gid_t, gid )
23 __field( blkcnt_t, blocks )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = inode->i_sb->s_dev;
28 __entry->ino = inode->i_ino;
29 __entry->mode = inode->i_mode;
30 __entry->uid = inode->i_uid;
31 __entry->gid = inode->i_gid;
32 __entry->blocks = inode->i_blocks;
33 ),
34
35 TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
36 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
37 __entry->uid, __entry->gid,
38 (unsigned long long) __entry->blocks)
39);
40
41TRACE_EVENT(ext4_request_inode,
42 TP_PROTO(struct inode *dir, int mode),
43
44 TP_ARGS(dir, mode),
45
46 TP_STRUCT__entry(
47 __field( dev_t, dev )
48 __field( ino_t, dir )
49 __field( umode_t, mode )
50 ),
51
52 TP_fast_assign(
53 __entry->dev = dir->i_sb->s_dev;
54 __entry->dir = dir->i_ino;
55 __entry->mode = mode;
56 ),
57
58 TP_printk("dev %s dir %lu mode %d",
59 jbd2_dev_to_name(__entry->dev), __entry->dir, __entry->mode)
60);
61
62TRACE_EVENT(ext4_allocate_inode,
63 TP_PROTO(struct inode *inode, struct inode *dir, int mode),
64
65 TP_ARGS(inode, dir, mode),
66
67 TP_STRUCT__entry(
68 __field( dev_t, dev )
69 __field( ino_t, ino )
70 __field( ino_t, dir )
71 __field( umode_t, mode )
72 ),
73
74 TP_fast_assign(
75 __entry->dev = inode->i_sb->s_dev;
76 __entry->ino = inode->i_ino;
77 __entry->dir = dir->i_ino;
78 __entry->mode = mode;
79 ),
80
81 TP_printk("dev %s ino %lu dir %lu mode %d",
82 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->dir, __entry->mode)
83);
84
85TRACE_EVENT(ext4_write_begin,
86
87 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
88 unsigned int flags),
89
90 TP_ARGS(inode, pos, len, flags),
91
92 TP_STRUCT__entry(
93 __field( dev_t, dev )
94 __field( ino_t, ino )
95 __field( loff_t, pos )
96 __field( unsigned int, len )
97 __field( unsigned int, flags )
98 ),
99
100 TP_fast_assign(
101 __entry->dev = inode->i_sb->s_dev;
102 __entry->ino = inode->i_ino;
103 __entry->pos = pos;
104 __entry->len = len;
105 __entry->flags = flags;
106 ),
107
108 TP_printk("dev %s ino %lu pos %llu len %u flags %u",
109 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
110 __entry->flags)
111);
112
113TRACE_EVENT(ext4_ordered_write_end,
114 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
115 unsigned int copied),
116
117 TP_ARGS(inode, pos, len, copied),
118
119 TP_STRUCT__entry(
120 __field( dev_t, dev )
121 __field( ino_t, ino )
122 __field( loff_t, pos )
123 __field( unsigned int, len )
124 __field( unsigned int, copied )
125 ),
126
127 TP_fast_assign(
128 __entry->dev = inode->i_sb->s_dev;
129 __entry->ino = inode->i_ino;
130 __entry->pos = pos;
131 __entry->len = len;
132 __entry->copied = copied;
133 ),
134
135 TP_printk("dev %s ino %lu pos %llu len %u copied %u",
136 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
137 __entry->copied)
138);
139
140TRACE_EVENT(ext4_writeback_write_end,
141 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
142 unsigned int copied),
143
144 TP_ARGS(inode, pos, len, copied),
145
146 TP_STRUCT__entry(
147 __field( dev_t, dev )
148 __field( ino_t, ino )
149 __field( loff_t, pos )
150 __field( unsigned int, len )
151 __field( unsigned int, copied )
152 ),
153
154 TP_fast_assign(
155 __entry->dev = inode->i_sb->s_dev;
156 __entry->ino = inode->i_ino;
157 __entry->pos = pos;
158 __entry->len = len;
159 __entry->copied = copied;
160 ),
161
162 TP_printk("dev %s ino %lu pos %llu len %u copied %u",
163 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
164 __entry->copied)
165);
166
167TRACE_EVENT(ext4_journalled_write_end,
168 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
169 unsigned int copied),
170 TP_ARGS(inode, pos, len, copied),
171
172 TP_STRUCT__entry(
173 __field( dev_t, dev )
174 __field( ino_t, ino )
175 __field( loff_t, pos )
176 __field( unsigned int, len )
177 __field( unsigned int, copied )
178 ),
179
180 TP_fast_assign(
181 __entry->dev = inode->i_sb->s_dev;
182 __entry->ino = inode->i_ino;
183 __entry->pos = pos;
184 __entry->len = len;
185 __entry->copied = copied;
186 ),
187
188 TP_printk("dev %s ino %lu pos %llu len %u copied %u",
189 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
190 __entry->copied)
191);
192
193TRACE_EVENT(ext4_writepage,
194 TP_PROTO(struct inode *inode, struct page *page),
195
196 TP_ARGS(inode, page),
197
198 TP_STRUCT__entry(
199 __field( dev_t, dev )
200 __field( ino_t, ino )
201 __field( pgoff_t, index )
202
203 ),
204
205 TP_fast_assign(
206 __entry->dev = inode->i_sb->s_dev;
207 __entry->ino = inode->i_ino;
208 __entry->index = page->index;
209 ),
210
211 TP_printk("dev %s ino %lu page_index %lu",
212 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
213);
214
215TRACE_EVENT(ext4_da_writepages,
216 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
217
218 TP_ARGS(inode, wbc),
219
220 TP_STRUCT__entry(
221 __field( dev_t, dev )
222 __field( ino_t, ino )
223 __field( long, nr_to_write )
224 __field( long, pages_skipped )
225 __field( loff_t, range_start )
226 __field( loff_t, range_end )
227 __field( char, nonblocking )
228 __field( char, for_kupdate )
229 __field( char, for_reclaim )
230 __field( char, for_writepages )
231 __field( char, range_cyclic )
232 ),
233
234 TP_fast_assign(
235 __entry->dev = inode->i_sb->s_dev;
236 __entry->ino = inode->i_ino;
237 __entry->nr_to_write = wbc->nr_to_write;
238 __entry->pages_skipped = wbc->pages_skipped;
239 __entry->range_start = wbc->range_start;
240 __entry->range_end = wbc->range_end;
241 __entry->nonblocking = wbc->nonblocking;
242 __entry->for_kupdate = wbc->for_kupdate;
243 __entry->for_reclaim = wbc->for_reclaim;
244 __entry->for_writepages = wbc->for_writepages;
245 __entry->range_cyclic = wbc->range_cyclic;
246 ),
247
248 TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d",
249 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write,
250 __entry->pages_skipped, __entry->range_start,
251 __entry->range_end, __entry->nonblocking,
252 __entry->for_kupdate, __entry->for_reclaim,
253 __entry->for_writepages, __entry->range_cyclic)
254);
255
256TRACE_EVENT(ext4_da_writepages_result,
257 TP_PROTO(struct inode *inode, struct writeback_control *wbc,
258 int ret, int pages_written),
259
260 TP_ARGS(inode, wbc, ret, pages_written),
261
262 TP_STRUCT__entry(
263 __field( dev_t, dev )
264 __field( ino_t, ino )
265 __field( int, ret )
266 __field( int, pages_written )
267 __field( long, pages_skipped )
268 __field( char, encountered_congestion )
269 __field( char, more_io )
270 __field( char, no_nrwrite_index_update )
271 ),
272
273 TP_fast_assign(
274 __entry->dev = inode->i_sb->s_dev;
275 __entry->ino = inode->i_ino;
276 __entry->ret = ret;
277 __entry->pages_written = pages_written;
278 __entry->pages_skipped = wbc->pages_skipped;
279 __entry->encountered_congestion = wbc->encountered_congestion;
280 __entry->more_io = wbc->more_io;
281 __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
282 ),
283
284 TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
285 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->ret,
286 __entry->pages_written, __entry->pages_skipped,
287 __entry->encountered_congestion, __entry->more_io,
288 __entry->no_nrwrite_index_update)
289);
290
291TRACE_EVENT(ext4_da_write_begin,
292 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
293 unsigned int flags),
294
295 TP_ARGS(inode, pos, len, flags),
296
297 TP_STRUCT__entry(
298 __field( dev_t, dev )
299 __field( ino_t, ino )
300 __field( loff_t, pos )
301 __field( unsigned int, len )
302 __field( unsigned int, flags )
303 ),
304
305 TP_fast_assign(
306 __entry->dev = inode->i_sb->s_dev;
307 __entry->ino = inode->i_ino;
308 __entry->pos = pos;
309 __entry->len = len;
310 __entry->flags = flags;
311 ),
312
313 TP_printk("dev %s ino %lu pos %llu len %u flags %u",
314 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
315 __entry->flags)
316);
317
318TRACE_EVENT(ext4_da_write_end,
319 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
320 unsigned int copied),
321
322 TP_ARGS(inode, pos, len, copied),
323
324 TP_STRUCT__entry(
325 __field( dev_t, dev )
326 __field( ino_t, ino )
327 __field( loff_t, pos )
328 __field( unsigned int, len )
329 __field( unsigned int, copied )
330 ),
331
332 TP_fast_assign(
333 __entry->dev = inode->i_sb->s_dev;
334 __entry->ino = inode->i_ino;
335 __entry->pos = pos;
336 __entry->len = len;
337 __entry->copied = copied;
338 ),
339
340 TP_printk("dev %s ino %lu pos %llu len %u copied %u",
341 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
342 __entry->copied)
343);
344
345TRACE_EVENT(ext4_discard_blocks,
346 TP_PROTO(struct super_block *sb, unsigned long long blk,
347 unsigned long long count),
348
349 TP_ARGS(sb, blk, count),
350
351 TP_STRUCT__entry(
352 __field( dev_t, dev )
353 __field( __u64, blk )
354 __field( __u64, count )
355
356 ),
357
358 TP_fast_assign(
359 __entry->dev = sb->s_dev;
360 __entry->blk = blk;
361 __entry->count = count;
362 ),
363
364 TP_printk("dev %s blk %llu count %llu",
365 jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count)
366);
367
368TRACE_EVENT(ext4_mb_new_inode_pa,
369 TP_PROTO(struct ext4_allocation_context *ac,
370 struct ext4_prealloc_space *pa),
371
372 TP_ARGS(ac, pa),
373
374 TP_STRUCT__entry(
375 __field( dev_t, dev )
376 __field( ino_t, ino )
377 __field( __u64, pa_pstart )
378 __field( __u32, pa_len )
379 __field( __u64, pa_lstart )
380
381 ),
382
383 TP_fast_assign(
384 __entry->dev = ac->ac_sb->s_dev;
385 __entry->ino = ac->ac_inode->i_ino;
386 __entry->pa_pstart = pa->pa_pstart;
387 __entry->pa_len = pa->pa_len;
388 __entry->pa_lstart = pa->pa_lstart;
389 ),
390
391 TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
392 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
393 __entry->pa_len, __entry->pa_lstart)
394);
395
396TRACE_EVENT(ext4_mb_new_group_pa,
397 TP_PROTO(struct ext4_allocation_context *ac,
398 struct ext4_prealloc_space *pa),
399
400 TP_ARGS(ac, pa),
401
402 TP_STRUCT__entry(
403 __field( dev_t, dev )
404 __field( ino_t, ino )
405 __field( __u64, pa_pstart )
406 __field( __u32, pa_len )
407 __field( __u64, pa_lstart )
408
409 ),
410
411 TP_fast_assign(
412 __entry->dev = ac->ac_sb->s_dev;
413 __entry->ino = ac->ac_inode->i_ino;
414 __entry->pa_pstart = pa->pa_pstart;
415 __entry->pa_len = pa->pa_len;
416 __entry->pa_lstart = pa->pa_lstart;
417 ),
418
419 TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
420 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
421 __entry->pa_len, __entry->pa_lstart)
422);
423
424TRACE_EVENT(ext4_mb_release_inode_pa,
425 TP_PROTO(struct ext4_allocation_context *ac,
426 struct ext4_prealloc_space *pa,
427 unsigned long long block, unsigned int count),
428
429 TP_ARGS(ac, pa, block, count),
430
431 TP_STRUCT__entry(
432 __field( dev_t, dev )
433 __field( ino_t, ino )
434 __field( __u64, block )
435 __field( __u32, count )
436
437 ),
438
439 TP_fast_assign(
440 __entry->dev = ac->ac_sb->s_dev;
441 __entry->ino = ac->ac_inode->i_ino;
442 __entry->block = block;
443 __entry->count = count;
444 ),
445
446 TP_printk("dev %s ino %lu block %llu count %u",
447 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
448 __entry->count)
449);
450
451TRACE_EVENT(ext4_mb_release_group_pa,
452 TP_PROTO(struct ext4_allocation_context *ac,
453 struct ext4_prealloc_space *pa),
454
455 TP_ARGS(ac, pa),
456
457 TP_STRUCT__entry(
458 __field( dev_t, dev )
459 __field( ino_t, ino )
460 __field( __u64, pa_pstart )
461 __field( __u32, pa_len )
462
463 ),
464
465 TP_fast_assign(
466 __entry->dev = ac->ac_sb->s_dev;
467 __entry->ino = ac->ac_inode->i_ino;
468 __entry->pa_pstart = pa->pa_pstart;
469 __entry->pa_len = pa->pa_len;
470 ),
471
472 TP_printk("dev %s pstart %llu len %u",
473 jbd2_dev_to_name(__entry->dev), __entry->pa_pstart, __entry->pa_len)
474);
475
476TRACE_EVENT(ext4_discard_preallocations,
477 TP_PROTO(struct inode *inode),
478
479 TP_ARGS(inode),
480
481 TP_STRUCT__entry(
482 __field( dev_t, dev )
483 __field( ino_t, ino )
484
485 ),
486
487 TP_fast_assign(
488 __entry->dev = inode->i_sb->s_dev;
489 __entry->ino = inode->i_ino;
490 ),
491
492 TP_printk("dev %s ino %lu",
493 jbd2_dev_to_name(__entry->dev), __entry->ino)
494);
495
496TRACE_EVENT(ext4_mb_discard_preallocations,
497 TP_PROTO(struct super_block *sb, int needed),
498
499 TP_ARGS(sb, needed),
500
501 TP_STRUCT__entry(
502 __field( dev_t, dev )
503 __field( int, needed )
504
505 ),
506
507 TP_fast_assign(
508 __entry->dev = sb->s_dev;
509 __entry->needed = needed;
510 ),
511
512 TP_printk("dev %s needed %d",
513 jbd2_dev_to_name(__entry->dev), __entry->needed)
514);
515
516TRACE_EVENT(ext4_request_blocks,
517 TP_PROTO(struct ext4_allocation_request *ar),
518
519 TP_ARGS(ar),
520
521 TP_STRUCT__entry(
522 __field( dev_t, dev )
523 __field( ino_t, ino )
524 __field( unsigned int, flags )
525 __field( unsigned int, len )
526 __field( __u64, logical )
527 __field( __u64, goal )
528 __field( __u64, lleft )
529 __field( __u64, lright )
530 __field( __u64, pleft )
531 __field( __u64, pright )
532 ),
533
534 TP_fast_assign(
535 __entry->dev = ar->inode->i_sb->s_dev;
536 __entry->ino = ar->inode->i_ino;
537 __entry->flags = ar->flags;
538 __entry->len = ar->len;
539 __entry->logical = ar->logical;
540 __entry->goal = ar->goal;
541 __entry->lleft = ar->lleft;
542 __entry->lright = ar->lright;
543 __entry->pleft = ar->pleft;
544 __entry->pright = ar->pright;
545 ),
546
547 TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
548 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
549 __entry->len,
550 (unsigned long long) __entry->logical,
551 (unsigned long long) __entry->goal,
552 (unsigned long long) __entry->lleft,
553 (unsigned long long) __entry->lright,
554 (unsigned long long) __entry->pleft,
555 (unsigned long long) __entry->pright)
556);
557
558TRACE_EVENT(ext4_allocate_blocks,
559 TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
560
561 TP_ARGS(ar, block),
562
563 TP_STRUCT__entry(
564 __field( dev_t, dev )
565 __field( ino_t, ino )
566 __field( __u64, block )
567 __field( unsigned int, flags )
568 __field( unsigned int, len )
569 __field( __u64, logical )
570 __field( __u64, goal )
571 __field( __u64, lleft )
572 __field( __u64, lright )
573 __field( __u64, pleft )
574 __field( __u64, pright )
575 ),
576
577 TP_fast_assign(
578 __entry->dev = ar->inode->i_sb->s_dev;
579 __entry->ino = ar->inode->i_ino;
580 __entry->block = block;
581 __entry->flags = ar->flags;
582 __entry->len = ar->len;
583 __entry->logical = ar->logical;
584 __entry->goal = ar->goal;
585 __entry->lleft = ar->lleft;
586 __entry->lright = ar->lright;
587 __entry->pleft = ar->pleft;
588 __entry->pright = ar->pright;
589 ),
590
591 TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
592 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
593 __entry->len, __entry->block,
594 (unsigned long long) __entry->logical,
595 (unsigned long long) __entry->goal,
596 (unsigned long long) __entry->lleft,
597 (unsigned long long) __entry->lright,
598 (unsigned long long) __entry->pleft,
599 (unsigned long long) __entry->pright)
600);
601
602TRACE_EVENT(ext4_free_blocks,
603 TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
604 int metadata),
605
606 TP_ARGS(inode, block, count, metadata),
607
608 TP_STRUCT__entry(
609 __field( dev_t, dev )
610 __field( ino_t, ino )
611 __field( __u64, block )
612 __field( unsigned long, count )
613 __field( int, metadata )
614
615 ),
616
617 TP_fast_assign(
618 __entry->dev = inode->i_sb->s_dev;
619 __entry->ino = inode->i_ino;
620 __entry->block = block;
621 __entry->count = count;
622 __entry->metadata = metadata;
623 ),
624
625 TP_printk("dev %s ino %lu block %llu count %lu metadata %d",
626 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
627 __entry->count, __entry->metadata)
628);
629
630TRACE_EVENT(ext4_sync_file,
631 TP_PROTO(struct file *file, struct dentry *dentry, int datasync),
632
633 TP_ARGS(file, dentry, datasync),
634
635 TP_STRUCT__entry(
636 __field( dev_t, dev )
637 __field( ino_t, ino )
638 __field( ino_t, parent )
639 __field( int, datasync )
640 ),
641
642 TP_fast_assign(
643 __entry->dev = dentry->d_inode->i_sb->s_dev;
644 __entry->ino = dentry->d_inode->i_ino;
645 __entry->datasync = datasync;
646 __entry->parent = dentry->d_parent->d_inode->i_ino;
647 ),
648
649 TP_printk("dev %s ino %ld parent %ld datasync %d ",
650 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->parent,
651 __entry->datasync)
652);
653
654TRACE_EVENT(ext4_sync_fs,
655 TP_PROTO(struct super_block *sb, int wait),
656
657 TP_ARGS(sb, wait),
658
659 TP_STRUCT__entry(
660 __field( dev_t, dev )
661 __field( int, wait )
662
663 ),
664
665 TP_fast_assign(
666 __entry->dev = sb->s_dev;
667 __entry->wait = wait;
668 ),
669
670 TP_printk("dev %s wait %d", jbd2_dev_to_name(__entry->dev),
671 __entry->wait)
672);
673
674#endif /* _TRACE_EXT4_H */
675
676/* This part must be outside protection */
677#include <trace/define_trace.h>
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 000000000000..1cb0c3aa11e6
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,145 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq
3
4#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_H
6
7#include <linux/tracepoint.h>
8#include <linux/interrupt.h>
9
10#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
11#define show_softirq_name(val) \
12 __print_symbolic(val, \
13 softirq_name(HI), \
14 softirq_name(TIMER), \
15 softirq_name(NET_TX), \
16 softirq_name(NET_RX), \
17 softirq_name(BLOCK), \
18 softirq_name(TASKLET), \
19 softirq_name(SCHED), \
20 softirq_name(HRTIMER), \
21 softirq_name(RCU))
22
23/**
24 * irq_handler_entry - called immediately before the irq action handler
25 * @irq: irq number
26 * @action: pointer to struct irqaction
27 *
28 * The struct irqaction pointed to by @action contains various
29 * information about the handler, including the device name,
30 * @action->name, and the device id, @action->dev_id. When used in
31 * conjunction with the irq_handler_exit tracepoint, we can figure
32 * out irq handler latencies.
33 */
34TRACE_EVENT(irq_handler_entry,
35
36	TP_PROTO(int irq, struct irqaction *action),
37
38	TP_ARGS(irq, action),
39
40	TP_STRUCT__entry(
41		__field(	int,	irq		)
		/* variable-length copy of the handler's name string */
42		__string(	name,	action->name	)
43	),
44
45	TP_fast_assign(
46		__entry->irq = irq;
47		__assign_str(name, action->name);
48	),
49
50	TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
51);
52
53/**
54 * irq_handler_exit - called immediately after the irq action handler returns
55 * @irq: irq number
56 * @action: pointer to struct irqaction
57 * @ret: return value
58 *
59 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
 60 * @action->handler successfully handled this irq. Otherwise, the irq might be
61 * a shared irq line, or the irq was not handled successfully. Can be used in
62 * conjunction with the irq_handler_entry to understand irq handler latencies.
63 */
64TRACE_EVENT(irq_handler_exit,
65
66	TP_PROTO(int irq, struct irqaction *action, int ret),
67
68	TP_ARGS(irq, action, ret),
69
70	TP_STRUCT__entry(
71		__field(	int,	irq	)
72		__field(	int,	ret	)
73	),
74
75	TP_fast_assign(
76		__entry->irq	= irq;
77		__entry->ret	= ret;
78	),
79
	/* any non-zero return value is reported as "handled" */
80	TP_printk("irq=%d return=%s",
81		  __entry->irq, __entry->ret ? "handled" : "unhandled")
82);
83
84/**
85 * softirq_entry - called immediately before the softirq handler
86 * @h: pointer to struct softirq_action
87 * @vec: pointer to first struct softirq_action in softirq_vec array
88 *
89 * The @h parameter, contains a pointer to the struct softirq_action
90 * which has a pointer to the action handler that is called. By subtracting
91 * the @vec pointer from the @h pointer, we can determine the softirq
92 * number. Also, when used in combination with the softirq_exit tracepoint
93 * we can determine the softirq latency.
94 */
95TRACE_EVENT(softirq_entry,
96
97	TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
98
99	TP_ARGS(h, vec),
100
101	TP_STRUCT__entry(
102		__field(	int,	vec			)
103	),
104
105	TP_fast_assign(
		/* pointer subtraction: index of h within the softirq_vec array */
106		__entry->vec = (int)(h - vec);
107	),
108
109	TP_printk("softirq=%d action=%s", __entry->vec,
110		  show_softirq_name(__entry->vec))
111);
112
113/**
114 * softirq_exit - called immediately after the softirq handler returns
115 * @h: pointer to struct softirq_action
116 * @vec: pointer to first struct softirq_action in softirq_vec array
117 *
118 * The @h parameter contains a pointer to the struct softirq_action
119 * that has handled the softirq. By subtracting the @vec pointer from
120 * the @h pointer, we can determine the softirq number. Also, when used in
121 * combination with the softirq_entry tracepoint we can determine the softirq
122 * latency.
123 */
124TRACE_EVENT(softirq_exit,
125
126	TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
127
128	TP_ARGS(h, vec),
129
130	TP_STRUCT__entry(
131		__field(	int,	vec			)
132	),
133
134	TP_fast_assign(
		/* same index computation as softirq_entry, taken on exit */
135		__entry->vec = (int)(h - vec);
136	),
137
138	TP_printk("softirq=%d action=%s", __entry->vec,
139		  show_softirq_name(__entry->vec))
140);
141
142#endif /* _TRACE_IRQ_H */
143
144/* This part must be outside protection */
145#include <trace/define_trace.h>
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
new file mode 100644
index 000000000000..10813fa0c8d0
--- /dev/null
+++ b/include/trace/events/jbd2.h
@@ -0,0 +1,168 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM jbd2
3
4#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_JBD2_H
6
7#include <linux/jbd2.h>
8#include <linux/tracepoint.h>
9
/*
 * Tracepoint for a jbd2 checkpoint pass: records the journal's backing
 * device and the checkpoint's integer result code.
 */
10TRACE_EVENT(jbd2_checkpoint,
11
12	TP_PROTO(journal_t *journal, int result),
13
14	TP_ARGS(journal, result),
15
16	TP_STRUCT__entry(
17		__field(	dev_t,	dev			)
18		__field(	int,	result			)
19	),
20
21	TP_fast_assign(
22		__entry->dev		= journal->j_fs_dev->bd_dev;
23		__entry->result		= result;
24	),
25
26	TP_printk("dev %s result %d",
27		  jbd2_dev_to_name(__entry->dev), __entry->result)
28);
29
30TRACE_EVENT(jbd2_start_commit,
31
32 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
33
34 TP_ARGS(journal, commit_transaction),
35
36 TP_STRUCT__entry(
37 __field( dev_t, dev )
38 __field( char, sync_commit )
39 __field( int, transaction )
40 ),
41
42 TP_fast_assign(
43 __entry->dev = journal->j_fs_dev->bd_dev;
44 __entry->sync_commit = commit_transaction->t_synchronous_commit;
45 __entry->transaction = commit_transaction->t_tid;
46 ),
47
48 TP_printk("dev %s transaction %d sync %d",
49 jbd2_dev_to_name(__entry->dev), __entry->transaction,
50 __entry->sync_commit)
51);
52
53TRACE_EVENT(jbd2_commit_locking,
54
55 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
56
57 TP_ARGS(journal, commit_transaction),
58
59 TP_STRUCT__entry(
60 __field( dev_t, dev )
61 __field( char, sync_commit )
62 __field( int, transaction )
63 ),
64
65 TP_fast_assign(
66 __entry->dev = journal->j_fs_dev->bd_dev;
67 __entry->sync_commit = commit_transaction->t_synchronous_commit;
68 __entry->transaction = commit_transaction->t_tid;
69 ),
70
71 TP_printk("dev %s transaction %d sync %d",
72 jbd2_dev_to_name(__entry->dev), __entry->transaction,
73 __entry->sync_commit)
74);
75
76TRACE_EVENT(jbd2_commit_flushing,
77
78 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
79
80 TP_ARGS(journal, commit_transaction),
81
82 TP_STRUCT__entry(
83 __field( dev_t, dev )
84 __field( char, sync_commit )
85 __field( int, transaction )
86 ),
87
88 TP_fast_assign(
89 __entry->dev = journal->j_fs_dev->bd_dev;
90 __entry->sync_commit = commit_transaction->t_synchronous_commit;
91 __entry->transaction = commit_transaction->t_tid;
92 ),
93
94 TP_printk("dev %s transaction %d sync %d",
95 jbd2_dev_to_name(__entry->dev), __entry->transaction,
96 __entry->sync_commit)
97);
98
99TRACE_EVENT(jbd2_commit_logging,
100
101 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
102
103 TP_ARGS(journal, commit_transaction),
104
105 TP_STRUCT__entry(
106 __field( dev_t, dev )
107 __field( char, sync_commit )
108 __field( int, transaction )
109 ),
110
111 TP_fast_assign(
112 __entry->dev = journal->j_fs_dev->bd_dev;
113 __entry->sync_commit = commit_transaction->t_synchronous_commit;
114 __entry->transaction = commit_transaction->t_tid;
115 ),
116
117 TP_printk("dev %s transaction %d sync %d",
118 jbd2_dev_to_name(__entry->dev), __entry->transaction,
119 __entry->sync_commit)
120);
121
/*
 * Tracepoint fired when a jbd2 commit finishes; extends the commit-phase
 * events with "head", taken from journal->j_tail_sequence.
 */
122TRACE_EVENT(jbd2_end_commit,
123	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
124
125	TP_ARGS(journal, commit_transaction),
126
127	TP_STRUCT__entry(
128		__field(	dev_t,	dev			)
129		__field(	char,	sync_commit		  )
130		__field(	int,	transaction		  )
131		__field(	int,	head		  	  )
132	),
133
134	TP_fast_assign(
135		__entry->dev		= journal->j_fs_dev->bd_dev;
136		__entry->sync_commit = commit_transaction->t_synchronous_commit;
137		__entry->transaction	= commit_transaction->t_tid;
		/* NOTE(review): "head" is the journal tail sequence — confirm naming intent */
138		__entry->head		= journal->j_tail_sequence;
139	),
140
	/* sync_commit is a char; printed via %d after integer promotion */
141	TP_printk("dev %s transaction %d sync %d head %d",
142		  jbd2_dev_to_name(__entry->dev), __entry->transaction,
143		  __entry->sync_commit, __entry->head)
144);
145
/*
 * Tracepoint for submitting an inode's data during commit: records only
 * the device and inode number.
 */
146TRACE_EVENT(jbd2_submit_inode_data,
147	TP_PROTO(struct inode *inode),
148
149	TP_ARGS(inode),
150
151	TP_STRUCT__entry(
152		__field(	dev_t,	dev			)
153		__field(	ino_t,	ino			)
154	),
155
156	TP_fast_assign(
157		__entry->dev	= inode->i_sb->s_dev;
158		__entry->ino	= inode->i_ino;
159	),
160
161	TP_printk("dev %s ino %lu",
162		  jbd2_dev_to_name(__entry->dev), __entry->ino)
163);
164
165#endif /* _TRACE_JBD2_H */
166
167/* This part must be outside protection */
168#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 000000000000..1493c541f9c4
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,231 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM kmem
3
4#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_KMEM_H
6
7#include <linux/types.h>
8#include <linux/tracepoint.h>
9
10/*
11 * The order of these masks is important. Matching masks will be seen
12 * first and the left over flags will end up showing by themselves.
13 *
 14 * For example, if we have GFP_KERNEL before GFP_USER we will get:
15 *
16 * GFP_KERNEL|GFP_HARDWALL
17 *
18 * Thus most bits set go first.
19 */
20#define show_gfp_flags(flags) \
21 (flags) ? __print_flags(flags, "|", \
22 {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
23 {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
24 {(unsigned long)GFP_USER, "GFP_USER"}, \
25 {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
26 {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
27 {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
28 {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
29 {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
30 {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
31 {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
32 {(unsigned long)__GFP_IO, "GFP_IO"}, \
33 {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
34 {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
35 {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
36 {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
37 {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
38 {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
39 {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
40 {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
41 {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
42 {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
43 {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
44 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
45 ) : "GFP_NOWAIT"
46
47TRACE_EVENT(kmalloc,
48
49 TP_PROTO(unsigned long call_site,
50 const void *ptr,
51 size_t bytes_req,
52 size_t bytes_alloc,
53 gfp_t gfp_flags),
54
55 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
56
57 TP_STRUCT__entry(
58 __field( unsigned long, call_site )
59 __field( const void *, ptr )
60 __field( size_t, bytes_req )
61 __field( size_t, bytes_alloc )
62 __field( gfp_t, gfp_flags )
63 ),
64
65 TP_fast_assign(
66 __entry->call_site = call_site;
67 __entry->ptr = ptr;
68 __entry->bytes_req = bytes_req;
69 __entry->bytes_alloc = bytes_alloc;
70 __entry->gfp_flags = gfp_flags;
71 ),
72
73 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
74 __entry->call_site,
75 __entry->ptr,
76 __entry->bytes_req,
77 __entry->bytes_alloc,
78 show_gfp_flags(__entry->gfp_flags))
79);
80
81TRACE_EVENT(kmem_cache_alloc,
82
83 TP_PROTO(unsigned long call_site,
84 const void *ptr,
85 size_t bytes_req,
86 size_t bytes_alloc,
87 gfp_t gfp_flags),
88
89 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
90
91 TP_STRUCT__entry(
92 __field( unsigned long, call_site )
93 __field( const void *, ptr )
94 __field( size_t, bytes_req )
95 __field( size_t, bytes_alloc )
96 __field( gfp_t, gfp_flags )
97 ),
98
99 TP_fast_assign(
100 __entry->call_site = call_site;
101 __entry->ptr = ptr;
102 __entry->bytes_req = bytes_req;
103 __entry->bytes_alloc = bytes_alloc;
104 __entry->gfp_flags = gfp_flags;
105 ),
106
107 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
108 __entry->call_site,
109 __entry->ptr,
110 __entry->bytes_req,
111 __entry->bytes_alloc,
112 show_gfp_flags(__entry->gfp_flags))
113);
114
115TRACE_EVENT(kmalloc_node,
116
117 TP_PROTO(unsigned long call_site,
118 const void *ptr,
119 size_t bytes_req,
120 size_t bytes_alloc,
121 gfp_t gfp_flags,
122 int node),
123
124 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
125
126 TP_STRUCT__entry(
127 __field( unsigned long, call_site )
128 __field( const void *, ptr )
129 __field( size_t, bytes_req )
130 __field( size_t, bytes_alloc )
131 __field( gfp_t, gfp_flags )
132 __field( int, node )
133 ),
134
135 TP_fast_assign(
136 __entry->call_site = call_site;
137 __entry->ptr = ptr;
138 __entry->bytes_req = bytes_req;
139 __entry->bytes_alloc = bytes_alloc;
140 __entry->gfp_flags = gfp_flags;
141 __entry->node = node;
142 ),
143
144 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
145 __entry->call_site,
146 __entry->ptr,
147 __entry->bytes_req,
148 __entry->bytes_alloc,
149 show_gfp_flags(__entry->gfp_flags),
150 __entry->node)
151);
152
153TRACE_EVENT(kmem_cache_alloc_node,
154
155 TP_PROTO(unsigned long call_site,
156 const void *ptr,
157 size_t bytes_req,
158 size_t bytes_alloc,
159 gfp_t gfp_flags,
160 int node),
161
162 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
163
164 TP_STRUCT__entry(
165 __field( unsigned long, call_site )
166 __field( const void *, ptr )
167 __field( size_t, bytes_req )
168 __field( size_t, bytes_alloc )
169 __field( gfp_t, gfp_flags )
170 __field( int, node )
171 ),
172
173 TP_fast_assign(
174 __entry->call_site = call_site;
175 __entry->ptr = ptr;
176 __entry->bytes_req = bytes_req;
177 __entry->bytes_alloc = bytes_alloc;
178 __entry->gfp_flags = gfp_flags;
179 __entry->node = node;
180 ),
181
182 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
183 __entry->call_site,
184 __entry->ptr,
185 __entry->bytes_req,
186 __entry->bytes_alloc,
187 show_gfp_flags(__entry->gfp_flags),
188 __entry->node)
189);
190
/*
 * Tracepoint for kfree(): records the caller's address and the pointer
 * being freed (ptr may be NULL — kfree(NULL) is a no-op).
 */
191TRACE_EVENT(kfree,
192
193	TP_PROTO(unsigned long call_site, const void *ptr),
194
195	TP_ARGS(call_site, ptr),
196
197	TP_STRUCT__entry(
198		__field(	unsigned long,	call_site	)
199		__field(	const void *,	ptr		)
200	),
201
202	TP_fast_assign(
203		__entry->call_site	= call_site;
204		__entry->ptr		= ptr;
205	),
206
207	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
208);
209
210TRACE_EVENT(kmem_cache_free,
211
212 TP_PROTO(unsigned long call_site, const void *ptr),
213
214 TP_ARGS(call_site, ptr),
215
216 TP_STRUCT__entry(
217 __field( unsigned long, call_site )
218 __field( const void *, ptr )
219 ),
220
221 TP_fast_assign(
222 __entry->call_site = call_site;
223 __entry->ptr = ptr;
224 ),
225
226 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
227);
228#endif /* _TRACE_KMEM_H */
229
230/* This part must be outside protection */
231#include <trace/define_trace.h>
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
new file mode 100644
index 000000000000..bcf1d209a00d
--- /dev/null
+++ b/include/trace/events/lockdep.h
@@ -0,0 +1,96 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM lockdep
3
4#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_LOCKDEP_H
6
7#include <linux/lockdep.h>
8#include <linux/tracepoint.h>
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_EVENT(lock_acquire,
13
14 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
15 int trylock, int read, int check,
16 struct lockdep_map *next_lock, unsigned long ip),
17
18 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
19
20 TP_STRUCT__entry(
21 __field(unsigned int, flags)
22 __string(name, lock->name)
23 ),
24
25 TP_fast_assign(
26 __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
27 __assign_str(name, lock->name);
28 ),
29
30 TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
31 (__entry->flags & 2) ? "read " : "",
32 __get_str(name))
33);
34
35TRACE_EVENT(lock_release,
36
	/* nested and ip are accepted for the tracepoint signature but not recorded */
37	TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
38
39	TP_ARGS(lock, nested, ip),
40
41	TP_STRUCT__entry(
42		__string(name, lock->name)
43	),
44
45	TP_fast_assign(
46		__assign_str(name, lock->name);
47	),
48
49	TP_printk("%s", __get_str(name))
50);
51
52#ifdef CONFIG_LOCK_STAT
53
54TRACE_EVENT(lock_contended,
55
56 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
57
58 TP_ARGS(lock, ip),
59
60 TP_STRUCT__entry(
61 __string(name, lock->name)
62 ),
63
64 TP_fast_assign(
65 __assign_str(name, lock->name);
66 ),
67
68 TP_printk("%s", __get_str(name))
69);
70
/*
 * Tracepoint fired once a contended lock is finally acquired; splits the
 * nanosecond wait time into whole microseconds plus remainder for display.
 */
71TRACE_EVENT(lock_acquired,
72	TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
73
74	TP_ARGS(lock, ip, waittime),
75
76	TP_STRUCT__entry(
77		__string(name, lock->name)
78		__field(unsigned long, wait_usec)
79		__field(unsigned long, wait_nsec_rem)
80	),
81	TP_fast_assign(
82		__assign_str(name, lock->name);
		/* do_div divides waittime in place and returns the remainder */
83		__entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
84		__entry->wait_usec = (unsigned long) waittime;
85	),
86	TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
87		  __entry->wait_nsec_rem)
88);
89
90#endif
91#endif
92
93#endif /* _TRACE_LOCKDEP_H */
94
95/* This part must be outside protection */
96#include <trace/define_trace.h>
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
new file mode 100644
index 000000000000..a8989c4547e7
--- /dev/null
+++ b/include/trace/events/napi.h
@@ -0,0 +1,11 @@
1#ifndef _TRACE_NAPI_H_
2#define _TRACE_NAPI_H_
3
4#include <linux/netdevice.h>
5#include <linux/tracepoint.h>
6
/* Bare tracepoint (no TRACE_EVENT payload) hooked at NAPI poll time. */
7DECLARE_TRACE(napi_poll,
8	TP_PROTO(struct napi_struct *napi),
9	TP_ARGS(napi));
10
11#endif
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
new file mode 100644
index 000000000000..8949bb7eb082
--- /dev/null
+++ b/include/trace/events/sched.h
@@ -0,0 +1,346 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM sched
3
4#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCHED_H
6
7#include <linux/sched.h>
8#include <linux/tracepoint.h>
9
10/*
11 * Tracepoint for calling kthread_stop, performed to end a kthread:
12 */
13TRACE_EVENT(sched_kthread_stop,
14
15 TP_PROTO(struct task_struct *t),
16
17 TP_ARGS(t),
18
19 TP_STRUCT__entry(
20 __array( char, comm, TASK_COMM_LEN )
21 __field( pid_t, pid )
22 ),
23
24 TP_fast_assign(
25 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
26 __entry->pid = t->pid;
27 ),
28
29 TP_printk("task %s:%d", __entry->comm, __entry->pid)
30);
31
32/*
33 * Tracepoint for the return value of the kthread stopping:
34 */
35TRACE_EVENT(sched_kthread_stop_ret,
36
37 TP_PROTO(int ret),
38
39 TP_ARGS(ret),
40
41 TP_STRUCT__entry(
42 __field( int, ret )
43 ),
44
45 TP_fast_assign(
46 __entry->ret = ret;
47 ),
48
49 TP_printk("ret %d", __entry->ret)
50);
51
52/*
53 * Tracepoint for waiting on task to unschedule:
54 *
55 * (NOTE: the 'rq' argument is not used by generic trace events,
56 * but used by the latency tracer plugin. )
57 */
58TRACE_EVENT(sched_wait_task,
59
60 TP_PROTO(struct rq *rq, struct task_struct *p),
61
62 TP_ARGS(rq, p),
63
64 TP_STRUCT__entry(
65 __array( char, comm, TASK_COMM_LEN )
66 __field( pid_t, pid )
67 __field( int, prio )
68 ),
69
70 TP_fast_assign(
71 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
72 __entry->pid = p->pid;
73 __entry->prio = p->prio;
74 ),
75
76 TP_printk("task %s:%d [%d]",
77 __entry->comm, __entry->pid, __entry->prio)
78);
79
80/*
81 * Tracepoint for waking up a task:
82 *
83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. )
85 */
86TRACE_EVENT(sched_wakeup,
87
88 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
89
90 TP_ARGS(rq, p, success),
91
92 TP_STRUCT__entry(
93 __array( char, comm, TASK_COMM_LEN )
94 __field( pid_t, pid )
95 __field( int, prio )
96 __field( int, success )
97 ),
98
99 TP_fast_assign(
100 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
101 __entry->pid = p->pid;
102 __entry->prio = p->prio;
103 __entry->success = success;
104 ),
105
106 TP_printk("task %s:%d [%d] success=%d",
107 __entry->comm, __entry->pid, __entry->prio,
108 __entry->success)
109);
110
111/*
112 * Tracepoint for waking up a new task:
113 *
114 * (NOTE: the 'rq' argument is not used by generic trace events,
115 * but used by the latency tracer plugin. )
116 */
117TRACE_EVENT(sched_wakeup_new,
118
119 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
120
121 TP_ARGS(rq, p, success),
122
123 TP_STRUCT__entry(
124 __array( char, comm, TASK_COMM_LEN )
125 __field( pid_t, pid )
126 __field( int, prio )
127 __field( int, success )
128 ),
129
130 TP_fast_assign(
131 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
132 __entry->pid = p->pid;
133 __entry->prio = p->prio;
134 __entry->success = success;
135 ),
136
137 TP_printk("task %s:%d [%d] success=%d",
138 __entry->comm, __entry->pid, __entry->prio,
139 __entry->success)
140);
141
142/*
143 * Tracepoint for task switches, performed by the scheduler:
144 *
145 * (NOTE: the 'rq' argument is not used by generic trace events,
146 * but used by the latency tracer plugin. )
147 */
148TRACE_EVENT(sched_switch,
149
150	TP_PROTO(struct rq *rq, struct task_struct *prev,
151		 struct task_struct *next),
152
153	TP_ARGS(rq, prev, next),
154
155	TP_STRUCT__entry(
156		__array(	char,	prev_comm,	TASK_COMM_LEN	)
157		__field(	pid_t,	prev_pid			)
158		__field(	int,	prev_prio			)
159		__field(	long,	prev_state			)
160		__array(	char,	next_comm,	TASK_COMM_LEN	)
161		__field(	pid_t,	next_pid			)
162		__field(	int,	next_prio			)
163	),
164
165	TP_fast_assign(
166		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
167		__entry->prev_pid	= prev->pid;
168		__entry->prev_prio	= prev->prio;
169		__entry->prev_state	= prev->state;
170		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
171		__entry->next_pid	= next->pid;
172		__entry->next_prio	= next->prio;
173	),
174
	/*
	 * prev_state bits are decoded into single-letter task-state flags
	 * (S, D, T, t, Z, X, x, W); a zero state prints as "R" (running).
	 */
175	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
176		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
177		__entry->prev_state ?
178		  __print_flags(__entry->prev_state, "|",
179				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
180				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
181				{ 128, "W" }) : "R",
182		__entry->next_comm, __entry->next_pid, __entry->next_prio)
183);
184
185/*
186 * Tracepoint for a task being migrated:
187 */
188TRACE_EVENT(sched_migrate_task,
189
190 TP_PROTO(struct task_struct *p, int dest_cpu),
191
192 TP_ARGS(p, dest_cpu),
193
194 TP_STRUCT__entry(
195 __array( char, comm, TASK_COMM_LEN )
196 __field( pid_t, pid )
197 __field( int, prio )
198 __field( int, orig_cpu )
199 __field( int, dest_cpu )
200 ),
201
202 TP_fast_assign(
203 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
204 __entry->pid = p->pid;
205 __entry->prio = p->prio;
206 __entry->orig_cpu = task_cpu(p);
207 __entry->dest_cpu = dest_cpu;
208 ),
209
210 TP_printk("task %s:%d [%d] from: %d to: %d",
211 __entry->comm, __entry->pid, __entry->prio,
212 __entry->orig_cpu, __entry->dest_cpu)
213);
214
215/*
216 * Tracepoint for freeing a task:
217 */
218TRACE_EVENT(sched_process_free,
219
220 TP_PROTO(struct task_struct *p),
221
222 TP_ARGS(p),
223
224 TP_STRUCT__entry(
225 __array( char, comm, TASK_COMM_LEN )
226 __field( pid_t, pid )
227 __field( int, prio )
228 ),
229
230 TP_fast_assign(
231 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
232 __entry->pid = p->pid;
233 __entry->prio = p->prio;
234 ),
235
236 TP_printk("task %s:%d [%d]",
237 __entry->comm, __entry->pid, __entry->prio)
238);
239
240/*
241 * Tracepoint for a task exiting:
242 */
243TRACE_EVENT(sched_process_exit,
244
245 TP_PROTO(struct task_struct *p),
246
247 TP_ARGS(p),
248
249 TP_STRUCT__entry(
250 __array( char, comm, TASK_COMM_LEN )
251 __field( pid_t, pid )
252 __field( int, prio )
253 ),
254
255 TP_fast_assign(
256 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
257 __entry->pid = p->pid;
258 __entry->prio = p->prio;
259 ),
260
261 TP_printk("task %s:%d [%d]",
262 __entry->comm, __entry->pid, __entry->prio)
263);
264
265/*
266 * Tracepoint for a waiting task:
267 */
268TRACE_EVENT(sched_process_wait,
269
270 TP_PROTO(struct pid *pid),
271
272 TP_ARGS(pid),
273
274 TP_STRUCT__entry(
275 __array( char, comm, TASK_COMM_LEN )
276 __field( pid_t, pid )
277 __field( int, prio )
278 ),
279
280 TP_fast_assign(
281 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
282 __entry->pid = pid_nr(pid);
283 __entry->prio = current->prio;
284 ),
285
286 TP_printk("task %s:%d [%d]",
287 __entry->comm, __entry->pid, __entry->prio)
288);
289
290/*
291 * Tracepoint for do_fork:
292 */
293TRACE_EVENT(sched_process_fork,
294
295 TP_PROTO(struct task_struct *parent, struct task_struct *child),
296
297 TP_ARGS(parent, child),
298
299 TP_STRUCT__entry(
300 __array( char, parent_comm, TASK_COMM_LEN )
301 __field( pid_t, parent_pid )
302 __array( char, child_comm, TASK_COMM_LEN )
303 __field( pid_t, child_pid )
304 ),
305
306 TP_fast_assign(
307 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
308 __entry->parent_pid = parent->pid;
309 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
310 __entry->child_pid = child->pid;
311 ),
312
313 TP_printk("parent %s:%d child %s:%d",
314 __entry->parent_comm, __entry->parent_pid,
315 __entry->child_comm, __entry->child_pid)
316);
317
318/*
319 * Tracepoint for sending a signal:
320 */
321TRACE_EVENT(sched_signal_send,
322
323 TP_PROTO(int sig, struct task_struct *p),
324
325 TP_ARGS(sig, p),
326
327 TP_STRUCT__entry(
328 __field( int, sig )
329 __array( char, comm, TASK_COMM_LEN )
330 __field( pid_t, pid )
331 ),
332
333 TP_fast_assign(
334 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
335 __entry->pid = p->pid;
336 __entry->sig = sig;
337 ),
338
339 TP_printk("sig: %d task %s:%d",
340 __entry->sig, __entry->comm, __entry->pid)
341);
342
343#endif /* _TRACE_SCHED_H */
344
345/* This part must be outside protection */
346#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 000000000000..e499863b9669
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,40 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM skb
3
4#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SKB_H
6
7#include <linux/skbuff.h>
8#include <linux/tracepoint.h>
9
10/*
11 * Tracepoint for free an sk_buff:
12 */
13TRACE_EVENT(kfree_skb,
14
15 TP_PROTO(struct sk_buff *skb, void *location),
16
17 TP_ARGS(skb, location),
18
19 TP_STRUCT__entry(
20 __field( void *, skbaddr )
21 __field( unsigned short, protocol )
22 __field( void *, location )
23 ),
24
25 TP_fast_assign(
26 __entry->skbaddr = skb;
27 if (skb) {
28 __entry->protocol = ntohs(skb->protocol);
29 }
30 __entry->location = location;
31 ),
32
33 TP_printk("skbaddr=%p protocol=%u location=%p",
34 __entry->skbaddr, __entry->protocol, __entry->location)
35);
36
37#endif /* _TRACE_SKB_H */
38
39/* This part must be outside protection */
40#include <trace/define_trace.h>
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 000000000000..fcfd9a1e4b96
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,100 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM workqueue
3
4#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_WORKQUEUE_H
6
7#include <linux/workqueue.h>
8#include <linux/sched.h>
9#include <linux/tracepoint.h>
10
/*
 * Tracepoint for queuing a work item: records the workqueue thread's
 * comm/pid and the work function pointer (printed symbolically via %pF).
 */
11TRACE_EVENT(workqueue_insertion,
12
13	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14
15	TP_ARGS(wq_thread, work),
16
17	TP_STRUCT__entry(
18		__array(char,		thread_comm,	TASK_COMM_LEN)
19		__field(pid_t,		thread_pid)
20		__field(work_func_t,	func)
21	),
22
23	TP_fast_assign(
24		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
25		__entry->thread_pid	= wq_thread->pid;
26		__entry->func		= work->func;
27	),
28
29	TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
30		__entry->thread_pid, __entry->func)
31);
32
33TRACE_EVENT(workqueue_execution,
34
35 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
36
37 TP_ARGS(wq_thread, work),
38
39 TP_STRUCT__entry(
40 __array(char, thread_comm, TASK_COMM_LEN)
41 __field(pid_t, thread_pid)
42 __field(work_func_t, func)
43 ),
44
45 TP_fast_assign(
46 memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
47 __entry->thread_pid = wq_thread->pid;
48 __entry->func = work->func;
49 ),
50
51 TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
52 __entry->thread_pid, __entry->func)
53);
54
55/* Trace the creation of one workqueue thread on a cpu */
56TRACE_EVENT(workqueue_creation,
57
58 TP_PROTO(struct task_struct *wq_thread, int cpu),
59
60 TP_ARGS(wq_thread, cpu),
61
62 TP_STRUCT__entry(
63 __array(char, thread_comm, TASK_COMM_LEN)
64 __field(pid_t, thread_pid)
65 __field(int, cpu)
66 ),
67
68 TP_fast_assign(
69 memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
70 __entry->thread_pid = wq_thread->pid;
71 __entry->cpu = cpu;
72 ),
73
74 TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
75 __entry->thread_pid, __entry->cpu)
76);
77
78TRACE_EVENT(workqueue_destruction,
79
80 TP_PROTO(struct task_struct *wq_thread),
81
82 TP_ARGS(wq_thread),
83
84 TP_STRUCT__entry(
85 __array(char, thread_comm, TASK_COMM_LEN)
86 __field(pid_t, thread_pid)
87 ),
88
89 TP_fast_assign(
90 memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
91 __entry->thread_pid = wq_thread->pid;
92 ),
93
94 TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
95);
96
97#endif /* _TRACE_WORKQUEUE_H */
98
99/* This part must be outside protection */
100#include <trace/define_trace.h>