Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/bcache.h      |  36
-rw-r--r--   include/trace/events/block.h       |  26
-rw-r--r--   include/trace/events/btrfs.h       |  23
-rw-r--r--   include/trace/events/compaction.h  |  42
-rw-r--r--   include/trace/events/f2fs.h        | 111
-rw-r--r--   include/trace/events/migrate.h     |  26
-rw-r--r--   include/trace/events/net.h         | 158
-rw-r--r--   include/trace/events/power.h       |  48
-rw-r--r--   include/trace/events/ras.h         |  10
-rw-r--r--   include/trace/events/sched.h       |  87
-rw-r--r--   include/trace/events/sunrpc.h      |   5
-rw-r--r--   include/trace/events/v4l2.h        | 157
-rw-r--r--   include/trace/events/writeback.h   |   6
-rw-r--r--   include/trace/ftrace.h             |  36
14 files changed, 679 insertions(+), 92 deletions(-)
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e2b9576d00e2..7110897c3dfa 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->dev = bio->bi_bdev->bd_dev;
 		__entry->orig_major = d->disk->major;
 		__entry->orig_minor = d->disk->first_minor;
-		__entry->sector = bio->bi_sector;
-		__entry->orig_sector = bio->bi_sector - 16;
-		__entry->nr_sector = bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector = bio->bi_iter.bi_sector;
+		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
-		__entry->nr_sector = bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector = bio->bi_iter.bi_sector;
+		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
-		__entry->nr_sector = bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector = bio->bi_iter.bi_sector;
+		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
-		__entry->nr_sector = bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector = bio->bi_iter.bi_sector;
+		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
@@ -247,7 +247,7 @@ TRACE_EVENT(bcache_btree_write,
 	TP_fast_assign(
 		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
 		__entry->block = b->written;
-		__entry->keys = b->sets[b->nsets].data->keys;
+		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
 	),
 
 	TP_printk("bucket %zu", __entry->bucket)
@@ -411,7 +411,7 @@ TRACE_EVENT(bcache_alloc_invalidate,
 	),
 
 	TP_fast_assign(
-		__entry->free = fifo_used(&ca->free);
+		__entry->free = fifo_used(&ca->free[RESERVE_NONE]);
 		__entry->free_inc = fifo_used(&ca->free_inc);
 		__entry->free_inc_size = ca->free_inc.size;
 		__entry->unused = fifo_used(&ca->unused);
@@ -422,8 +422,8 @@ TRACE_EVENT(bcache_alloc_invalidate,
 );
 
 TRACE_EVENT(bcache_alloc_fail,
-	TP_PROTO(struct cache *ca),
-	TP_ARGS(ca),
+	TP_PROTO(struct cache *ca, unsigned reserve),
+	TP_ARGS(ca, reserve),
 
 	TP_STRUCT__entry(
 		__field(unsigned, free)
@@ -433,7 +433,7 @@ TRACE_EVENT(bcache_alloc_fail,
 	),
 
 	TP_fast_assign(
-		__entry->free = fifo_used(&ca->free);
+		__entry->free = fifo_used(&ca->free[reserve]);
 		__entry->free_inc = fifo_used(&ca->free_inc);
 		__entry->unused = fifo_used(&ca->unused);
 		__entry->blocked = atomic_read(&ca->set->prio_blocked);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 4c2301d2ef1a..e76ae19a8d6f 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev ?
 					bio->bi_bdev->bd_dev : 0;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio_sectors(bio);
 		__entry->error = error;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
 	TP_fast_assign(
 		__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
-		__entry->sector = bio ? bio->bi_sector : 0;
+		__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector = bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->new_sector = new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
 	TP_fast_assign(
 		__entry->dev = bio->bi_bdev->bd_dev;
-		__entry->sector = bio->bi_sector;
+		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->nr_sector = bio_sectors(bio);
 		__entry->old_dev = dev;
 		__entry->old_sector = from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4832d75dcbae..3176cdc32937 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -208,17 +208,18 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 		__entry->refs, __entry->compress_type)
 );
 
 #define show_ordered_flags(flags) \
-	__print_symbolic(flags, \
-		{ BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
-		{ BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
-		{ BTRFS_ORDERED_NOCOW, "NOCOW" }, \
-		{ BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
-		{ BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
-		{ BTRFS_ORDERED_DIRECT, "DIRECT" }, \
-		{ BTRFS_ORDERED_IOERR, "IOERR" }, \
-		{ BTRFS_ORDERED_UPDATED_ISIZE, "UPDATED_ISIZE" }, \
-		{ BTRFS_ORDERED_LOGGED_CSUM, "LOGGED_CSUM" })
+	__print_flags(flags, "|", \
+		{ (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
+		{ (1 << BTRFS_ORDERED_COMPLETE), "COMPLETE" }, \
+		{ (1 << BTRFS_ORDERED_NOCOW), "NOCOW" }, \
+		{ (1 << BTRFS_ORDERED_COMPRESSED), "COMPRESSED" }, \
+		{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
+		{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
+		{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
+		{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
+		{ (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
+		{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
 
 
 DECLARE_EVENT_CLASS(btrfs__ordered_extent,
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index fde1b3e94c7d..06f544ef2f6f 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -67,6 +67,48 @@ TRACE_EVENT(mm_compaction_migratepages,
 		__entry->nr_failed)
 );
 
+TRACE_EVENT(mm_compaction_begin,
+	TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
+		unsigned long free_start, unsigned long zone_end),
+
+	TP_ARGS(zone_start, migrate_start, free_start, zone_end),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, zone_start)
+		__field(unsigned long, migrate_start)
+		__field(unsigned long, free_start)
+		__field(unsigned long, zone_end)
+	),
+
+	TP_fast_assign(
+		__entry->zone_start = zone_start;
+		__entry->migrate_start = migrate_start;
+		__entry->free_start = free_start;
+		__entry->zone_end = zone_end;
+	),
+
+	TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
+		__entry->zone_start,
+		__entry->migrate_start,
+		__entry->free_start,
+		__entry->zone_end)
+);
+
+TRACE_EVENT(mm_compaction_end,
+	TP_PROTO(int status),
+
+	TP_ARGS(status),
+
+	TP_STRUCT__entry(
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = status;
+	),
+
+	TP_printk("status=%d", __entry->status)
+);
 
 #endif /* _TRACE_COMPACTION_H */
 
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e0dc355fa317..67f38faac589 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -16,15 +16,28 @@
 		{ META, "META" }, \
 		{ META_FLUSH, "META_FLUSH" })
 
-#define show_bio_type(type) \
-	__print_symbolic(type, \
-		{ READ, "READ" }, \
-		{ READA, "READAHEAD" }, \
-		{ READ_SYNC, "READ_SYNC" }, \
-		{ WRITE, "WRITE" }, \
-		{ WRITE_SYNC, "WRITE_SYNC" }, \
-		{ WRITE_FLUSH, "WRITE_FLUSH" }, \
-		{ WRITE_FUA, "WRITE_FUA" })
+#define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA))
+#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
+
+#define show_bio_type(type) show_bio_base(type), show_bio_extra(type)
+
+#define show_bio_base(type) \
+	__print_symbolic(F2FS_BIO_MASK(type), \
+		{ READ, "READ" }, \
+		{ READA, "READAHEAD" }, \
+		{ READ_SYNC, "READ_SYNC" }, \
+		{ WRITE, "WRITE" }, \
+		{ WRITE_SYNC, "WRITE_SYNC" }, \
+		{ WRITE_FLUSH, "WRITE_FLUSH" }, \
+		{ WRITE_FUA, "WRITE_FUA" }, \
+		{ WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" })
+
+#define show_bio_extra(type) \
+	__print_symbolic(F2FS_BIO_EXTRA_MASK(type), \
+		{ REQ_META, "(M)" }, \
+		{ REQ_PRIO, "(P)" }, \
+		{ REQ_META | REQ_PRIO, "(MP)" }, \
+		{ 0, " \b" })
 
 #define show_data_type(type) \
 	__print_symbolic(type, \
@@ -421,7 +434,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 		__entry->err)
 );
 
-TRACE_EVENT_CONDITION(f2fs_readpage,
+TRACE_EVENT_CONDITION(f2fs_submit_page_bio,
 
 	TP_PROTO(struct page *page, sector_t blkaddr, int type),
 
@@ -446,7 +459,7 @@ TRACE_EVENT_CONDITION(f2fs_readpage,
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-		"blkaddr = 0x%llx, bio_type = %s",
+		"blkaddr = 0x%llx, bio_type = %s%s",
 		show_dev_ino(__entry),
 		(unsigned long)__entry->index,
 		(unsigned long long)__entry->blkaddr,
@@ -598,36 +611,54 @@ TRACE_EVENT(f2fs_reserve_new_block,
 		__entry->ofs_in_node)
 );
 
-TRACE_EVENT(f2fs_do_submit_bio,
+DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
-	TP_PROTO(struct super_block *sb, int btype, bool sync, struct bio *bio),
+	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
 
-	TP_ARGS(sb, btype, sync, bio),
+	TP_ARGS(sb, rw, type, bio),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(int, btype)
-		__field(bool, sync)
+		__field(int, rw)
+		__field(int, type)
 		__field(sector_t, sector)
 		__field(unsigned int, size)
 	),
 
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->btype = btype;
-		__entry->sync = sync;
-		__entry->sector = bio->bi_sector;
-		__entry->size = bio->bi_size;
+		__entry->rw = rw;
+		__entry->type = type;
+		__entry->sector = bio->bi_iter.bi_sector;
+		__entry->size = bio->bi_iter.bi_size;
 	),
 
-	TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
+	TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
 		show_dev(__entry),
-		show_block_type(__entry->btype),
-		__entry->sync ? "sync" : "no sync",
+		show_bio_type(__entry->rw),
+		show_block_type(__entry->type),
 		(unsigned long long)__entry->sector,
 		__entry->size)
 );
 
+DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
+
+	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+
+	TP_ARGS(sb, rw, type, bio),
+
+	TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
+
+	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+
+	TP_ARGS(sb, rw, type, bio),
+
+	TP_CONDITION(bio)
+);
+
 DECLARE_EVENT_CLASS(f2fs__page,
 
 	TP_PROTO(struct page *page, int type),
@@ -674,15 +705,16 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
 	TP_ARGS(page, type)
 );
 
-TRACE_EVENT(f2fs_submit_write_page,
+TRACE_EVENT(f2fs_submit_page_mbio,
 
-	TP_PROTO(struct page *page, block_t blk_addr, int type),
+	TP_PROTO(struct page *page, int rw, int type, block_t blk_addr),
 
-	TP_ARGS(page, blk_addr, type),
+	TP_ARGS(page, rw, type, blk_addr),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(ino_t, ino)
+		__field(int, rw)
 		__field(int, type)
 		__field(pgoff_t, index)
 		__field(block_t, block)
@@ -691,13 +723,15 @@ TRACE_EVENT(f2fs_submit_write_page,
 	TP_fast_assign(
 		__entry->dev = page->mapping->host->i_sb->s_dev;
 		__entry->ino = page->mapping->host->i_ino;
+		__entry->rw = rw;
 		__entry->type = type;
 		__entry->index = page->index;
 		__entry->block = blk_addr;
 	),
 
-	TP_printk("dev = (%d,%d), ino = %lu, %s, index = %lu, blkaddr = 0x%llx",
+	TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx",
 		show_dev_ino(__entry),
+		show_bio_type(__entry->rw),
 		show_block_type(__entry->type),
 		(unsigned long)__entry->index,
 		(unsigned long long)__entry->block)
@@ -727,6 +761,29 @@ TRACE_EVENT(f2fs_write_checkpoint,
 		__entry->msg)
 );
 
+TRACE_EVENT(f2fs_issue_discard,
+
+	TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen),
+
+	TP_ARGS(sb, blkstart, blklen),
+
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(block_t, blkstart)
+		__field(block_t, blklen)
+	),
+
+	TP_fast_assign(
+		__entry->dev = sb->s_dev;
+		__entry->blkstart = blkstart;
+		__entry->blklen = blklen;
+	),
+
+	TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx",
+		show_dev(__entry),
+		(unsigned long long)__entry->blkstart,
+		(unsigned long long)__entry->blklen)
+);
 #endif /* _TRACE_F2FS_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index ec2a6ccfd7e5..3075ffbb9a83 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -45,6 +45,32 @@ TRACE_EVENT(mm_migrate_pages,
 		__print_symbolic(__entry->reason, MIGRATE_REASON))
 );
 
+TRACE_EVENT(mm_numa_migrate_ratelimit,
+
+	TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
+
+	TP_ARGS(p, dst_nid, nr_pages),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(int, dst_nid)
+		__field(unsigned long, nr_pages)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid = p->pid;
+		__entry->dst_nid = dst_nid;
+		__entry->nr_pages = nr_pages;
+	),
+
+	TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
+		__entry->comm,
+		__entry->pid,
+		__entry->dst_nid,
+		__entry->nr_pages)
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index f99645d05a8f..a34f27b2e394 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -6,9 +6,67 @@
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(net_dev_start_xmit,
+
+	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),
+
+	TP_ARGS(skb, dev),
+
+	TP_STRUCT__entry(
+		__string(name, dev->name)
+		__field(u16, queue_mapping)
+		__field(const void *, skbaddr)
+		__field(bool, vlan_tagged)
+		__field(u16, vlan_proto)
+		__field(u16, vlan_tci)
+		__field(u16, protocol)
+		__field(u8, ip_summed)
+		__field(unsigned int, len)
+		__field(unsigned int, data_len)
+		__field(int, network_offset)
+		__field(bool, transport_offset_valid)
+		__field(int, transport_offset)
+		__field(u8, tx_flags)
+		__field(u16, gso_size)
+		__field(u16, gso_segs)
+		__field(u16, gso_type)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev->name);
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->protocol = ntohs(skb->protocol);
+		__entry->ip_summed = skb->ip_summed;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->network_offset = skb_network_offset(skb);
+		__entry->transport_offset_valid =
+			skb_transport_header_was_set(skb);
+		__entry->transport_offset = skb_transport_offset(skb);
+		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
+		__entry->gso_size = skb_shinfo(skb)->gso_size;
+		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
+		__entry->gso_type = skb_shinfo(skb)->gso_type;
+	),
+
+	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
+		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
+		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
+		  __entry->protocol, __entry->ip_summed, __entry->len,
+		  __entry->data_len,
+		  __entry->network_offset, __entry->transport_offset_valid,
+		  __entry->transport_offset, __entry->tx_flags,
+		  __entry->gso_size, __entry->gso_segs, __entry->gso_type)
+);
+
 TRACE_EVENT(net_dev_xmit,
 
 	TP_PROTO(struct sk_buff *skb,
@@ -78,6 +136,106 @@ DEFINE_EVENT(net_dev_template, netif_rx,
 
 	TP_ARGS(skb)
 );
+
+DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__string(name, skb->dev->name)
+		__field(unsigned int, napi_id)
+		__field(u16, queue_mapping)
+		__field(const void *, skbaddr)
+		__field(bool, vlan_tagged)
+		__field(u16, vlan_proto)
+		__field(u16, vlan_tci)
+		__field(u16, protocol)
+		__field(u8, ip_summed)
+		__field(u32, rxhash)
+		__field(bool, l4_rxhash)
+		__field(unsigned int, len)
+		__field(unsigned int, data_len)
+		__field(unsigned int, truesize)
+		__field(bool, mac_header_valid)
+		__field(int, mac_header)
+		__field(unsigned char, nr_frags)
+		__field(u16, gso_size)
+		__field(u16, gso_type)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		__entry->napi_id = skb->napi_id;
+#else
+		__entry->napi_id = 0;
+#endif
+		__entry->queue_mapping = skb->queue_mapping;
+		__entry->skbaddr = skb;
+		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_proto = ntohs(skb->vlan_proto);
+		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->protocol = ntohs(skb->protocol);
+		__entry->ip_summed = skb->ip_summed;
+		__entry->rxhash = skb->rxhash;
+		__entry->l4_rxhash = skb->l4_rxhash;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->truesize = skb->truesize;
+		__entry->mac_header_valid = skb_mac_header_was_set(skb);
+		__entry->mac_header = skb_mac_header(skb) - skb->data;
+		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
+		__entry->gso_size = skb_shinfo(skb)->gso_size;
+		__entry->gso_type = skb_shinfo(skb)->gso_type;
+	),
+
+	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
+		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
+		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
+		  __entry->rxhash, __entry->l4_rxhash, __entry->len,
+		  __entry->data_len, __entry->truesize,
+		  __entry->mac_header_valid, __entry->mac_header,
+		  __entry->nr_frags, __entry->gso_size, __entry->gso_type)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
+
+	TP_PROTO(const struct sk_buff *skb),
+
+	TP_ARGS(skb)
+);
+
 #endif /* _TRACE_NET_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index cda100d6762d..e5bf9a76f169 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -35,6 +35,54 @@ DEFINE_EVENT(cpu, cpu_idle,
 	TP_ARGS(state, cpu_id)
 );
 
+TRACE_EVENT(pstate_sample,
+
+	TP_PROTO(u32 core_busy,
+		u32 scaled_busy,
+		u32 state,
+		u64 mperf,
+		u64 aperf,
+		u32 freq
+		),
+
+	TP_ARGS(core_busy,
+		scaled_busy,
+		state,
+		mperf,
+		aperf,
+		freq
+		),
+
+	TP_STRUCT__entry(
+		__field(u32, core_busy)
+		__field(u32, scaled_busy)
+		__field(u32, state)
+		__field(u64, mperf)
+		__field(u64, aperf)
+		__field(u32, freq)
+
+	),
+
+	TP_fast_assign(
+		__entry->core_busy = core_busy;
+		__entry->scaled_busy = scaled_busy;
+		__entry->state = state;
+		__entry->mperf = mperf;
+		__entry->aperf = aperf;
+		__entry->freq = freq;
+	),
+
+	TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
+		(unsigned long)__entry->core_busy,
+		(unsigned long)__entry->scaled_busy,
+		(unsigned long)__entry->state,
+		(unsigned long long)__entry->mperf,
+		(unsigned long long)__entry->aperf,
+		(unsigned long)__entry->freq
+		)
+
+);
+
 /* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
 #ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
 #define _PWR_EVENT_AVOID_DOUBLE_DEFINING
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
index 88b878383797..1c875ad1ee5f 100644
--- a/include/trace/events/ras.h
+++ b/include/trace/events/ras.h
@@ -5,7 +5,7 @@
 #define _TRACE_AER_H
 
 #include <linux/tracepoint.h>
-#include <linux/edac.h>
+#include <linux/aer.h>
 
 
 /*
@@ -63,10 +63,10 @@ TRACE_EVENT(aer_event,
 
 	TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
 		__get_str(dev_name),
-		__entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
-			__entry->severity == HW_EVENT_ERR_FATAL ?
-			"Fatal" : "Uncorrected",
-		__entry->severity == HW_EVENT_ERR_CORRECTED ?
+		__entry->severity == AER_CORRECTABLE ? "Corrected" :
+			__entry->severity == AER_FATAL ?
+			"Fatal" : "Uncorrected, non-fatal",
+		__entry->severity == AER_CORRECTABLE ?
 		__print_flags(__entry->status, "|", aer_correctable_errors) :
 		__print_flags(__entry->status, "|", aer_uncorrectable_errors))
 );
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 04c308413a5d..67e1bbf83695 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -443,6 +443,93 @@ TRACE_EVENT(sched_process_hang,
 );
 #endif /* CONFIG_DETECT_HUNG_TASK */
 
+DECLARE_EVENT_CLASS(sched_move_task_template,
+
+	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
+
+	TP_ARGS(tsk, src_cpu, dst_cpu),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(pid_t, tgid)
+		__field(pid_t, ngid)
+		__field(int, src_cpu)
+		__field(int, src_nid)
+		__field(int, dst_cpu)
+		__field(int, dst_nid)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(tsk);
+		__entry->tgid = task_tgid_nr(tsk);
+		__entry->ngid = task_numa_group_id(tsk);
+		__entry->src_cpu = src_cpu;
+		__entry->src_nid = cpu_to_node(src_cpu);
+		__entry->dst_cpu = dst_cpu;
+		__entry->dst_nid = cpu_to_node(dst_cpu);
+	),
+
+	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
+		__entry->pid, __entry->tgid, __entry->ngid,
+		__entry->src_cpu, __entry->src_nid,
+		__entry->dst_cpu, __entry->dst_nid)
+);
+
+/*
+ * Tracks migration of tasks from one runqueue to another. Can be used to
+ * detect if automatic NUMA balancing is bouncing between nodes
+ */
+DEFINE_EVENT(sched_move_task_template, sched_move_numa,
+	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
+
+	TP_ARGS(tsk, src_cpu, dst_cpu)
+);
+
+DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
+	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
+
+	TP_ARGS(tsk, src_cpu, dst_cpu)
+);
+
+TRACE_EVENT(sched_swap_numa,
+
+	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+		 struct task_struct *dst_tsk, int dst_cpu),
+
+	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
+
+	TP_STRUCT__entry(
+		__field(pid_t, src_pid)
+		__field(pid_t, src_tgid)
+		__field(pid_t, src_ngid)
+		__field(int, src_cpu)
+		__field(int, src_nid)
+		__field(pid_t, dst_pid)
+		__field(pid_t, dst_tgid)
+		__field(pid_t, dst_ngid)
+		__field(int, dst_cpu)
+		__field(int, dst_nid)
+	),
+
+	TP_fast_assign(
+		__entry->src_pid = task_pid_nr(src_tsk);
+		__entry->src_tgid = task_tgid_nr(src_tsk);
+		__entry->src_ngid = task_numa_group_id(src_tsk);
+		__entry->src_cpu = src_cpu;
+		__entry->src_nid = cpu_to_node(src_cpu);
+		__entry->dst_pid = task_pid_nr(dst_tsk);
+		__entry->dst_tgid = task_tgid_nr(dst_tsk);
+		__entry->dst_ngid = task_numa_group_id(dst_tsk);
+		__entry->dst_cpu = dst_cpu;
+		__entry->dst_nid = cpu_to_node(dst_cpu);
+	),
+
+	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
+		__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
+		__entry->src_cpu, __entry->src_nid,
+		__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
+		__entry->dst_cpu, __entry->dst_nid)
+);
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index d51d16c7afd8..1fef3e6e9436 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
 	),
 
 	TP_fast_assign(
-		__entry->client_id = clnt->cl_clid;
+		__entry->client_id = clnt ? clnt->cl_clid : -1;
 		__entry->task_id = task->tk_pid;
 		__entry->action = action;
 		__entry->runstate = task->tk_runstate;
@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
 		__entry->flags = task->tk_flags;
 	),
 
-	TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf",
+	TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
 		__entry->task_id, __entry->client_id,
 		__entry->flags,
 		__entry->runstate,
@@ -301,6 +301,7 @@ DECLARE_EVENT_CLASS(xs_socket_event_done,
 
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change);
 DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect);
+DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error);
 DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
new file mode 100644
index 000000000000..ef94ecad1c94
--- /dev/null
+++ b/include/trace/events/v4l2.h
@@ -0,0 +1,157 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM v4l2
+
+#if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_V4L2_H
+
+#include <linux/tracepoint.h>
+
+#define show_type(type) \
+	__print_symbolic(type, \
+		{ V4L2_BUF_TYPE_VIDEO_CAPTURE, "VIDEO_CAPTURE" }, \
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT, "VIDEO_OUTPUT" }, \
+		{ V4L2_BUF_TYPE_VIDEO_OVERLAY, "VIDEO_OVERLAY" }, \
+		{ V4L2_BUF_TYPE_VBI_CAPTURE, "VBI_CAPTURE" }, \
+		{ V4L2_BUF_TYPE_VBI_OUTPUT, "VBI_OUTPUT" }, \
+		{ V4L2_BUF_TYPE_SLICED_VBI_CAPTURE, "SLICED_VBI_CAPTURE" }, \
+		{ V4L2_BUF_TYPE_SLICED_VBI_OUTPUT, "SLICED_VBI_OUTPUT" }, \
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY, "VIDEO_OUTPUT_OVERLAY" },\
+		{ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" },\
+		{ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" }, \
+		{ V4L2_BUF_TYPE_PRIVATE, "PRIVATE" })
+
+#define show_field(field) \
+	__print_symbolic(field, \
+		{ V4L2_FIELD_ANY, "ANY" }, \
+		{ V4L2_FIELD_NONE, "NONE" }, \
+		{ V4L2_FIELD_TOP, "TOP" }, \
+		{ V4L2_FIELD_BOTTOM, "BOTTOM" }, \
+		{ V4L2_FIELD_INTERLACED, "INTERLACED" }, \
+		{ V4L2_FIELD_SEQ_TB, "SEQ_TB" }, \
+		{ V4L2_FIELD_SEQ_BT, "SEQ_BT" }, \
+		{ V4L2_FIELD_ALTERNATE, "ALTERNATE" }, \
+		{ V4L2_FIELD_INTERLACED_TB, "INTERLACED_TB" }, \
+		{ V4L2_FIELD_INTERLACED_BT, "INTERLACED_BT" })
+
+#define show_timecode_type(type) \
+	__print_symbolic(type, \
+		{ V4L2_TC_TYPE_24FPS, "24FPS" }, \
+		{ V4L2_TC_TYPE_25FPS, "25FPS" }, \
+		{ V4L2_TC_TYPE_30FPS, "30FPS" }, \
+		{ V4L2_TC_TYPE_50FPS, "50FPS" }, \
+		{ V4L2_TC_TYPE_60FPS, "60FPS" })
+
+#define show_flags(flags) \
+	__print_flags(flags, "|", \
+		{ V4L2_BUF_FLAG_MAPPED, "MAPPED" }, \
+		{ V4L2_BUF_FLAG_QUEUED, "QUEUED" }, \
+		{ V4L2_BUF_FLAG_DONE, "DONE" }, \
+		{ V4L2_BUF_FLAG_KEYFRAME, "KEYFRAME" }, \
+		{ V4L2_BUF_FLAG_PFRAME, "PFRAME" }, \
+		{ V4L2_BUF_FLAG_BFRAME, "BFRAME" }, \
+		{ V4L2_BUF_FLAG_ERROR, "ERROR" }, \
+		{ V4L2_BUF_FLAG_TIMECODE, "TIMECODE" }, \
+		{ V4L2_BUF_FLAG_PREPARED, "PREPARED" }, \
+		{ V4L2_BUF_FLAG_NO_CACHE_INVALIDATE, "NO_CACHE_INVALIDATE" }, \
+		{ V4L2_BUF_FLAG_NO_CACHE_CLEAN, "NO_CACHE_CLEAN" }, \
+		{ V4L2_BUF_FLAG_TIMESTAMP_MASK, "TIMESTAMP_MASK" }, \
+		{ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN, "TIMESTAMP_UNKNOWN" }, \
+		{ V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, "TIMESTAMP_MONOTONIC" }, \
+		{ V4L2_BUF_FLAG_TIMESTAMP_COPY, "TIMESTAMP_COPY" })
+
+#define show_timecode_flags(flags) \
+	__print_flags(flags, "|", \
+		{ V4L2_TC_FLAG_DROPFRAME, "DROPFRAME" }, \
+		{ V4L2_TC_FLAG_COLORFRAME, "COLORFRAME" }, \
+		{ V4L2_TC_USERBITS_USERDEFINED, "USERBITS_USERDEFINED" }, \
+		{ V4L2_TC_USERBITS_8BITCHARS, "USERBITS_8BITCHARS" })
+
+#define V4L2_TRACE_EVENT(event_name) \
+	TRACE_EVENT(event_name, \
+		TP_PROTO(int minor, struct v4l2_buffer *buf), \
+		\
+		TP_ARGS(minor, buf), \
+		\
+		TP_STRUCT__entry( \
+			__field(int, minor) \
+			__field(u32, index) \
+			__field(u32, type) \
+			__field(u32, bytesused) \
+			__field(u32, flags) \
+			__field(u32, field) \
+			__field(s64, timestamp) \
+			__field(u32, timecode_type) \
+			__field(u32, timecode_flags) \
+			__field(u8, timecode_frames) \
+			__field(u8, timecode_seconds) \
+			__field(u8, timecode_minutes) \
+			__field(u8, timecode_hours) \
+			__field(u8, timecode_userbits0) \
+			__field(u8, timecode_userbits1) \
+			__field(u8, timecode_userbits2) \
+			__field(u8, timecode_userbits3) \
+			__field(u32, sequence) \
+		), \
+		\
+		TP_fast_assign( \
+			__entry->minor = minor; \
+			__entry->index = buf->index; \
+			__entry->type = buf->type; \
+			__entry->bytesused = buf->bytesused; \
+			__entry->flags = buf->flags; \
+			__entry->field = buf->field; \
+			__entry->timestamp = \
+				timeval_to_ns(&buf->timestamp); \
+			__entry->timecode_type = buf->timecode.type; \
+			__entry->timecode_flags = buf->timecode.flags; \
+			__entry->timecode_frames = \
+				buf->timecode.frames; \
+			__entry->timecode_seconds = \
+				buf->timecode.seconds; \
+			__entry->timecode_minutes = \
+				buf->timecode.minutes; \
+			__entry->timecode_hours = buf->timecode.hours; \
+			__entry->timecode_userbits0 = \
+				buf->timecode.userbits[0]; \
+			__entry->timecode_userbits1 = \
+				buf->timecode.userbits[1]; \
+			__entry->timecode_userbits2 = \
+				buf->timecode.userbits[2]; \
+			__entry->timecode_userbits3 = \
+				buf->timecode.userbits[3]; \
+			__entry->sequence = buf->sequence; \
+		), \
+		\
+		TP_printk("minor = %d, index = %u, type = %s, " \
+			  "bytesused = %u, flags = %s, " \
+			  "field = %s, timestamp = %llu, timecode = { " \
+			  "type = %s, flags = %s, frames = %u, " \
+			  "seconds = %u, minutes = %u, hours = %u, " \
+			  "userbits = { %u %u %u %u } }, " \
+			  "sequence = %u", __entry->minor, \
+			  __entry->index, show_type(__entry->type), \
+			  __entry->bytesused, \
+			  show_flags(__entry->flags), \
+			  show_field(__entry->field), \
+			  __entry->timestamp, \
+			  show_timecode_type(__entry->timecode_type), \
+			  show_timecode_flags(__entry->timecode_flags), \
+			  __entry->timecode_frames, \
+			  __entry->timecode_seconds, \
+			  __entry->timecode_minutes, \
+			  __entry->timecode_hours, \
+			  __entry->timecode_userbits0, \
+			  __entry->timecode_userbits1, \
+			  __entry->timecode_userbits2, \
+			  __entry->timecode_userbits3, \
+			  __entry->sequence \
+		) \
+	)
+
+V4L2_TRACE_EVENT(v4l2_dqbuf);
+V4L2_TRACE_EVENT(v4l2_qbuf);
+
+#endif /* if !defined(_TRACE_V4L2_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c7bbbe794e65..464ea82e10db 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,11 +287,11 @@ TRACE_EVENT(writeback_queue_io,
 		__field(int, reason)
 	),
 	TP_fast_assign(
-		unsigned long older_than_this = work->older_than_this;
+		unsigned long *older_than_this = work->older_than_this;
 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
-		__entry->older = older_than_this;
+		__entry->older = older_than_this ? *older_than_this : 0;
 		__entry->age = older_than_this ?
-				(jiffies - older_than_this) * 1000 / HZ : -1;
+				(jiffies - *older_than_this) * 1000 / HZ : -1;
 		__entry->moved = moved;
 		__entry->reason = work->reason;
 	),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5c38606613d8..1ee19a24cc5f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 #undef __array
 #define __array(type, item, len) \
 	do { \
-		mutex_lock(&event_storage_mutex); \
+		char *type_str = #type"["__stringify(len)"]"; \
 		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
-		snprintf(event_storage, sizeof(event_storage), \
-			 "%s[%d]", #type, len); \
-		ret = trace_define_field(event_call, event_storage, #item, \
+		ret = trace_define_field(event_call, type_str, #item, \
 				 offsetof(typeof(field), item), \
 				 sizeof(field.item), \
 				 is_signed_type(type), FILTER_OTHER); \
-		mutex_unlock(&event_storage_mutex); \
 		if (ret) \
 			return ret; \
 	} while (0);
@@ -418,6 +415,8 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	struct ftrace_event_file *ftrace_file = __data;
  *	struct ftrace_event_call *event_call = ftrace_file->event_call;
  *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = ftrace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
@@ -425,9 +424,12 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	int __data_size;
  *	int pc;
  *
- *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- *		     &ftrace_file->flags))
- *		return;
+ *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+ *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ *			event_triggers_call(ftrace_file, NULL);
+ *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ *			return;
+ *	}
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
@@ -445,8 +447,17 @@ static inline notrace int ftrace_get_offsets_##call( \
  *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			   __array macros.
  *
- *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(ftrace_file, entry);
+ *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
  *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(ftrace_file, __tt);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
@@ -539,8 +550,7 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size; \
 	int pc; \
 	\
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
-		     &ftrace_file->flags)) \
+	if (ftrace_trigger_soft_disabled(ftrace_file)) \
 		return; \
 	\
 	local_save_flags(irq_flags); \
@@ -560,8 +570,8 @@ ftrace_raw_event_##call(void *__data, proto) \
 	\
 	{ assign; } \
 	\
-	if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
-		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
+	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, \
+				    irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check