Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/compaction.h | 209
-rw-r--r--  include/trace/events/f2fs.h       | 148
-rw-r--r--  include/trace/events/iommu.h      |  31
-rw-r--r--  include/trace/events/kmem.h       |   7
-rw-r--r--  include/trace/events/kvm.h        |  19
-rw-r--r--  include/trace/events/net.h        |   8
-rw-r--r--  include/trace/events/tlb.h        |   4
-rw-r--r--  include/trace/events/writeback.h  |  12
8 files changed, 311 insertions, 127 deletions
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6814b917bdf..9a6a3fe0fb51 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -11,39 +11,55 @@
 
 DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
 
-	TP_PROTO(unsigned long nr_scanned,
+	TP_PROTO(
+		unsigned long start_pfn,
+		unsigned long end_pfn,
+		unsigned long nr_scanned,
 		unsigned long nr_taken),
 
-	TP_ARGS(nr_scanned, nr_taken),
+	TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
 
 	TP_STRUCT__entry(
+		__field(unsigned long, start_pfn)
+		__field(unsigned long, end_pfn)
 		__field(unsigned long, nr_scanned)
 		__field(unsigned long, nr_taken)
 	),
 
 	TP_fast_assign(
+		__entry->start_pfn = start_pfn;
+		__entry->end_pfn = end_pfn;
 		__entry->nr_scanned = nr_scanned;
 		__entry->nr_taken = nr_taken;
 	),
 
-	TP_printk("nr_scanned=%lu nr_taken=%lu",
+	TP_printk("range=(0x%lx ~ 0x%lx) nr_scanned=%lu nr_taken=%lu",
+		__entry->start_pfn,
+		__entry->end_pfn,
 		__entry->nr_scanned,
 		__entry->nr_taken)
 );
 
 DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
 
-	TP_PROTO(unsigned long nr_scanned,
+	TP_PROTO(
+		unsigned long start_pfn,
+		unsigned long end_pfn,
+		unsigned long nr_scanned,
 		unsigned long nr_taken),
 
-	TP_ARGS(nr_scanned, nr_taken)
+	TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
 );
 
 DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
-	TP_PROTO(unsigned long nr_scanned,
+
+	TP_PROTO(
+		unsigned long start_pfn,
+		unsigned long end_pfn,
+		unsigned long nr_scanned,
 		unsigned long nr_taken),
 
-	TP_ARGS(nr_scanned, nr_taken)
+	TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
 );
 
 TRACE_EVENT(mm_compaction_migratepages,
@@ -85,47 +101,198 @@ TRACE_EVENT(mm_compaction_migratepages,
 );
 
 TRACE_EVENT(mm_compaction_begin,
-	TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
-		unsigned long free_start, unsigned long zone_end),
+	TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+		unsigned long free_pfn, unsigned long zone_end, bool sync),
 
-	TP_ARGS(zone_start, migrate_start, free_start, zone_end),
+	TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync),
 
 	TP_STRUCT__entry(
 		__field(unsigned long, zone_start)
-		__field(unsigned long, migrate_start)
-		__field(unsigned long, free_start)
+		__field(unsigned long, migrate_pfn)
+		__field(unsigned long, free_pfn)
 		__field(unsigned long, zone_end)
+		__field(bool, sync)
 	),
 
 	TP_fast_assign(
 		__entry->zone_start = zone_start;
-		__entry->migrate_start = migrate_start;
-		__entry->free_start = free_start;
+		__entry->migrate_pfn = migrate_pfn;
+		__entry->free_pfn = free_pfn;
 		__entry->zone_end = zone_end;
+		__entry->sync = sync;
 	),
 
-	TP_printk("zone_start=%lu migrate_start=%lu free_start=%lu zone_end=%lu",
+	TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s",
 		__entry->zone_start,
-		__entry->migrate_start,
-		__entry->free_start,
-		__entry->zone_end)
+		__entry->migrate_pfn,
+		__entry->free_pfn,
+		__entry->zone_end,
+		__entry->sync ? "sync" : "async")
 );
 
 TRACE_EVENT(mm_compaction_end,
-	TP_PROTO(int status),
+	TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
+		unsigned long free_pfn, unsigned long zone_end, bool sync,
+		int status),
 
-	TP_ARGS(status),
+	TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status),
 
 	TP_STRUCT__entry(
+		__field(unsigned long, zone_start)
+		__field(unsigned long, migrate_pfn)
+		__field(unsigned long, free_pfn)
+		__field(unsigned long, zone_end)
+		__field(bool, sync)
 		__field(int, status)
 	),
 
 	TP_fast_assign(
+		__entry->zone_start = zone_start;
+		__entry->migrate_pfn = migrate_pfn;
+		__entry->free_pfn = free_pfn;
+		__entry->zone_end = zone_end;
+		__entry->sync = sync;
 		__entry->status = status;
 	),
 
-	TP_printk("status=%d", __entry->status)
+	TP_printk("zone_start=0x%lx migrate_pfn=0x%lx free_pfn=0x%lx zone_end=0x%lx, mode=%s status=%s",
+		__entry->zone_start,
+		__entry->migrate_pfn,
+		__entry->free_pfn,
+		__entry->zone_end,
+		__entry->sync ? "sync" : "async",
+		compaction_status_string[__entry->status])
+);
+
+TRACE_EVENT(mm_compaction_try_to_compact_pages,
+
+	TP_PROTO(
+		int order,
+		gfp_t gfp_mask,
+		enum migrate_mode mode),
+
+	TP_ARGS(order, gfp_mask, mode),
+
+	TP_STRUCT__entry(
+		__field(int, order)
+		__field(gfp_t, gfp_mask)
+		__field(enum migrate_mode, mode)
+	),
+
+	TP_fast_assign(
+		__entry->order = order;
+		__entry->gfp_mask = gfp_mask;
+		__entry->mode = mode;
+	),
+
+	TP_printk("order=%d gfp_mask=0x%x mode=%d",
+		__entry->order,
+		__entry->gfp_mask,
+		(int)__entry->mode)
+);
+
+DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
+
+	TP_PROTO(struct zone *zone,
+		int order,
+		int ret),
+
+	TP_ARGS(zone, order, ret),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(char *, name)
+		__field(int, order)
+		__field(int, ret)
+	),
+
+	TP_fast_assign(
+		__entry->nid = zone_to_nid(zone);
+		__entry->name = (char *)zone->name;
+		__entry->order = order;
+		__entry->ret = ret;
+	),
+
+	TP_printk("node=%d zone=%-8s order=%d ret=%s",
+		__entry->nid,
+		__entry->name,
+		__entry->order,
+		compaction_status_string[__entry->ret])
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished,
+
+	TP_PROTO(struct zone *zone,
+		int order,
+		int ret),
+
+	TP_ARGS(zone, order, ret)
+);
+
+DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
+
+	TP_PROTO(struct zone *zone,
+		int order,
+		int ret),
+
+	TP_ARGS(zone, order, ret)
+);
+
+#ifdef CONFIG_COMPACTION
+DECLARE_EVENT_CLASS(mm_compaction_defer_template,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(char *, name)
+		__field(int, order)
+		__field(unsigned int, considered)
+		__field(unsigned int, defer_shift)
+		__field(int, order_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nid = zone_to_nid(zone);
+		__entry->name = (char *)zone->name;
+		__entry->order = order;
+		__entry->considered = zone->compact_considered;
+		__entry->defer_shift = zone->compact_defer_shift;
+		__entry->order_failed = zone->compact_order_failed;
+	),
+
+	TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
+		__entry->nid,
+		__entry->name,
+		__entry->order,
+		__entry->order_failed,
+		__entry->considered,
+		1UL << __entry->defer_shift)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
 );
+#endif
 
 #endif /* _TRACE_COMPACTION_H */
 
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index bbc4de9baef7..5422dbfaf97d 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -72,6 +72,7 @@
 #define show_cpreason(type) \
 	__print_symbolic(type, \
 		{ CP_UMOUNT, "Umount" }, \
+		{ CP_FASTBOOT, "Fastboot" }, \
 		{ CP_SYNC, "Sync" }, \
 		{ CP_DISCARD, "Discard" })
 
@@ -148,14 +149,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
 
 TRACE_EVENT(f2fs_sync_file_exit,
 
-	TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+	TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
 
 	TP_ARGS(inode, need_cp, datasync, ret),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(ino_t, ino)
-		__field(bool, need_cp)
+		__field(int, need_cp)
 		__field(int, datasync)
 		__field(int, ret)
 	),
@@ -190,7 +191,7 @@ TRACE_EVENT(f2fs_sync_fs,
 
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->dirty = F2FS_SB(sb)->s_dirty;
+		__entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
 		__entry->wait = wait;
 	),
 
@@ -440,38 +441,6 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
 		__entry->err)
 );
 
-TRACE_EVENT_CONDITION(f2fs_submit_page_bio,
-
-	TP_PROTO(struct page *page, sector_t blkaddr, int type),
-
-	TP_ARGS(page, blkaddr, type),
-
-	TP_CONDITION(page->mapping),
-
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(ino_t, ino)
-		__field(pgoff_t, index)
-		__field(sector_t, blkaddr)
-		__field(int, type)
-	),
-
-	TP_fast_assign(
-		__entry->dev = page->mapping->host->i_sb->s_dev;
-		__entry->ino = page->mapping->host->i_ino;
-		__entry->index = page->index;
-		__entry->blkaddr = blkaddr;
-		__entry->type = type;
-	),
-
-	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-		"blkaddr = 0x%llx, bio_type = %s%s",
-		show_dev_ino(__entry),
-		(unsigned long)__entry->index,
-		(unsigned long long)__entry->blkaddr,
-		show_bio_type(__entry->type))
-);
-
 TRACE_EVENT(f2fs_get_data_block,
 	TP_PROTO(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int ret),
@@ -680,11 +649,63 @@ TRACE_EVENT(f2fs_reserve_new_block,
 		__entry->ofs_in_node)
 );
 
+DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+	TP_ARGS(page, fio),
+
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(ino_t, ino)
+		__field(pgoff_t, index)
+		__field(block_t, blkaddr)
+		__field(int, rw)
+		__field(int, type)
+	),
+
+	TP_fast_assign(
+		__entry->dev = page->mapping->host->i_sb->s_dev;
+		__entry->ino = page->mapping->host->i_ino;
+		__entry->index = page->index;
+		__entry->blkaddr = fio->blk_addr;
+		__entry->rw = fio->rw;
+		__entry->type = fio->type;
+	),
+
+	TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+		"blkaddr = 0x%llx, rw = %s%s, type = %s",
+		show_dev_ino(__entry),
+		(unsigned long)__entry->index,
+		(unsigned long long)__entry->blkaddr,
+		show_bio_type(__entry->rw),
+		show_block_type(__entry->type))
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+	TP_ARGS(page, fio),
+
+	TP_CONDITION(page->mapping)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+
+	TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+	TP_ARGS(page, fio),
+
+	TP_CONDITION(page->mapping)
+);
+
 DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
 
-	TP_ARGS(sb, rw, type, bio),
+	TP_ARGS(sb, fio, bio),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -696,8 +717,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->rw = rw;
-		__entry->type = type;
+		__entry->rw = fio->rw;
+		__entry->type = fio->type;
 		__entry->sector = bio->bi_iter.bi_sector;
 		__entry->size = bio->bi_iter.bi_size;
 	),
@@ -712,18 +733,20 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio,
 
 DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
 
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
 
-	TP_ARGS(sb, rw, type, bio),
+	TP_ARGS(sb, fio, bio),
 
 	TP_CONDITION(bio)
 );
 
 DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
 
-	TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio),
+	TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+		struct bio *bio),
 
-	TP_ARGS(sb, rw, type, bio),
+	TP_ARGS(sb, fio, bio),
 
 	TP_CONDITION(bio)
 );
@@ -916,38 +939,6 @@ TRACE_EVENT(f2fs_writepages,
 		__entry->for_sync)
 );
 
-TRACE_EVENT(f2fs_submit_page_mbio,
-
-	TP_PROTO(struct page *page, int rw, int type, block_t blk_addr),
-
-	TP_ARGS(page, rw, type, blk_addr),
-
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(ino_t, ino)
-		__field(int, rw)
-		__field(int, type)
-		__field(pgoff_t, index)
-		__field(block_t, block)
-	),
-
-	TP_fast_assign(
-		__entry->dev = page->mapping->host->i_sb->s_dev;
-		__entry->ino = page->mapping->host->i_ino;
-		__entry->rw = rw;
-		__entry->type = type;
-		__entry->index = page->index;
-		__entry->block = blk_addr;
-	),
-
-	TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx",
-		show_dev_ino(__entry),
-		show_bio_type(__entry->rw),
-		show_block_type(__entry->type),
-		(unsigned long)__entry->index,
-		(unsigned long long)__entry->block)
-);
-
 TRACE_EVENT(f2fs_write_checkpoint,
 
 	TP_PROTO(struct super_block *sb, int reason, char *msg),
@@ -998,14 +989,15 @@ TRACE_EVENT(f2fs_issue_discard,
 
 TRACE_EVENT(f2fs_issue_flush,
 
-	TP_PROTO(struct super_block *sb, bool nobarrier, bool flush_merge),
+	TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+		unsigned int flush_merge),
 
 	TP_ARGS(sb, nobarrier, flush_merge),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(bool, nobarrier)
-		__field(bool, flush_merge)
+		__field(unsigned int, nobarrier)
+		__field(unsigned int, flush_merge)
 	),
 
 	TP_fast_assign(
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index a8f5c32d174b..2c7befb10f13 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
 	TP_ARGS(dev)
 );
 
-DECLARE_EVENT_CLASS(iommu_map_unmap,
+TRACE_EVENT(map,
 
 	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
 
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
 	TP_STRUCT__entry(
 		__field(u64, iova)
 		__field(u64, paddr)
-		__field(int, size)
+		__field(size_t, size)
 	),
 
 	TP_fast_assign(
@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
 		__entry->size = size;
 	),
 
-	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
 		__entry->iova, __entry->paddr, __entry->size
 	)
 );
 
-DEFINE_EVENT(iommu_map_unmap, map,
+TRACE_EVENT(unmap,
 
-	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-
-	TP_ARGS(iova, paddr, size)
-);
-
-DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
-
-	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-
-	TP_ARGS(iova, paddr, size),
-
-	TP_printk("IOMMU: iova=0x%016llx size=0x%x",
-		__entry->iova, __entry->size
+	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+
+	TP_ARGS(iova, size, unmapped_size),
+
+	TP_STRUCT__entry(
+		__field(u64, iova)
+		__field(size_t, size)
+		__field(size_t, unmapped_size)
+	),
+
+	TP_fast_assign(
+		__entry->iova = iova;
+		__entry->size = size;
+		__entry->unmapped_size = unmapped_size;
+	),
+
+	TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
+		__entry->iova, __entry->size, __entry->unmapped_size
 	)
 );
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index aece1346ceb7..4ad10baecd4d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 
 	TP_PROTO(struct page *page,
 		int alloc_order, int fallback_order,
-		int alloc_migratetype, int fallback_migratetype, int new_migratetype),
+		int alloc_migratetype, int fallback_migratetype),
 
 	TP_ARGS(page,
 		alloc_order, fallback_order,
-		alloc_migratetype, fallback_migratetype, new_migratetype),
+		alloc_migratetype, fallback_migratetype),
 
 	TP_STRUCT__entry(
 		__field( struct page *, page )
@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->fallback_order = fallback_order;
 		__entry->alloc_migratetype = alloc_migratetype;
 		__entry->fallback_migratetype = fallback_migratetype;
-		__entry->change_ownership = (new_migratetype == alloc_migratetype);
+		__entry->change_ownership = (alloc_migratetype ==
+					get_pageblock_migratetype(page));
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 86b399c66c3d..a44062da684b 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -37,6 +37,25 @@ TRACE_EVENT(kvm_userspace_exit,
 		__entry->errno < 0 ? -__entry->errno : __entry->reason)
 );
 
+TRACE_EVENT(kvm_vcpu_wakeup,
+	TP_PROTO(__u64 ns, bool waited),
+	TP_ARGS(ns, waited),
+
+	TP_STRUCT__entry(
+		__field( __u64, ns )
+		__field( bool, waited )
+	),
+
+	TP_fast_assign(
+		__entry->ns = ns;
+		__entry->waited = waited;
+	),
+
+	TP_printk("%s time %lld ns",
+		__entry->waited ? "wait" : "poll",
+		__entry->ns)
+);
+
 #if defined(CONFIG_HAVE_KVM_IRQFD)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 1de256b35807..49cc7c3de252 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -40,9 +40,9 @@ TRACE_EVENT(net_dev_start_xmit,
 		__assign_str(name, dev->name);
 		__entry->queue_mapping = skb->queue_mapping;
 		__entry->skbaddr = skb;
-		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_tagged = skb_vlan_tag_present(skb);
 		__entry->vlan_proto = ntohs(skb->vlan_proto);
-		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->vlan_tci = skb_vlan_tag_get(skb);
 		__entry->protocol = ntohs(skb->protocol);
 		__entry->ip_summed = skb->ip_summed;
 		__entry->len = skb->len;
@@ -174,9 +174,9 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
 #endif
 		__entry->queue_mapping = skb->queue_mapping;
 		__entry->skbaddr = skb;
-		__entry->vlan_tagged = vlan_tx_tag_present(skb);
+		__entry->vlan_tagged = skb_vlan_tag_present(skb);
 		__entry->vlan_proto = ntohs(skb->vlan_proto);
-		__entry->vlan_tci = vlan_tx_tag_get(skb);
+		__entry->vlan_tci = skb_vlan_tag_get(skb);
 		__entry->protocol = ntohs(skb->protocol);
 		__entry->ip_summed = skb->ip_summed;
 		__entry->hash = skb->hash;
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 13391d288107..0e7635765153 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -13,11 +13,13 @@
 	{ TLB_LOCAL_SHOOTDOWN, "local shootdown" }, \
 	{ TLB_LOCAL_MM_SHOOTDOWN, "local mm shootdown" }
 
-TRACE_EVENT(tlb_flush,
+TRACE_EVENT_CONDITION(tlb_flush,
 
 	TP_PROTO(int reason, unsigned long pages),
 	TP_ARGS(reason, pages),
 
+	TP_CONDITION(cpu_online(smp_processor_id())),
+
 	TP_STRUCT__entry(
 		__field( int, reason)
 		__field(unsigned long, pages)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 5ecb4c234625..5a14ead59696 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -49,7 +49,7 @@ TRACE_EVENT(writeback_dirty_page,
 
 	TP_fast_assign(
 		strncpy(__entry->name,
-			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
 		__entry->ino = mapping ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 	),
 
 	TP_fast_assign(
-		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(inode);
 
 		/* may be called for files on pseudo FSes w/ unregistered bdi */
 		strncpy(__entry->name,
@@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
 
 	TP_fast_assign(
 		strncpy(__entry->name,
-			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+			dev_name(inode_to_bdi(inode)->dev), 32);
 		__entry->ino = inode->i_ino;
 		__entry->sync_mode = wbc->sync_mode;
 	),
@@ -168,10 +168,8 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, reason)
 	),
 	TP_fast_assign(
-		struct device *dev = bdi->dev;
-		if (!dev)
-			dev = default_backing_dev_info.dev;
-		strncpy(__entry->name, dev_name(dev), 32);
+		strncpy(__entry->name,
+			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
 		__entry->nr_pages = work->nr_pages;
 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 		__entry->sync_mode = work->sync_mode;