author    Frederic Weisbecker <fweisbec@gmail.com>  2009-09-23 17:08:43 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>  2009-09-23 17:08:43 -0400
commit    d7a4b414eed51f1653bb05ebe84122bf9a7ae18b (patch)
tree      bd6603a0c27de4c138a1767871897e9cd3e1a1d2 /include/trace
parent    1f0ab40976460bc4673fa204ce917a725185d8f2 (diff)
parent    a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff)
Merge commit 'linus/master' into tracing/kprobes
Conflicts:
    kernel/trace/Makefile
    kernel/trace/trace.h
    kernel/trace/trace_event_types.h
    kernel/trace/trace_export.c

Merge reason: Sync with latest significant tracing core changes.
Diffstat (limited to 'include/trace')
-rw-r--r--    include/trace/events/block.h      4
-rw-r--r--    include/trace/events/ext4.h     146
-rw-r--r--    include/trace/events/irq.h       21
-rw-r--r--    include/trace/events/jbd2.h       2
-rw-r--r--    include/trace/events/kmem.h     163
-rw-r--r--    include/trace/events/kvm.h      151
-rw-r--r--    include/trace/events/power.h     81
-rw-r--r--    include/trace/events/sched.h    128
-rw-r--r--    include/trace/events/skb.h       20
-rw-r--r--    include/trace/events/timer.h    342
-rw-r--r--    include/trace/ftrace.h          125
11 files changed, 1076 insertions(+), 107 deletions(-)
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9a74b468a229..d86af94691c2 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -171,6 +171,7 @@ TRACE_EVENT(block_rq_complete,
           (unsigned long long)__entry->sector,
           __entry->nr_sector, __entry->errors)
 );
+
 TRACE_EVENT(block_bio_bounce,
 
     TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -186,7 +187,8 @@ TRACE_EVENT(block_bio_bounce,
     ),
 
     TP_fast_assign(
-        __entry->dev = bio->bi_bdev->bd_dev;
+        __entry->dev = bio->bi_bdev ?
+                       bio->bi_bdev->bd_dev : 0;
         __entry->sector = bio->bi_sector;
         __entry->nr_sector = bio->bi_size >> 9;
         blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
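
The change above makes block_bio_bounce tolerate a bio without a device: a bio queued for bouncing may not have bio->bi_bdev set yet, so the tracepoint now records dev 0 instead of dereferencing a NULL pointer. A minimal userspace sketch of the same guard pattern, with invented stand-in struct names rather than the kernel's:

    #include <stdio.h>

    struct blockdev { unsigned int dev_id; };
    struct io_req  { struct blockdev *bdev; };

    /* Record 0 as the device when none is attached yet, instead of
     * dereferencing a NULL pointer. */
    static unsigned int req_dev(const struct io_req *req)
    {
        return req->bdev ? req->bdev->dev_id : 0;
    }

    int main(void)
    {
        struct blockdev sda = { .dev_id = 0x800010 };
        struct io_req with_dev = { .bdev = &sda };
        struct io_req without_dev = { .bdev = NULL };

        printf("dev=%#x dev=%#x\n", req_dev(&with_dev), req_dev(&without_dev));
        return 0;
    }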
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 7d8b5bc74185..c1bd8f1e8b94 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -5,10 +5,15 @@
 #define _TRACE_EXT4_H
 
 #include <linux/writeback.h>
-#include "../../../fs/ext4/ext4.h"
-#include "../../../fs/ext4/mballoc.h"
 #include <linux/tracepoint.h>
 
+struct ext4_allocation_context;
+struct ext4_allocation_request;
+struct ext4_prealloc_space;
+struct ext4_inode_info;
+
+#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
+
 TRACE_EVENT(ext4_free_inode,
     TP_PROTO(struct inode *inode),
 
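
The rewritten header no longer pulls in fs/ext4/ext4.h; it forward-declares the structs it only names by pointer and open-codes EXT4_I() via container_of(), which recovers a pointer to the enclosing structure from a pointer to one of its members. A self-contained sketch of the idiom (the struct names are simplified stand-ins, not ext4's real layout):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vfs_inode { unsigned long i_ino; };

    struct ext4_like_inode_info {
        unsigned int reserved_blocks;
        struct vfs_inode vfs_inode;    /* embedded member */
    };

    int main(void)
    {
        struct ext4_like_inode_info info = { .reserved_blocks = 42 };
        struct vfs_inode *inode = &info.vfs_inode;

        /* Walk back from the embedded member to its container. */
        struct ext4_like_inode_info *ei =
            container_of(inode, struct ext4_like_inode_info, vfs_inode);

        printf("reserved_blocks=%u\n", ei->reserved_blocks);
        return 0;
    }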
@@ -33,8 +38,8 @@ TRACE_EVENT(ext4_free_inode,
     ),
 
     TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
-          __entry->uid, __entry->gid,
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->mode, __entry->uid, __entry->gid,
           (unsigned long long) __entry->blocks)
 );
 
@@ -56,7 +61,8 @@ TRACE_EVENT(ext4_request_inode,
     ),
 
     TP_printk("dev %s dir %lu mode %d",
-          jbd2_dev_to_name(__entry->dev), __entry->dir, __entry->mode)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir,
+          __entry->mode)
 );
 
 TRACE_EVENT(ext4_allocate_inode,
@@ -79,7 +85,8 @@ TRACE_EVENT(ext4_allocate_inode,
     ),
 
     TP_printk("dev %s ino %lu dir %lu mode %d",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->dir, __entry->mode)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          (unsigned long) __entry->dir, __entry->mode)
 );
 
 TRACE_EVENT(ext4_write_begin,
@@ -106,8 +113,8 @@ TRACE_EVENT(ext4_write_begin,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u flags %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->flags)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->flags)
 );
 
 TRACE_EVENT(ext4_ordered_write_end,
@@ -133,8 +140,8 @@ TRACE_EVENT(ext4_ordered_write_end,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->copied)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_writeback_write_end,
@@ -160,8 +167,8 @@ TRACE_EVENT(ext4_writeback_write_end,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->copied)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_journalled_write_end,
@@ -186,8 +193,8 @@ TRACE_EVENT(ext4_journalled_write_end,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->copied)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_writepage,
@@ -209,7 +216,8 @@ TRACE_EVENT(ext4_writepage,
     ),
 
     TP_printk("dev %s ino %lu page_index %lu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->index)
 );
 
 TRACE_EVENT(ext4_da_writepages,
@@ -227,7 +235,6 @@ TRACE_EVENT(ext4_da_writepages,
         __field( char, nonblocking )
         __field( char, for_kupdate )
         __field( char, for_reclaim )
-        __field( char, for_writepages )
         __field( char, range_cyclic )
     ),
 
@@ -241,16 +248,50 @@ TRACE_EVENT(ext4_da_writepages,
         __entry->nonblocking = wbc->nonblocking;
         __entry->for_kupdate = wbc->for_kupdate;
         __entry->for_reclaim = wbc->for_reclaim;
-        __entry->for_writepages = wbc->for_writepages;
         __entry->range_cyclic = wbc->range_cyclic;
     ),
 
-    TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write,
+    TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
+          jbd2_dev_to_name(__entry->dev),
+          (unsigned long) __entry->ino, __entry->nr_to_write,
           __entry->pages_skipped, __entry->range_start,
           __entry->range_end, __entry->nonblocking,
           __entry->for_kupdate, __entry->for_reclaim,
-          __entry->for_writepages, __entry->range_cyclic)
+          __entry->range_cyclic)
+);
+
+TRACE_EVENT(ext4_da_write_pages,
+    TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+
+    TP_ARGS(inode, mpd),
+
+    TP_STRUCT__entry(
+        __field( dev_t, dev )
+        __field( ino_t, ino )
+        __field( __u64, b_blocknr )
+        __field( __u32, b_size )
+        __field( __u32, b_state )
+        __field( unsigned long, first_page )
+        __field( int, io_done )
+        __field( int, pages_written )
+    ),
+
+    TP_fast_assign(
+        __entry->dev = inode->i_sb->s_dev;
+        __entry->ino = inode->i_ino;
+        __entry->b_blocknr = mpd->b_blocknr;
+        __entry->b_size = mpd->b_size;
+        __entry->b_state = mpd->b_state;
+        __entry->first_page = mpd->first_page;
+        __entry->io_done = mpd->io_done;
+        __entry->pages_written = mpd->pages_written;
+    ),
+
+    TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->b_blocknr, __entry->b_size,
+          __entry->b_state, __entry->first_page,
+          __entry->io_done, __entry->pages_written)
 );
 
 TRACE_EVENT(ext4_da_writepages_result,
@@ -282,7 +323,8 @@ TRACE_EVENT(ext4_da_writepages_result,
     ),
 
     TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->ret,
+          jbd2_dev_to_name(__entry->dev),
+          (unsigned long) __entry->ino, __entry->ret,
           __entry->pages_written, __entry->pages_skipped,
           __entry->encountered_congestion, __entry->more_io,
           __entry->no_nrwrite_index_update)
@@ -311,8 +353,8 @@ TRACE_EVENT(ext4_da_write_begin,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u flags %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->flags)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->flags)
 );
 
 TRACE_EVENT(ext4_da_write_end,
@@ -338,8 +380,8 @@ TRACE_EVENT(ext4_da_write_end,
     ),
 
     TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-          __entry->copied)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -389,8 +431,8 @@ TRACE_EVENT(ext4_mb_new_inode_pa,
     ),
 
     TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
-          __entry->pa_len, __entry->pa_lstart)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
 );
 
 TRACE_EVENT(ext4_mb_new_group_pa,
@@ -417,8 +459,8 @@ TRACE_EVENT(ext4_mb_new_group_pa,
     ),
 
     TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
-          __entry->pa_len, __entry->pa_lstart)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
 );
 
 TRACE_EVENT(ext4_mb_release_inode_pa,
@@ -444,8 +486,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
     ),
 
     TP_printk("dev %s ino %lu block %llu count %u",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
-          __entry->count)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->block, __entry->count)
 );
 
 TRACE_EVENT(ext4_mb_release_group_pa,
@@ -490,7 +532,7 @@ TRACE_EVENT(ext4_discard_preallocations,
     ),
 
     TP_printk("dev %s ino %lu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
 );
 
 TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -545,8 +587,8 @@ TRACE_EVENT(ext4_request_blocks,
     ),
 
     TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
-          __entry->len,
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->flags, __entry->len,
           (unsigned long long) __entry->logical,
           (unsigned long long) __entry->goal,
           (unsigned long long) __entry->lleft,
@@ -589,8 +631,8 @@ TRACE_EVENT(ext4_allocate_blocks,
     ),
 
     TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
-          __entry->len, __entry->block,
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->flags, __entry->len, __entry->block,
           (unsigned long long) __entry->logical,
           (unsigned long long) __entry->goal,
           (unsigned long long) __entry->lleft,
@@ -623,8 +665,8 @@ TRACE_EVENT(ext4_free_blocks,
     ),
 
     TP_printk("dev %s ino %lu block %llu count %lu metadata %d",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
-          __entry->count, __entry->metadata)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->block, __entry->count, __entry->metadata)
 );
 
 TRACE_EVENT(ext4_sync_file,
@@ -647,8 +689,8 @@ TRACE_EVENT(ext4_sync_file,
     ),
 
     TP_printk("dev %s ino %ld parent %ld datasync %d ",
-          jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->parent,
-          __entry->datasync)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          (unsigned long) __entry->parent, __entry->datasync)
 );
 
 TRACE_EVENT(ext4_sync_fs,
@@ -671,6 +713,30 @@ TRACE_EVENT(ext4_sync_fs,
           __entry->wait)
 );
 
+TRACE_EVENT(ext4_alloc_da_blocks,
+    TP_PROTO(struct inode *inode),
+
+    TP_ARGS(inode),
+
+    TP_STRUCT__entry(
+        __field( dev_t, dev )
+        __field( ino_t, ino )
+        __field( unsigned int, data_blocks )
+        __field( unsigned int, meta_blocks )
+    ),
+
+    TP_fast_assign(
+        __entry->dev = inode->i_sb->s_dev;
+        __entry->ino = inode->i_ino;
+        __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+        __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+    ),
+
+    TP_printk("dev %s ino %lu data_blocks %u meta_blocks %u",
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+          __entry->data_blocks, __entry->meta_blocks)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1cb0c3aa11e6..b89f9db4a404 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -8,16 +8,17 @@
 #include <linux/interrupt.h>
 
 #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
 #define show_softirq_name(val) \
     __print_symbolic(val, \
              softirq_name(HI), \
              softirq_name(TIMER), \
              softirq_name(NET_TX), \
              softirq_name(NET_RX), \
              softirq_name(BLOCK), \
+             softirq_name(BLOCK_IOPOLL), \
              softirq_name(TASKLET), \
              softirq_name(SCHED), \
              softirq_name(HRTIMER), \
              softirq_name(RCU))
 
 /**
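
The new BLOCK_IOPOLL entry keeps this table in step with the kernel's softirq vector numbering; __print_symbolic() then resolves the numeric vector to its name at trace-output time by scanning a value/string table. A hedged userspace model of that lookup (table contents mirror the list above; the helper name is illustrative, not a kernel API):

    #include <stdio.h>

    struct sym { long val; const char *name; };

    static const struct sym softirqs[] = {
        { 0, "HI" }, { 1, "TIMER" }, { 2, "NET_TX" }, { 3, "NET_RX" },
        { 4, "BLOCK" }, { 5, "BLOCK_IOPOLL" }, { 6, "TASKLET" },
        { 7, "SCHED" }, { 8, "HRTIMER" }, { 9, "RCU" },
        { -1, NULL }    /* sentinel, like { -1, NULL } in ftrace.h */
    };

    /* Linear scan, as __print_symbolic() effectively does. */
    static const char *sym_name(long val, const struct sym *tbl)
    {
        for (; tbl->name; tbl++)
            if (tbl->val == val)
                return tbl->name;
        return "UNKNOWN";
    }

    int main(void)
    {
        printf("vec 5 -> %s\n", sym_name(5, softirqs));    /* BLOCK_IOPOLL */
        return 0;
    }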
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 10813fa0c8d0..b851f0b4701c 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -159,7 +159,7 @@ TRACE_EVENT(jbd2_submit_inode_data,
     ),
 
     TP_printk("dev %s ino %lu",
-          jbd2_dev_to_name(__entry->dev), __entry->ino)
+          jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
 );
 
 #endif /* _TRACE_JBD2_H */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 1493c541f9c4..eaf46bdd18a5 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -225,6 +225,169 @@ TRACE_EVENT(kmem_cache_free,
 
     TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
 );
+
+TRACE_EVENT(mm_page_free_direct,
+
+    TP_PROTO(struct page *page, unsigned int order),
+
+    TP_ARGS(page, order),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( unsigned int, order )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->order = order;
+    ),
+
+    TP_printk("page=%p pfn=%lu order=%d",
+            __entry->page,
+            page_to_pfn(__entry->page),
+            __entry->order)
+);
+
+TRACE_EVENT(mm_pagevec_free,
+
+    TP_PROTO(struct page *page, int cold),
+
+    TP_ARGS(page, cold),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( int, cold )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->cold = cold;
+    ),
+
+    TP_printk("page=%p pfn=%lu order=0 cold=%d",
+            __entry->page,
+            page_to_pfn(__entry->page),
+            __entry->cold)
+);
+
+TRACE_EVENT(mm_page_alloc,
+
+    TP_PROTO(struct page *page, unsigned int order,
+            gfp_t gfp_flags, int migratetype),
+
+    TP_ARGS(page, order, gfp_flags, migratetype),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( unsigned int, order )
+        __field( gfp_t, gfp_flags )
+        __field( int, migratetype )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->order = order;
+        __entry->gfp_flags = gfp_flags;
+        __entry->migratetype = migratetype;
+    ),
+
+    TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+        __entry->page,
+        page_to_pfn(__entry->page),
+        __entry->order,
+        __entry->migratetype,
+        show_gfp_flags(__entry->gfp_flags))
+);
+
+TRACE_EVENT(mm_page_alloc_zone_locked,
+
+    TP_PROTO(struct page *page, unsigned int order, int migratetype),
+
+    TP_ARGS(page, order, migratetype),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( unsigned int, order )
+        __field( int, migratetype )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->order = order;
+        __entry->migratetype = migratetype;
+    ),
+
+    TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+        __entry->page,
+        page_to_pfn(__entry->page),
+        __entry->order,
+        __entry->migratetype,
+        __entry->order == 0)
+);
+
+TRACE_EVENT(mm_page_pcpu_drain,
+
+    TP_PROTO(struct page *page, int order, int migratetype),
+
+    TP_ARGS(page, order, migratetype),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( int, order )
+        __field( int, migratetype )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->order = order;
+        __entry->migratetype = migratetype;
+    ),
+
+    TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
+        __entry->page,
+        page_to_pfn(__entry->page),
+        __entry->order,
+        __entry->migratetype)
+);
+
+TRACE_EVENT(mm_page_alloc_extfrag,
+
+    TP_PROTO(struct page *page,
+        int alloc_order, int fallback_order,
+        int alloc_migratetype, int fallback_migratetype),
+
+    TP_ARGS(page,
+        alloc_order, fallback_order,
+        alloc_migratetype, fallback_migratetype),
+
+    TP_STRUCT__entry(
+        __field( struct page *, page )
+        __field( int, alloc_order )
+        __field( int, fallback_order )
+        __field( int, alloc_migratetype )
+        __field( int, fallback_migratetype )
+    ),
+
+    TP_fast_assign(
+        __entry->page = page;
+        __entry->alloc_order = alloc_order;
+        __entry->fallback_order = fallback_order;
+        __entry->alloc_migratetype = alloc_migratetype;
+        __entry->fallback_migratetype = fallback_migratetype;
+    ),
+
+    TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
+        __entry->page,
+        page_to_pfn(__entry->page),
+        __entry->alloc_order,
+        __entry->fallback_order,
+        pageblock_order,
+        __entry->alloc_migratetype,
+        __entry->fallback_migratetype,
+        __entry->fallback_order < pageblock_order,
+        __entry->alloc_migratetype == __entry->fallback_migratetype)
+);
+
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
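
mm_page_alloc_extfrag records only the raw orders and migratetypes and leaves the derived flags to TP_printk: fragmenting is true when fallback_order < pageblock_order, and change_ownership is the comparison alloc_migratetype == fallback_migratetype, exactly as written above. A hedged sketch of that derivation (PAGEBLOCK_ORDER 9 is an assumed typical value, not something stated in this diff):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGEBLOCK_ORDER 9    /* assumption: common x86 configuration */

    /* Derive the two flags exactly as the TP_printk above does. */
    static void report(int fallback_order, int alloc_mt, int fallback_mt)
    {
        bool fragmenting = fallback_order < PAGEBLOCK_ORDER;
        bool change_ownership = alloc_mt == fallback_mt;

        printf("fallback_order=%d fragmenting=%d change_ownership=%d\n",
               fallback_order, fragmenting, change_ownership);
    }

    int main(void)
    {
        report(3, 0, 1);    /* low-order fallback: fragments a pageblock */
        report(9, 2, 2);    /* whole pageblock taken: no fragmentation */
        return 0;
    }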
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
new file mode 100644
index 000000000000..dbe108455275
--- /dev/null
+++ b/include/trace/events/kvm.h
@@ -0,0 +1,151 @@
+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_MAIN_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_FILE kvm
+
+#if defined(__KVM_HAVE_IOAPIC)
+TRACE_EVENT(kvm_set_irq,
+    TP_PROTO(unsigned int gsi, int level, int irq_source_id),
+    TP_ARGS(gsi, level, irq_source_id),
+
+    TP_STRUCT__entry(
+        __field( unsigned int, gsi )
+        __field( int, level )
+        __field( int, irq_source_id )
+    ),
+
+    TP_fast_assign(
+        __entry->gsi = gsi;
+        __entry->level = level;
+        __entry->irq_source_id = irq_source_id;
+    ),
+
+    TP_printk("gsi %u level %d source %d",
+          __entry->gsi, __entry->level, __entry->irq_source_id)
+);
+
+#define kvm_deliver_mode \
+    {0x0, "Fixed"}, \
+    {0x1, "LowPrio"}, \
+    {0x2, "SMI"}, \
+    {0x3, "Res3"}, \
+    {0x4, "NMI"}, \
+    {0x5, "INIT"}, \
+    {0x6, "SIPI"}, \
+    {0x7, "ExtINT"}
+
+TRACE_EVENT(kvm_ioapic_set_irq,
+    TP_PROTO(__u64 e, int pin, bool coalesced),
+    TP_ARGS(e, pin, coalesced),
+
+    TP_STRUCT__entry(
+        __field( __u64, e )
+        __field( int, pin )
+        __field( bool, coalesced )
+    ),
+
+    TP_fast_assign(
+        __entry->e = e;
+        __entry->pin = pin;
+        __entry->coalesced = coalesced;
+    ),
+
+    TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
+          __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
+          __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
+          (__entry->e & (1<<11)) ? "logical" : "physical",
+          (__entry->e & (1<<15)) ? "level" : "edge",
+          (__entry->e & (1<<16)) ? "|masked" : "",
+          __entry->coalesced ? " (coalesced)" : "")
+);
+
+TRACE_EVENT(kvm_msi_set_irq,
+    TP_PROTO(__u64 address, __u64 data),
+    TP_ARGS(address, data),
+
+    TP_STRUCT__entry(
+        __field( __u64, address )
+        __field( __u64, data )
+    ),
+
+    TP_fast_assign(
+        __entry->address = address;
+        __entry->data = data;
+    ),
+
+    TP_printk("dst %u vec %x (%s|%s|%s%s)",
+          (u8)(__entry->address >> 12), (u8)__entry->data,
+          __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
+          (__entry->address & (1<<2)) ? "logical" : "physical",
+          (__entry->data & (1<<15)) ? "level" : "edge",
+          (__entry->address & (1<<3)) ? "|rh" : "")
+);
+
+#define kvm_irqchips \
+    {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
+    {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
+    {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
+
+TRACE_EVENT(kvm_ack_irq,
+    TP_PROTO(unsigned int irqchip, unsigned int pin),
+    TP_ARGS(irqchip, pin),
+
+    TP_STRUCT__entry(
+        __field( unsigned int, irqchip )
+        __field( unsigned int, pin )
+    ),
+
+    TP_fast_assign(
+        __entry->irqchip = irqchip;
+        __entry->pin = pin;
+    ),
+
+    TP_printk("irqchip %s pin %u",
+          __print_symbolic(__entry->irqchip, kvm_irqchips),
+          __entry->pin)
+);
+
+
+
+#endif /* defined(__KVM_HAVE_IOAPIC) */
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+#define kvm_trace_symbol_mmio \
+    { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
+    { KVM_TRACE_MMIO_READ, "read" }, \
+    { KVM_TRACE_MMIO_WRITE, "write" }
+
+TRACE_EVENT(kvm_mmio,
+    TP_PROTO(int type, int len, u64 gpa, u64 val),
+    TP_ARGS(type, len, gpa, val),
+
+    TP_STRUCT__entry(
+        __field( u32, type )
+        __field( u32, len )
+        __field( u64, gpa )
+        __field( u64, val )
+    ),
+
+    TP_fast_assign(
+        __entry->type = type;
+        __entry->len = len;
+        __entry->gpa = gpa;
+        __entry->val = val;
+    ),
+
+    TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
+          __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
+          __entry->len, __entry->gpa, __entry->val)
+);
+
+#endif /* _TRACE_KVM_MAIN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
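
kvm_ioapic_set_irq and kvm_msi_set_irq store the raw 64-bit entry and decode destination, vector, delivery mode and trigger bits only at print time. A standalone sketch of the same field extraction, with the bit positions copied from the TP_printk expressions above (the sample value is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static const char *const modes[] = {
        "Fixed", "LowPrio", "SMI", "Res3", "NMI", "INIT", "SIPI", "ExtINT"
    };

    static void decode_ioapic_entry(uint64_t e)
    {
        printf("dst %x vec %u mode %s %s %s%s\n",
               (uint8_t)(e >> 56),            /* destination field */
               (uint8_t)e,                    /* vector */
               modes[(e >> 8) & 0x7],         /* delivery mode */
               (e & (1ull << 11)) ? "logical" : "physical",
               (e & (1ull << 15)) ? "level" : "edge",
               (e & (1ull << 16)) ? "|masked" : "");
    }

    int main(void)
    {
        /* dst 0x01, masked, level, logical, LowPrio, vector 0x31 */
        decode_ioapic_entry(0x0100000000018931ull);
        return 0;
    }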
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
new file mode 100644
index 000000000000..ea6d579261ad
--- /dev/null
+++ b/include/trace/events/power.h
@@ -0,0 +1,81 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#ifndef _TRACE_POWER_ENUM_
+#define _TRACE_POWER_ENUM_
+enum {
+    POWER_NONE = 0,
+    POWER_CSTATE = 1,
+    POWER_PSTATE = 2,
+};
+#endif
+
+
+
+TRACE_EVENT(power_start,
+
+    TP_PROTO(unsigned int type, unsigned int state),
+
+    TP_ARGS(type, state),
+
+    TP_STRUCT__entry(
+        __field( u64, type )
+        __field( u64, state )
+    ),
+
+    TP_fast_assign(
+        __entry->type = type;
+        __entry->state = state;
+    ),
+
+    TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
+);
+
+TRACE_EVENT(power_end,
+
+    TP_PROTO(int dummy),
+
+    TP_ARGS(dummy),
+
+    TP_STRUCT__entry(
+        __field( u64, dummy )
+    ),
+
+    TP_fast_assign(
+        __entry->dummy = 0xffff;
+    ),
+
+    TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
+
+);
+
+
+TRACE_EVENT(power_frequency,
+
+    TP_PROTO(unsigned int type, unsigned int state),
+
+    TP_ARGS(type, state),
+
+    TP_STRUCT__entry(
+        __field( u64, type )
+        __field( u64, state )
+    ),
+
+    TP_fast_assign(
+        __entry->type = type;
+        __entry->state = state;
+    ),
+
+    TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state)
+);
+
+#endif /* _TRACE_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
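
Each TRACE_EVENT(name, ...) above also expands into a trace_name() stub that instrumentation sites call, so a cpuidle-style caller would invoke trace_power_start(POWER_CSTATE, state). The userspace model below only mimics the TP_printk format to show how type and state flow through; it is an illustration, not the kernel API:

    #include <stdio.h>

    enum { POWER_NONE = 0, POWER_CSTATE = 1, POWER_PSTATE = 2 };

    /* Stand-in for the generated trace_power_start() stub; the real
     * one writes a ring-buffer record instead of printing. */
    static void trace_power_start(unsigned int type, unsigned int state)
    {
        printf("type=%lu state=%lu\n",
               (unsigned long)type, (unsigned long)state);
    }

    int main(void)
    {
        trace_power_start(POWER_CSTATE, 3);    /* e.g. entering C3 */
        trace_power_start(POWER_PSTATE, 1);    /* e.g. P-state change */
        return 0;
    }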
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index a581ef211ff5..4069c43f4187 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -344,6 +344,134 @@ TRACE_EVENT(sched_signal_send,
           __entry->sig, __entry->comm, __entry->pid)
 );
 
+/*
+ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
+ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
+ */
+
+/*
+ * Tracepoint for accounting wait time (time the task is runnable
+ * but not actually running due to scheduler contention).
+ */
+TRACE_EVENT(sched_stat_wait,
+
+    TP_PROTO(struct task_struct *tsk, u64 delay),
+
+    TP_ARGS(tsk, delay),
+
+    TP_STRUCT__entry(
+        __array( char, comm, TASK_COMM_LEN )
+        __field( pid_t, pid )
+        __field( u64, delay )
+    ),
+
+    TP_fast_assign(
+        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+        __entry->pid = tsk->pid;
+        __entry->delay = delay;
+    )
+    TP_perf_assign(
+        __perf_count(delay);
+    ),
+
+    TP_printk("task: %s:%d wait: %Lu [ns]",
+          __entry->comm, __entry->pid,
+          (unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting runtime (time the task is executing
+ * on a CPU).
+ */
+TRACE_EVENT(sched_stat_runtime,
+
+    TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+
+    TP_ARGS(tsk, runtime, vruntime),
+
+    TP_STRUCT__entry(
+        __array( char, comm, TASK_COMM_LEN )
+        __field( pid_t, pid )
+        __field( u64, runtime )
+        __field( u64, vruntime )
+    ),
+
+    TP_fast_assign(
+        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+        __entry->pid = tsk->pid;
+        __entry->runtime = runtime;
+        __entry->vruntime = vruntime;
+    )
+    TP_perf_assign(
+        __perf_count(runtime);
+    ),
+
+    TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
+          __entry->comm, __entry->pid,
+          (unsigned long long)__entry->runtime,
+          (unsigned long long)__entry->vruntime)
+);
+
+/*
+ * Tracepoint for accounting sleep time (time the task is not runnable,
+ * including iowait, see below).
+ */
+TRACE_EVENT(sched_stat_sleep,
+
+    TP_PROTO(struct task_struct *tsk, u64 delay),
+
+    TP_ARGS(tsk, delay),
+
+    TP_STRUCT__entry(
+        __array( char, comm, TASK_COMM_LEN )
+        __field( pid_t, pid )
+        __field( u64, delay )
+    ),
+
+    TP_fast_assign(
+        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+        __entry->pid = tsk->pid;
+        __entry->delay = delay;
+    )
+    TP_perf_assign(
+        __perf_count(delay);
+    ),
+
+    TP_printk("task: %s:%d sleep: %Lu [ns]",
+          __entry->comm, __entry->pid,
+          (unsigned long long)__entry->delay)
+);
+
+/*
+ * Tracepoint for accounting iowait time (time the task is not runnable
+ * due to waiting on IO to complete).
+ */
+TRACE_EVENT(sched_stat_iowait,
+
+    TP_PROTO(struct task_struct *tsk, u64 delay),
+
+    TP_ARGS(tsk, delay),
+
+    TP_STRUCT__entry(
+        __array( char, comm, TASK_COMM_LEN )
+        __field( pid_t, pid )
+        __field( u64, delay )
+    ),
+
+    TP_fast_assign(
+        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+        __entry->pid = tsk->pid;
+        __entry->delay = delay;
+    )
+    TP_perf_assign(
+        __perf_count(delay);
+    ),
+
+    TP_printk("task: %s:%d iowait: %Lu [ns]",
+          __entry->comm, __entry->pid,
+          (unsigned long long)__entry->delay)
+);
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
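
Note the TP_perf_assign sections: __perf_count(delay) weights each perf sample by the delay instead of counting one per event, so aggregating samples yields total wait/sleep/iowait nanoseconds. A small model of why that weighting matters (plain C, illustration only):

    #include <stdio.h>

    /* Weighting each event by its delay makes the aggregated counter
     * read as total nanoseconds, not as a number of wakeups. */
    int main(void)
    {
        unsigned long long delays_ns[] = { 1200, 90000, 450 };
        unsigned long long events = 0, total_ns = 0;

        for (int i = 0; i < 3; i++) {
            events += 1;                  /* plain event count */
            total_ns += delays_ns[i];     /* __perf_count(delay) */
        }
        printf("events=%llu total_wait=%lluns\n", events, total_ns);
        return 0;
    }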
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index e499863b9669..4b2be6dc76f0 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -5,6 +5,7 @@
 #define _TRACE_SKB_H
 
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <linux/tracepoint.h>
 
 /*
@@ -34,6 +35,25 @@ TRACE_EVENT(kfree_skb,
           __entry->skbaddr, __entry->protocol, __entry->location)
 );
 
+TRACE_EVENT(skb_copy_datagram_iovec,
+
+    TP_PROTO(const struct sk_buff *skb, int len),
+
+    TP_ARGS(skb, len),
+
+    TP_STRUCT__entry(
+        __field( const void *, skbaddr )
+        __field( int, len )
+    ),
+
+    TP_fast_assign(
+        __entry->skbaddr = skb;
+        __entry->len = len;
+    ),
+
+    TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
+);
+
 #endif /* _TRACE_SKB_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
new file mode 100644
index 000000000000..1844c48d640e
--- /dev/null
+++ b/include/trace/events/timer.h
@@ -0,0 +1,342 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer
+
+#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_H
+
+#include <linux/tracepoint.h>
+#include <linux/hrtimer.h>
+#include <linux/timer.h>
+
+/**
+ * timer_init - called when the timer is initialized
+ * @timer: pointer to struct timer_list
+ */
+TRACE_EVENT(timer_init,
+
+    TP_PROTO(struct timer_list *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+    ),
+
+    TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * timer_start - called when the timer is started
+ * @timer: pointer to struct timer_list
+ * @expires: the timer's expiry time
+ */
+TRACE_EVENT(timer_start,
+
+    TP_PROTO(struct timer_list *timer, unsigned long expires),
+
+    TP_ARGS(timer, expires),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+        __field( void *, function )
+        __field( unsigned long, expires )
+        __field( unsigned long, now )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+        __entry->function = timer->function;
+        __entry->expires = expires;
+        __entry->now = jiffies;
+    ),
+
+    TP_printk("timer %p: func %pf, expires %lu, timeout %ld",
+          __entry->timer, __entry->function, __entry->expires,
+          (long)__entry->expires - __entry->now)
+);
+
+/**
+ * timer_expire_entry - called immediately before the timer callback
+ * @timer: pointer to struct timer_list
+ *
+ * Allows one to determine the timer latency.
+ */
+TRACE_EVENT(timer_expire_entry,
+
+    TP_PROTO(struct timer_list *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+        __field( unsigned long, now )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+        __entry->now = jiffies;
+    ),
+
+    TP_printk("timer %p: now %lu", __entry->timer, __entry->now)
+);
+
+/**
+ * timer_expire_exit - called immediately after the timer callback returns
+ * @timer: pointer to struct timer_list
+ *
+ * When used in combination with the timer_expire_entry tracepoint we can
+ * determine the runtime of the timer callback function.
+ *
+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
+ * be invalid. We solely track the pointer.
+ */
+TRACE_EVENT(timer_expire_exit,
+
+    TP_PROTO(struct timer_list *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+    ),
+
+    TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * timer_cancel - called when the timer is canceled
+ * @timer: pointer to struct timer_list
+ */
+TRACE_EVENT(timer_cancel,
+
+    TP_PROTO(struct timer_list *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+    ),
+
+    TP_printk("timer %p", __entry->timer)
+);
+
+/**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @timer: pointer to struct hrtimer
+ * @clockid: the hrtimer's clock
+ * @mode: the hrtimer's mode
+ */
+TRACE_EVENT(hrtimer_init,
+
+    TP_PROTO(struct hrtimer *timer, clockid_t clockid,
+         enum hrtimer_mode mode),
+
+    TP_ARGS(timer, clockid, mode),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+        __field( clockid_t, clockid )
+        __field( enum hrtimer_mode, mode )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+        __entry->clockid = clockid;
+        __entry->mode = mode;
+    ),
+
+    TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer,
+          __entry->clockid == CLOCK_REALTIME ?
+          "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+          __entry->mode == HRTIMER_MODE_ABS ?
+          "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+);
+
+/**
+ * hrtimer_start - called when the hrtimer is started
+ * @timer: pointer to struct hrtimer
+ */
+TRACE_EVENT(hrtimer_start,
+
+    TP_PROTO(struct hrtimer *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+        __field( void *, function )
+        __field( s64, expires )
+        __field( s64, softexpires )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+        __entry->function = timer->function;
+        __entry->expires = hrtimer_get_expires(timer).tv64;
+        __entry->softexpires = hrtimer_get_softexpires(timer).tv64;
+    ),
+
+    TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu",
+          __entry->timer, __entry->function,
+          (unsigned long long)ktime_to_ns((ktime_t) {
+                  .tv64 = __entry->expires }),
+          (unsigned long long)ktime_to_ns((ktime_t) {
+                  .tv64 = __entry->softexpires }))
+);
+
+/**
+ * hrtimer_expire_entry - called immediately before the hrtimer callback
+ * @timer: pointer to struct hrtimer
+ * @now: pointer to variable which contains current time of the
+ *  timer's base.
+ *
+ * Allows one to determine the timer latency.
+ */
+TRACE_EVENT(hrtimer_expire_entry,
+
+    TP_PROTO(struct hrtimer *timer, ktime_t *now),
+
+    TP_ARGS(timer, now),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+        __field( s64, now )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+        __entry->now = now->tv64;
+    ),
+
+    TP_printk("hrtimer %p, now %llu", __entry->timer,
+          (unsigned long long)ktime_to_ns((ktime_t) {
+                  .tv64 = __entry->now }))
+);
+
+/**
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer: pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
+ */
+TRACE_EVENT(hrtimer_expire_exit,
+
+    TP_PROTO(struct hrtimer *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+    ),
+
+    TP_printk("hrtimer %p", __entry->timer)
+);
+
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @timer: pointer to struct hrtimer
+ */
+TRACE_EVENT(hrtimer_cancel,
+
+    TP_PROTO(struct hrtimer *timer),
+
+    TP_ARGS(timer),
+
+    TP_STRUCT__entry(
+        __field( void *, timer )
+    ),
+
+    TP_fast_assign(
+        __entry->timer = timer;
+    ),
+
+    TP_printk("hrtimer %p", __entry->timer)
+);
+
+/**
+ * itimer_state - called when itimer is started or canceled
+ * @which: name of the interval timer
+ * @value: the itimer's value, itimer is canceled if value->it_value is
+ *  zero, otherwise it is started
+ * @expires: the itimer's expiry time
+ */
+TRACE_EVENT(itimer_state,
+
+    TP_PROTO(int which, const struct itimerval *const value,
+         cputime_t expires),
+
+    TP_ARGS(which, value, expires),
+
+    TP_STRUCT__entry(
+        __field( int, which )
+        __field( cputime_t, expires )
+        __field( long, value_sec )
+        __field( long, value_usec )
+        __field( long, interval_sec )
+        __field( long, interval_usec )
+    ),
+
+    TP_fast_assign(
+        __entry->which = which;
+        __entry->expires = expires;
+        __entry->value_sec = value->it_value.tv_sec;
+        __entry->value_usec = value->it_value.tv_usec;
+        __entry->interval_sec = value->it_interval.tv_sec;
+        __entry->interval_usec = value->it_interval.tv_usec;
+    ),
+
+    TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu",
+          __entry->which, __entry->expires,
+          __entry->value_sec, __entry->value_usec,
+          __entry->interval_sec, __entry->interval_usec)
+);
+
+/**
+ * itimer_expire - called when itimer expires
+ * @which: type of the interval timer
+ * @pid: pid of the process which owns the timer
+ * @now: current time, used to calculate the latency of itimer
+ */
+TRACE_EVENT(itimer_expire,
+
+    TP_PROTO(int which, struct pid *pid, cputime_t now),
+
+    TP_ARGS(which, pid, now),
+
+    TP_STRUCT__entry(
+        __field( int, which )
+        __field( pid_t, pid )
+        __field( cputime_t, now )
+    ),
+
+    TP_fast_assign(
+        __entry->which = which;
+        __entry->now = now;
+        __entry->pid = pid_nr(pid);
+    ),
+
+    TP_printk("which %d, pid %d, now %lu", __entry->which,
+          (int) __entry->pid, __entry->now)
+);
+
+#endif /* _TRACE_TIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
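
The expire_entry/expire_exit pairs exist so a tracer can subtract timestamps: entry alone gives the latency against the programmed expiry, and the entry-to-exit difference gives the callback runtime. A hedged userspace sketch of the runtime measurement (clock_gettime stands in for the trace clock; the callback is a dummy):

    #include <stdio.h>
    #include <time.h>

    static unsigned long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void callback(void)
    {
        for (volatile int i = 0; i < 1000000; i++)
            ;    /* stand-in for timer work */
    }

    int main(void)
    {
        /* entry/exit pair, as timer_expire_entry/_exit provide */
        unsigned long long t_entry = now_ns();
        callback();
        unsigned long long t_exit = now_ns();

        printf("callback runtime: %llu ns\n", t_exit - t_entry);
        return 0;
    }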
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5d3df2a5049d..54d02c06ae7e 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...) \
     ({ \
-        static const struct trace_print_flags flags[] = \
+        static const struct trace_print_flags __flags[] = \
           { flag_array, { -1, NULL }}; \
-        ftrace_print_flags_seq(p, delim, flag, flags); \
+        ftrace_print_flags_seq(p, delim, flag, __flags); \
     })
 
 #undef __print_symbolic
@@ -254,7 +254,7 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-enum print_line_t \
+static enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 { \
     struct trace_seq *s = &iter->seq; \
@@ -317,7 +317,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
-int \
+static int \
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
     struct ftrace_raw_##call field; \
@@ -378,24 +378,18 @@ static inline int ftrace_get_offsets_##call( \
 #ifdef CONFIG_EVENT_PROFILE
 
 /*
- * Generate the functions needed for tracepoint perf_counter support.
+ * Generate the functions needed for tracepoint perf_event support.
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- *     int ret = 0;
- *
- *     if (!atomic_inc_return(&event_call->profile_count))
- *         ret = register_trace_<call>(ftrace_profile_<call>);
- *
- *     return ret;
+ *     return register_trace_<call>(ftrace_profile_<call>);
  * }
  *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
  * {
- *     if (atomic_add_negative(-1, &event->call->profile_count))
- *         unregister_trace_<call>(ftrace_profile_<call>);
+ *     unregister_trace_<call>(ftrace_profile_<call>);
  * }
  *
  */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
 \
 static void ftrace_profile_##call(proto); \
 \
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\
 { \
-    int ret = 0; \
-    \
-    if (!atomic_inc_return(&event_call->profile_count)) \
-        ret = register_trace_##call(ftrace_profile_##call); \
-    \
-    return ret; \
+    return register_trace_##call(ftrace_profile_##call); \
 } \
 \
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\
 { \
-    if (atomic_add_negative(-1, &event_call->profile_count)) \
-        unregister_trace_##call(ftrace_profile_##call); \
+    unregister_trace_##call(ftrace_profile_##call); \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -656,15 +644,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * {
  *    struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *    struct ftrace_event_call *event_call = &event_<call>;
- *    extern void perf_tpcounter_event(int, u64, u64, void *, int);
+ *    extern void perf_tp_event(int, u64, u64, void *, int);
  *    struct ftrace_raw_##call *entry;
  *    u64 __addr = 0, __count = 1;
  *    unsigned long irq_flags;
+ *    struct trace_entry *ent;
  *    int __entry_size;
  *    int __data_size;
+ *    int __cpu
  *    int pc;
  *
- *    local_save_flags(irq_flags);
  *    pc = preempt_count();
  *
  *    __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *             sizeof(u64));
  *    __entry_size -= sizeof(u32);
  *
- *    do {
- *        char raw_data[__entry_size]; <- allocate our sample in the stack
- *        struct trace_entry *ent;
+ *    // Protect the non nmi buffer
+ *    // This also protects the rcu read side
+ *    local_irq_save(irq_flags);
+ *    __cpu = smp_processor_id();
+ *
+ *    if (in_nmi())
+ *        raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *    else
+ *        raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *    if (!raw_data)
+ *        goto end;
  *
- *        zero dead bytes from alignment to avoid stack leak to userspace:
+ *    raw_data = per_cpu_ptr(raw_data, __cpu);
  *
- *        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *        entry = (struct ftrace_raw_<call> *)raw_data;
- *        ent = &entry->ent;
- *        tracing_generic_entry_update(ent, irq_flags, pc);
- *        ent->type = event_call->id;
+ *    //zero dead bytes from alignment to avoid stack leak to userspace:
+ *    *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *    entry = (struct ftrace_raw_<call> *)raw_data;
+ *    ent = &entry->ent;
+ *    tracing_generic_entry_update(ent, irq_flags, pc);
+ *    ent->type = event_call->id;
  *
- *        <tstruct> <- do some jobs with dynamic arrays
+ *    <tstruct> <- do some jobs with dynamic arrays
  *
- *        <assign> <- affect our values
+ *    <assign> <- affect our values
  *
- *    perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *             __entry_size); <- submit them to perf counter
- *    } while (0);
+ *    perf_tp_event(event_call->id, __addr, __count, entry,
+ *             __entry_size); <- submit them to perf counter
  *
  * }
  */
@@ -712,15 +710,17 @@ static void ftrace_profile_##call(proto) \
 { \
     struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
     struct ftrace_event_call *event_call = &event_##call; \
-    extern void perf_tpcounter_event(int, u64, u64, void *, int); \
+    extern void perf_tp_event(int, u64, u64, void *, int); \
     struct ftrace_raw_##call *entry; \
     u64 __addr = 0, __count = 1; \
     unsigned long irq_flags; \
+    struct trace_entry *ent; \
     int __entry_size; \
     int __data_size; \
+    char *raw_data; \
+    int __cpu; \
     int pc; \
 \
-    local_save_flags(irq_flags); \
     pc = preempt_count(); \
 \
     __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto) \
              sizeof(u64)); \
     __entry_size -= sizeof(u32); \
 \
-    do { \
-        char raw_data[__entry_size]; \
-        struct trace_entry *ent; \
+    if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+              "profile buffer not large enough")) \
+        return; \
+ \
+    local_irq_save(irq_flags); \
+    __cpu = smp_processor_id(); \
 \
-        *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
-        entry = (struct ftrace_raw_##call *)raw_data; \
-        ent = &entry->ent; \
-        tracing_generic_entry_update(ent, irq_flags, pc); \
-        ent->type = event_call->id; \
+    if (in_nmi()) \
+        raw_data = rcu_dereference(trace_profile_buf_nmi); \
+    else \
+        raw_data = rcu_dereference(trace_profile_buf); \
 \
-        tstruct \
+    if (!raw_data) \
+        goto end; \
 \
-        { assign; } \
+    raw_data = per_cpu_ptr(raw_data, __cpu); \
 \
-        perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+    *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+    entry = (struct ftrace_raw_##call *)raw_data; \
+    ent = &entry->ent; \
+    tracing_generic_entry_update(ent, irq_flags, pc); \
+    ent->type = event_call->id; \
+ \
+    tstruct \
+ \
+    { assign; } \
+ \
+    perf_tp_event(event_call->id, __addr, __count, entry, \
              __entry_size); \
-    } while (0); \
+ \
+end: \
+    local_irq_restore(irq_flags); \
 \
 }
 
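
Both the comment block and the macro compute __entry_size as the payload rounded so that a u32 size header followed by the entry ends on a u64 boundary: ALIGN(__data_size + sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32). The trailing pad bytes are the ones zeroed by the *(u64 *) store to avoid leaking stack or buffer contents. A small sketch verifying the arithmetic (standalone C, not kernel code):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Mirror of the macro's size computation: header + entry rounded
     * to a u64 boundary, header subtracted back out. */
    static unsigned int entry_size(unsigned int data_size,
                                   unsigned int raw_size)
    {
        unsigned int sz = ALIGN(data_size + raw_size + sizeof(unsigned int),
                                sizeof(unsigned long long));
        return sz - sizeof(unsigned int);
    }

    int main(void)
    {
        for (unsigned int d = 0; d < 4; d++)
            printf("data=%u raw=13 -> entry_size=%u\n",
                   d, entry_size(d, 13));
        return 0;
    }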