Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/block.h     4
-rw-r--r--   include/trace/events/ext4.h    142
-rw-r--r--   include/trace/events/irq.h      21
-rw-r--r--   include/trace/events/jbd2.h      2
-rw-r--r--   include/trace/events/power.h    81
-rw-r--r--   include/trace/events/sched.h    33
-rw-r--r--   include/trace/ftrace.h         119
7 files changed, 301 insertions, 101 deletions
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9a74b468a229..d86af94691c2 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -171,6 +171,7 @@ TRACE_EVENT(block_rq_complete,
 		  (unsigned long long)__entry->sector,
 		  __entry->nr_sector, __entry->errors)
 );
+
 TRACE_EVENT(block_bio_bounce,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -186,7 +187,8 @@ TRACE_EVENT(block_bio_bounce,
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->dev		= bio->bi_bdev ?
+					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
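
For context, a minimal sketch (not part of the patch) of how this tracepoint is reached: TRACE_EVENT(block_bio_bounce, ...) generates a trace_block_bio_bounce() static inline that the bounce path calls with the queue and bio, so the new "bio->bi_bdev ? ... : 0" guard presumably covers callers that pass a bio whose bi_bdev has not been set yet. The caller below is hypothetical, shown only to illustrate the case the guard handles.

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Hypothetical caller, for illustration only. */
static void bounce_one_bio(struct request_queue *q, struct bio *bio)
{
	/* With the fix, __entry->dev records 0 instead of dereferencing a NULL bi_bdev. */
	trace_block_bio_bounce(q, bio);
	/* ... actual bouncing work would follow here ... */
}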
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 8d433c4e3709..c1bd8f1e8b94 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -5,10 +5,15 @@
 #define _TRACE_EXT4_H
 
 #include <linux/writeback.h>
-#include "../../../fs/ext4/ext4.h"
-#include "../../../fs/ext4/mballoc.h"
 #include <linux/tracepoint.h>
 
+struct ext4_allocation_context;
+struct ext4_allocation_request;
+struct ext4_prealloc_space;
+struct ext4_inode_info;
+
+#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
+
 TRACE_EVENT(ext4_free_inode,
 	TP_PROTO(struct inode *inode),
 
@@ -33,8 +38,8 @@ TRACE_EVENT(ext4_free_inode,
 	),
 
 	TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->mode,
-		  __entry->uid, __entry->gid,
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->mode, __entry->uid, __entry->gid,
 		  (unsigned long long) __entry->blocks)
 );
 
@@ -56,7 +61,8 @@ TRACE_EVENT(ext4_request_inode,
 	),
 
 	TP_printk("dev %s dir %lu mode %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->dir, __entry->mode)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir,
+		  __entry->mode)
 );
 
 TRACE_EVENT(ext4_allocate_inode,
@@ -79,7 +85,8 @@ TRACE_EVENT(ext4_allocate_inode,
 	),
 
 	TP_printk("dev %s ino %lu dir %lu mode %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->dir, __entry->mode)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  (unsigned long) __entry->dir, __entry->mode)
 );
 
 TRACE_EVENT(ext4_write_begin,
@@ -106,8 +113,8 @@ TRACE_EVENT(ext4_write_begin,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u flags %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->flags)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->flags)
 );
 
 TRACE_EVENT(ext4_ordered_write_end,
@@ -133,8 +140,8 @@ TRACE_EVENT(ext4_ordered_write_end,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->copied)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_writeback_write_end,
@@ -160,8 +167,8 @@ TRACE_EVENT(ext4_writeback_write_end,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->copied)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_journalled_write_end,
@@ -186,8 +193,8 @@ TRACE_EVENT(ext4_journalled_write_end,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->copied)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_writepage,
@@ -209,7 +216,8 @@ TRACE_EVENT(ext4_writepage,
 	),
 
 	TP_printk("dev %s ino %lu page_index %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->index)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->index)
 );
 
 TRACE_EVENT(ext4_da_writepages,
@@ -243,14 +251,49 @@ TRACE_EVENT(ext4_da_writepages,
 		__entry->range_cyclic = wbc->range_cyclic;
 	),
 
-	TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write,
+	TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
+		  jbd2_dev_to_name(__entry->dev),
+		  (unsigned long) __entry->ino, __entry->nr_to_write,
 		  __entry->pages_skipped, __entry->range_start,
 		  __entry->range_end, __entry->nonblocking,
 		  __entry->for_kupdate, __entry->for_reclaim,
 		  __entry->range_cyclic)
 );
 
+TRACE_EVENT(ext4_da_write_pages,
+	TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
+
+	TP_ARGS(inode, mpd),
+
+	TP_STRUCT__entry(
+		__field( dev_t,	dev			)
+		__field( ino_t,	ino			)
+		__field( __u64,	b_blocknr		)
+		__field( __u32,	b_size			)
+		__field( __u32,	b_state			)
+		__field( unsigned long,	first_page	)
+		__field( int,	io_done			)
+		__field( int,	pages_written		)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= inode->i_sb->s_dev;
+		__entry->ino		= inode->i_ino;
+		__entry->b_blocknr	= mpd->b_blocknr;
+		__entry->b_size		= mpd->b_size;
+		__entry->b_state	= mpd->b_state;
+		__entry->first_page	= mpd->first_page;
+		__entry->io_done	= mpd->io_done;
+		__entry->pages_written	= mpd->pages_written;
+	),
+
+	TP_printk("dev %s ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d",
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->b_blocknr, __entry->b_size,
+		  __entry->b_state, __entry->first_page,
+		  __entry->io_done, __entry->pages_written)
+);
+
 TRACE_EVENT(ext4_da_writepages_result,
 	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
 		 int ret, int pages_written),
@@ -280,7 +323,8 @@ TRACE_EVENT(ext4_da_writepages_result,
 	),
 
 	TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->ret,
+		  jbd2_dev_to_name(__entry->dev),
+		  (unsigned long) __entry->ino, __entry->ret,
 		  __entry->pages_written, __entry->pages_skipped,
 		  __entry->encountered_congestion, __entry->more_io,
 		  __entry->no_nrwrite_index_update)
@@ -309,8 +353,8 @@ TRACE_EVENT(ext4_da_write_begin,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u flags %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->flags)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->flags)
 );
 
 TRACE_EVENT(ext4_da_write_end,
@@ -336,8 +380,8 @@ TRACE_EVENT(ext4_da_write_end,
 	),
 
 	TP_printk("dev %s ino %lu pos %llu len %u copied %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pos, __entry->len,
-		  __entry->copied)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pos, __entry->len, __entry->copied)
 );
 
 TRACE_EVENT(ext4_discard_blocks,
@@ -387,8 +431,8 @@ TRACE_EVENT(ext4_mb_new_inode_pa,
 	),
 
 	TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
-		  __entry->pa_len, __entry->pa_lstart)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
 );
 
 TRACE_EVENT(ext4_mb_new_group_pa,
@@ -415,8 +459,8 @@ TRACE_EVENT(ext4_mb_new_group_pa,
 	),
 
 	TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->pa_pstart,
-		  __entry->pa_len, __entry->pa_lstart)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
 );
 
 TRACE_EVENT(ext4_mb_release_inode_pa,
@@ -442,8 +486,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
 	),
 
 	TP_printk("dev %s ino %lu block %llu count %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
-		  __entry->count)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->block, __entry->count)
 );
 
 TRACE_EVENT(ext4_mb_release_group_pa,
@@ -488,7 +532,7 @@ TRACE_EVENT(ext4_discard_preallocations,
 	),
 
 	TP_printk("dev %s ino %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
 );
 
 TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -543,8 +587,8 @@ TRACE_EVENT(ext4_request_blocks,
 	),
 
 	TP_printk("dev %s ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
-		  __entry->len,
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->flags, __entry->len,
 		  (unsigned long long) __entry->logical,
 		  (unsigned long long) __entry->goal,
 		  (unsigned long long) __entry->lleft,
@@ -587,8 +631,8 @@ TRACE_EVENT(ext4_allocate_blocks,
 	),
 
 	TP_printk("dev %s ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->flags,
-		  __entry->len, __entry->block,
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->flags, __entry->len, __entry->block,
 		  (unsigned long long) __entry->logical,
 		  (unsigned long long) __entry->goal,
 		  (unsigned long long) __entry->lleft,
@@ -621,8 +665,8 @@ TRACE_EVENT(ext4_free_blocks,
 	),
 
 	TP_printk("dev %s ino %lu block %llu count %lu metadata %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->block,
-		  __entry->count, __entry->metadata)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->block, __entry->count, __entry->metadata)
 );
 
 TRACE_EVENT(ext4_sync_file,
@@ -645,8 +689,8 @@ TRACE_EVENT(ext4_sync_file,
 	),
 
 	TP_printk("dev %s ino %ld parent %ld datasync %d ",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->parent,
-		  __entry->datasync)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  (unsigned long) __entry->parent, __entry->datasync)
 );
 
 TRACE_EVENT(ext4_sync_fs,
@@ -669,6 +713,30 @@ TRACE_EVENT(ext4_sync_fs,
 		  __entry->wait)
 );
 
+TRACE_EVENT(ext4_alloc_da_blocks,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field( dev_t,	dev			)
+		__field( ino_t,	ino			)
+		__field( unsigned int,	data_blocks	)
+		__field( unsigned int,	meta_blocks	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= inode->i_sb->s_dev;
+		__entry->ino	= inode->i_ino;
+		__entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+		__entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+	),
+
+	TP_printk("dev %s ino %lu data_blocks %u meta_blocks %u",
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+		  __entry->data_blocks, __entry->meta_blocks)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
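
As an aside, each TRACE_EVENT(name, ...) above generates a trace_<name>() static inline for fs/ext4 to call. A rough, hedged sketch of how the two events added by this patch might be invoked; the wrapper function below is invented for illustration and is not a call site from this series.

#include <trace/events/ext4.h>

/* Illustrative only: fire the new delalloc tracepoints from ext4 code. */
static void example_da_trace(struct inode *inode, struct mpage_da_data *mpd)
{
	trace_ext4_da_write_pages(inode, mpd);	/* one mpage_da_data writeback pass */
	trace_ext4_alloc_da_blocks(inode);	/* reserved data/meta block counts */
}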
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index 1cb0c3aa11e6..b89f9db4a404 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -8,16 +8,17 @@
 #include <linux/interrupt.h>
 
 #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
 #define show_softirq_name(val)				\
 	__print_symbolic(val,				\
 			 softirq_name(HI),		\
 			 softirq_name(TIMER),		\
 			 softirq_name(NET_TX),		\
 			 softirq_name(NET_RX),		\
 			 softirq_name(BLOCK),		\
-			 softirq_name(TASKLET),	\
-			 softirq_name(SCHED),		\
-			 softirq_name(HRTIMER),	\
+			 softirq_name(BLOCK_IOPOLL),	\
+			 softirq_name(TASKLET),	\
+			 softirq_name(SCHED),		\
+			 softirq_name(HRTIMER),	\
 			 softirq_name(RCU))
 
 /**
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 10813fa0c8d0..b851f0b4701c 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -159,7 +159,7 @@ TRACE_EVENT(jbd2_submit_inode_data,
 	),
 
 	TP_printk("dev %s ino %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->ino)
+		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
 );
 
 #endif /* _TRACE_JBD2_H */
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
new file mode 100644
index 000000000000..ea6d579261ad
--- /dev/null
+++ b/include/trace/events/power.h
@@ -0,0 +1,81 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#ifndef _TRACE_POWER_ENUM_
+#define _TRACE_POWER_ENUM_
+enum {
+	POWER_NONE = 0,
+	POWER_CSTATE = 1,
+	POWER_PSTATE = 2,
+};
+#endif
+
+
+
+TRACE_EVENT(power_start,
+
+	TP_PROTO(unsigned int type, unsigned int state),
+
+	TP_ARGS(type, state),
+
+	TP_STRUCT__entry(
+		__field( u64, type )
+		__field( u64, state )
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->state = state;
+	),
+
+	TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
+);
+
+TRACE_EVENT(power_end,
+
+	TP_PROTO(int dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field( u64, dummy )
+	),
+
+	TP_fast_assign(
+		__entry->dummy = 0xffff;
+	),
+
+	TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
+
+);
+
+
+TRACE_EVENT(power_frequency,
+
+	TP_PROTO(unsigned int type, unsigned int state),
+
+	TP_ARGS(type, state),
+
+	TP_STRUCT__entry(
+		__field( u64, type )
+		__field( u64, state )
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->state = state;
+	),
+
+	TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state)
+);
+
+#endif /* _TRACE_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
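
The new power events are meant to be fired from idle and frequency-scaling code via the generated trace_power_start(), trace_power_end() and trace_power_frequency() inlines. A hedged sketch of typical usage; the functions below are invented for illustration, the real consumers would be cpuidle/cpufreq drivers.

#include <trace/events/power.h>

/* Illustrative idle loop: mark C-state entry and exit. */
static void example_enter_idle(unsigned int cstate)
{
	trace_power_start(POWER_CSTATE, cstate);
	/* ... halt until wakeup ... */
	trace_power_end(0);
}

/* Illustrative frequency-change notification. */
static void example_set_frequency(unsigned int khz)
{
	trace_power_frequency(POWER_PSTATE, khz);
}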
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b48f1ad7c946..4069c43f4187 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -380,6 +380,39 @@ TRACE_EVENT(sched_stat_wait,
 );
 
 /*
+ * Tracepoint for accounting runtime (time the task is executing
+ * on a CPU).
+ */
+TRACE_EVENT(sched_stat_runtime,
+
+	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+
+	TP_ARGS(tsk, runtime, vruntime),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( u64,	runtime			)
+		__field( u64,	vruntime		)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->runtime	= runtime;
+		__entry->vruntime	= vruntime;
+	)
+	TP_perf_assign(
+		__perf_count(runtime);
+	),
+
+	TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]",
+			__entry->comm, __entry->pid,
+			(unsigned long long)__entry->runtime,
+			(unsigned long long)__entry->vruntime)
+);
+
+/*
  * Tracepoint for accounting sleep time (time the task is not runnable,
  * including iowait, see below).
  */
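
sched_stat_runtime follows the same shape as the other sched_stat_* events: the scheduler calls the generated trace_sched_stat_runtime() hook when it accounts execution time, and the TP_perf_assign()/__perf_count(runtime) pair lets perf weight samples by the runtime delta rather than counting one event per call. A sketch of the expected call shape; the surrounding accounting function is assumed for illustration and is not part of this patch.

#include <trace/events/sched.h>

/* Illustrative accounting step, roughly where CFS updates a task's runtime. */
static void account_runtime(struct task_struct *curr, u64 delta_exec, u64 vruntime)
{
	trace_sched_stat_runtime(curr, delta_exec, vruntime);
}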
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 308bafd93325..a0361cb69769 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...)			\
 	({								\
-		static const struct trace_print_flags flags[] =		\
+		static const struct trace_print_flags __flags[] =	\
 			{ flag_array, { -1, NULL }};			\
-		ftrace_print_flags_seq(p, delim, flag, flags);		\
+		ftrace_print_flags_seq(p, delim, flag, __flags);	\
 	})
 
 #undef __print_symbolic
@@ -254,7 +254,7 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
-enum print_line_t							\
+static enum print_line_t						\
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
 {									\
 	struct trace_seq *s = &iter->seq;				\
@@ -317,7 +317,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
-int									\
+static int								\
 ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
 {									\
 	struct ftrace_raw_##call field;					\
@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- * 	int ret = 0;
- *
- * 	if (!atomic_inc_return(&event_call->profile_count))
- * 		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- * 	return ret;
+ * 	return register_trace_<call>(ftrace_profile_<call>);
  * }
  *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
  * {
- * 	if (atomic_add_negative(-1, &event->call->profile_count))
- * 		unregister_trace_<call>(ftrace_profile_<call>);
+ * 	unregister_trace_<call>(ftrace_profile_<call>);
  * }
  *
  */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(void)				\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -660,11 +648,12 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	struct ftrace_raw_##call *entry;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
  *	int __entry_size;
  *	int __data_size;
+ *	int __cpu
  *	int pc;
  *
- *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +664,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *			     sizeof(u64));
  *	__entry_size -= sizeof(u32);
  *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
+ *	// Protect the non nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *	if (!raw_data)
+ *		goto end;
  *
- *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
  *
- *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
+ *	//zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
  *
  *	<tstruct> <- do some jobs with dynamic arrays
  *
  *	<assign> <- affect our values
  *
  *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
  *			     __entry_size); <- submit them to perf counter
- * } while (0);
  *
  * }
  */
@@ -716,11 +714,13 @@ static void ftrace_profile_##call(proto) \
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +728,38 @@ static void ftrace_profile_##call(proto) \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
+									\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		tstruct							\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		{ assign; }						\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tpcounter_event(event_call->id, __addr, __count, entry,	\
 			     __entry_size);				\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
 
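
The ftrace.h rework above stops allocating the perf sample on the stack and instead copies it into a shared per-cpu buffer (trace_profile_buf / trace_profile_buf_nmi) looked up under local_irq_save() plus rcu_dereference(). A condensed sketch of that lookup pattern follows; it assumes the buffers are percpu allocations published elsewhere in the tracing core, which is not shown in this header, and is only illustrative.

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/hardirq.h>

extern char *trace_profile_buf;		/* buffer for normal contexts */
extern char *trace_profile_buf_nmi;	/* separate buffer for NMI context */

/*
 * Illustrative lookup: pick the right buffer for the current context and CPU.
 * Must be called with interrupts disabled so the CPU cannot change and the
 * RCU-protected pointer stays valid for the duration of the copy.
 */
static char *get_profile_buffer(void)
{
	char *raw_data;

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		return NULL;

	/* The published pointer refers to a percpu allocation. */
	return per_cpu_ptr(raw_data, smp_processor_id());
}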