Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/define_trace.h                                      11
-rw-r--r--   include/trace/events/bkl.h                                        61
-rw-r--r--   include/trace/events/block.h                                     366
-rw-r--r--   include/trace/events/ext4.h                                      284
-rw-r--r--   include/trace/events/irq.h                                        52
-rw-r--r--   include/trace/events/jbd2.h                                       91
-rw-r--r--   include/trace/events/kmem.h                                      130
-rw-r--r--   include/trace/events/kvm.h                                        41
-rw-r--r--   include/trace/events/lock.h (renamed from include/trace/events/lockdep.h)  37
-rw-r--r--   include/trace/events/mce.h                                        69
-rw-r--r--   include/trace/events/module.h                                     22
-rw-r--r--   include/trace/events/power.h                                      38
-rw-r--r--   include/trace/events/sched.h                                     217
-rw-r--r--   include/trace/events/signal.h                                    173
-rw-r--r--   include/trace/events/syscalls.h                                    3
-rw-r--r--   include/trace/events/timer.h                                      83
-rw-r--r--   include/trace/events/workqueue.h                                  22
-rw-r--r--   include/trace/ftrace.h                                           514
-rw-r--r--   include/trace/power.h                                             32
-rw-r--r--   include/trace/syscall.h                                           39
20 files changed, 1290 insertions(+), 995 deletions(-)
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 2a4b3bf74033..5acfb1eb4df9 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -31,6 +31,14 @@
31 assign, print, reg, unreg) \ 31 assign, print, reg, unreg) \
32 DEFINE_TRACE_FN(name, reg, unreg) 32 DEFINE_TRACE_FN(name, reg, unreg)
33 33
34#undef DEFINE_EVENT
35#define DEFINE_EVENT(template, name, proto, args) \
36 DEFINE_TRACE(name)
37
38#undef DEFINE_EVENT_PRINT
39#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
40 DEFINE_TRACE(name)
41
34#undef DECLARE_TRACE 42#undef DECLARE_TRACE
35#define DECLARE_TRACE(name, proto, args) \ 43#define DECLARE_TRACE(name, proto, args) \
36 DEFINE_TRACE(name) 44 DEFINE_TRACE(name)
@@ -63,6 +71,9 @@
63 71
64#undef TRACE_EVENT 72#undef TRACE_EVENT
65#undef TRACE_EVENT_FN 73#undef TRACE_EVENT_FN
74#undef DECLARE_EVENT_CLASS
75#undef DEFINE_EVENT
76#undef DEFINE_EVENT_PRINT
66#undef TRACE_HEADER_MULTI_READ 77#undef TRACE_HEADER_MULTI_READ
67 78
68/* Only undef what we defined in this file */ 79/* Only undef what we defined in this file */
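The conversion throughout this series relies on the DECLARE_EVENT_CLASS/DEFINE_EVENT pair hooked up above. For orientation, the two macros relate to TRACE_EVENT roughly as follows — a simplified sketch of the post-series definition, not text copied verbatim from this patch: a TRACE_EVENT is just an event class with exactly one event attached.

	#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
		DECLARE_EVENT_CLASS(name,				\
				    PARAMS(proto),			\
				    PARAMS(args),			\
				    PARAMS(tstruct),			\
				    PARAMS(assign),			\
				    PARAMS(print));			\
		DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

Every DEFINE_EVENT attached to a class reuses the class's TP_STRUCT__entry, TP_fast_assign and TP_printk code instead of generating its own copy, which is where the net shrink in the diffstat above comes from.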
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644
index 000000000000..1af72dc24278
--- /dev/null
+++ b/include/trace/events/bkl.h
@@ -0,0 +1,61 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM bkl
3
4#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BKL_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(lock_kernel,
10
11 TP_PROTO(const char *func, const char *file, int line),
12
13 TP_ARGS(func, file, line),
14
15 TP_STRUCT__entry(
16 __field( int, depth )
17 __field_ext( const char *, func, FILTER_PTR_STRING )
18 __field_ext( const char *, file, FILTER_PTR_STRING )
19 __field( int, line )
20 ),
21
22 TP_fast_assign(
23 /* We want to record the lock_depth after lock is acquired */
24 __entry->depth = current->lock_depth + 1;
25 __entry->func = func;
26 __entry->file = file;
27 __entry->line = line;
28 ),
29
30 TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
31 __entry->file, __entry->line, __entry->func)
32);
33
34TRACE_EVENT(unlock_kernel,
35
36 TP_PROTO(const char *func, const char *file, int line),
37
38 TP_ARGS(func, file, line),
39
40 TP_STRUCT__entry(
41 __field(int, depth )
42 __field(const char *, func )
43 __field(const char *, file )
44 __field(int, line )
45 ),
46
47 TP_fast_assign(
48 __entry->depth = current->lock_depth;
49 __entry->func = func;
50 __entry->file = file;
51 __entry->line = line;
52 ),
53
54 TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth,
55 __entry->file, __entry->line, __entry->func)
56);
57
58#endif /* _TRACE_BKL_H */
59
60/* This part must be outside protection */
61#include <trace/define_trace.h>
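The +1 in lock_kernel's TP_fast_assign exists because the event fires before current->lock_depth is incremented. A minimal sketch of the intended call pattern, assuming wrappers that pass __func__/__FILE__/__LINE__ — the names below are illustrative; the actual callers live in the BKL implementation and are not part of this diff:

	#include <trace/events/bkl.h>

	static void _lock_kernel(const char *func, const char *file, int line)
	{
		trace_lock_kernel(func, file, line);	/* depth not bumped yet */
		/* ... acquire the big kernel lock, current->lock_depth++ ... */
	}

	#define lock_kernel() _lock_kernel(__func__, __FILE__, __LINE__)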
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 00405b5f624a..d870a918559c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -8,7 +8,7 @@
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
10 10
11TRACE_EVENT(block_rq_abort, 11DECLARE_EVENT_CLASS(block_rq_with_error,
12 12
13 TP_PROTO(struct request_queue *q, struct request *rq), 13 TP_PROTO(struct request_queue *q, struct request *rq),
14 14
@@ -40,41 +40,58 @@ TRACE_EVENT(block_rq_abort,
40 __entry->nr_sector, __entry->errors) 40 __entry->nr_sector, __entry->errors)
41); 41);
42 42
43TRACE_EVENT(block_rq_insert, 43/**
44 * block_rq_abort - abort block operation request
45 * @q: queue containing the block operation request
46 * @rq: block IO operation request
47 *
48 * Called immediately after pending block IO operation request @rq in
49 * queue @q is aborted. The fields in the operation request @rq
50 * can be examined to determine which device and sectors the pending
51 * operation would access.
52 */
53DEFINE_EVENT(block_rq_with_error, block_rq_abort,
44 54
45 TP_PROTO(struct request_queue *q, struct request *rq), 55 TP_PROTO(struct request_queue *q, struct request *rq),
46 56
47 TP_ARGS(q, rq), 57 TP_ARGS(q, rq)
58);
48 59
49 TP_STRUCT__entry( 60/**
50 __field( dev_t, dev ) 61 * block_rq_requeue - place block IO request back on a queue
51 __field( sector_t, sector ) 62 * @q: queue holding operation
52 __field( unsigned int, nr_sector ) 63 * @rq: block IO operation request
53 __field( unsigned int, bytes ) 64 *
54 __array( char, rwbs, 6 ) 65 * The block operation request @rq is being placed back into queue
55 __array( char, comm, TASK_COMM_LEN ) 66 * @q. For some reason the request was not completed and needs to be
56 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) 67 * put back in the queue.
57 ), 68 */
69DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
58 70
59 TP_fast_assign( 71 TP_PROTO(struct request_queue *q, struct request *rq),
60 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
61 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
62 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
63 __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
64 72
65 blk_fill_rwbs_rq(__entry->rwbs, rq); 73 TP_ARGS(q, rq)
66 blk_dump_cmd(__get_str(cmd), rq); 74);
67 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
68 ),
69 75
70 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", 76/**
71 MAJOR(__entry->dev), MINOR(__entry->dev), 77 * block_rq_complete - block IO operation completed by device driver
72 __entry->rwbs, __entry->bytes, __get_str(cmd), 78 * @q: queue containing the block operation request
73 (unsigned long long)__entry->sector, 79 * @rq: block operations request
74 __entry->nr_sector, __entry->comm) 80 *
81 * The block_rq_complete tracepoint event indicates that some portion
82 * of operation request has been completed by the device driver. If
83 * the @rq->bio is %NULL, then there is absolutely no additional work to
84 * do for the request. If @rq->bio is non-NULL then there is
85 * additional work required to complete the request.
86 */
87DEFINE_EVENT(block_rq_with_error, block_rq_complete,
88
89 TP_PROTO(struct request_queue *q, struct request *rq),
90
91 TP_ARGS(q, rq)
75); 92);
76 93
77TRACE_EVENT(block_rq_issue, 94DECLARE_EVENT_CLASS(block_rq,
78 95
79 TP_PROTO(struct request_queue *q, struct request *rq), 96 TP_PROTO(struct request_queue *q, struct request *rq),
80 97
@@ -86,7 +103,7 @@ TRACE_EVENT(block_rq_issue,
86 __field( unsigned int, nr_sector ) 103 __field( unsigned int, nr_sector )
87 __field( unsigned int, bytes ) 104 __field( unsigned int, bytes )
88 __array( char, rwbs, 6 ) 105 __array( char, rwbs, 6 )
89 __array( char, comm, TASK_COMM_LEN ) 106 __array( char, comm, TASK_COMM_LEN )
90 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) 107 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
91 ), 108 ),
92 109
@@ -108,70 +125,49 @@ TRACE_EVENT(block_rq_issue,
108 __entry->nr_sector, __entry->comm) 125 __entry->nr_sector, __entry->comm)
109); 126);
110 127
111TRACE_EVENT(block_rq_requeue, 128/**
129 * block_rq_insert - insert block operation request into queue
130 * @q: target queue
131 * @rq: block IO operation request
132 *
133 * Called immediately before block operation request @rq is inserted
134 * into queue @q. The fields in the operation request @rq struct can
135 * be examined to determine which device and sectors the pending
136 * operation would access.
137 */
138DEFINE_EVENT(block_rq, block_rq_insert,
112 139
113 TP_PROTO(struct request_queue *q, struct request *rq), 140 TP_PROTO(struct request_queue *q, struct request *rq),
114 141
115 TP_ARGS(q, rq), 142 TP_ARGS(q, rq)
116
117 TP_STRUCT__entry(
118 __field( dev_t, dev )
119 __field( sector_t, sector )
120 __field( unsigned int, nr_sector )
121 __field( int, errors )
122 __array( char, rwbs, 6 )
123 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
124 ),
125
126 TP_fast_assign(
127 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
128 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
129 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
130 __entry->errors = rq->errors;
131
132 blk_fill_rwbs_rq(__entry->rwbs, rq);
133 blk_dump_cmd(__get_str(cmd), rq);
134 ),
135
136 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
137 MAJOR(__entry->dev), MINOR(__entry->dev),
138 __entry->rwbs, __get_str(cmd),
139 (unsigned long long)__entry->sector,
140 __entry->nr_sector, __entry->errors)
141); 143);
142 144
143TRACE_EVENT(block_rq_complete, 145/**
146 * block_rq_issue - issue pending block IO request operation to device driver
147 * @q: queue holding operation
 148 * @rq: block IO operation request
149 *
150 * Called when block operation request @rq from queue @q is sent to a
151 * device driver for processing.
152 */
153DEFINE_EVENT(block_rq, block_rq_issue,
144 154
145 TP_PROTO(struct request_queue *q, struct request *rq), 155 TP_PROTO(struct request_queue *q, struct request *rq),
146 156
147 TP_ARGS(q, rq), 157 TP_ARGS(q, rq)
148
149 TP_STRUCT__entry(
150 __field( dev_t, dev )
151 __field( sector_t, sector )
152 __field( unsigned int, nr_sector )
153 __field( int, errors )
154 __array( char, rwbs, 6 )
155 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
160 __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
161 __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
162 __entry->errors = rq->errors;
163
164 blk_fill_rwbs_rq(__entry->rwbs, rq);
165 blk_dump_cmd(__get_str(cmd), rq);
166 ),
167
168 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
169 MAJOR(__entry->dev), MINOR(__entry->dev),
170 __entry->rwbs, __get_str(cmd),
171 (unsigned long long)__entry->sector,
172 __entry->nr_sector, __entry->errors)
173); 158);
174 159
160/**
161 * block_bio_bounce - used bounce buffer when processing block operation
162 * @q: queue holding the block operation
163 * @bio: block operation
164 *
165 * A bounce buffer was used to handle the block operation @bio in @q.
166 * This occurs when hardware limitations prevent a direct transfer of
167 * data between the @bio data memory area and the IO device. Use of a
168 * bounce buffer requires extra copying of data and decreases
169 * performance.
170 */
175TRACE_EVENT(block_bio_bounce, 171TRACE_EVENT(block_bio_bounce,
176 172
177 TP_PROTO(struct request_queue *q, struct bio *bio), 173 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -201,6 +197,14 @@ TRACE_EVENT(block_bio_bounce,
201 __entry->nr_sector, __entry->comm) 197 __entry->nr_sector, __entry->comm)
202); 198);
203 199
200/**
201 * block_bio_complete - completed all work on the block operation
202 * @q: queue holding the block operation
203 * @bio: block operation completed
204 *
205 * This tracepoint indicates there is no further work to do on this
206 * block IO operation @bio.
207 */
204TRACE_EVENT(block_bio_complete, 208TRACE_EVENT(block_bio_complete,
205 209
206 TP_PROTO(struct request_queue *q, struct bio *bio), 210 TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -228,7 +232,7 @@ TRACE_EVENT(block_bio_complete,
228 __entry->nr_sector, __entry->error) 232 __entry->nr_sector, __entry->error)
229); 233);
230 234
231TRACE_EVENT(block_bio_backmerge, 235DECLARE_EVENT_CLASS(block_bio,
232 236
233 TP_PROTO(struct request_queue *q, struct bio *bio), 237 TP_PROTO(struct request_queue *q, struct bio *bio),
234 238
@@ -256,63 +260,51 @@ TRACE_EVENT(block_bio_backmerge,
256 __entry->nr_sector, __entry->comm) 260 __entry->nr_sector, __entry->comm)
257); 261);
258 262
259TRACE_EVENT(block_bio_frontmerge, 263/**
264 * block_bio_backmerge - merging block operation to the end of an existing operation
265 * @q: queue holding operation
266 * @bio: new block operation to merge
267 *
268 * Merging block request @bio to the end of an existing block request
269 * in queue @q.
270 */
271DEFINE_EVENT(block_bio, block_bio_backmerge,
260 272
261 TP_PROTO(struct request_queue *q, struct bio *bio), 273 TP_PROTO(struct request_queue *q, struct bio *bio),
262 274
263 TP_ARGS(q, bio), 275 TP_ARGS(q, bio)
264
265 TP_STRUCT__entry(
266 __field( dev_t, dev )
267 __field( sector_t, sector )
268 __field( unsigned, nr_sector )
269 __array( char, rwbs, 6 )
270 __array( char, comm, TASK_COMM_LEN )
271 ),
272
273 TP_fast_assign(
274 __entry->dev = bio->bi_bdev->bd_dev;
275 __entry->sector = bio->bi_sector;
276 __entry->nr_sector = bio->bi_size >> 9;
277 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
278 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
279 ),
280
281 TP_printk("%d,%d %s %llu + %u [%s]",
282 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
283 (unsigned long long)__entry->sector,
284 __entry->nr_sector, __entry->comm)
285); 276);
286 277
287TRACE_EVENT(block_bio_queue, 278/**
279 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
280 * @q: queue holding operation
281 * @bio: new block operation to merge
282 *
283 * Merging block IO operation @bio to the beginning of an existing block
284 * operation in queue @q.
285 */
286DEFINE_EVENT(block_bio, block_bio_frontmerge,
288 287
289 TP_PROTO(struct request_queue *q, struct bio *bio), 288 TP_PROTO(struct request_queue *q, struct bio *bio),
290 289
291 TP_ARGS(q, bio), 290 TP_ARGS(q, bio)
291);
292 292
293 TP_STRUCT__entry( 293/**
294 __field( dev_t, dev ) 294 * block_bio_queue - putting new block IO operation in queue
295 __field( sector_t, sector ) 295 * @q: queue holding operation
296 __field( unsigned int, nr_sector ) 296 * @bio: new block operation
297 __array( char, rwbs, 6 ) 297 *
298 __array( char, comm, TASK_COMM_LEN ) 298 * About to place the block IO operation @bio into queue @q.
299 ), 299 */
300DEFINE_EVENT(block_bio, block_bio_queue,
300 301
301 TP_fast_assign( 302 TP_PROTO(struct request_queue *q, struct bio *bio),
302 __entry->dev = bio->bi_bdev->bd_dev;
303 __entry->sector = bio->bi_sector;
304 __entry->nr_sector = bio->bi_size >> 9;
305 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
306 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
307 ),
308 303
309 TP_printk("%d,%d %s %llu + %u [%s]", 304 TP_ARGS(q, bio)
310 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
311 (unsigned long long)__entry->sector,
312 __entry->nr_sector, __entry->comm)
313); 305);
314 306
315TRACE_EVENT(block_getrq, 307DECLARE_EVENT_CLASS(block_get_rq,
316 308
317 TP_PROTO(struct request_queue *q, struct bio *bio, int rw), 309 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
318 310
@@ -341,35 +333,48 @@ TRACE_EVENT(block_getrq,
341 __entry->nr_sector, __entry->comm) 333 __entry->nr_sector, __entry->comm)
342); 334);
343 335
344TRACE_EVENT(block_sleeprq, 336/**
337 * block_getrq - get a free request entry in queue for block IO operations
338 * @q: queue for operations
339 * @bio: pending block IO operation
340 * @rw: low bit indicates a read (%0) or a write (%1)
341 *
342 * A request struct for queue @q has been allocated to handle the
343 * block IO operation @bio.
344 */
345DEFINE_EVENT(block_get_rq, block_getrq,
345 346
346 TP_PROTO(struct request_queue *q, struct bio *bio, int rw), 347 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
347 348
348 TP_ARGS(q, bio, rw), 349 TP_ARGS(q, bio, rw)
350);
349 351
350 TP_STRUCT__entry( 352/**
351 __field( dev_t, dev ) 353 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
352 __field( sector_t, sector ) 354 * @q: queue for operation
353 __field( unsigned int, nr_sector ) 355 * @bio: pending block IO operation
354 __array( char, rwbs, 6 ) 356 * @rw: low bit indicates a read (%0) or a write (%1)
355 __array( char, comm, TASK_COMM_LEN ) 357 *
356 ), 358 * In the case where a request struct cannot be provided for queue @q
359 * the process needs to wait for an request struct to become
360 * available. This tracepoint event is generated each time the
361 * process goes to sleep waiting for request struct become available.
362 */
363DEFINE_EVENT(block_get_rq, block_sleeprq,
357 364
358 TP_fast_assign( 365 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
359 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
360 __entry->sector = bio ? bio->bi_sector : 0;
361 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
362 blk_fill_rwbs(__entry->rwbs,
363 bio ? bio->bi_rw : 0, __entry->nr_sector);
364 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
365 ),
366 366
367 TP_printk("%d,%d %s %llu + %u [%s]", 367 TP_ARGS(q, bio, rw)
368 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
369 (unsigned long long)__entry->sector,
370 __entry->nr_sector, __entry->comm)
371); 368);
372 369
370/**
 371 * block_plug - keep operation requests in the request queue
372 * @q: request queue to plug
373 *
374 * Plug the request queue @q. Do not allow block operation requests
375 * to be sent to the device driver. Instead, accumulate requests in
376 * the queue to improve throughput performance of the block device.
377 */
373TRACE_EVENT(block_plug, 378TRACE_EVENT(block_plug,
374 379
375 TP_PROTO(struct request_queue *q), 380 TP_PROTO(struct request_queue *q),
@@ -387,7 +392,7 @@ TRACE_EVENT(block_plug,
387 TP_printk("[%s]", __entry->comm) 392 TP_printk("[%s]", __entry->comm)
388); 393);
389 394
390TRACE_EVENT(block_unplug_timer, 395DECLARE_EVENT_CLASS(block_unplug,
391 396
392 TP_PROTO(struct request_queue *q), 397 TP_PROTO(struct request_queue *q),
393 398
@@ -406,25 +411,45 @@ TRACE_EVENT(block_unplug_timer,
406 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) 411 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
407); 412);
408 413
409TRACE_EVENT(block_unplug_io, 414/**
 415 * block_unplug_timer - timed release of operation requests in queue to device driver
416 * @q: request queue to unplug
417 *
418 * Unplug the request queue @q because a timer expired and allow block
419 * operation requests to be sent to the device driver.
420 */
421DEFINE_EVENT(block_unplug, block_unplug_timer,
410 422
411 TP_PROTO(struct request_queue *q), 423 TP_PROTO(struct request_queue *q),
412 424
413 TP_ARGS(q), 425 TP_ARGS(q)
426);
414 427
415 TP_STRUCT__entry( 428/**
416 __field( int, nr_rq ) 429 * block_unplug_io - release of operations requests in request queue
417 __array( char, comm, TASK_COMM_LEN ) 430 * @q: request queue to unplug
418 ), 431 *
432 * Unplug request queue @q because device driver is scheduled to work
433 * on elements in the request queue.
434 */
435DEFINE_EVENT(block_unplug, block_unplug_io,
419 436
420 TP_fast_assign( 437 TP_PROTO(struct request_queue *q),
421 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
422 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
423 ),
424 438
425 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) 439 TP_ARGS(q)
426); 440);
427 441
442/**
443 * block_split - split a single bio struct into two bio structs
444 * @q: queue containing the bio
445 * @bio: block operation being split
446 * @new_sector: The starting sector for the new bio
447 *
448 * The bio request @bio in request queue @q needs to be split into two
449 * bio requests. The newly created @bio request starts at
450 * @new_sector. This split may be required due to hardware limitation
451 * such as operation crossing device boundaries in a RAID system.
452 */
428TRACE_EVENT(block_split, 453TRACE_EVENT(block_split,
429 454
430 TP_PROTO(struct request_queue *q, struct bio *bio, 455 TP_PROTO(struct request_queue *q, struct bio *bio,
@@ -455,6 +480,16 @@ TRACE_EVENT(block_split,
455 __entry->comm) 480 __entry->comm)
456); 481);
457 482
483/**
484 * block_remap - map request for a partition to the raw device
485 * @q: queue holding the operation
486 * @bio: revised operation
487 * @dev: device for the operation
488 * @from: original sector for the operation
489 *
490 * An operation for a partition on a block device has been mapped to the
491 * raw block device.
492 */
458TRACE_EVENT(block_remap, 493TRACE_EVENT(block_remap,
459 494
460 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, 495 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
@@ -488,6 +523,17 @@ TRACE_EVENT(block_remap,
488 (unsigned long long)__entry->old_sector) 523 (unsigned long long)__entry->old_sector)
489); 524);
490 525
526/**
527 * block_rq_remap - map request for a block operation request
528 * @q: queue holding the operation
529 * @rq: block IO operation request
530 * @dev: device for the operation
531 * @from: original sector for the operation
532 *
533 * The block operation request @rq in @q has been remapped. The block
 534 * operation request @rq holds the current information and @from holds
535 * the original sector.
536 */
491TRACE_EVENT(block_rq_remap, 537TRACE_EVENT(block_rq_remap,
492 538
493 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, 539 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
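Note that converting an event to DEFINE_EVENT changes nothing for callers: each named event still gets its own trace_<name>() stub generated from the class. A hedged sketch of a call site — example_insert is a hypothetical name; the real call sites live in the block layer:

	#include <trace/events/block.h>

	static void example_insert(struct request_queue *q, struct request *rq)
	{
		trace_block_rq_insert(q, rq);	/* fires a block_rq class event */
		/* ... actually add the request to the queue ... */
	}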
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d09550bf3f95..2aa6aa3e8f61 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -38,7 +38,7 @@ TRACE_EVENT(ext4_free_inode,
38 __entry->blocks = inode->i_blocks; 38 __entry->blocks = inode->i_blocks;
39 ), 39 ),
40 40
41 TP_printk("dev %s ino %lu mode %d uid %u gid %u blocks %llu", 41 TP_printk("dev %s ino %lu mode 0%o uid %u gid %u blocks %llu",
42 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, 42 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
43 __entry->mode, __entry->uid, __entry->gid, 43 __entry->mode, __entry->uid, __entry->gid,
44 (unsigned long long) __entry->blocks) 44 (unsigned long long) __entry->blocks)
@@ -61,7 +61,7 @@ TRACE_EVENT(ext4_request_inode,
61 __entry->mode = mode; 61 __entry->mode = mode;
62 ), 62 ),
63 63
64 TP_printk("dev %s dir %lu mode %d", 64 TP_printk("dev %s dir %lu mode 0%o",
65 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir, 65 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->dir,
66 __entry->mode) 66 __entry->mode)
67); 67);
@@ -85,12 +85,12 @@ TRACE_EVENT(ext4_allocate_inode,
85 __entry->mode = mode; 85 __entry->mode = mode;
86 ), 86 ),
87 87
88 TP_printk("dev %s ino %lu dir %lu mode %d", 88 TP_printk("dev %s ino %lu dir %lu mode 0%o",
89 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, 89 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
90 (unsigned long) __entry->dir, __entry->mode) 90 (unsigned long) __entry->dir, __entry->mode)
91); 91);
92 92
93TRACE_EVENT(ext4_write_begin, 93DECLARE_EVENT_CLASS(ext4__write_begin,
94 94
95 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 95 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
96 unsigned int flags), 96 unsigned int flags),
@@ -118,7 +118,23 @@ TRACE_EVENT(ext4_write_begin,
118 __entry->pos, __entry->len, __entry->flags) 118 __entry->pos, __entry->len, __entry->flags)
119); 119);
120 120
121TRACE_EVENT(ext4_ordered_write_end, 121DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
122
123 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
124 unsigned int flags),
125
126 TP_ARGS(inode, pos, len, flags)
127);
128
129DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
130
131 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
132 unsigned int flags),
133
134 TP_ARGS(inode, pos, len, flags)
135);
136
137DECLARE_EVENT_CLASS(ext4__write_end,
122 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 138 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
123 unsigned int copied), 139 unsigned int copied),
124 140
@@ -145,57 +161,36 @@ TRACE_EVENT(ext4_ordered_write_end,
145 __entry->pos, __entry->len, __entry->copied) 161 __entry->pos, __entry->len, __entry->copied)
146); 162);
147 163
148TRACE_EVENT(ext4_writeback_write_end, 164DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
165
149 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 166 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
150 unsigned int copied), 167 unsigned int copied),
151 168
152 TP_ARGS(inode, pos, len, copied), 169 TP_ARGS(inode, pos, len, copied)
170);
153 171
154 TP_STRUCT__entry( 172DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
155 __field( dev_t, dev )
156 __field( ino_t, ino )
157 __field( loff_t, pos )
158 __field( unsigned int, len )
159 __field( unsigned int, copied )
160 ),
161 173
162 TP_fast_assign( 174 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
163 __entry->dev = inode->i_sb->s_dev; 175 unsigned int copied),
164 __entry->ino = inode->i_ino;
165 __entry->pos = pos;
166 __entry->len = len;
167 __entry->copied = copied;
168 ),
169 176
170 TP_printk("dev %s ino %lu pos %llu len %u copied %u", 177 TP_ARGS(inode, pos, len, copied)
171 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
172 __entry->pos, __entry->len, __entry->copied)
173); 178);
174 179
175TRACE_EVENT(ext4_journalled_write_end, 180DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
181
176 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 182 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
177 unsigned int copied), 183 unsigned int copied),
178 TP_ARGS(inode, pos, len, copied),
179 184
180 TP_STRUCT__entry( 185 TP_ARGS(inode, pos, len, copied)
181 __field( dev_t, dev ) 186);
182 __field( ino_t, ino )
183 __field( loff_t, pos )
184 __field( unsigned int, len )
185 __field( unsigned int, copied )
186 ),
187 187
188 TP_fast_assign( 188DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
189 __entry->dev = inode->i_sb->s_dev;
190 __entry->ino = inode->i_ino;
191 __entry->pos = pos;
192 __entry->len = len;
193 __entry->copied = copied;
194 ),
195 189
196 TP_printk("dev %s ino %lu pos %llu len %u copied %u", 190 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
197 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, 191 unsigned int copied),
198 __entry->pos, __entry->len, __entry->copied) 192
193 TP_ARGS(inode, pos, len, copied)
199); 194);
200 195
201TRACE_EVENT(ext4_writepage, 196TRACE_EVENT(ext4_writepage,
@@ -310,7 +305,6 @@ TRACE_EVENT(ext4_da_writepages_result,
310 __field( int, ret ) 305 __field( int, ret )
311 __field( int, pages_written ) 306 __field( int, pages_written )
312 __field( long, pages_skipped ) 307 __field( long, pages_skipped )
313 __field( char, encountered_congestion )
314 __field( char, more_io ) 308 __field( char, more_io )
315 __field( char, no_nrwrite_index_update ) 309 __field( char, no_nrwrite_index_update )
316 __field( pgoff_t, writeback_index ) 310 __field( pgoff_t, writeback_index )
@@ -322,75 +316,20 @@ TRACE_EVENT(ext4_da_writepages_result,
322 __entry->ret = ret; 316 __entry->ret = ret;
323 __entry->pages_written = pages_written; 317 __entry->pages_written = pages_written;
324 __entry->pages_skipped = wbc->pages_skipped; 318 __entry->pages_skipped = wbc->pages_skipped;
325 __entry->encountered_congestion = wbc->encountered_congestion;
326 __entry->more_io = wbc->more_io; 319 __entry->more_io = wbc->more_io;
327 __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update; 320 __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
328 __entry->writeback_index = inode->i_mapping->writeback_index; 321 __entry->writeback_index = inode->i_mapping->writeback_index;
329 ), 322 ),
330 323
331 TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d writeback_index %lu", 324 TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld more_io %d no_nrwrite_index_update %d writeback_index %lu",
332 jbd2_dev_to_name(__entry->dev), 325 jbd2_dev_to_name(__entry->dev),
333 (unsigned long) __entry->ino, __entry->ret, 326 (unsigned long) __entry->ino, __entry->ret,
334 __entry->pages_written, __entry->pages_skipped, 327 __entry->pages_written, __entry->pages_skipped,
335 __entry->encountered_congestion, __entry->more_io, 328 __entry->more_io,
336 __entry->no_nrwrite_index_update, 329 __entry->no_nrwrite_index_update,
337 (unsigned long) __entry->writeback_index) 330 (unsigned long) __entry->writeback_index)
338); 331);
339 332
340TRACE_EVENT(ext4_da_write_begin,
341 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
342 unsigned int flags),
343
344 TP_ARGS(inode, pos, len, flags),
345
346 TP_STRUCT__entry(
347 __field( dev_t, dev )
348 __field( ino_t, ino )
349 __field( loff_t, pos )
350 __field( unsigned int, len )
351 __field( unsigned int, flags )
352 ),
353
354 TP_fast_assign(
355 __entry->dev = inode->i_sb->s_dev;
356 __entry->ino = inode->i_ino;
357 __entry->pos = pos;
358 __entry->len = len;
359 __entry->flags = flags;
360 ),
361
362 TP_printk("dev %s ino %lu pos %llu len %u flags %u",
363 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
364 __entry->pos, __entry->len, __entry->flags)
365);
366
367TRACE_EVENT(ext4_da_write_end,
368 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
369 unsigned int copied),
370
371 TP_ARGS(inode, pos, len, copied),
372
373 TP_STRUCT__entry(
374 __field( dev_t, dev )
375 __field( ino_t, ino )
376 __field( loff_t, pos )
377 __field( unsigned int, len )
378 __field( unsigned int, copied )
379 ),
380
381 TP_fast_assign(
382 __entry->dev = inode->i_sb->s_dev;
383 __entry->ino = inode->i_ino;
384 __entry->pos = pos;
385 __entry->len = len;
386 __entry->copied = copied;
387 ),
388
389 TP_printk("dev %s ino %lu pos %llu len %u copied %u",
390 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
391 __entry->pos, __entry->len, __entry->copied)
392);
393
394TRACE_EVENT(ext4_discard_blocks, 333TRACE_EVENT(ext4_discard_blocks,
395 TP_PROTO(struct super_block *sb, unsigned long long blk, 334 TP_PROTO(struct super_block *sb, unsigned long long blk,
396 unsigned long long count), 335 unsigned long long count),
@@ -650,30 +589,32 @@ TRACE_EVENT(ext4_allocate_blocks,
650 589
651TRACE_EVENT(ext4_free_blocks, 590TRACE_EVENT(ext4_free_blocks,
652 TP_PROTO(struct inode *inode, __u64 block, unsigned long count, 591 TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
653 int metadata), 592 int flags),
654 593
655 TP_ARGS(inode, block, count, metadata), 594 TP_ARGS(inode, block, count, flags),
656 595
657 TP_STRUCT__entry( 596 TP_STRUCT__entry(
658 __field( dev_t, dev ) 597 __field( dev_t, dev )
659 __field( ino_t, ino ) 598 __field( ino_t, ino )
599 __field( umode_t, mode )
660 __field( __u64, block ) 600 __field( __u64, block )
661 __field( unsigned long, count ) 601 __field( unsigned long, count )
662 __field( int, metadata ) 602 __field( int, flags )
663
664 ), 603 ),
665 604
666 TP_fast_assign( 605 TP_fast_assign(
667 __entry->dev = inode->i_sb->s_dev; 606 __entry->dev = inode->i_sb->s_dev;
668 __entry->ino = inode->i_ino; 607 __entry->ino = inode->i_ino;
608 __entry->mode = inode->i_mode;
669 __entry->block = block; 609 __entry->block = block;
670 __entry->count = count; 610 __entry->count = count;
671 __entry->metadata = metadata; 611 __entry->flags = flags;
672 ), 612 ),
673 613
674 TP_printk("dev %s ino %lu block %llu count %lu metadata %d", 614 TP_printk("dev %s ino %lu mode 0%o block %llu count %lu flags %d",
675 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, 615 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
676 __entry->block, __entry->count, __entry->metadata) 616 __entry->mode, __entry->block, __entry->count,
617 __entry->flags)
677); 618);
678 619
679TRACE_EVENT(ext4_sync_file, 620TRACE_EVENT(ext4_sync_file,
@@ -907,6 +848,133 @@ TRACE_EVENT(ext4_mballoc_free,
907 __entry->result_len, __entry->result_logical) 848 __entry->result_len, __entry->result_logical)
908); 849);
909 850
851TRACE_EVENT(ext4_forget,
852 TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
853
854 TP_ARGS(inode, is_metadata, block),
855
856 TP_STRUCT__entry(
857 __field( dev_t, dev )
858 __field( ino_t, ino )
859 __field( umode_t, mode )
860 __field( int, is_metadata )
861 __field( __u64, block )
862 ),
863
864 TP_fast_assign(
865 __entry->dev = inode->i_sb->s_dev;
866 __entry->ino = inode->i_ino;
867 __entry->mode = inode->i_mode;
868 __entry->is_metadata = is_metadata;
869 __entry->block = block;
870 ),
871
872 TP_printk("dev %s ino %lu mode 0%o is_metadata %d block %llu",
873 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
874 __entry->mode, __entry->is_metadata, __entry->block)
875);
876
877TRACE_EVENT(ext4_da_update_reserve_space,
878 TP_PROTO(struct inode *inode, int used_blocks),
879
880 TP_ARGS(inode, used_blocks),
881
882 TP_STRUCT__entry(
883 __field( dev_t, dev )
884 __field( ino_t, ino )
885 __field( umode_t, mode )
886 __field( __u64, i_blocks )
887 __field( int, used_blocks )
888 __field( int, reserved_data_blocks )
889 __field( int, reserved_meta_blocks )
890 __field( int, allocated_meta_blocks )
891 ),
892
893 TP_fast_assign(
894 __entry->dev = inode->i_sb->s_dev;
895 __entry->ino = inode->i_ino;
896 __entry->mode = inode->i_mode;
897 __entry->i_blocks = inode->i_blocks;
898 __entry->used_blocks = used_blocks;
899 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
900 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
901 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
902 ),
903
904 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
905 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
906 __entry->mode, (unsigned long long) __entry->i_blocks,
907 __entry->used_blocks, __entry->reserved_data_blocks,
908 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
909);
910
911TRACE_EVENT(ext4_da_reserve_space,
912 TP_PROTO(struct inode *inode, int md_needed),
913
914 TP_ARGS(inode, md_needed),
915
916 TP_STRUCT__entry(
917 __field( dev_t, dev )
918 __field( ino_t, ino )
919 __field( umode_t, mode )
920 __field( __u64, i_blocks )
921 __field( int, md_needed )
922 __field( int, reserved_data_blocks )
923 __field( int, reserved_meta_blocks )
924 ),
925
926 TP_fast_assign(
927 __entry->dev = inode->i_sb->s_dev;
928 __entry->ino = inode->i_ino;
929 __entry->mode = inode->i_mode;
930 __entry->i_blocks = inode->i_blocks;
931 __entry->md_needed = md_needed;
932 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
933 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
934 ),
935
936 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
937 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
938 __entry->mode, (unsigned long long) __entry->i_blocks,
939 __entry->md_needed, __entry->reserved_data_blocks,
940 __entry->reserved_meta_blocks)
941);
942
943TRACE_EVENT(ext4_da_release_space,
944 TP_PROTO(struct inode *inode, int freed_blocks),
945
946 TP_ARGS(inode, freed_blocks),
947
948 TP_STRUCT__entry(
949 __field( dev_t, dev )
950 __field( ino_t, ino )
951 __field( umode_t, mode )
952 __field( __u64, i_blocks )
953 __field( int, freed_blocks )
954 __field( int, reserved_data_blocks )
955 __field( int, reserved_meta_blocks )
956 __field( int, allocated_meta_blocks )
957 ),
958
959 TP_fast_assign(
960 __entry->dev = inode->i_sb->s_dev;
961 __entry->ino = inode->i_ino;
962 __entry->mode = inode->i_mode;
963 __entry->i_blocks = inode->i_blocks;
964 __entry->freed_blocks = freed_blocks;
965 __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
966 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
967 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
968 ),
969
970 TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
971 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
972 __entry->mode, (unsigned long long) __entry->i_blocks,
973 __entry->freed_blocks, __entry->reserved_data_blocks,
974 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
975);
976
977
910#endif /* _TRACE_EXT4_H */ 978#endif /* _TRACE_EXT4_H */
911 979
912/* This part must be outside protection */ 980/* This part must be outside protection */
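The recurring "%d" to "0%o" change in the ext4 TP_printk formats deserves a worked example: i_mode packs the file type and permission bits, which are conventionally read in octal, so decimal output is opaque. A small userspace illustration with a made-up mode value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mode = 0100644;	/* regular file, rw-r--r-- */
		printf("mode %d\n", mode);	/* prints "mode 33188"   */
		printf("mode 0%o\n", mode);	/* prints "mode 0100644" */
		return 0;
	}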
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index b89f9db4a404..0e4cfb694fe7 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry,
48 __assign_str(name, action->name); 48 __assign_str(name, action->name);
49 ), 49 ),
50 50
51 TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) 51 TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
52); 52);
53 53
54/** 54/**
@@ -78,22 +78,11 @@ TRACE_EVENT(irq_handler_exit,
78 __entry->ret = ret; 78 __entry->ret = ret;
79 ), 79 ),
80 80
81 TP_printk("irq=%d return=%s", 81 TP_printk("irq=%d ret=%s",
82 __entry->irq, __entry->ret ? "handled" : "unhandled") 82 __entry->irq, __entry->ret ? "handled" : "unhandled")
83); 83);
84 84
85/** 85DECLARE_EVENT_CLASS(softirq,
86 * softirq_entry - called immediately before the softirq handler
87 * @h: pointer to struct softirq_action
88 * @vec: pointer to first struct softirq_action in softirq_vec array
89 *
90 * The @h parameter, contains a pointer to the struct softirq_action
91 * which has a pointer to the action handler that is called. By subtracting
92 * the @vec pointer from the @h pointer, we can determine the softirq
93 * number. Also, when used in combination with the softirq_exit tracepoint
94 * we can determine the softirq latency.
95 */
96TRACE_EVENT(softirq_entry,
97 86
98 TP_PROTO(struct softirq_action *h, struct softirq_action *vec), 87 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
99 88
@@ -107,11 +96,29 @@ TRACE_EVENT(softirq_entry,
107 __entry->vec = (int)(h - vec); 96 __entry->vec = (int)(h - vec);
108 ), 97 ),
109 98
110 TP_printk("softirq=%d action=%s", __entry->vec, 99 TP_printk("vec=%d [action=%s]", __entry->vec,
111 show_softirq_name(__entry->vec)) 100 show_softirq_name(__entry->vec))
112); 101);
113 102
114/** 103/**
104 * softirq_entry - called immediately before the softirq handler
105 * @h: pointer to struct softirq_action
106 * @vec: pointer to first struct softirq_action in softirq_vec array
107 *
 108 * The @h parameter contains a pointer to the struct softirq_action
109 * which has a pointer to the action handler that is called. By subtracting
110 * the @vec pointer from the @h pointer, we can determine the softirq
111 * number. Also, when used in combination with the softirq_exit tracepoint
112 * we can determine the softirq latency.
113 */
114DEFINE_EVENT(softirq, softirq_entry,
115
116 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
117
118 TP_ARGS(h, vec)
119);
120
121/**
115 * softirq_exit - called immediately after the softirq handler returns 122 * softirq_exit - called immediately after the softirq handler returns
116 * @h: pointer to struct softirq_action 123 * @h: pointer to struct softirq_action
117 * @vec: pointer to first struct softirq_action in softirq_vec array 124 * @vec: pointer to first struct softirq_action in softirq_vec array
@@ -122,22 +129,11 @@ TRACE_EVENT(softirq_entry,
122 * combination with the softirq_entry tracepoint we can determine the softirq 129 * combination with the softirq_entry tracepoint we can determine the softirq
123 * latency. 130 * latency.
124 */ 131 */
125TRACE_EVENT(softirq_exit, 132DEFINE_EVENT(softirq, softirq_exit,
126 133
127 TP_PROTO(struct softirq_action *h, struct softirq_action *vec), 134 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
128 135
129 TP_ARGS(h, vec), 136 TP_ARGS(h, vec)
130
131 TP_STRUCT__entry(
132 __field( int, vec )
133 ),
134
135 TP_fast_assign(
136 __entry->vec = (int)(h - vec);
137 ),
138
139 TP_printk("softirq=%d action=%s", __entry->vec,
140 show_softirq_name(__entry->vec))
141); 137);
142 138
143#endif /* _TRACE_IRQ_H */ 139#endif /* _TRACE_IRQ_H */
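The vec field recorded by the softirq class is plain pointer arithmetic: since @h points into the softirq_vec array, subtracting the array base yields the element index, i.e. the softirq number. A self-contained illustration (array size and index made up):

	#include <stdio.h>

	struct softirq_action {
		void (*action)(struct softirq_action *);
	};

	int main(void)
	{
		struct softirq_action vec[10];
		struct softirq_action *h = &vec[3];	/* e.g. NET_RX_SOFTIRQ */

		printf("softirq nr = %d\n", (int)(h - vec));	/* prints 3 */
		return 0;
	}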
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 3c60b75adb9e..bf16545cc977 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -30,7 +30,7 @@ TRACE_EVENT(jbd2_checkpoint,
30 jbd2_dev_to_name(__entry->dev), __entry->result) 30 jbd2_dev_to_name(__entry->dev), __entry->result)
31); 31);
32 32
33TRACE_EVENT(jbd2_start_commit, 33DECLARE_EVENT_CLASS(jbd2_commit,
34 34
35 TP_PROTO(journal_t *journal, transaction_t *commit_transaction), 35 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
36 36
@@ -53,73 +53,32 @@ TRACE_EVENT(jbd2_start_commit,
53 __entry->sync_commit) 53 __entry->sync_commit)
54); 54);
55 55
56TRACE_EVENT(jbd2_commit_locking, 56DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
57 57
58 TP_PROTO(journal_t *journal, transaction_t *commit_transaction), 58 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
59 59
60 TP_ARGS(journal, commit_transaction), 60 TP_ARGS(journal, commit_transaction)
61
62 TP_STRUCT__entry(
63 __field( dev_t, dev )
64 __field( char, sync_commit )
65 __field( int, transaction )
66 ),
67
68 TP_fast_assign(
69 __entry->dev = journal->j_fs_dev->bd_dev;
70 __entry->sync_commit = commit_transaction->t_synchronous_commit;
71 __entry->transaction = commit_transaction->t_tid;
72 ),
73
74 TP_printk("dev %s transaction %d sync %d",
75 jbd2_dev_to_name(__entry->dev), __entry->transaction,
76 __entry->sync_commit)
77); 61);
78 62
79TRACE_EVENT(jbd2_commit_flushing, 63DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
80 64
81 TP_PROTO(journal_t *journal, transaction_t *commit_transaction), 65 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
82 66
83 TP_ARGS(journal, commit_transaction), 67 TP_ARGS(journal, commit_transaction)
84
85 TP_STRUCT__entry(
86 __field( dev_t, dev )
87 __field( char, sync_commit )
88 __field( int, transaction )
89 ),
90
91 TP_fast_assign(
92 __entry->dev = journal->j_fs_dev->bd_dev;
93 __entry->sync_commit = commit_transaction->t_synchronous_commit;
94 __entry->transaction = commit_transaction->t_tid;
95 ),
96
97 TP_printk("dev %s transaction %d sync %d",
98 jbd2_dev_to_name(__entry->dev), __entry->transaction,
99 __entry->sync_commit)
100); 68);
101 69
102TRACE_EVENT(jbd2_commit_logging, 70DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
103 71
104 TP_PROTO(journal_t *journal, transaction_t *commit_transaction), 72 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
105 73
106 TP_ARGS(journal, commit_transaction), 74 TP_ARGS(journal, commit_transaction)
75);
107 76
108 TP_STRUCT__entry( 77DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
109 __field( dev_t, dev )
110 __field( char, sync_commit )
111 __field( int, transaction )
112 ),
113 78
114 TP_fast_assign( 79 TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
115 __entry->dev = journal->j_fs_dev->bd_dev;
116 __entry->sync_commit = commit_transaction->t_synchronous_commit;
117 __entry->transaction = commit_transaction->t_tid;
118 ),
119 80
120 TP_printk("dev %s transaction %d sync %d", 81 TP_ARGS(journal, commit_transaction)
121 jbd2_dev_to_name(__entry->dev), __entry->transaction,
122 __entry->sync_commit)
123); 82);
124 83
125TRACE_EVENT(jbd2_end_commit, 84TRACE_EVENT(jbd2_end_commit,
@@ -240,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats,
240 __entry->forced_to_close, __entry->written, __entry->dropped) 199 __entry->forced_to_close, __entry->written, __entry->dropped)
241); 200);
242 201
202TRACE_EVENT(jbd2_cleanup_journal_tail,
203
204 TP_PROTO(journal_t *journal, tid_t first_tid,
205 unsigned long block_nr, unsigned long freed),
206
207 TP_ARGS(journal, first_tid, block_nr, freed),
208
209 TP_STRUCT__entry(
210 __field( dev_t, dev )
211 __field( tid_t, tail_sequence )
212 __field( tid_t, first_tid )
213 __field(unsigned long, block_nr )
214 __field(unsigned long, freed )
215 ),
216
217 TP_fast_assign(
218 __entry->dev = journal->j_fs_dev->bd_dev;
219 __entry->tail_sequence = journal->j_tail_sequence;
220 __entry->first_tid = first_tid;
221 __entry->block_nr = block_nr;
222 __entry->freed = freed;
223 ),
224
225 TP_printk("dev %s from %u to %u offset %lu freed %lu",
226 jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
227 __entry->first_tid, __entry->block_nr, __entry->freed)
228);
229
243#endif /* _TRACE_JBD2_H */ 230#endif /* _TRACE_JBD2_H */
244 231
245/* This part must be outside protection */ 232/* This part must be outside protection */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index eaf46bdd18a5..3adca0ca9dbe 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -44,7 +44,7 @@
44 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ 44 {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
45 ) : "GFP_NOWAIT" 45 ) : "GFP_NOWAIT"
46 46
47TRACE_EVENT(kmalloc, 47DECLARE_EVENT_CLASS(kmem_alloc,
48 48
49 TP_PROTO(unsigned long call_site, 49 TP_PROTO(unsigned long call_site,
50 const void *ptr, 50 const void *ptr,
@@ -78,41 +78,23 @@ TRACE_EVENT(kmalloc,
78 show_gfp_flags(__entry->gfp_flags)) 78 show_gfp_flags(__entry->gfp_flags))
79); 79);
80 80
81TRACE_EVENT(kmem_cache_alloc, 81DEFINE_EVENT(kmem_alloc, kmalloc,
82 82
83 TP_PROTO(unsigned long call_site, 83 TP_PROTO(unsigned long call_site, const void *ptr,
84 const void *ptr, 84 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
85 size_t bytes_req,
86 size_t bytes_alloc,
87 gfp_t gfp_flags),
88 85
89 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), 86 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
87);
90 88
91 TP_STRUCT__entry( 89DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
92 __field( unsigned long, call_site )
93 __field( const void *, ptr )
94 __field( size_t, bytes_req )
95 __field( size_t, bytes_alloc )
96 __field( gfp_t, gfp_flags )
97 ),
98 90
99 TP_fast_assign( 91 TP_PROTO(unsigned long call_site, const void *ptr,
100 __entry->call_site = call_site; 92 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
101 __entry->ptr = ptr;
102 __entry->bytes_req = bytes_req;
103 __entry->bytes_alloc = bytes_alloc;
104 __entry->gfp_flags = gfp_flags;
105 ),
106 93
107 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", 94 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
108 __entry->call_site,
109 __entry->ptr,
110 __entry->bytes_req,
111 __entry->bytes_alloc,
112 show_gfp_flags(__entry->gfp_flags))
113); 95);
114 96
115TRACE_EVENT(kmalloc_node, 97DECLARE_EVENT_CLASS(kmem_alloc_node,
116 98
117 TP_PROTO(unsigned long call_site, 99 TP_PROTO(unsigned long call_site,
118 const void *ptr, 100 const void *ptr,
@@ -150,45 +132,25 @@ TRACE_EVENT(kmalloc_node,
150 __entry->node) 132 __entry->node)
151); 133);
152 134
153TRACE_EVENT(kmem_cache_alloc_node, 135DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
154 136
155 TP_PROTO(unsigned long call_site, 137 TP_PROTO(unsigned long call_site, const void *ptr,
156 const void *ptr, 138 size_t bytes_req, size_t bytes_alloc,
157 size_t bytes_req, 139 gfp_t gfp_flags, int node),
158 size_t bytes_alloc,
159 gfp_t gfp_flags,
160 int node),
161 140
162 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), 141 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
142);
163 143
164 TP_STRUCT__entry( 144DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
165 __field( unsigned long, call_site )
166 __field( const void *, ptr )
167 __field( size_t, bytes_req )
168 __field( size_t, bytes_alloc )
169 __field( gfp_t, gfp_flags )
170 __field( int, node )
171 ),
172 145
173 TP_fast_assign( 146 TP_PROTO(unsigned long call_site, const void *ptr,
174 __entry->call_site = call_site; 147 size_t bytes_req, size_t bytes_alloc,
175 __entry->ptr = ptr; 148 gfp_t gfp_flags, int node),
176 __entry->bytes_req = bytes_req;
177 __entry->bytes_alloc = bytes_alloc;
178 __entry->gfp_flags = gfp_flags;
179 __entry->node = node;
180 ),
181 149
182 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", 150 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
183 __entry->call_site,
184 __entry->ptr,
185 __entry->bytes_req,
186 __entry->bytes_alloc,
187 show_gfp_flags(__entry->gfp_flags),
188 __entry->node)
189); 151);
190 152
191TRACE_EVENT(kfree, 153DECLARE_EVENT_CLASS(kmem_free,
192 154
193 TP_PROTO(unsigned long call_site, const void *ptr), 155 TP_PROTO(unsigned long call_site, const void *ptr),
194 156
@@ -207,23 +169,18 @@ TRACE_EVENT(kfree,
207 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) 169 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
208); 170);
209 171
210TRACE_EVENT(kmem_cache_free, 172DEFINE_EVENT(kmem_free, kfree,
211 173
212 TP_PROTO(unsigned long call_site, const void *ptr), 174 TP_PROTO(unsigned long call_site, const void *ptr),
213 175
214 TP_ARGS(call_site, ptr), 176 TP_ARGS(call_site, ptr)
177);
215 178
216 TP_STRUCT__entry( 179DEFINE_EVENT(kmem_free, kmem_cache_free,
217 __field( unsigned long, call_site )
218 __field( const void *, ptr )
219 ),
220 180
221 TP_fast_assign( 181 TP_PROTO(unsigned long call_site, const void *ptr),
222 __entry->call_site = call_site;
223 __entry->ptr = ptr;
224 ),
225 182
226 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) 183 TP_ARGS(call_site, ptr)
227); 184);
228 185
229TRACE_EVENT(mm_page_free_direct, 186TRACE_EVENT(mm_page_free_direct,
@@ -299,7 +256,7 @@ TRACE_EVENT(mm_page_alloc,
299 show_gfp_flags(__entry->gfp_flags)) 256 show_gfp_flags(__entry->gfp_flags))
300); 257);
301 258
302TRACE_EVENT(mm_page_alloc_zone_locked, 259DECLARE_EVENT_CLASS(mm_page,
303 260
304 TP_PROTO(struct page *page, unsigned int order, int migratetype), 261 TP_PROTO(struct page *page, unsigned int order, int migratetype),
305 262
@@ -325,29 +282,22 @@ TRACE_EVENT(mm_page_alloc_zone_locked,
325 __entry->order == 0) 282 __entry->order == 0)
326); 283);
327 284
328TRACE_EVENT(mm_page_pcpu_drain, 285DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
329 286
330 TP_PROTO(struct page *page, int order, int migratetype), 287 TP_PROTO(struct page *page, unsigned int order, int migratetype),
331 288
332 TP_ARGS(page, order, migratetype), 289 TP_ARGS(page, order, migratetype)
290);
333 291
334 TP_STRUCT__entry( 292DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
335 __field( struct page *, page )
336 __field( int, order )
337 __field( int, migratetype )
338 ),
339 293
340 TP_fast_assign( 294 TP_PROTO(struct page *page, unsigned int order, int migratetype),
341 __entry->page = page; 295
342 __entry->order = order; 296 TP_ARGS(page, order, migratetype),
343 __entry->migratetype = migratetype;
344 ),
345 297
346 TP_printk("page=%p pfn=%lu order=%d migratetype=%d", 298 TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
347 __entry->page, 299 __entry->page, page_to_pfn(__entry->page),
348 page_to_pfn(__entry->page), 300 __entry->order, __entry->migratetype)
349 __entry->order,
350 __entry->migratetype)
351); 301);
352 302
353TRACE_EVENT(mm_page_alloc_extfrag, 303TRACE_EVENT(mm_page_alloc_extfrag,
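The kmem.h conversion also shows DEFINE_EVENT_PRINT (mm_page_pcpu_drain above): the event shares the class's entry layout and TP_fast_assign but supplies its own TP_printk. A generic sketch of the pattern, with hypothetical names:

	DECLARE_EVENT_CLASS(widget,
		TP_PROTO(int id),
		TP_ARGS(id),
		TP_STRUCT__entry(__field(int, id)),
		TP_fast_assign(__entry->id = id;),
		TP_printk("id=%d", __entry->id)
	);

	DEFINE_EVENT(widget, widget_add,	/* inherits the class format */
		TP_PROTO(int id),
		TP_ARGS(id)
	);

	DEFINE_EVENT_PRINT(widget, widget_del,	/* overrides only TP_printk */
		TP_PROTO(int id),
		TP_ARGS(id),
		TP_printk("removed id=%d", __entry->id)
	);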
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index dbe108455275..b17d49dfc3ef 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -145,6 +145,47 @@ TRACE_EVENT(kvm_mmio,
145 __entry->len, __entry->gpa, __entry->val) 145 __entry->len, __entry->gpa, __entry->val)
146); 146);
147 147
148#define kvm_fpu_load_symbol \
149 {0, "unload"}, \
150 {1, "load"}
151
152TRACE_EVENT(kvm_fpu,
153 TP_PROTO(int load),
154 TP_ARGS(load),
155
156 TP_STRUCT__entry(
157 __field( u32, load )
158 ),
159
160 TP_fast_assign(
161 __entry->load = load;
162 ),
163
164 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
165);
166
167TRACE_EVENT(kvm_age_page,
168 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
169 TP_ARGS(hva, slot, ref),
170
171 TP_STRUCT__entry(
172 __field( u64, hva )
173 __field( u64, gfn )
174 __field( u8, referenced )
175 ),
176
177 TP_fast_assign(
178 __entry->hva = hva;
179 __entry->gfn =
180 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
181 __entry->referenced = ref;
182 ),
183
184 TP_printk("hva %llx gfn %llx %s",
185 __entry->hva, __entry->gfn,
186 __entry->referenced ? "YOUNG" : "OLD")
187);
188
148#endif /* _TRACE_KVM_MAIN_H */ 189#endif /* _TRACE_KVM_MAIN_H */
149 190
150/* This part must be outside protection */ 191/* This part must be outside protection */
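kvm_age_page derives the guest frame number from the host virtual address: the byte offset into the slot's userspace mapping, converted to pages, added to the slot's base gfn. A worked example with made-up values, assuming PAGE_SHIFT is 12 (4 KiB pages):

	#include <stdio.h>
	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long long hva            = 0x7f0000012000ULL;
		unsigned long long userspace_addr = 0x7f0000000000ULL;
		unsigned long long base_gfn       = 0x100;

		/* 0x12000 bytes into the slot = 0x12 pages */
		unsigned long long gfn =
			base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT);
		printf("gfn = 0x%llx\n", gfn);	/* prints 0x112 */
		return 0;
	}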
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lock.h
index bcf1d209a00d..5c1dcfc16c60 100644
--- a/include/trace/events/lockdep.h
+++ b/include/trace/events/lock.h
@@ -1,8 +1,8 @@
1#undef TRACE_SYSTEM 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM lockdep 2#define TRACE_SYSTEM lock
3 3
4#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) 4#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_LOCKDEP_H 5#define _TRACE_LOCK_H
6 6
7#include <linux/lockdep.h> 7#include <linux/lockdep.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
20 TP_STRUCT__entry( 20 TP_STRUCT__entry(
21 __field(unsigned int, flags) 21 __field(unsigned int, flags)
22 __string(name, lock->name) 22 __string(name, lock->name)
23 __field(void *, lockdep_addr)
23 ), 24 ),
24 25
25 TP_fast_assign( 26 TP_fast_assign(
26 __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); 27 __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
27 __assign_str(name, lock->name); 28 __assign_str(name, lock->name);
29 __entry->lockdep_addr = lock;
28 ), 30 ),
29 31
30 TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "", 32 TP_printk("%p %s%s%s", __entry->lockdep_addr,
33 (__entry->flags & 1) ? "try " : "",
31 (__entry->flags & 2) ? "read " : "", 34 (__entry->flags & 2) ? "read " : "",
32 __get_str(name)) 35 __get_str(name))
33); 36);
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
40 43
41 TP_STRUCT__entry( 44 TP_STRUCT__entry(
42 __string(name, lock->name) 45 __string(name, lock->name)
46 __field(void *, lockdep_addr)
43 ), 47 ),
44 48
45 TP_fast_assign( 49 TP_fast_assign(
46 __assign_str(name, lock->name); 50 __assign_str(name, lock->name);
51 __entry->lockdep_addr = lock;
47 ), 52 ),
48 53
49 TP_printk("%s", __get_str(name)) 54 TP_printk("%p %s",
55 __entry->lockdep_addr, __get_str(name))
50); 56);
51 57
52#ifdef CONFIG_LOCK_STAT 58#ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
59 65
60 TP_STRUCT__entry( 66 TP_STRUCT__entry(
61 __string(name, lock->name) 67 __string(name, lock->name)
68 __field(void *, lockdep_addr)
62 ), 69 ),
63 70
64 TP_fast_assign( 71 TP_fast_assign(
65 __assign_str(name, lock->name); 72 __assign_str(name, lock->name);
73 __entry->lockdep_addr = lock;
66 ), 74 ),
67 75
68 TP_printk("%s", __get_str(name)) 76 TP_printk("%p %s",
77 __entry->lockdep_addr, __get_str(name))
69); 78);
70 79
71TRACE_EVENT(lock_acquired, 80TRACE_EVENT(lock_acquired,
@@ -75,22 +84,24 @@ TRACE_EVENT(lock_acquired,
75 84
76 TP_STRUCT__entry( 85 TP_STRUCT__entry(
77 __string(name, lock->name) 86 __string(name, lock->name)
78 __field(unsigned long, wait_usec) 87 __field(s64, wait_nsec)
79 __field(unsigned long, wait_nsec_rem) 88 __field(void *, lockdep_addr)
80 ), 89 ),
90
81 TP_fast_assign( 91 TP_fast_assign(
82 __assign_str(name, lock->name); 92 __assign_str(name, lock->name);
83 __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); 93 __entry->wait_nsec = waittime;
84 __entry->wait_usec = (unsigned long) waittime; 94 __entry->lockdep_addr = lock;
85 ), 95 ),
86 TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec, 96 TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
87 __entry->wait_nsec_rem) 97 __get_str(name),
98 __entry->wait_nsec)
88); 99);
89 100
90#endif 101#endif
91#endif 102#endif
92 103
93#endif /* _TRACE_LOCKDEP_H */ 104#endif /* _TRACE_LOCK_H */
94 105
95/* This part must be outside protection */ 106/* This part must be outside protection */
96#include <trace/define_trace.h> 107#include <trace/define_trace.h>
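lock_acquired previously split the wait time into microseconds plus a nanosecond remainder at trace time (the do_div() in the removed TP_fast_assign) and now records the raw s64 nanosecond count, leaving formatting to TP_printk. A userspace equivalent of what the removed split did, with a made-up wait time:

	#include <stdio.h>
	#define NSEC_PER_USEC 1000ULL

	int main(void)
	{
		unsigned long long waittime = 1234567;	/* ns */
		unsigned long rem  = (unsigned long)(waittime % NSEC_PER_USEC);
		unsigned long usec = (unsigned long)(waittime / NSEC_PER_USEC);

		printf("(%lu.%03lu us)\n", usec, rem);	/* (1234.567 us) */
		return 0;
	}

Recording the raw nanosecond count avoids a division in the tracing fast path and keeps full resolution in the ring buffer, at the cost of a wider field.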
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
new file mode 100644
index 000000000000..7eee77895cb3
--- /dev/null
+++ b/include/trace/events/mce.h
@@ -0,0 +1,69 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM mce
3
4#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_MCE_H
6
7#include <linux/ktime.h>
8#include <linux/tracepoint.h>
9#include <asm/mce.h>
10
11TRACE_EVENT(mce_record,
12
13 TP_PROTO(struct mce *m),
14
15 TP_ARGS(m),
16
17 TP_STRUCT__entry(
18 __field( u64, mcgcap )
19 __field( u64, mcgstatus )
20 __field( u8, bank )
21 __field( u64, status )
22 __field( u64, addr )
23 __field( u64, misc )
24 __field( u64, ip )
25 __field( u8, cs )
26 __field( u64, tsc )
27 __field( u64, walltime )
28 __field( u32, cpu )
29 __field( u32, cpuid )
30 __field( u32, apicid )
31 __field( u32, socketid )
32 __field( u8, cpuvendor )
33 ),
34
35 TP_fast_assign(
36 __entry->mcgcap = m->mcgcap;
37 __entry->mcgstatus = m->mcgstatus;
38 __entry->bank = m->bank;
39 __entry->status = m->status;
40 __entry->addr = m->addr;
41 __entry->misc = m->misc;
42 __entry->ip = m->ip;
43 __entry->cs = m->cs;
44 __entry->tsc = m->tsc;
45 __entry->walltime = m->time;
46 __entry->cpu = m->extcpu;
47 __entry->cpuid = m->cpuid;
48 __entry->apicid = m->apicid;
49 __entry->socketid = m->socketid;
50 __entry->cpuvendor = m->cpuvendor;
51 ),
52
53 TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x",
54 __entry->cpu,
55 __entry->mcgcap, __entry->mcgstatus,
56 __entry->bank, __entry->status,
57 __entry->addr, __entry->misc,
58 __entry->cs, __entry->ip,
59 __entry->tsc,
60 __entry->cpuvendor, __entry->cpuid,
61 __entry->walltime,
62 __entry->socketid,
63 __entry->apicid)
64);
65
66#endif /* _TRACE_MCE_H */
67
68/* This part must be outside protection */
69#include <trace/define_trace.h>
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 84160fb18478..4b0f48ba16a6 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,7 +51,7 @@ TRACE_EVENT(module_free,
51 TP_printk("%s", __get_str(name)) 51 TP_printk("%s", __get_str(name))
52); 52);
53 53
54TRACE_EVENT(module_get, 54DECLARE_EVENT_CLASS(module_refcnt,
55 55
56 TP_PROTO(struct module *mod, unsigned long ip, int refcnt), 56 TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
57 57
@@ -73,26 +73,18 @@ TRACE_EVENT(module_get,
73 __get_str(name), (void *)__entry->ip, __entry->refcnt) 73 __get_str(name), (void *)__entry->ip, __entry->refcnt)
74); 74);
75 75
76TRACE_EVENT(module_put, 76DEFINE_EVENT(module_refcnt, module_get,
77 77
78 TP_PROTO(struct module *mod, unsigned long ip, int refcnt), 78 TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
79 79
80 TP_ARGS(mod, ip, refcnt), 80 TP_ARGS(mod, ip, refcnt)
81);
81 82
82 TP_STRUCT__entry( 83DEFINE_EVENT(module_refcnt, module_put,
83 __field( unsigned long, ip )
84 __field( int, refcnt )
85 __string( name, mod->name )
86 ),
87 84
88 TP_fast_assign( 85 TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
89 __entry->ip = ip;
90 __entry->refcnt = refcnt;
91 __assign_str(name, mod->name);
92 ),
93 86
94 TP_printk("%s call_site=%pf refcnt=%d", 87 TP_ARGS(mod, ip, refcnt)
95 __get_str(name), (void *)__entry->ip, __entry->refcnt)
96); 88);
97 89
98TRACE_EVENT(module_request, 90TRACE_EVENT(module_request,
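
The module_refcnt conversion above is the pattern this series applies throughout: one DECLARE_EVENT_CLASS carries the TP_STRUCT__entry/TP_fast_assign/TP_printk bodies, and each DEFINE_EVENT stamps out a named event that reuses them. A rough userspace analogue of the code-sharing idea (the macros below are hypothetical stand-ins, not the kernel implementation):

	#include <stdio.h>

	/* One shared handler per "class", analogous to DECLARE_EVENT_CLASS. */
	#define DECLARE_CLASS(cls)						\
	static void cls##_handler(const char *event, const char *mod,		\
				  unsigned long ip, int refcnt)			\
	{									\
		printf("%s: %s call_site=%#lx refcnt=%d\n",			\
		       event, mod, ip, refcnt);					\
	}

	/* Each event is only a thin wrapper, analogous to DEFINE_EVENT. */
	#define DEFINE_EVT(cls, name)						\
	static void trace_##name(const char *mod, unsigned long ip, int refcnt)\
	{									\
		cls##_handler(#name, mod, ip, refcnt);				\
	}

	DECLARE_CLASS(module_refcnt)
	DEFINE_EVT(module_refcnt, module_get)
	DEFINE_EVT(module_refcnt, module_put)

	int main(void)
	{
		trace_module_get("ext4", 0xc0ffeeUL, 2);
		trace_module_put("ext4", 0xc0ffeeUL, 1);
		return 0;
	}

module_get and module_put emit through the same handler body, which is how the real macros shrink the generated code: the per-event glue stays, while the heavyweight assign/output functions exist once per class.
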
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index ea6d579261ad..c4efe9b8280d 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -16,9 +16,7 @@ enum {
16}; 16};
17#endif 17#endif
18 18
19 19DECLARE_EVENT_CLASS(power,
20
21TRACE_EVENT(power_start,
22 20
23 TP_PROTO(unsigned int type, unsigned int state), 21 TP_PROTO(unsigned int type, unsigned int state),
24 22
@@ -37,42 +35,36 @@ TRACE_EVENT(power_start,
37 TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) 35 TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state)
38); 36);
39 37
40TRACE_EVENT(power_end, 38DEFINE_EVENT(power, power_start,
41
42 TP_PROTO(int dummy),
43 39
44 TP_ARGS(dummy), 40 TP_PROTO(unsigned int type, unsigned int state),
45 41
46 TP_STRUCT__entry( 42 TP_ARGS(type, state)
47 __field( u64, dummy ) 43);
48 ),
49 44
50 TP_fast_assign( 45DEFINE_EVENT(power, power_frequency,
51 __entry->dummy = 0xffff;
52 ),
53 46
54 TP_printk("dummy=%lu", (unsigned long)__entry->dummy) 47 TP_PROTO(unsigned int type, unsigned int state),
55 48
49 TP_ARGS(type, state)
56); 50);
57 51
52TRACE_EVENT(power_end,
58 53
59TRACE_EVENT(power_frequency, 54 TP_PROTO(int dummy),
60
61 TP_PROTO(unsigned int type, unsigned int state),
62 55
63 TP_ARGS(type, state), 56 TP_ARGS(dummy),
64 57
65 TP_STRUCT__entry( 58 TP_STRUCT__entry(
66 __field( u64, type ) 59 __field( u64, dummy )
67 __field( u64, state )
68 ), 60 ),
69 61
70 TP_fast_assign( 62 TP_fast_assign(
71 __entry->type = type; 63 __entry->dummy = 0xffff;
72 __entry->state = state;
73 ), 64 ),
74 65
75 TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state) 66 TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
67
76); 68);
77 69
78#endif /* _TRACE_POWER_H */ 70#endif /* _TRACE_POWER_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4069c43f4187..cfceb0b73e20 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop,
26 __entry->pid = t->pid; 26 __entry->pid = t->pid;
27 ), 27 ),
28 28
29 TP_printk("task %s:%d", __entry->comm, __entry->pid) 29 TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
30); 30);
31 31
32/* 32/*
@@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret,
46 __entry->ret = ret; 46 __entry->ret = ret;
47 ), 47 ),
48 48
49 TP_printk("ret %d", __entry->ret) 49 TP_printk("ret=%d", __entry->ret)
50); 50);
51 51
52/* 52/*
@@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task,
73 __entry->prio = p->prio; 73 __entry->prio = p->prio;
74 ), 74 ),
75 75
76 TP_printk("task %s:%d [%d]", 76 TP_printk("comm=%s pid=%d prio=%d",
77 __entry->comm, __entry->pid, __entry->prio) 77 __entry->comm, __entry->pid, __entry->prio)
78); 78);
79 79
@@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task,
83 * (NOTE: the 'rq' argument is not used by generic trace events, 83 * (NOTE: the 'rq' argument is not used by generic trace events,
84 * but used by the latency tracer plugin. ) 84 * but used by the latency tracer plugin. )
85 */ 85 */
86TRACE_EVENT(sched_wakeup, 86DECLARE_EVENT_CLASS(sched_wakeup_template,
87 87
88 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 88 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
89 89
@@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup,
94 __field( pid_t, pid ) 94 __field( pid_t, pid )
95 __field( int, prio ) 95 __field( int, prio )
96 __field( int, success ) 96 __field( int, success )
97 __field( int, cpu ) 97 __field( int, target_cpu )
98 ), 98 ),
99 99
100 TP_fast_assign( 100 TP_fast_assign(
@@ -102,46 +102,27 @@ TRACE_EVENT(sched_wakeup,
102 __entry->pid = p->pid; 102 __entry->pid = p->pid;
103 __entry->prio = p->prio; 103 __entry->prio = p->prio;
104 __entry->success = success; 104 __entry->success = success;
105 __entry->cpu = task_cpu(p); 105 __entry->target_cpu = task_cpu(p);
106 ), 106 ),
107 107
108 TP_printk("task %s:%d [%d] success=%d [%03d]", 108 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
109 __entry->comm, __entry->pid, __entry->prio, 109 __entry->comm, __entry->pid, __entry->prio,
110 __entry->success, __entry->cpu) 110 __entry->success, __entry->target_cpu)
111); 111);
112 112
113DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
114 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
115 TP_ARGS(rq, p, success));
116
113/* 117/*
114 * Tracepoint for waking up a new task: 118 * Tracepoint for waking up a new task:
115 * 119 *
116 * (NOTE: the 'rq' argument is not used by generic trace events, 120 * (NOTE: the 'rq' argument is not used by generic trace events,
117 * but used by the latency tracer plugin. ) 121 * but used by the latency tracer plugin. )
118 */ 122 */
119TRACE_EVENT(sched_wakeup_new, 123DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
120 124 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
121 TP_PROTO(struct rq *rq, struct task_struct *p, int success), 125 TP_ARGS(rq, p, success));
122
123 TP_ARGS(rq, p, success),
124
125 TP_STRUCT__entry(
126 __array( char, comm, TASK_COMM_LEN )
127 __field( pid_t, pid )
128 __field( int, prio )
129 __field( int, success )
130 __field( int, cpu )
131 ),
132
133 TP_fast_assign(
134 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
135 __entry->pid = p->pid;
136 __entry->prio = p->prio;
137 __entry->success = success;
138 __entry->cpu = task_cpu(p);
139 ),
140
141 TP_printk("task %s:%d [%d] success=%d [%03d]",
142 __entry->comm, __entry->pid, __entry->prio,
143 __entry->success, __entry->cpu)
144);
145 126
146/* 127/*
147 * Tracepoint for task switches, performed by the scheduler: 128 * Tracepoint for task switches, performed by the scheduler:
@@ -176,7 +157,7 @@ TRACE_EVENT(sched_switch,
176 __entry->next_prio = next->prio; 157 __entry->next_prio = next->prio;
177 ), 158 ),
178 159
179 TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", 160 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
180 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, 161 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
181 __entry->prev_state ? 162 __entry->prev_state ?
182 __print_flags(__entry->prev_state, "|", 163 __print_flags(__entry->prev_state, "|",
@@ -211,15 +192,12 @@ TRACE_EVENT(sched_migrate_task,
211 __entry->dest_cpu = dest_cpu; 192 __entry->dest_cpu = dest_cpu;
212 ), 193 ),
213 194
214 TP_printk("task %s:%d [%d] from: %d to: %d", 195 TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
215 __entry->comm, __entry->pid, __entry->prio, 196 __entry->comm, __entry->pid, __entry->prio,
216 __entry->orig_cpu, __entry->dest_cpu) 197 __entry->orig_cpu, __entry->dest_cpu)
217); 198);
218 199
219/* 200DECLARE_EVENT_CLASS(sched_process_template,
220 * Tracepoint for freeing a task:
221 */
222TRACE_EVENT(sched_process_free,
223 201
224 TP_PROTO(struct task_struct *p), 202 TP_PROTO(struct task_struct *p),
225 203
@@ -237,34 +215,24 @@ TRACE_EVENT(sched_process_free,
237 __entry->prio = p->prio; 215 __entry->prio = p->prio;
238 ), 216 ),
239 217
240 TP_printk("task %s:%d [%d]", 218 TP_printk("comm=%s pid=%d prio=%d",
241 __entry->comm, __entry->pid, __entry->prio) 219 __entry->comm, __entry->pid, __entry->prio)
242); 220);
243 221
244/* 222/*
245 * Tracepoint for a task exiting: 223 * Tracepoint for freeing a task:
246 */ 224 */
247TRACE_EVENT(sched_process_exit, 225DEFINE_EVENT(sched_process_template, sched_process_free,
226 TP_PROTO(struct task_struct *p),
227 TP_ARGS(p));
228
248 229
249 TP_PROTO(struct task_struct *p), 230/*
250 231 * Tracepoint for a task exiting:
251 TP_ARGS(p), 232 */
252 233DEFINE_EVENT(sched_process_template, sched_process_exit,
253 TP_STRUCT__entry( 234 TP_PROTO(struct task_struct *p),
254 __array( char, comm, TASK_COMM_LEN ) 235 TP_ARGS(p));
255 __field( pid_t, pid )
256 __field( int, prio )
257 ),
258
259 TP_fast_assign(
260 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
261 __entry->pid = p->pid;
262 __entry->prio = p->prio;
263 ),
264
265 TP_printk("task %s:%d [%d]",
266 __entry->comm, __entry->pid, __entry->prio)
267);
268 236
269/* 237/*
270 * Tracepoint for a waiting task: 238 * Tracepoint for a waiting task:
@@ -287,7 +255,7 @@ TRACE_EVENT(sched_process_wait,
287 __entry->prio = current->prio; 255 __entry->prio = current->prio;
288 ), 256 ),
289 257
290 TP_printk("task %s:%d [%d]", 258 TP_printk("comm=%s pid=%d prio=%d",
291 __entry->comm, __entry->pid, __entry->prio) 259 __entry->comm, __entry->pid, __entry->prio)
292); 260);
293 261
@@ -314,46 +282,16 @@ TRACE_EVENT(sched_process_fork,
314 __entry->child_pid = child->pid; 282 __entry->child_pid = child->pid;
315 ), 283 ),
316 284
317 TP_printk("parent %s:%d child %s:%d", 285 TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
318 __entry->parent_comm, __entry->parent_pid, 286 __entry->parent_comm, __entry->parent_pid,
319 __entry->child_comm, __entry->child_pid) 287 __entry->child_comm, __entry->child_pid)
320); 288);
321 289
322/* 290/*
323 * Tracepoint for sending a signal:
324 */
325TRACE_EVENT(sched_signal_send,
326
327 TP_PROTO(int sig, struct task_struct *p),
328
329 TP_ARGS(sig, p),
330
331 TP_STRUCT__entry(
332 __field( int, sig )
333 __array( char, comm, TASK_COMM_LEN )
334 __field( pid_t, pid )
335 ),
336
337 TP_fast_assign(
338 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
339 __entry->pid = p->pid;
340 __entry->sig = sig;
341 ),
342
343 TP_printk("sig: %d task %s:%d",
344 __entry->sig, __entry->comm, __entry->pid)
345);
346
347/*
348 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE 291 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
349 * adding sched_stat support to SCHED_FIFO/RR would be welcome. 292 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
350 */ 293 */
351 294DECLARE_EVENT_CLASS(sched_stat_template,
352/*
353 * Tracepoint for accounting wait time (time the task is runnable
354 * but not actually running due to scheduler contention).
355 */
356TRACE_EVENT(sched_stat_wait,
357 295
358 TP_PROTO(struct task_struct *tsk, u64 delay), 296 TP_PROTO(struct task_struct *tsk, u64 delay),
359 297
@@ -374,11 +312,36 @@ TRACE_EVENT(sched_stat_wait,
374 __perf_count(delay); 312 __perf_count(delay);
375 ), 313 ),
376 314
377 TP_printk("task: %s:%d wait: %Lu [ns]", 315 TP_printk("comm=%s pid=%d delay=%Lu [ns]",
378 __entry->comm, __entry->pid, 316 __entry->comm, __entry->pid,
379 (unsigned long long)__entry->delay) 317 (unsigned long long)__entry->delay)
380); 318);
381 319
320
321/*
322 * Tracepoint for accounting wait time (time the task is runnable
323 * but not actually running due to scheduler contention).
324 */
325DEFINE_EVENT(sched_stat_template, sched_stat_wait,
326 TP_PROTO(struct task_struct *tsk, u64 delay),
327 TP_ARGS(tsk, delay));
328
329/*
330 * Tracepoint for accounting sleep time (time the task is not runnable,
331 * including iowait, see below).
332 */
333DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
334 TP_PROTO(struct task_struct *tsk, u64 delay),
335 TP_ARGS(tsk, delay));
336
337/*
338 * Tracepoint for accounting iowait time (time the task is not runnable
339 * due to waiting on IO to complete).
340 */
341DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
342 TP_PROTO(struct task_struct *tsk, u64 delay),
343 TP_ARGS(tsk, delay));
344
382/* 345/*
383 * Tracepoint for accounting runtime (time the task is executing 346 * Tracepoint for accounting runtime (time the task is executing
384 * on a CPU). 347 * on a CPU).
@@ -406,72 +369,12 @@ TRACE_EVENT(sched_stat_runtime,
406 __perf_count(runtime); 369 __perf_count(runtime);
407 ), 370 ),
408 371
409 TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", 372 TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
410 __entry->comm, __entry->pid, 373 __entry->comm, __entry->pid,
411 (unsigned long long)__entry->runtime, 374 (unsigned long long)__entry->runtime,
412 (unsigned long long)__entry->vruntime) 375 (unsigned long long)__entry->vruntime)
413); 376);
414 377
415/*
416 * Tracepoint for accounting sleep time (time the task is not runnable,
417 * including iowait, see below).
418 */
419TRACE_EVENT(sched_stat_sleep,
420
421 TP_PROTO(struct task_struct *tsk, u64 delay),
422
423 TP_ARGS(tsk, delay),
424
425 TP_STRUCT__entry(
426 __array( char, comm, TASK_COMM_LEN )
427 __field( pid_t, pid )
428 __field( u64, delay )
429 ),
430
431 TP_fast_assign(
432 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
433 __entry->pid = tsk->pid;
434 __entry->delay = delay;
435 )
436 TP_perf_assign(
437 __perf_count(delay);
438 ),
439
440 TP_printk("task: %s:%d sleep: %Lu [ns]",
441 __entry->comm, __entry->pid,
442 (unsigned long long)__entry->delay)
443);
444
445/*
446 * Tracepoint for accounting iowait time (time the task is not runnable
447 * due to waiting on IO to complete).
448 */
449TRACE_EVENT(sched_stat_iowait,
450
451 TP_PROTO(struct task_struct *tsk, u64 delay),
452
453 TP_ARGS(tsk, delay),
454
455 TP_STRUCT__entry(
456 __array( char, comm, TASK_COMM_LEN )
457 __field( pid_t, pid )
458 __field( u64, delay )
459 ),
460
461 TP_fast_assign(
462 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
463 __entry->pid = tsk->pid;
464 __entry->delay = delay;
465 )
466 TP_perf_assign(
467 __perf_count(delay);
468 ),
469
470 TP_printk("task: %s:%d iowait: %Lu [ns]",
471 __entry->comm, __entry->pid,
472 (unsigned long long)__entry->delay)
473);
474
475#endif /* _TRACE_SCHED_H */ 378#endif /* _TRACE_SCHED_H */
476 379
477/* This part must be outside protection */ 380/* This part must be outside protection */
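
For the sched_switch hunk above, prev_state is still decoded through __print_flags() into the familiar one-letter state string. A small self-contained model of that decoding (the bit values and letters mirror the table in the event, but treat them as illustrative, not authoritative):

	#include <stdio.h>

	/* Userspace model of __print_flags(): turn a state bitmask into
	 * the "S|T"-style string used by sched_switch's prev_state field. */
	struct flag_name {
		unsigned long	mask;
		const char	*name;
	};

	static void print_state(unsigned long state,
				const struct flag_name *tbl, int n)
	{
		int printed = 0;

		for (int i = 0; i < n; i++) {
			if (state & tbl[i].mask) {
				printf("%s%s", printed ? "|" : "", tbl[i].name);
				printed = 1;
			}
		}
		/* prev_state == 0 means TASK_RUNNING, printed as "R". */
		printf("%s\n", printed ? "" : "R");
	}

	int main(void)
	{
		const struct flag_name tbl[] = {
			{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
			{ 16, "Z" }, { 32, "X" }, { 64, "x" }, { 128, "W" },
		};

		print_state(0, tbl, 8);		/* -> R   */
		print_state(1 | 4, tbl, 8);	/* -> S|T */
		return 0;
	}
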
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
new file mode 100644
index 000000000000..a510b75ac304
--- /dev/null
+++ b/include/trace/events/signal.h
@@ -0,0 +1,173 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM signal
3
4#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SIGNAL_H
6
7#include <linux/signal.h>
8#include <linux/sched.h>
9#include <linux/tracepoint.h>
10
11#define TP_STORE_SIGINFO(__entry, info) \
12 do { \
13 if (info == SEND_SIG_NOINFO) { \
14 __entry->errno = 0; \
15 __entry->code = SI_USER; \
16 } else if (info == SEND_SIG_PRIV) { \
17 __entry->errno = 0; \
18 __entry->code = SI_KERNEL; \
19 } else { \
20 __entry->errno = info->si_errno; \
21 __entry->code = info->si_code; \
22 } \
23 } while (0)
24
25/**
26 * signal_generate - called when a signal is generated
27 * @sig: signal number
28 * @info: pointer to struct siginfo
29 * @task: pointer to struct task_struct
30 *
31 * The current process sends a 'sig' signal to the 'task' process with
32 * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
33 * 'info' is not a real pointer and its fields must not be accessed. Instead,
34 * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
35 * means that si_code is SI_KERNEL.
36 */
37TRACE_EVENT(signal_generate,
38
39 TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
40
41 TP_ARGS(sig, info, task),
42
43 TP_STRUCT__entry(
44 __field( int, sig )
45 __field( int, errno )
46 __field( int, code )
47 __array( char, comm, TASK_COMM_LEN )
48 __field( pid_t, pid )
49 ),
50
51 TP_fast_assign(
52 __entry->sig = sig;
53 TP_STORE_SIGINFO(__entry, info);
54 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
55 __entry->pid = task->pid;
56 ),
57
58 TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
59 __entry->sig, __entry->errno, __entry->code,
60 __entry->comm, __entry->pid)
61);
62
63/**
64 * signal_deliver - called when a signal is delivered
65 * @sig: signal number
66 * @info: pointer to struct siginfo
67 * @ka: pointer to struct k_sigaction
68 *
69 * A 'sig' signal is delivered to the current process with 'info' siginfo,
70 * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
71 * SIG_DFL.
72 * Note that some signals reported by the signal_generate tracepoint can be
73 * lost, ignored or modified (by a debugger) before hitting this tracepoint.
74 * This means it can show which signals are actually delivered, but
75 * matching generated signals to delivered signals may not be exact.
76 */
77TRACE_EVENT(signal_deliver,
78
79 TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
80
81 TP_ARGS(sig, info, ka),
82
83 TP_STRUCT__entry(
84 __field( int, sig )
85 __field( int, errno )
86 __field( int, code )
87 __field( unsigned long, sa_handler )
88 __field( unsigned long, sa_flags )
89 ),
90
91 TP_fast_assign(
92 __entry->sig = sig;
93 TP_STORE_SIGINFO(__entry, info);
94 __entry->sa_handler = (unsigned long)ka->sa.sa_handler;
95 __entry->sa_flags = ka->sa.sa_flags;
96 ),
97
98 TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
99 __entry->sig, __entry->errno, __entry->code,
100 __entry->sa_handler, __entry->sa_flags)
101);
102
103/**
104 * signal_overflow_fail - called when the signal queue overflows
105 * @sig: signal number
106 * @group: signal to process group or not (bool)
107 * @info: pointer to struct siginfo
108 *
109 * The kernel fails to generate the 'sig' signal with 'info' siginfo
110 * because the siginfo queue overflows, so the signal is dropped.
111 * 'group' is not 0 if the signal will be sent to a process group.
112 * 'sig' is always one of the RT signals.
113 */
114TRACE_EVENT(signal_overflow_fail,
115
116 TP_PROTO(int sig, int group, struct siginfo *info),
117
118 TP_ARGS(sig, group, info),
119
120 TP_STRUCT__entry(
121 __field( int, sig )
122 __field( int, group )
123 __field( int, errno )
124 __field( int, code )
125 ),
126
127 TP_fast_assign(
128 __entry->sig = sig;
129 __entry->group = group;
130 TP_STORE_SIGINFO(__entry, info);
131 ),
132
133 TP_printk("sig=%d group=%d errno=%d code=%d",
134 __entry->sig, __entry->group, __entry->errno, __entry->code)
135);
136
137/**
138 * signal_lose_info - called when siginfo is lost
139 * @sig: signal number
140 * @group: signal to process group or not (bool)
141 * @info: pointer to struct siginfo
142 *
143 * The kernel generates the 'sig' signal but loses its 'info' siginfo,
144 * because the siginfo queue overflows.
145 * 'group' is not 0 if the signal will be sent to a process group.
146 * 'sig' is always one of the non-RT signals.
147 */
148TRACE_EVENT(signal_lose_info,
149
150 TP_PROTO(int sig, int group, struct siginfo *info),
151
152 TP_ARGS(sig, group, info),
153
154 TP_STRUCT__entry(
155 __field( int, sig )
156 __field( int, group )
157 __field( int, errno )
158 __field( int, code )
159 ),
160
161 TP_fast_assign(
162 __entry->sig = sig;
163 __entry->group = group;
164 TP_STORE_SIGINFO(__entry, info);
165 ),
166
167 TP_printk("sig=%d group=%d errno=%d code=%d",
168 __entry->sig, __entry->group, __entry->errno, __entry->code)
169);
170#endif /* _TRACE_SIGNAL_H */
171
172/* This part must be outside protection */
173#include <trace/define_trace.h>
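
TP_STORE_SIGINFO() above special-cases the two sentinel 'info' values: in the kernel, SEND_SIG_NOINFO and SEND_SIG_PRIV are small-integer constants cast to a pointer type, so they may only be compared, never dereferenced. A self-contained userspace model of the same decision (struct and constants reduced to just what the macro touches):

	#include <stdio.h>

	#define SEND_SIG_NOINFO	((struct siginfo_m *) 0)
	#define SEND_SIG_PRIV	((struct siginfo_m *) 1)
	#define SI_USER		0
	#define SI_KERNEL	0x80

	struct siginfo_m {
		int	si_errno;
		int	si_code;
	};

	/* Model of TP_STORE_SIGINFO(): the real macro writes into
	 * __entry->errno and __entry->code. */
	static void store_siginfo(const struct siginfo_m *info,
				  int *err, int *code)
	{
		if (info == SEND_SIG_NOINFO) {
			*err = 0;
			*code = SI_USER;
		} else if (info == SEND_SIG_PRIV) {
			*err = 0;
			*code = SI_KERNEL;
		} else {
			*err = info->si_errno;
			*code = info->si_code;
		}
	}

	int main(void)
	{
		int e, c;
		struct siginfo_m real = { .si_errno = 0, .si_code = -6 };

		store_siginfo(SEND_SIG_NOINFO, &e, &c);
		printf("noinfo: errno=%d code=%d\n", e, c);
		store_siginfo(&real, &e, &c);
		printf("real:   errno=%d code=%d\n", e, c);
		return 0;
	}
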
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 397dff2dbd5a..fb726ac7caee 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
1#undef TRACE_SYSTEM 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM syscalls 2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
3 4
4#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EVENTS_SYSCALLS_H 6#define _TRACE_EVENTS_SYSCALLS_H
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 1844c48d640e..9496b965d62a 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -26,7 +26,7 @@ TRACE_EVENT(timer_init,
26 __entry->timer = timer; 26 __entry->timer = timer;
27 ), 27 ),
28 28
29 TP_printk("timer %p", __entry->timer) 29 TP_printk("timer=%p", __entry->timer)
30); 30);
31 31
32/** 32/**
@@ -54,7 +54,7 @@ TRACE_EVENT(timer_start,
54 __entry->now = jiffies; 54 __entry->now = jiffies;
55 ), 55 ),
56 56
57 TP_printk("timer %p: func %pf, expires %lu, timeout %ld", 57 TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
58 __entry->timer, __entry->function, __entry->expires, 58 __entry->timer, __entry->function, __entry->expires,
59 (long)__entry->expires - __entry->now) 59 (long)__entry->expires - __entry->now)
60); 60);
@@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry,
81 __entry->now = jiffies; 81 __entry->now = jiffies;
82 ), 82 ),
83 83
84 TP_printk("timer %p: now %lu", __entry->timer, __entry->now) 84 TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
85); 85);
86 86
87/** 87/**
@@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit,
108 __entry->timer = timer; 108 __entry->timer = timer;
109 ), 109 ),
110 110
111 TP_printk("timer %p", __entry->timer) 111 TP_printk("timer=%p", __entry->timer)
112); 112);
113 113
114/** 114/**
@@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel,
129 __entry->timer = timer; 129 __entry->timer = timer;
130 ), 130 ),
131 131
132 TP_printk("timer %p", __entry->timer) 132 TP_printk("timer=%p", __entry->timer)
133); 133);
134 134
135/** 135/**
@@ -140,24 +140,24 @@ TRACE_EVENT(timer_cancel,
140 */ 140 */
141TRACE_EVENT(hrtimer_init, 141TRACE_EVENT(hrtimer_init,
142 142
143 TP_PROTO(struct hrtimer *timer, clockid_t clockid, 143 TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
144 enum hrtimer_mode mode), 144 enum hrtimer_mode mode),
145 145
146 TP_ARGS(timer, clockid, mode), 146 TP_ARGS(hrtimer, clockid, mode),
147 147
148 TP_STRUCT__entry( 148 TP_STRUCT__entry(
149 __field( void *, timer ) 149 __field( void *, hrtimer )
150 __field( clockid_t, clockid ) 150 __field( clockid_t, clockid )
151 __field( enum hrtimer_mode, mode ) 151 __field( enum hrtimer_mode, mode )
152 ), 152 ),
153 153
154 TP_fast_assign( 154 TP_fast_assign(
155 __entry->timer = timer; 155 __entry->hrtimer = hrtimer;
156 __entry->clockid = clockid; 156 __entry->clockid = clockid;
157 __entry->mode = mode; 157 __entry->mode = mode;
158 ), 158 ),
159 159
160 TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, 160 TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
161 __entry->clockid == CLOCK_REALTIME ? 161 __entry->clockid == CLOCK_REALTIME ?
162 "CLOCK_REALTIME" : "CLOCK_MONOTONIC", 162 "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
163 __entry->mode == HRTIMER_MODE_ABS ? 163 __entry->mode == HRTIMER_MODE_ABS ?
@@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init,
170 */ 170 */
171TRACE_EVENT(hrtimer_start, 171TRACE_EVENT(hrtimer_start,
172 172
173 TP_PROTO(struct hrtimer *timer), 173 TP_PROTO(struct hrtimer *hrtimer),
174 174
175 TP_ARGS(timer), 175 TP_ARGS(hrtimer),
176 176
177 TP_STRUCT__entry( 177 TP_STRUCT__entry(
178 __field( void *, timer ) 178 __field( void *, hrtimer )
179 __field( void *, function ) 179 __field( void *, function )
180 __field( s64, expires ) 180 __field( s64, expires )
181 __field( s64, softexpires ) 181 __field( s64, softexpires )
182 ), 182 ),
183 183
184 TP_fast_assign( 184 TP_fast_assign(
185 __entry->timer = timer; 185 __entry->hrtimer = hrtimer;
186 __entry->function = timer->function; 186 __entry->function = hrtimer->function;
187 __entry->expires = hrtimer_get_expires(timer).tv64; 187 __entry->expires = hrtimer_get_expires(hrtimer).tv64;
188 __entry->softexpires = hrtimer_get_softexpires(timer).tv64; 188 __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
189 ), 189 ),
190 190
191 TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", 191 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
192 __entry->timer, __entry->function, 192 __entry->hrtimer, __entry->function,
193 (unsigned long long)ktime_to_ns((ktime_t) { 193 (unsigned long long)ktime_to_ns((ktime_t) {
194 .tv64 = __entry->expires }), 194 .tv64 = __entry->expires }),
195 (unsigned long long)ktime_to_ns((ktime_t) { 195 (unsigned long long)ktime_to_ns((ktime_t) {
@@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start,
206 */ 206 */
207TRACE_EVENT(hrtimer_expire_entry, 207TRACE_EVENT(hrtimer_expire_entry,
208 208
209 TP_PROTO(struct hrtimer *timer, ktime_t *now), 209 TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
210 210
211 TP_ARGS(timer, now), 211 TP_ARGS(hrtimer, now),
212 212
213 TP_STRUCT__entry( 213 TP_STRUCT__entry(
214 __field( void *, timer ) 214 __field( void *, hrtimer )
215 __field( s64, now ) 215 __field( s64, now )
216 ), 216 ),
217 217
218 TP_fast_assign( 218 TP_fast_assign(
219 __entry->timer = timer; 219 __entry->hrtimer = hrtimer;
220 __entry->now = now->tv64; 220 __entry->now = now->tv64;
221 ), 221 ),
222 222
223 TP_printk("hrtimer %p, now %llu", __entry->timer, 223 TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
224 (unsigned long long)ktime_to_ns((ktime_t) { 224 (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
225 .tv64 = __entry->now }))
226 ); 225 );
227 226
228/** 227/**
@@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry,
234 */ 233 */
235TRACE_EVENT(hrtimer_expire_exit, 234TRACE_EVENT(hrtimer_expire_exit,
236 235
237 TP_PROTO(struct hrtimer *timer), 236 TP_PROTO(struct hrtimer *hrtimer),
238 237
239 TP_ARGS(timer), 238 TP_ARGS(hrtimer),
240 239
241 TP_STRUCT__entry( 240 TP_STRUCT__entry(
242 __field( void *, timer ) 241 __field( void *, hrtimer )
243 ), 242 ),
244 243
245 TP_fast_assign( 244 TP_fast_assign(
246 __entry->timer = timer; 245 __entry->hrtimer = hrtimer;
247 ), 246 ),
248 247
249 TP_printk("hrtimer %p", __entry->timer) 248 TP_printk("hrtimer=%p", __entry->hrtimer)
250); 249);
251 250
252/** 251/**
253 * hrtimer_cancel - called when the hrtimer is canceled 252 * hrtimer_cancel - called when the hrtimer is canceled
254 * @timer: pointer to struct hrtimer 253 * @hrtimer: pointer to struct hrtimer
255 */ 254 */
256TRACE_EVENT(hrtimer_cancel, 255TRACE_EVENT(hrtimer_cancel,
257 256
258 TP_PROTO(struct hrtimer *timer), 257 TP_PROTO(struct hrtimer *hrtimer),
259 258
260 TP_ARGS(timer), 259 TP_ARGS(hrtimer),
261 260
262 TP_STRUCT__entry( 261 TP_STRUCT__entry(
263 __field( void *, timer ) 262 __field( void *, hrtimer )
264 ), 263 ),
265 264
266 TP_fast_assign( 265 TP_fast_assign(
267 __entry->timer = timer; 266 __entry->hrtimer = hrtimer;
268 ), 267 ),
269 268
270 TP_printk("hrtimer %p", __entry->timer) 269 TP_printk("hrtimer=%p", __entry->hrtimer)
271); 270);
272 271
273/** 272/**
@@ -302,8 +301,8 @@ TRACE_EVENT(itimer_state,
302 __entry->interval_usec = value->it_interval.tv_usec; 301 __entry->interval_usec = value->it_interval.tv_usec;
303 ), 302 ),
304 303
305 TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", 304 TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
306 __entry->which, __entry->expires, 305 __entry->which, (unsigned long long)__entry->expires,
307 __entry->value_sec, __entry->value_usec, 306 __entry->value_sec, __entry->value_usec,
308 __entry->interval_sec, __entry->interval_usec) 307 __entry->interval_sec, __entry->interval_usec)
309); 308);
@@ -332,8 +331,8 @@ TRACE_EVENT(itimer_expire,
332 __entry->pid = pid_nr(pid); 331 __entry->pid = pid_nr(pid);
333 ), 332 ),
334 333
335 TP_printk("which %d, pid %d, now %lu", __entry->which, 334 TP_printk("which=%d pid=%d now=%llu", __entry->which,
336 (int) __entry->pid, __entry->now) 335 (int) __entry->pid, (unsigned long long)__entry->now)
337); 336);
338 337
339#endif /* _TRACE_TIMER_H */ 338#endif /* _TRACE_TIMER_H */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index e4612dbd7ba6..d6c974474e70 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -8,7 +8,7 @@
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/tracepoint.h> 9#include <linux/tracepoint.h>
10 10
11TRACE_EVENT(workqueue_insertion, 11DECLARE_EVENT_CLASS(workqueue,
12 12
13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), 13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14 14
@@ -30,26 +30,18 @@ TRACE_EVENT(workqueue_insertion,
30 __entry->thread_pid, __entry->func) 30 __entry->thread_pid, __entry->func)
31); 31);
32 32
33TRACE_EVENT(workqueue_execution, 33DEFINE_EVENT(workqueue, workqueue_insertion,
34 34
35 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), 35 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
36 36
37 TP_ARGS(wq_thread, work), 37 TP_ARGS(wq_thread, work)
38);
38 39
39 TP_STRUCT__entry( 40DEFINE_EVENT(workqueue, workqueue_execution,
40 __array(char, thread_comm, TASK_COMM_LEN)
41 __field(pid_t, thread_pid)
42 __field(work_func_t, func)
43 ),
44 41
45 TP_fast_assign( 42 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
46 memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
47 __entry->thread_pid = wq_thread->pid;
48 __entry->func = work->func;
49 ),
50 43
51 TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, 44 TP_ARGS(wq_thread, work)
52 __entry->thread_pid, __entry->func)
53); 45);
54 46
55/* Trace the creation of one workqueue thread on a cpu */ 47/* Trace the creation of one workqueue thread on a cpu */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index cc0d9667e182..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -18,6 +18,26 @@
18 18
19#include <linux/ftrace_event.h> 19#include <linux/ftrace_event.h>
20 20
21/*
22 * DECLARE_EVENT_CLASS can be used to add a generic handler
23 * for events. That is, when all events have the same
24 * parameters and differ only in their tracepoints.
25 * Each tracepoint can then be defined with DEFINE_EVENT, which
26 * maps the DECLARE_EVENT_CLASS onto that tracepoint.
27 *
28 * TRACE_EVENT is a one-to-one mapping between a tracepoint and a template.
29 */
30#undef TRACE_EVENT
31#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
32 DECLARE_EVENT_CLASS(name, \
33 PARAMS(proto), \
34 PARAMS(args), \
35 PARAMS(tstruct), \
36 PARAMS(assign), \
37 PARAMS(print)); \
38 DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
39
40
21#undef __field 41#undef __field
22#define __field(type, item) type item; 42#define __field(type, item) type item;
23 43
@@ -36,14 +56,21 @@
36#undef TP_STRUCT__entry 56#undef TP_STRUCT__entry
37#define TP_STRUCT__entry(args...) args 57#define TP_STRUCT__entry(args...) args
38 58
39#undef TRACE_EVENT 59#undef DECLARE_EVENT_CLASS
40#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ 60#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
41 struct ftrace_raw_##name { \ 61 struct ftrace_raw_##name { \
42 struct trace_entry ent; \ 62 struct trace_entry ent; \
43 tstruct \ 63 tstruct \
44 char __data[0]; \ 64 char __data[0]; \
45 }; \ 65 };
46 static struct ftrace_event_call event_##name 66#undef DEFINE_EVENT
67#define DEFINE_EVENT(template, name, proto, args) \
68 static struct ftrace_event_call \
69 __attribute__((__aligned__(4))) event_##name
70
71#undef DEFINE_EVENT_PRINT
72#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
73 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
47 74
48#undef __cpparg 75#undef __cpparg
49#define __cpparg(arg...) arg 76#define __cpparg(arg...) arg
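
To make the new first-stage macros concrete: an events header can now declare one class and hang several events off it, with DEFINE_EVENT_PRINT overriding only the output format. A sketch of what that looks like at the usage site (xfer, xfer_start and xfer_done are hypothetical events, not from this patch):

	DECLARE_EVENT_CLASS(xfer,

		TP_PROTO(int id, size_t len),

		TP_ARGS(id, len),

		TP_STRUCT__entry(
			__field(	int,	id	)
			__field(	size_t,	len	)
		),

		TP_fast_assign(
			__entry->id	= id;
			__entry->len	= len;
		),

		TP_printk("id=%d len=%zu", __entry->id, __entry->len)
	);

	/* Same class, two events; only thin glue is generated per event. */
	DEFINE_EVENT(xfer, xfer_start,
		TP_PROTO(int id, size_t len),
		TP_ARGS(id, len));

	/* Same class again, but with its own print format. */
	DEFINE_EVENT_PRINT(xfer, xfer_done,
		TP_PROTO(int id, size_t len),
		TP_ARGS(id, len),
		TP_printk("done: id=%d", __entry->id));

TRACE_EVENT itself is now just the degenerate case: a class plus a single DEFINE_EVENT of the same name, as the redefinition above shows.
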
@@ -89,99 +116,18 @@
89#undef __string 116#undef __string
90#define __string(item, src) __dynamic_array(char, item, -1) 117#define __string(item, src) __dynamic_array(char, item, -1)
91 118
92#undef TRACE_EVENT 119#undef DECLARE_EVENT_CLASS
93#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 120#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
94 struct ftrace_data_offsets_##call { \ 121 struct ftrace_data_offsets_##call { \
95 tstruct; \ 122 tstruct; \
96 }; 123 };
97 124
98#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 125#undef DEFINE_EVENT
99 126#define DEFINE_EVENT(template, name, proto, args)
100/*
101 * Setup the showing format of trace point.
102 *
103 * int
104 * ftrace_format_##call(struct trace_seq *s)
105 * {
106 * struct ftrace_raw_##call field;
107 * int ret;
108 *
109 * ret = trace_seq_printf(s, #type " " #item ";"
110 * " offset:%u; size:%u;\n",
111 * offsetof(struct ftrace_raw_##call, item),
112 * sizeof(field.type));
113 *
114 * }
115 */
116
117#undef TP_STRUCT__entry
118#define TP_STRUCT__entry(args...) args
119
120#undef __field
121#define __field(type, item) \
122 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
123 "offset:%u;\tsize:%u;\n", \
124 (unsigned int)offsetof(typeof(field), item), \
125 (unsigned int)sizeof(field.item)); \
126 if (!ret) \
127 return 0;
128
129#undef __field_ext
130#define __field_ext(type, item, filter_type) __field(type, item)
131
132#undef __array
133#define __array(type, item, len) \
134 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
135 "offset:%u;\tsize:%u;\n", \
136 (unsigned int)offsetof(typeof(field), item), \
137 (unsigned int)sizeof(field.item)); \
138 if (!ret) \
139 return 0;
140
141#undef __dynamic_array
142#define __dynamic_array(type, item, len) \
143 ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
144 "offset:%u;\tsize:%u;\n", \
145 (unsigned int)offsetof(typeof(field), \
146 __data_loc_##item), \
147 (unsigned int)sizeof(field.__data_loc_##item)); \
148 if (!ret) \
149 return 0;
150
151#undef __string
152#define __string(item, src) __dynamic_array(char, item, -1)
153
154#undef __entry
155#define __entry REC
156
157#undef __print_symbolic
158#undef __get_dynamic_array
159#undef __get_str
160
161#undef TP_printk
162#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
163
164#undef TP_fast_assign
165#define TP_fast_assign(args...) args
166
167#undef TP_perf_assign
168#define TP_perf_assign(args...)
169 127
170#undef TRACE_EVENT 128#undef DEFINE_EVENT_PRINT
171#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ 129#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
172static int \ 130 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
173ftrace_format_##call(struct ftrace_event_call *unused, \
174 struct trace_seq *s) \
175{ \
176 struct ftrace_raw_##call field __attribute__((unused)); \
177 int ret = 0; \
178 \
179 tstruct; \
180 \
181 trace_seq_printf(s, "\nprint fmt: " print); \
182 \
183 return ret; \
184}
185 131
186#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 132#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
187 133
@@ -252,10 +198,11 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
252 ftrace_print_symbols_seq(p, value, symbols); \ 198 ftrace_print_symbols_seq(p, value, symbols); \
253 }) 199 })
254 200
255#undef TRACE_EVENT 201#undef DECLARE_EVENT_CLASS
256#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 202#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
257static enum print_line_t \ 203static notrace enum print_line_t \
258ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ 204ftrace_raw_output_id_##call(int event_id, const char *name, \
205 struct trace_iterator *iter, int flags) \
259{ \ 206{ \
260 struct trace_seq *s = &iter->seq; \ 207 struct trace_seq *s = &iter->seq; \
261 struct ftrace_raw_##call *field; \ 208 struct ftrace_raw_##call *field; \
@@ -265,6 +212,47 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
265 \ 212 \
266 entry = iter->ent; \ 213 entry = iter->ent; \
267 \ 214 \
215 if (entry->type != event_id) { \
216 WARN_ON_ONCE(1); \
217 return TRACE_TYPE_UNHANDLED; \
218 } \
219 \
220 field = (typeof(field))entry; \
221 \
222 p = &get_cpu_var(ftrace_event_seq); \
223 trace_seq_init(p); \
224 ret = trace_seq_printf(s, "%s: ", name); \
225 if (ret) \
226 ret = trace_seq_printf(s, print); \
227 put_cpu(); \
228 if (!ret) \
229 return TRACE_TYPE_PARTIAL_LINE; \
230 \
231 return TRACE_TYPE_HANDLED; \
232}
233
234#undef DEFINE_EVENT
235#define DEFINE_EVENT(template, name, proto, args) \
236static notrace enum print_line_t \
237ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
238{ \
239 return ftrace_raw_output_id_##template(event_##name.id, \
240 #name, iter, flags); \
241}
242
243#undef DEFINE_EVENT_PRINT
244#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
245static notrace enum print_line_t \
246ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
247{ \
248 struct trace_seq *s = &iter->seq; \
249 struct ftrace_raw_##template *field; \
250 struct trace_entry *entry; \
251 struct trace_seq *p; \
252 int ret; \
253 \
254 entry = iter->ent; \
255 \
268 if (entry->type != event_##call.id) { \ 256 if (entry->type != event_##call.id) { \
269 WARN_ON_ONCE(1); \ 257 WARN_ON_ONCE(1); \
270 return TRACE_TYPE_UNHANDLED; \ 258 return TRACE_TYPE_UNHANDLED; \
@@ -274,14 +262,16 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
274 \ 262 \
275 p = &get_cpu_var(ftrace_event_seq); \ 263 p = &get_cpu_var(ftrace_event_seq); \
276 trace_seq_init(p); \ 264 trace_seq_init(p); \
277 ret = trace_seq_printf(s, #call ": " print); \ 265 ret = trace_seq_printf(s, "%s: ", #call); \
266 if (ret) \
267 ret = trace_seq_printf(s, print); \
278 put_cpu(); \ 268 put_cpu(); \
279 if (!ret) \ 269 if (!ret) \
280 return TRACE_TYPE_PARTIAL_LINE; \ 270 return TRACE_TYPE_PARTIAL_LINE; \
281 \ 271 \
282 return TRACE_TYPE_HANDLED; \ 272 return TRACE_TYPE_HANDLED; \
283} 273}
284 274
285#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 275#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
286 276
287#undef __field_ext 277#undef __field_ext
@@ -301,7 +291,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
301 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 291 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
302 ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 292 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
303 offsetof(typeof(field), item), \ 293 offsetof(typeof(field), item), \
304 sizeof(field.item), 0, FILTER_OTHER); \ 294 sizeof(field.item), \
295 is_signed_type(type), FILTER_OTHER); \
305 if (ret) \ 296 if (ret) \
306 return ret; 297 return ret;
307 298
@@ -309,29 +300,32 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
309#define __dynamic_array(type, item, len) \ 300#define __dynamic_array(type, item, len) \
310 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ 301 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
311 offsetof(typeof(field), __data_loc_##item), \ 302 offsetof(typeof(field), __data_loc_##item), \
312 sizeof(field.__data_loc_##item), 0, \ 303 sizeof(field.__data_loc_##item), \
313 FILTER_OTHER); 304 is_signed_type(type), FILTER_OTHER);
314 305
315#undef __string 306#undef __string
316#define __string(item, src) __dynamic_array(char, item, -1) 307#define __string(item, src) __dynamic_array(char, item, -1)
317 308
318#undef TRACE_EVENT 309#undef DECLARE_EVENT_CLASS
319#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ 310#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
320static int \ 311static int notrace \
321ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ 312ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
322{ \ 313{ \
323 struct ftrace_raw_##call field; \ 314 struct ftrace_raw_##call field; \
324 int ret; \ 315 int ret; \
325 \ 316 \
326 ret = trace_define_common_fields(event_call); \
327 if (ret) \
328 return ret; \
329 \
330 tstruct; \ 317 tstruct; \
331 \ 318 \
332 return ret; \ 319 return ret; \
333} 320}
334 321
322#undef DEFINE_EVENT
323#define DEFINE_EVENT(template, name, proto, args)
324
325#undef DEFINE_EVENT_PRINT
326#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
327 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
328
335#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 329#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
336 330
337/* 331/*
@@ -358,11 +352,11 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
358 __data_size += (len) * sizeof(type); 352 __data_size += (len) * sizeof(type);
359 353
360#undef __string 354#undef __string
361#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ 355#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
362 356
363#undef TRACE_EVENT 357#undef DECLARE_EVENT_CLASS
364#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 358#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
365static inline int ftrace_get_offsets_##call( \ 359static inline notrace int ftrace_get_offsets_##call( \
366 struct ftrace_data_offsets_##call *__data_offsets, proto) \ 360 struct ftrace_data_offsets_##call *__data_offsets, proto) \
367{ \ 361{ \
368 int __data_size = 0; \ 362 int __data_size = 0; \
@@ -373,9 +367,16 @@ static inline int ftrace_get_offsets_##call( \
373 return __data_size; \ 367 return __data_size; \
374} 368}
375 369
370#undef DEFINE_EVENT
371#define DEFINE_EVENT(template, name, proto, args)
372
373#undef DEFINE_EVENT_PRINT
374#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
375 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
376
376#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 377#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
377 378
378#ifdef CONFIG_EVENT_PROFILE 379#ifdef CONFIG_PERF_EVENTS
379 380
380/* 381/*
381 * Generate the functions needed for tracepoint perf_event support. 382 * Generate the functions needed for tracepoint perf_event support.
@@ -394,24 +395,33 @@ static inline int ftrace_get_offsets_##call( \
394 * 395 *
395 */ 396 */
396 397
397#undef TRACE_EVENT 398#undef DECLARE_EVENT_CLASS
398#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 399#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
400
401#undef DEFINE_EVENT
402#define DEFINE_EVENT(template, name, proto, args) \
399 \ 403 \
400static void ftrace_profile_##call(proto); \ 404static void perf_trace_##name(proto); \
401 \ 405 \
402static int ftrace_profile_enable_##call(void) \ 406static notrace int \
407perf_trace_enable_##name(struct ftrace_event_call *unused) \
403{ \ 408{ \
404 return register_trace_##call(ftrace_profile_##call); \ 409 return register_trace_##name(perf_trace_##name); \
405} \ 410} \
406 \ 411 \
407static void ftrace_profile_disable_##call(void) \ 412static notrace void \
413perf_trace_disable_##name(struct ftrace_event_call *unused) \
408{ \ 414{ \
409 unregister_trace_##call(ftrace_profile_##call); \ 415 unregister_trace_##name(perf_trace_##name); \
410} 416}
411 417
418#undef DEFINE_EVENT_PRINT
419#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
420 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
421
412#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 422#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
413 423
414#endif 424#endif /* CONFIG_PERF_EVENTS */
415 425
416/* 426/*
417 * Stage 4 of the trace events. 427 * Stage 4 of the trace events.
@@ -423,18 +433,12 @@ static void ftrace_profile_disable_##call(void) \
423 * event_trace_printk(_RET_IP_, "<call>: " <fmt>); 433 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
424 * } 434 * }
425 * 435 *
426 * static int ftrace_reg_event_<call>(void) 436 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
427 * { 437 * {
428 * int ret; 438 * return register_trace_<call>(ftrace_event_<call>);
429 *
430 * ret = register_trace_<call>(ftrace_event_<call>);
431 * if (!ret)
432 * pr_info("event trace: Could not activate trace point "
433 * "probe to <call>");
434 * return ret;
435 * } 439 * }
436 * 440 *
437 * static void ftrace_unreg_event_<call>(void) 441 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
438 * { 442 * {
439 * unregister_trace_<call>(ftrace_event_<call>); 443 * unregister_trace_<call>(ftrace_event_<call>);
440 * } 444 * }
@@ -469,7 +473,7 @@ static void ftrace_profile_disable_##call(void) \
469 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc); 473 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
470 * } 474 * }
471 * 475 *
472 * static int ftrace_raw_reg_event_<call>(void) 476 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
473 * { 477 * {
474 * int ret; 478 * int ret;
475 * 479 *
@@ -480,7 +484,7 @@ static void ftrace_profile_disable_##call(void) \
480 * return ret; 484 * return ret;
481 * } 485 * }
482 * 486 *
483 * static void ftrace_unreg_event_<call>(void) 487 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
484 * { 488 * {
485 * unregister_trace_<call>(ftrace_raw_event_<call>); 489 * unregister_trace_<call>(ftrace_raw_event_<call>);
486 * } 490 * }
@@ -489,43 +493,27 @@ static void ftrace_profile_disable_##call(void) \
489 * .trace = ftrace_raw_output_<call>, <-- stage 2 493 * .trace = ftrace_raw_output_<call>, <-- stage 2
490 * }; 494 * };
491 * 495 *
492 * static int ftrace_raw_init_event_<call>(void)
493 * {
494 * int id;
495 *
496 * id = register_ftrace_event(&ftrace_event_type_<call>);
497 * if (!id)
498 * return -ENODEV;
499 * event_<call>.id = id;
500 * return 0;
501 * }
502 *
503 * static struct ftrace_event_call __used 496 * static struct ftrace_event_call __used
504 * __attribute__((__aligned__(4))) 497 * __attribute__((__aligned__(4)))
505 * __attribute__((section("_ftrace_events"))) event_<call> = { 498 * __attribute__((section("_ftrace_events"))) event_<call> = {
506 * .name = "<call>", 499 * .name = "<call>",
507 * .system = "<system>", 500 * .system = "<system>",
508 * .raw_init = ftrace_raw_init_event_<call>, 501 * .raw_init = trace_event_raw_init,
509 * .regfunc = ftrace_reg_event_<call>, 502 * .regfunc = ftrace_reg_event_<call>,
510 * .unregfunc = ftrace_unreg_event_<call>, 503 * .unregfunc = ftrace_unreg_event_<call>,
511 * .show_format = ftrace_format_<call>,
512 * } 504 * }
513 * 505 *
514 */ 506 */
515 507
516#undef TP_FMT 508#ifdef CONFIG_PERF_EVENTS
517#define TP_FMT(fmt, args...) fmt "\n", ##args
518
519#ifdef CONFIG_EVENT_PROFILE
520 509
521#define _TRACE_PROFILE_INIT(call) \ 510#define _TRACE_PERF_INIT(call) \
522 .profile_count = ATOMIC_INIT(-1), \ 511 .perf_event_enable = perf_trace_enable_##call, \
523 .profile_enable = ftrace_profile_enable_##call, \ 512 .perf_event_disable = perf_trace_disable_##call,
524 .profile_disable = ftrace_profile_disable_##call,
525 513
526#else 514#else
527#define _TRACE_PROFILE_INIT(call) 515#define _TRACE_PERF_INIT(call)
528#endif 516#endif /* CONFIG_PERF_EVENTS */
529 517
530#undef __entry 518#undef __entry
531#define __entry entry 519#define __entry entry
@@ -547,15 +535,20 @@ static void ftrace_profile_disable_##call(void) \
547#define __assign_str(dst, src) \ 535#define __assign_str(dst, src) \
548 strcpy(__get_str(dst), src); 536 strcpy(__get_str(dst), src);
549 537
550#undef TRACE_EVENT 538#undef TP_fast_assign
551#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 539#define TP_fast_assign(args...) args
552 \ 540
553static struct ftrace_event_call event_##call; \ 541#undef TP_perf_assign
542#define TP_perf_assign(args...)
543
544#undef DECLARE_EVENT_CLASS
545#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
554 \ 546 \
555static void ftrace_raw_event_##call(proto) \ 547static notrace void \
548ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
549 proto) \
556{ \ 550{ \
557 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 551 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
558 struct ftrace_event_call *event_call = &event_##call; \
559 struct ring_buffer_event *event; \ 552 struct ring_buffer_event *event; \
560 struct ftrace_raw_##call *entry; \ 553 struct ftrace_raw_##call *entry; \
561 struct ring_buffer *buffer; \ 554 struct ring_buffer *buffer; \
@@ -569,7 +562,7 @@ static void ftrace_raw_event_##call(proto) \
569 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 562 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
570 \ 563 \
571 event = trace_current_buffer_lock_reserve(&buffer, \ 564 event = trace_current_buffer_lock_reserve(&buffer, \
572 event_##call.id, \ 565 event_call->id, \
573 sizeof(*entry) + __data_size, \ 566 sizeof(*entry) + __data_size, \
574 irq_flags, pc); \ 567 irq_flags, pc); \
575 if (!event) \ 568 if (!event) \
@@ -584,39 +577,74 @@ static void ftrace_raw_event_##call(proto) \
584 if (!filter_current_check_discard(buffer, event_call, entry, event)) \ 577 if (!filter_current_check_discard(buffer, event_call, entry, event)) \
585 trace_nowake_buffer_unlock_commit(buffer, \ 578 trace_nowake_buffer_unlock_commit(buffer, \
586 event, irq_flags, pc); \ 579 event, irq_flags, pc); \
587} \ 580}
581
582#undef DEFINE_EVENT
583#define DEFINE_EVENT(template, call, proto, args) \
588 \ 584 \
589static int ftrace_raw_reg_event_##call(void *ptr) \ 585static notrace void ftrace_raw_event_##call(proto) \
590{ \ 586{ \
591 int ret; \ 587 ftrace_raw_event_id_##template(&event_##call, args); \
588} \
592 \ 589 \
593 ret = register_trace_##call(ftrace_raw_event_##call); \ 590static notrace int \
594 if (ret) \ 591ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
595 pr_info("event trace: Could not activate trace point " \ 592{ \
596 "probe to " #call "\n"); \ 593 return register_trace_##call(ftrace_raw_event_##call); \
597 return ret; \
598} \ 594} \
599 \ 595 \
600static void ftrace_raw_unreg_event_##call(void *ptr) \ 596static notrace void \
597ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
601{ \ 598{ \
602 unregister_trace_##call(ftrace_raw_event_##call); \ 599 unregister_trace_##call(ftrace_raw_event_##call); \
603} \ 600} \
604 \ 601 \
605static struct trace_event ftrace_event_type_##call = { \ 602static struct trace_event ftrace_event_type_##call = { \
606 .trace = ftrace_raw_output_##call, \ 603 .trace = ftrace_raw_output_##call, \
607}; \ 604};
605
606#undef DEFINE_EVENT_PRINT
607#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
608 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
609
610#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
611
612#undef __entry
613#define __entry REC
614
615#undef __print_flags
616#undef __print_symbolic
617#undef __get_dynamic_array
618#undef __get_str
619
620#undef TP_printk
621#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
622
623#undef DECLARE_EVENT_CLASS
624#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
625static const char print_fmt_##call[] = print;
626
627#undef DEFINE_EVENT
628#define DEFINE_EVENT(template, call, proto, args) \
608 \ 629 \
609static int ftrace_raw_init_event_##call(void) \ 630static struct ftrace_event_call __used \
610{ \ 631__attribute__((__aligned__(4))) \
611 int id; \ 632__attribute__((section("_ftrace_events"))) event_##call = { \
612 \ 633 .name = #call, \
613 id = register_ftrace_event(&ftrace_event_type_##call); \ 634 .system = __stringify(TRACE_SYSTEM), \
614 if (!id) \ 635 .event = &ftrace_event_type_##call, \
615 return -ENODEV; \ 636 .raw_init = trace_event_raw_init, \
616 event_##call.id = id; \ 637 .regfunc = ftrace_raw_reg_event_##call, \
617 INIT_LIST_HEAD(&event_##call.fields); \ 638 .unregfunc = ftrace_raw_unreg_event_##call, \
618 return 0; \ 639 .print_fmt = print_fmt_##template, \
619} \ 640 .define_fields = ftrace_define_fields_##template, \
641 _TRACE_PERF_INIT(call) \
642}
643
644#undef DEFINE_EVENT_PRINT
645#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
646 \
647static const char print_fmt_##call[] = print; \
620 \ 648 \
621static struct ftrace_event_call __used \ 649static struct ftrace_event_call __used \
622__attribute__((__aligned__(4))) \ 650__attribute__((__aligned__(4))) \
@@ -624,28 +652,29 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
624 .name = #call, \ 652 .name = #call, \
625 .system = __stringify(TRACE_SYSTEM), \ 653 .system = __stringify(TRACE_SYSTEM), \
626 .event = &ftrace_event_type_##call, \ 654 .event = &ftrace_event_type_##call, \
627 .raw_init = ftrace_raw_init_event_##call, \ 655 .raw_init = trace_event_raw_init, \
628 .regfunc = ftrace_raw_reg_event_##call, \ 656 .regfunc = ftrace_raw_reg_event_##call, \
629 .unregfunc = ftrace_raw_unreg_event_##call, \ 657 .unregfunc = ftrace_raw_unreg_event_##call, \
630 .show_format = ftrace_format_##call, \ 658 .print_fmt = print_fmt_##call, \
631 .define_fields = ftrace_define_fields_##call, \ 659 .define_fields = ftrace_define_fields_##template, \
632 _TRACE_PROFILE_INIT(call) \ 660 _TRACE_PERF_INIT(call) \
633} 661}
634 662
635#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 663#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
636 664
637/* 665/*
638 * Define the insertion callback to profile events 666 * Define the insertion callback to perf events
639 * 667 *
640 * The job is very similar to ftrace_raw_event_<call> except that we don't 668 * The job is very similar to ftrace_raw_event_<call> except that we don't
641 * insert in the ring buffer but in a perf counter. 669 * insert in the ring buffer but in a perf counter.
642 * 670 *
643 * static void ftrace_profile_<call>(proto) 671 * static void ftrace_perf_<call>(proto)
644 * { 672 * {
645 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 673 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
646 * struct ftrace_event_call *event_call = &event_<call>; 674 * struct ftrace_event_call *event_call = &event_<call>;
647 * extern void perf_tp_event(int, u64, u64, void *, int); 675 * extern void perf_tp_event(int, u64, u64, void *, int);
648 * struct ftrace_raw_##call *entry; 676 * struct ftrace_raw_##call *entry;
677 * struct perf_trace_buf *trace_buf;
649 * u64 __addr = 0, __count = 1; 678 * u64 __addr = 0, __count = 1;
650 * unsigned long irq_flags; 679 * unsigned long irq_flags;
651 * struct trace_entry *ent; 680 * struct trace_entry *ent;
@@ -670,14 +699,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
670 * __cpu = smp_processor_id(); 699 * __cpu = smp_processor_id();
671 * 700 *
672 * if (in_nmi()) 701 * if (in_nmi())
673 * raw_data = rcu_dereference(trace_profile_buf_nmi); 702 * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
674 * else 703 * else
675 * raw_data = rcu_dereference(trace_profile_buf); 704 * trace_buf = rcu_dereference_sched(perf_trace_buf);
676 * 705 *
677 * if (!raw_data) 706 * if (!trace_buf)
678 * goto end; 707 * goto end;
679 * 708 *
680 * raw_data = per_cpu_ptr(raw_data, __cpu); 709 * trace_buf = per_cpu_ptr(trace_buf, __cpu);
710 *
711 * // Avoid recursion from perf that could mess up the buffer
712 * if (trace_buf->recursion++)
713 * goto end_recursion;
714 *
715 * raw_data = trace_buf->buf;
716 *
717 * // Make recursion update visible before entering perf_tp_event
718 * // so that we protect from perf recursions.
719 *
720 * barrier();
681 * 721 *
682 * //zero dead bytes from alignment to avoid stack leak to userspace: 722 * //zero dead bytes from alignment to avoid stack leak to userspace:
683 * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; 723 * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
@@ -696,7 +736,17 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
696 * } 736 * }
697 */ 737 */
698 738
699#ifdef CONFIG_EVENT_PROFILE 739#ifdef CONFIG_PERF_EVENTS
740
741#undef __entry
742#define __entry entry
743
744#undef __get_dynamic_array
745#define __get_dynamic_array(field) \
746 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
747
748#undef __get_str
749#define __get_str(field) (char *)__get_dynamic_array(field)
700 750
701#undef __perf_addr 751#undef __perf_addr
702#define __perf_addr(a) __addr = (a) 752#define __perf_addr(a) __addr = (a)
@@ -704,67 +754,59 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
704#undef __perf_count 754#undef __perf_count
705#define __perf_count(c) __count = (c) 755#define __perf_count(c) __count = (c)
706 756
707#undef TRACE_EVENT 757#undef DECLARE_EVENT_CLASS
708#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ 758#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
709static void ftrace_profile_##call(proto) \ 759static notrace void \
760perf_trace_templ_##call(struct ftrace_event_call *event_call, \
761 proto) \
710{ \ 762{ \
711 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 763 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
712 struct ftrace_event_call *event_call = &event_##call; \
713 extern void perf_tp_event(int, u64, u64, void *, int); \
714 struct ftrace_raw_##call *entry; \ 764 struct ftrace_raw_##call *entry; \
715 u64 __addr = 0, __count = 1; \ 765 u64 __addr = 0, __count = 1; \
716 unsigned long irq_flags; \ 766 unsigned long irq_flags; \
717 struct trace_entry *ent; \ 767 struct pt_regs *__regs; \
718 int __entry_size; \ 768 int __entry_size; \
719 int __data_size; \ 769 int __data_size; \
720 char *raw_data; \ 770 int rctx; \
721 int __cpu; \
722 int pc; \
723 \
724 pc = preempt_count(); \
725 \ 771 \
726 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 772 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
727 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ 773 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
728 sizeof(u64)); \ 774 sizeof(u64)); \
729 __entry_size -= sizeof(u32); \ 775 __entry_size -= sizeof(u32); \
730 \ 776 \
731 if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ 777 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
732 "profile buffer not large enough")) \ 778 "profile buffer not large enough")) \
733 return; \ 779 return; \
734 \ 780 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
735 local_irq_save(irq_flags); \ 781 __entry_size, event_call->id, &rctx, &irq_flags); \
736 __cpu = smp_processor_id(); \ 782 if (!entry) \
737 \ 783 return; \
738 if (in_nmi()) \
739 raw_data = rcu_dereference(trace_profile_buf_nmi); \
740 else \
741 raw_data = rcu_dereference(trace_profile_buf); \
742 \
743 if (!raw_data) \
744 goto end; \
745 \
746 raw_data = per_cpu_ptr(raw_data, __cpu); \
747 \
748 *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
749 entry = (struct ftrace_raw_##call *)raw_data; \
750 ent = &entry->ent; \
751 tracing_generic_entry_update(ent, irq_flags, pc); \
752 ent->type = event_call->id; \
753 \
754 tstruct \ 784 tstruct \
755 \ 785 \
756 { assign; } \ 786 { assign; } \
757 \ 787 \
758 perf_tp_event(event_call->id, __addr, __count, entry, \ 788 __regs = &__get_cpu_var(perf_trace_regs); \
759 __entry_size); \ 789 perf_fetch_caller_regs(__regs, 2); \
760 \
761end: \
762 local_irq_restore(irq_flags); \
763 \ 790 \
791 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
792 __count, irq_flags, __regs); \
764} 793}
765 794
795#undef DEFINE_EVENT
796#define DEFINE_EVENT(template, call, proto, args) \
797static notrace void perf_trace_##call(proto) \
798{ \
799 struct ftrace_event_call *event_call = &event_##call; \
800 \
801 perf_trace_templ_##template(event_call, args); \
802}
803
804#undef DEFINE_EVENT_PRINT
805#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
806 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
807
766#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 808#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
767#endif /* CONFIG_EVENT_PROFILE */ 809#endif /* CONFIG_PERF_EVENTS */
768 810
769#undef _TRACE_PROFILE_INIT 811#undef _TRACE_PROFILE_INIT
770 812
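The class/template split above is the heart of the ftrace.h change: DECLARE_EVENT_CLASS now emits a single perf_trace_templ_<class>() body, and each DEFINE_EVENT generates only a thin perf_trace_<call>() wrapper that passes its own ftrace_event_call into the shared template, so events sharing a class no longer each carry a full copy of the buffer-handling code. A hedged, compile-time sketch of that pattern in plain user-space C follows; every name here (sched_demo and friends) is hypothetical, and the printf stands in for the real perf_trace_buf_prepare()/perf_trace_buf_submit() sequence.

#include <stdio.h>

struct ftrace_event_call {
	const char *name;
};

/* DECLARE_EVENT_CLASS: one shared template body per event class.
 * For this demo the body assumes the class proto names a "pid" arg. */
#define DECLARE_EVENT_CLASS(tmpl, proto)				\
static void perf_trace_templ_##tmpl(struct ftrace_event_call *event_call, \
				    proto)				\
{									\
	printf("%s: pid=%d\n", event_call->name, pid);			\
}

/* DEFINE_EVENT: a thin per-event wrapper, as in the patch above */
#define DEFINE_EVENT(tmpl, call, proto, args)				\
static struct ftrace_event_call event_##call = { .name = #call };	\
static void perf_trace_##call(proto)					\
{									\
	perf_trace_templ_##tmpl(&event_##call, args);			\
}

DECLARE_EVENT_CLASS(sched_demo, int pid)
DEFINE_EVENT(sched_demo, sched_wakeup_demo, int pid, pid)
DEFINE_EVENT(sched_demo, sched_switch_demo, int pid, pid)

int main(void)
{
	perf_trace_sched_wakeup_demo(42);
	perf_trace_sched_switch_demo(43);
	return 0;
}

Built with gcc, both wrappers compile to little more than a call into the one shared template body, which mirrors the text-size saving this kind of deduplication is after.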
diff --git a/include/trace/power.h b/include/trace/power.h
deleted file mode 100644
index ef204666e983..000000000000
--- a/include/trace/power.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 5dc283ba5ae0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -12,52 +12,45 @@
12 * A syscall entry in the ftrace syscalls array. 12 * A syscall entry in the ftrace syscalls array.
13 * 13 *
14 * @name: name of the syscall 14 * @name: name of the syscall
15 * @syscall_nr: number of the syscall
15 * @nb_args: number of parameters it takes 16 * @nb_args: number of parameters it takes
16 * @types: list of types as strings 17 * @types: list of types as strings
17 * @args: list of args as strings (args[i] matches types[i]) 18 * @args: list of args as strings (args[i] matches types[i])
18 * @enter_id: associated ftrace enter event id
19 * @exit_id: associated ftrace exit event id
20 * @enter_event: associated syscall_enter trace event 19 * @enter_event: associated syscall_enter trace event
21 * @exit_event: associated syscall_exit trace event 20 * @exit_event: associated syscall_exit trace event
22 */ 21 */
23struct syscall_metadata { 22struct syscall_metadata {
24 const char *name; 23 const char *name;
24 int syscall_nr;
25 int nb_args; 25 int nb_args;
26 const char **types; 26 const char **types;
27 const char **args; 27 const char **args;
28 int enter_id;
29 int exit_id;
30 28
31 struct ftrace_event_call *enter_event; 29 struct ftrace_event_call *enter_event;
32 struct ftrace_event_call *exit_event; 30 struct ftrace_event_call *exit_event;
33}; 31};
34 32
35#ifdef CONFIG_FTRACE_SYSCALLS 33#ifdef CONFIG_FTRACE_SYSCALLS
36extern struct syscall_metadata *syscall_nr_to_meta(int nr); 34extern unsigned long arch_syscall_addr(int nr);
37extern int syscall_name_to_nr(char *name); 35extern int init_syscall_trace(struct ftrace_event_call *call);
38void set_syscall_enter_id(int num, int id); 36
39void set_syscall_exit_id(int num, int id);
40extern struct trace_event event_syscall_enter;
41extern struct trace_event event_syscall_exit;
42extern int reg_event_syscall_enter(void *ptr);
43extern void unreg_event_syscall_enter(void *ptr);
44extern int reg_event_syscall_exit(void *ptr);
45extern void unreg_event_syscall_exit(void *ptr);
46extern int syscall_enter_format(struct ftrace_event_call *call,
47 struct trace_seq *s);
48extern int syscall_exit_format(struct ftrace_event_call *call,
49 struct trace_seq *s);
50extern int syscall_enter_define_fields(struct ftrace_event_call *call); 37extern int syscall_enter_define_fields(struct ftrace_event_call *call);
51extern int syscall_exit_define_fields(struct ftrace_event_call *call); 38extern int syscall_exit_define_fields(struct ftrace_event_call *call);
39extern int reg_event_syscall_enter(struct ftrace_event_call *call);
40extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
41extern int reg_event_syscall_exit(struct ftrace_event_call *call);
42extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
43extern int
44ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
52enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); 45enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
53enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); 46enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
54#endif 47#endif
55#ifdef CONFIG_EVENT_PROFILE
56int reg_prof_syscall_enter(char *name);
57void unreg_prof_syscall_enter(char *name);
58int reg_prof_syscall_exit(char *name);
59void unreg_prof_syscall_exit(char *name);
60 48
49#ifdef CONFIG_PERF_EVENTS
50int perf_sysenter_enable(struct ftrace_event_call *call);
51void perf_sysenter_disable(struct ftrace_event_call *call);
52int perf_sysexit_enable(struct ftrace_event_call *call);
53void perf_sysexit_disable(struct ftrace_event_call *call);
61#endif 54#endif
62 55
63#endif /* _TRACE_SYSCALL_H */ 56#endif /* _TRACE_SYSCALL_H */
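Summing up the syscall.h reshape: syscall_nr moves into syscall_metadata, the per-direction enter_id/exit_id fields disappear (event ids now live on the ftrace_event_call itself), and the registration and perf hooks take a struct ftrace_event_call * instead of a void pointer or a name string. Below is a user-space sketch of the new layout filled with illustrative values; openat is 257 on x86-64, but the type/arg strings are made up purely to show how types[i] pairs with args[i].

#include <stdio.h>

struct ftrace_event_call;	/* opaque for this sketch */

struct syscall_metadata {
	const char *name;
	int syscall_nr;
	int nb_args;
	const char **types;
	const char **args;
	struct ftrace_event_call *enter_event;
	struct ftrace_event_call *exit_event;
};

int main(void)
{
	static const char *types[] = { "int", "const char *", "int", "umode_t" };
	static const char *args[]  = { "dfd", "filename", "flags", "mode" };
	struct syscall_metadata meta = {
		.name       = "sys_openat",
		.syscall_nr = 257,	/* x86-64; illustrative */
		.nb_args    = 4,
		.types      = types,
		.args       = args,
	};

	for (int i = 0; i < meta.nb_args; i++)
		printf("%s %s\n", meta.types[i], meta.args[i]);
	return 0;
}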