author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>  2011-11-28 07:42:15 -0500
committer Greg Kroah-Hartman <gregkh@suse.de>                 2011-11-28 20:05:05 -0500
commit    763be8c0a919015a3c1e205005176d4aacec22e3 (patch)
tree      0e26230bbc03900ca943776e6d4f3f86abd09d44 /drivers
parent    69e1242eaab021eb6a4110a671af1e443fbf704d (diff)
lttng instrumentation: tracepoint events
Modifications to the in-kernel TRACE_EVENT are needed so that LTTng can
generate its compact event descriptions and probe code. These changes
could apply to the upstream TRACE_EVENT, but would require changing the
in-kernel API.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/lttng/instrumentation/events/README                  |   7
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/block.h    | 626
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/irq.h      | 155
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h      | 312
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h    |  34
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/sched.h    | 400
-rw-r--r--  drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h |  76
-rw-r--r--  drivers/staging/lttng/instrumentation/events/mainline/block.h        | 569
-rw-r--r--  drivers/staging/lttng/instrumentation/events/mainline/irq.h          | 150
-rw-r--r--  drivers/staging/lttng/instrumentation/events/mainline/kvm.h          | 312
-rw-r--r--  drivers/staging/lttng/instrumentation/events/mainline/sched.h        | 397
-rw-r--r--  drivers/staging/lttng/instrumentation/events/mainline/syscalls.h     |  75
12 files changed, 3113 insertions(+), 0 deletions(-)
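For orientation before the per-file diffs: every TRACE_EVENT()/DEFINE_EVENT()
declaration below expands to a trace_<name>() inline that instrumented kernel
code calls at the event site; LTTng swaps in its own tp_assign()-style
TP_fast_assign() semantics while keeping that calling convention. A minimal
sketch of the mainline-style declaration pattern (the `sample` event is
hypothetical and not part of this patch):

	#include <linux/tracepoint.h>

	TRACE_EVENT(sample,			/* declares event "sample" */

		TP_PROTO(int value),		/* signature of trace_sample() */

		TP_ARGS(value),

		TP_STRUCT__entry(
			__field(int, value)	/* one field recorded per hit */
		),

		TP_fast_assign(
			__entry->value = value;	/* mainline-style assignment */
		),

		TP_printk("value=%d", __entry->value)
	);

	/*
	 * An instrumented call site then simply does:
	 *
	 *	trace_sample(42);
	 *
	 * which compiles to a near-no-op while the tracepoint is disabled.
	 */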
diff --git a/drivers/staging/lttng/instrumentation/events/README b/drivers/staging/lttng/instrumentation/events/README
new file mode 100644
index 00000000000..dad2cbbd9de
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/README
@@ -0,0 +1,7 @@
1The workflow for updating patches from a newer kernel:
2
3Diff the mainline/ and lttng-module/ directories.
4
5Pull the new headers from the mainline kernel into mainline/.
6Copy them into lttng-module/.
7Apply the diff. Fix conflicts.
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/block.h b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
new file mode 100644
index 00000000000..42184f3d1e7
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
@@ -0,0 +1,626 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM block
3
4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BLOCK_H
6
7#include <linux/blktrace_api.h>
8#include <linux/blkdev.h>
9#include <linux/tracepoint.h>
10#include <linux/trace_seq.h>
11
12#ifndef _TRACE_BLOCK_DEF_
13#define _TRACE_BLOCK_DEF_
14
15#define __blk_dump_cmd(cmd, len) "<unknown>"
16
17enum {
18 RWBS_FLAG_WRITE = (1 << 0),
19 RWBS_FLAG_DISCARD = (1 << 1),
20 RWBS_FLAG_READ = (1 << 2),
21 RWBS_FLAG_RAHEAD = (1 << 3),
22 RWBS_FLAG_SYNC = (1 << 4),
23 RWBS_FLAG_META = (1 << 5),
24 RWBS_FLAG_SECURE = (1 << 6),
25};
26
27#endif /* _TRACE_BLOCK_DEF_ */
28
29#define __print_rwbs_flags(rwbs) \
30 __print_flags(rwbs, "", \
31 { RWBS_FLAG_WRITE, "W" }, \
32 { RWBS_FLAG_DISCARD, "D" }, \
33 { RWBS_FLAG_READ, "R" }, \
34 { RWBS_FLAG_RAHEAD, "A" }, \
35 { RWBS_FLAG_SYNC, "S" }, \
36 { RWBS_FLAG_META, "M" }, \
37 { RWBS_FLAG_SECURE, "E" })
38
39#define blk_fill_rwbs(rwbs, rw, bytes) \
40 tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
41 ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
42 ( (bytes) ? RWBS_FLAG_READ : \
43 ( 0 )))) \
44 | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
45 | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
46 | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
47 | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
48
49DECLARE_EVENT_CLASS(block_rq_with_error,
50
51 TP_PROTO(struct request_queue *q, struct request *rq),
52
53 TP_ARGS(q, rq),
54
55 TP_STRUCT__entry(
56 __field( dev_t, dev )
57 __field( sector_t, sector )
58 __field( unsigned int, nr_sector )
59 __field( int, errors )
60 __field( unsigned int, rwbs )
61 __dynamic_array_hex( unsigned char, cmd,
62 (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
63 rq->cmd_len : 0)
64 ),
65
66 TP_fast_assign(
67 tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
68 tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
69 0 : blk_rq_pos(rq))
70 tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
71 0 : blk_rq_sectors(rq))
72 tp_assign(errors, rq->errors)
73 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
74 tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
75 rq->cmd : NULL);
76 ),
77
78 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
79 MAJOR(__entry->dev), MINOR(__entry->dev),
80 __print_rwbs_flags(__entry->rwbs),
81 __blk_dump_cmd(__get_dynamic_array(cmd),
82 __get_dynamic_array_len(cmd)),
83 (unsigned long long)__entry->sector,
84 __entry->nr_sector, __entry->errors)
85)
86
87/**
88 * block_rq_abort - abort block operation request
89 * @q: queue containing the block operation request
90 * @rq: block IO operation request
91 *
92 * Called immediately after pending block IO operation request @rq in
93 * queue @q is aborted. The fields in the operation request @rq
94 * can be examined to determine which device and sectors the pending
95 * operation would access.
96 */
97DEFINE_EVENT(block_rq_with_error, block_rq_abort,
98
99 TP_PROTO(struct request_queue *q, struct request *rq),
100
101 TP_ARGS(q, rq)
102)
103
104/**
105 * block_rq_requeue - place block IO request back on a queue
106 * @q: queue holding operation
107 * @rq: block IO operation request
108 *
109 * The block operation request @rq is being placed back into queue
110 * @q. For some reason the request was not completed and needs to be
111 * put back in the queue.
112 */
113DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
114
115 TP_PROTO(struct request_queue *q, struct request *rq),
116
117 TP_ARGS(q, rq)
118)
119
120/**
121 * block_rq_complete - block IO operation completed by device driver
122 * @q: queue containing the block operation request
123 * @rq: block operations request
124 *
125 * The block_rq_complete tracepoint event indicates that some portion
126 * of the operation request has been completed by the device driver. If
127 * the @rq->bio is %NULL, then there is absolutely no additional work to
128 * do for the request. If @rq->bio is non-NULL then there is
129 * additional work required to complete the request.
130 */
131DEFINE_EVENT(block_rq_with_error, block_rq_complete,
132
133 TP_PROTO(struct request_queue *q, struct request *rq),
134
135 TP_ARGS(q, rq)
136)
137
138DECLARE_EVENT_CLASS(block_rq,
139
140 TP_PROTO(struct request_queue *q, struct request *rq),
141
142 TP_ARGS(q, rq),
143
144 TP_STRUCT__entry(
145 __field( dev_t, dev )
146 __field( sector_t, sector )
147 __field( unsigned int, nr_sector )
148 __field( unsigned int, bytes )
149 __field( unsigned int, rwbs )
150 __array_text( char, comm, TASK_COMM_LEN )
151 __dynamic_array_hex( unsigned char, cmd,
152 (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
153 rq->cmd_len : 0)
154 ),
155
156 TP_fast_assign(
157 tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
158 tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
159 0 : blk_rq_pos(rq))
160 tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
161 0 : blk_rq_sectors(rq))
162 tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
163 blk_rq_bytes(rq) : 0)
164 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
165 tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
166 rq->cmd : NULL);
167 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
168 ),
169
170 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
171 MAJOR(__entry->dev), MINOR(__entry->dev),
172 __print_rwbs_flags(__entry->rwbs),
173 __entry->bytes,
174 __blk_dump_cmd(__get_dynamic_array(cmd),
175 __get_dynamic_array_len(cmd)),
176 (unsigned long long)__entry->sector,
177 __entry->nr_sector, __entry->comm)
178)
179
180/**
181 * block_rq_insert - insert block operation request into queue
182 * @q: target queue
183 * @rq: block IO operation request
184 *
185 * Called immediately before block operation request @rq is inserted
186 * into queue @q. The fields in the operation request @rq struct can
187 * be examined to determine which device and sectors the pending
188 * operation would access.
189 */
190DEFINE_EVENT(block_rq, block_rq_insert,
191
192 TP_PROTO(struct request_queue *q, struct request *rq),
193
194 TP_ARGS(q, rq)
195)
196
197/**
198 * block_rq_issue - issue pending block IO request operation to device driver
199 * @q: queue holding operation
200 * @rq: block IO operation request
201 *
202 * Called when block operation request @rq from queue @q is sent to a
203 * device driver for processing.
204 */
205DEFINE_EVENT(block_rq, block_rq_issue,
206
207 TP_PROTO(struct request_queue *q, struct request *rq),
208
209 TP_ARGS(q, rq)
210)
211
212/**
213 * block_bio_bounce - used bounce buffer when processing block operation
214 * @q: queue holding the block operation
215 * @bio: block operation
216 *
217 * A bounce buffer was used to handle the block operation @bio in @q.
218 * This occurs when hardware limitations prevent a direct transfer of
219 * data between the @bio data memory area and the IO device. Use of a
220 * bounce buffer requires extra copying of data and decreases
221 * performance.
222 */
223TRACE_EVENT(block_bio_bounce,
224
225 TP_PROTO(struct request_queue *q, struct bio *bio),
226
227 TP_ARGS(q, bio),
228
229 TP_STRUCT__entry(
230 __field( dev_t, dev )
231 __field( sector_t, sector )
232 __field( unsigned int, nr_sector )
233 __field( unsigned int, rwbs )
234 __array_text( char, comm, TASK_COMM_LEN )
235 ),
236
237 TP_fast_assign(
238 tp_assign(dev, bio->bi_bdev ?
239 bio->bi_bdev->bd_dev : 0)
240 tp_assign(sector, bio->bi_sector)
241 tp_assign(nr_sector, bio->bi_size >> 9)
242 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
243 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
244 ),
245
246 TP_printk("%d,%d %s %llu + %u [%s]",
247 MAJOR(__entry->dev), MINOR(__entry->dev),
248 __print_rwbs_flags(__entry->rwbs),
249 (unsigned long long)__entry->sector,
250 __entry->nr_sector, __entry->comm)
251)
252
253/**
254 * block_bio_complete - completed all work on the block operation
255 * @q: queue holding the block operation
256 * @bio: block operation completed
257 * @error: io error value
258 *
259 * This tracepoint indicates there is no further work to do on this
260 * block IO operation @bio.
261 */
262TRACE_EVENT(block_bio_complete,
263
264 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
265
266 TP_ARGS(q, bio, error),
267
268 TP_STRUCT__entry(
269 __field( dev_t, dev )
270 __field( sector_t, sector )
271 __field( unsigned, nr_sector )
272 __field( int, error )
273 __field( unsigned int, rwbs )
274 ),
275
276 TP_fast_assign(
277 tp_assign(dev, bio->bi_bdev->bd_dev)
278 tp_assign(sector, bio->bi_sector)
279 tp_assign(nr_sector, bio->bi_size >> 9)
280 tp_assign(error, error)
281 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
282 ),
283
284 TP_printk("%d,%d %s %llu + %u [%d]",
285 MAJOR(__entry->dev), MINOR(__entry->dev),
286 __print_rwbs_flags(__entry->rwbs),
287 (unsigned long long)__entry->sector,
288 __entry->nr_sector, __entry->error)
289)
290
291DECLARE_EVENT_CLASS(block_bio,
292
293 TP_PROTO(struct request_queue *q, struct bio *bio),
294
295 TP_ARGS(q, bio),
296
297 TP_STRUCT__entry(
298 __field( dev_t, dev )
299 __field( sector_t, sector )
300 __field( unsigned int, nr_sector )
301 __field( unsigned int, rwbs )
302 __array_text( char, comm, TASK_COMM_LEN )
303 ),
304
305 TP_fast_assign(
306 tp_assign(dev, bio->bi_bdev->bd_dev)
307 tp_assign(sector, bio->bi_sector)
308 tp_assign(nr_sector, bio->bi_size >> 9)
309 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
310 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
311 ),
312
313 TP_printk("%d,%d %s %llu + %u [%s]",
314 MAJOR(__entry->dev), MINOR(__entry->dev),
315 __print_rwbs_flags(__entry->rwbs),
316 (unsigned long long)__entry->sector,
317 __entry->nr_sector, __entry->comm)
318)
319
320/**
321 * block_bio_backmerge - merging block operation to the end of an existing operation
322 * @q: queue holding operation
323 * @bio: new block operation to merge
324 *
325 * Merging block request @bio to the end of an existing block request
326 * in queue @q.
327 */
328DEFINE_EVENT(block_bio, block_bio_backmerge,
329
330 TP_PROTO(struct request_queue *q, struct bio *bio),
331
332 TP_ARGS(q, bio)
333)
334
335/**
336 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
337 * @q: queue holding operation
338 * @bio: new block operation to merge
339 *
340 * Merging block IO operation @bio to the beginning of an existing block
341 * operation in queue @q.
342 */
343DEFINE_EVENT(block_bio, block_bio_frontmerge,
344
345 TP_PROTO(struct request_queue *q, struct bio *bio),
346
347 TP_ARGS(q, bio)
348)
349
350/**
351 * block_bio_queue - putting new block IO operation in queue
352 * @q: queue holding operation
353 * @bio: new block operation
354 *
355 * About to place the block IO operation @bio into queue @q.
356 */
357DEFINE_EVENT(block_bio, block_bio_queue,
358
359 TP_PROTO(struct request_queue *q, struct bio *bio),
360
361 TP_ARGS(q, bio)
362)
363
364DECLARE_EVENT_CLASS(block_get_rq,
365
366 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
367
368 TP_ARGS(q, bio, rw),
369
370 TP_STRUCT__entry(
371 __field( dev_t, dev )
372 __field( sector_t, sector )
373 __field( unsigned int, nr_sector )
374 __field( unsigned int, rwbs )
375 __array_text( char, comm, TASK_COMM_LEN )
376 ),
377
378 TP_fast_assign(
379 tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
380 tp_assign(sector, bio ? bio->bi_sector : 0)
381 tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
382 blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
383 bio ? bio->bi_size >> 9 : 0)
384 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
385 ),
386
387 TP_printk("%d,%d %s %llu + %u [%s]",
388 MAJOR(__entry->dev), MINOR(__entry->dev),
389 __print_rwbs_flags(__entry->rwbs),
390 (unsigned long long)__entry->sector,
391 __entry->nr_sector, __entry->comm)
392)
393
394/**
395 * block_getrq - get a free request entry in queue for block IO operations
396 * @q: queue for operations
397 * @bio: pending block IO operation
398 * @rw: low bit indicates a read (%0) or a write (%1)
399 *
400 * A request struct for queue @q has been allocated to handle the
401 * block IO operation @bio.
402 */
403DEFINE_EVENT(block_get_rq, block_getrq,
404
405 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
406
407 TP_ARGS(q, bio, rw)
408)
409
410/**
411 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
412 * @q: queue for operation
413 * @bio: pending block IO operation
414 * @rw: low bit indicates a read (%0) or a write (%1)
415 *
416 * In the case where a request struct cannot be provided for queue @q
417 * the process needs to wait for a request struct to become
418 * available. This tracepoint event is generated each time the
419 * process goes to sleep waiting for a request struct to become available.
420 */
421DEFINE_EVENT(block_get_rq, block_sleeprq,
422
423 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
424
425 TP_ARGS(q, bio, rw)
426)
427
428/**
429 * block_plug - keep operation requests in request queue
430 * @q: request queue to plug
431 *
432 * Plug the request queue @q. Do not allow block operation requests
433 * to be sent to the device driver. Instead, accumulate requests in
434 * the queue to improve throughput performance of the block device.
435 */
436TRACE_EVENT(block_plug,
437
438 TP_PROTO(struct request_queue *q),
439
440 TP_ARGS(q),
441
442 TP_STRUCT__entry(
443 __array_text( char, comm, TASK_COMM_LEN )
444 ),
445
446 TP_fast_assign(
447 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
448 ),
449
450 TP_printk("[%s]", __entry->comm)
451)
452
453DECLARE_EVENT_CLASS(block_unplug,
454
455 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
456
457 TP_ARGS(q, depth, explicit),
458
459 TP_STRUCT__entry(
460 __field( int, nr_rq )
461 __array_text( char, comm, TASK_COMM_LEN )
462 ),
463
464 TP_fast_assign(
465 tp_assign(nr_rq, depth)
466 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
467 ),
468
469 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
470)
471
472/**
473 * block_unplug - release of operation requests in request queue
474 * @q: request queue to unplug
475 * @depth: number of requests just added to the queue
476 * @explicit: whether this was an explicit unplug, or one from schedule()
477 *
478 * Unplug request queue @q because the device driver is scheduled to work
479 * on elements in the request queue.
480 */
481DEFINE_EVENT(block_unplug, block_unplug,
482
483 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
484
485 TP_ARGS(q, depth, explicit)
486)
487
488/**
489 * block_split - split a single bio struct into two bio structs
490 * @q: queue containing the bio
491 * @bio: block operation being split
492 * @new_sector: The starting sector for the new bio
493 *
494 * The bio request @bio in request queue @q needs to be split into two
495 * bio requests. The newly created @bio request starts at
496 * @new_sector. This split may be required due to hardware limitations
497 * such as operations crossing device boundaries in a RAID system.
498 */
499TRACE_EVENT(block_split,
500
501 TP_PROTO(struct request_queue *q, struct bio *bio,
502 unsigned int new_sector),
503
504 TP_ARGS(q, bio, new_sector),
505
506 TP_STRUCT__entry(
507 __field( dev_t, dev )
508 __field( sector_t, sector )
509 __field( sector_t, new_sector )
510 __field( unsigned int, rwbs )
511 __array_text( char, comm, TASK_COMM_LEN )
512 ),
513
514 TP_fast_assign(
515 tp_assign(dev, bio->bi_bdev->bd_dev)
516 tp_assign(sector, bio->bi_sector)
517 tp_assign(new_sector, new_sector)
518 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
519 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
520 ),
521
522 TP_printk("%d,%d %s %llu / %llu [%s]",
523 MAJOR(__entry->dev), MINOR(__entry->dev),
524 __print_rwbs_flags(__entry->rwbs),
525 (unsigned long long)__entry->sector,
526 (unsigned long long)__entry->new_sector,
527 __entry->comm)
528)
529
530/**
531 * block_bio_remap - map request for a logical device to the raw device
532 * @q: queue holding the operation
533 * @bio: revised operation
534 * @dev: device for the operation
535 * @from: original sector for the operation
536 *
537 * An operation for a logical device has been mapped to the
538 * raw block device.
539 */
540TRACE_EVENT(block_bio_remap,
541
542 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
543 sector_t from),
544
545 TP_ARGS(q, bio, dev, from),
546
547 TP_STRUCT__entry(
548 __field( dev_t, dev )
549 __field( sector_t, sector )
550 __field( unsigned int, nr_sector )
551 __field( dev_t, old_dev )
552 __field( sector_t, old_sector )
553 __field( unsigned int, rwbs )
554 ),
555
556 TP_fast_assign(
557 tp_assign(dev, bio->bi_bdev->bd_dev)
558 tp_assign(sector, bio->bi_sector)
559 tp_assign(nr_sector, bio->bi_size >> 9)
560 tp_assign(old_dev, dev)
561 tp_assign(old_sector, from)
562 blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
563 ),
564
565 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
566 MAJOR(__entry->dev), MINOR(__entry->dev),
567 __print_rwbs_flags(__entry->rwbs),
568 (unsigned long long)__entry->sector,
569 __entry->nr_sector,
570 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
571 (unsigned long long)__entry->old_sector)
572)
573
574/**
575 * block_rq_remap - map request for a block operation request
576 * @q: queue holding the operation
577 * @rq: block IO operation request
578 * @dev: device for the operation
579 * @from: original sector for the operation
580 *
581 * The block operation request @rq in @q has been remapped. The block
582 * operation request @rq holds the current information and @from holds
583 * the original sector.
584 */
585TRACE_EVENT(block_rq_remap,
586
587 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
588 sector_t from),
589
590 TP_ARGS(q, rq, dev, from),
591
592 TP_STRUCT__entry(
593 __field( dev_t, dev )
594 __field( sector_t, sector )
595 __field( unsigned int, nr_sector )
596 __field( dev_t, old_dev )
597 __field( sector_t, old_sector )
598 __field( unsigned int, rwbs )
599 ),
600
601 TP_fast_assign(
602 tp_assign(dev, disk_devt(rq->rq_disk))
603 tp_assign(sector, blk_rq_pos(rq))
604 tp_assign(nr_sector, blk_rq_sectors(rq))
605 tp_assign(old_dev, dev)
606 tp_assign(old_sector, from)
607 blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
608 ),
609
610 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
611 MAJOR(__entry->dev), MINOR(__entry->dev),
612 __print_rwbs_flags(__entry->rwbs),
613 (unsigned long long)__entry->sector,
614 __entry->nr_sector,
615 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
616 (unsigned long long)__entry->old_sector)
617)
618
619#undef __print_rwbs_flags
620#undef blk_fill_rwbs
621
622#endif /* _TRACE_BLOCK_H */
623
624/* This part must be outside protection */
625#include "../../../probes/define_trace.h"
626
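A hedged call-site sketch for the header above: block-layer code of this era
fires these events through the generated trace_<name>() wrappers. The helper
below is hypothetical (mainline fires trace_block_bio_queue() from its bio
submission path); the include path is the mainline one:

	#include <trace/events/block.h>

	/* Hypothetical helper mirroring how the block layer fires the event. */
	static void queue_one_bio(struct request_queue *q, struct bio *bio)
	{
		trace_block_bio_queue(q, bio);	/* no-op unless event enabled */
		/* ... hand @bio to the elevator / device driver ... */
	}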
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
new file mode 100644
index 00000000000..344015d4654
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
@@ -0,0 +1,155 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq
3
4#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_H
6
7#include <linux/tracepoint.h>
8
9#ifndef _TRACE_IRQ_DEF_
10#define _TRACE_IRQ_DEF_
11
12struct irqaction;
13struct softirq_action;
14
15#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
16#define show_softirq_name(val) \
17 __print_symbolic(val, \
18 softirq_name(HI), \
19 softirq_name(TIMER), \
20 softirq_name(NET_TX), \
21 softirq_name(NET_RX), \
22 softirq_name(BLOCK), \
23 softirq_name(BLOCK_IOPOLL), \
24 softirq_name(TASKLET), \
25 softirq_name(SCHED), \
26 softirq_name(HRTIMER), \
27 softirq_name(RCU))
28
29#endif /* _TRACE_IRQ_DEF_ */
30
31/**
32 * irq_handler_entry - called immediately before the irq action handler
33 * @irq: irq number
34 * @action: pointer to struct irqaction
35 *
36 * The struct irqaction pointed to by @action contains various
37 * information about the handler, including the device name,
38 * @action->name, and the device id, @action->dev_id. When used in
39 * conjunction with the irq_handler_exit tracepoint, we can figure
40 * out irq handler latencies.
41 */
42TRACE_EVENT(irq_handler_entry,
43
44 TP_PROTO(int irq, struct irqaction *action),
45
46 TP_ARGS(irq, action),
47
48 TP_STRUCT__entry(
49 __field( int, irq )
50 __string( name, action->name )
51 ),
52
53 TP_fast_assign(
54 tp_assign(irq, irq)
55 tp_strcpy(name, action->name)
56 ),
57
58 TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
59)
60
61/**
62 * irq_handler_exit - called immediately after the irq action handler returns
63 * @irq: irq number
64 * @action: pointer to struct irqaction
65 * @ret: return value
66 *
67 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
68 * @action->handler successfully handled this irq. Otherwise, the irq might be
69 * a shared irq line, or the irq was not handled successfully. Can be used in
70 * conjunction with the irq_handler_entry to understand irq handler latencies.
71 */
72TRACE_EVENT(irq_handler_exit,
73
74 TP_PROTO(int irq, struct irqaction *action, int ret),
75
76 TP_ARGS(irq, action, ret),
77
78 TP_STRUCT__entry(
79 __field( int, irq )
80 __field( int, ret )
81 ),
82
83 TP_fast_assign(
84 tp_assign(irq, irq)
85 tp_assign(ret, ret)
86 ),
87
88 TP_printk("irq=%d ret=%s",
89 __entry->irq, __entry->ret ? "handled" : "unhandled")
90)
91
92DECLARE_EVENT_CLASS(softirq,
93
94 TP_PROTO(unsigned int vec_nr),
95
96 TP_ARGS(vec_nr),
97
98 TP_STRUCT__entry(
99 __field( unsigned int, vec )
100 ),
101
102 TP_fast_assign(
103 tp_assign(vec, vec_nr)
104 ),
105
106 TP_printk("vec=%u [action=%s]", __entry->vec,
107 show_softirq_name(__entry->vec))
108)
109
110/**
111 * softirq_entry - called immediately before the softirq handler
112 * @vec_nr: softirq vector number
113 *
114 * When used in combination with the softirq_exit tracepoint
115 * we can determine the softirq handler runtime.
116 */
117DEFINE_EVENT(softirq, softirq_entry,
118
119 TP_PROTO(unsigned int vec_nr),
120
121 TP_ARGS(vec_nr)
122)
123
124/**
125 * softirq_exit - called immediately after the softirq handler returns
126 * @vec_nr: softirq vector number
127 *
128 * When used in combination with the softirq_entry tracepoint
129 * we can determine the softirq handler runtime.
130 */
131DEFINE_EVENT(softirq, softirq_exit,
132
133 TP_PROTO(unsigned int vec_nr),
134
135 TP_ARGS(vec_nr)
136)
137
138/**
139 * softirq_raise - called immediately when a softirq is raised
140 * @vec_nr: softirq vector number
141 *
142 * When used in combination with the softirq_entry tracepoint
143 * we can determine the softirq raise to run latency.
144 */
145DEFINE_EVENT(softirq, softirq_raise,
146
147 TP_PROTO(unsigned int vec_nr),
148
149 TP_ARGS(vec_nr)
150)
151
152#endif /* _TRACE_IRQ_H */
153
154/* This part must be outside protection */
155#include "../../../probes/define_trace.h"
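As the kernel-doc above notes, pairing irq_handler_entry with irq_handler_exit
yields handler latency. A sketch of that pairing (the wrapper function is
hypothetical; it mirrors what mainline's handle_irq_event_percpu() does):

	#include <trace/events/irq.h>

	static irqreturn_t run_one_action(int irq, struct irqaction *action)
	{
		irqreturn_t ret;

		trace_irq_handler_entry(irq, action);		/* timestamp A */
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);	/* timestamp B */
		return ret;			/* B - A = handler latency */
	}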
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
new file mode 100644
index 00000000000..e10455bc8cf
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
@@ -0,0 +1,312 @@
1#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_MAIN_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
9#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
10
11#define kvm_trace_exit_reason \
12 ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
13 ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
14 ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
15 ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
16 ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
17
18TRACE_EVENT(kvm_userspace_exit,
19 TP_PROTO(__u32 reason, int errno),
20 TP_ARGS(reason, errno),
21
22 TP_STRUCT__entry(
23 __field( __u32, reason )
24 __field( int, errno )
25 ),
26
27 TP_fast_assign(
28 tp_assign(reason, reason)
29 tp_assign(errno, errno)
30 ),
31
32 TP_printk("reason %s (%d)",
33 __entry->errno < 0 ?
34 (__entry->errno == -EINTR ? "restart" : "error") :
35 __print_symbolic(__entry->reason, kvm_trace_exit_reason),
36 __entry->errno < 0 ? -__entry->errno : __entry->reason)
37)
38
39#if defined(__KVM_HAVE_IOAPIC)
40TRACE_EVENT(kvm_set_irq,
41 TP_PROTO(unsigned int gsi, int level, int irq_source_id),
42 TP_ARGS(gsi, level, irq_source_id),
43
44 TP_STRUCT__entry(
45 __field( unsigned int, gsi )
46 __field( int, level )
47 __field( int, irq_source_id )
48 ),
49
50 TP_fast_assign(
51 tp_assign(gsi, gsi)
52 tp_assign(level, level)
53 tp_assign(irq_source_id, irq_source_id)
54 ),
55
56 TP_printk("gsi %u level %d source %d",
57 __entry->gsi, __entry->level, __entry->irq_source_id)
58)
59
60#define kvm_deliver_mode \
61 {0x0, "Fixed"}, \
62 {0x1, "LowPrio"}, \
63 {0x2, "SMI"}, \
64 {0x3, "Res3"}, \
65 {0x4, "NMI"}, \
66 {0x5, "INIT"}, \
67 {0x6, "SIPI"}, \
68 {0x7, "ExtINT"}
69
70TRACE_EVENT(kvm_ioapic_set_irq,
71 TP_PROTO(__u64 e, int pin, bool coalesced),
72 TP_ARGS(e, pin, coalesced),
73
74 TP_STRUCT__entry(
75 __field( __u64, e )
76 __field( int, pin )
77 __field( bool, coalesced )
78 ),
79
80 TP_fast_assign(
81 tp_assign(e, e)
82 tp_assign(pin, pin)
83 tp_assign(coalesced, coalesced)
84 ),
85
86 TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
87 __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
88 __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
89 (__entry->e & (1<<11)) ? "logical" : "physical",
90 (__entry->e & (1<<15)) ? "level" : "edge",
91 (__entry->e & (1<<16)) ? "|masked" : "",
92 __entry->coalesced ? " (coalesced)" : "")
93)
94
95TRACE_EVENT(kvm_msi_set_irq,
96 TP_PROTO(__u64 address, __u64 data),
97 TP_ARGS(address, data),
98
99 TP_STRUCT__entry(
100 __field( __u64, address )
101 __field( __u64, data )
102 ),
103
104 TP_fast_assign(
105 tp_assign(address, address)
106 tp_assign(data, data)
107 ),
108
109 TP_printk("dst %u vec %x (%s|%s|%s%s)",
110 (u8)(__entry->address >> 12), (u8)__entry->data,
111 __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
112 (__entry->address & (1<<2)) ? "logical" : "physical",
113 (__entry->data & (1<<15)) ? "level" : "edge",
114 (__entry->address & (1<<3)) ? "|rh" : "")
115)
116
117#define kvm_irqchips \
118 {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
119 {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
120 {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
121
122TRACE_EVENT(kvm_ack_irq,
123 TP_PROTO(unsigned int irqchip, unsigned int pin),
124 TP_ARGS(irqchip, pin),
125
126 TP_STRUCT__entry(
127 __field( unsigned int, irqchip )
128 __field( unsigned int, pin )
129 ),
130
131 TP_fast_assign(
132 tp_assign(irqchip, irqchip)
133 tp_assign(pin, pin)
134 ),
135
136 TP_printk("irqchip %s pin %u",
137 __print_symbolic(__entry->irqchip, kvm_irqchips),
138 __entry->pin)
139)
140
141
142
143#endif /* defined(__KVM_HAVE_IOAPIC) */
144
145#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
146#define KVM_TRACE_MMIO_READ 1
147#define KVM_TRACE_MMIO_WRITE 2
148
149#define kvm_trace_symbol_mmio \
150 { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
151 { KVM_TRACE_MMIO_READ, "read" }, \
152 { KVM_TRACE_MMIO_WRITE, "write" }
153
154TRACE_EVENT(kvm_mmio,
155 TP_PROTO(int type, int len, u64 gpa, u64 val),
156 TP_ARGS(type, len, gpa, val),
157
158 TP_STRUCT__entry(
159 __field( u32, type )
160 __field( u32, len )
161 __field( u64, gpa )
162 __field( u64, val )
163 ),
164
165 TP_fast_assign(
166 tp_assign(type, type)
167 tp_assign(len, len)
168 tp_assign(gpa, gpa)
169 tp_assign(val, val)
170 ),
171
172 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
173 __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
174 __entry->len, __entry->gpa, __entry->val)
175)
176
177#define kvm_fpu_load_symbol \
178 {0, "unload"}, \
179 {1, "load"}
180
181TRACE_EVENT(kvm_fpu,
182 TP_PROTO(int load),
183 TP_ARGS(load),
184
185 TP_STRUCT__entry(
186 __field( u32, load )
187 ),
188
189 TP_fast_assign(
190 tp_assign(load, load)
191 ),
192
193 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
194)
195
196TRACE_EVENT(kvm_age_page,
197 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
198 TP_ARGS(hva, slot, ref),
199
200 TP_STRUCT__entry(
201 __field( u64, hva )
202 __field( u64, gfn )
203 __field( u8, referenced )
204 ),
205
206 TP_fast_assign(
207 tp_assign(hva, hva)
208 tp_assign(gfn,
209 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
210 tp_assign(referenced, ref)
211 ),
212
213 TP_printk("hva %llx gfn %llx %s",
214 __entry->hva, __entry->gfn,
215 __entry->referenced ? "YOUNG" : "OLD")
216)
217
218#ifdef CONFIG_KVM_ASYNC_PF
219DECLARE_EVENT_CLASS(kvm_async_get_page_class,
220
221 TP_PROTO(u64 gva, u64 gfn),
222
223 TP_ARGS(gva, gfn),
224
225 TP_STRUCT__entry(
226 __field(__u64, gva)
227 __field(u64, gfn)
228 ),
229
230 TP_fast_assign(
231 tp_assign(gva, gva)
232 tp_assign(gfn, gfn)
233 ),
234
235 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
236)
237
238DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
239
240 TP_PROTO(u64 gva, u64 gfn),
241
242 TP_ARGS(gva, gfn)
243)
244
245DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
246
247 TP_PROTO(u64 gva, u64 gfn),
248
249 TP_ARGS(gva, gfn)
250)
251
252DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
253
254 TP_PROTO(u64 token, u64 gva),
255
256 TP_ARGS(token, gva),
257
258 TP_STRUCT__entry(
259 __field(__u64, token)
260 __field(__u64, gva)
261 ),
262
263 TP_fast_assign(
264 tp_assign(token, token)
265 tp_assign(gva, gva)
266 ),
267
268 TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
269
270)
271
272DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
273
274 TP_PROTO(u64 token, u64 gva),
275
276 TP_ARGS(token, gva)
277)
278
279DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
280
281 TP_PROTO(u64 token, u64 gva),
282
283 TP_ARGS(token, gva)
284)
285
286TRACE_EVENT(
287 kvm_async_pf_completed,
288 TP_PROTO(unsigned long address, struct page *page, u64 gva),
289 TP_ARGS(address, page, gva),
290
291 TP_STRUCT__entry(
292 __field(unsigned long, address)
293 __field(pfn_t, pfn)
294 __field(u64, gva)
295 ),
296
297 TP_fast_assign(
298 tp_assign(address, address)
299 tp_assign(pfn, page ? page_to_pfn(page) : 0)
300 tp_assign(gva, gva)
301 ),
302
303 TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
304 __entry->address, __entry->pfn)
305)
306
307#endif
308
309#endif /* _TRACE_KVM_MAIN_H */
310
311/* This part must be outside protection */
312#include "../../../probes/define_trace.h"
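Call-site sketch for kvm_mmio above (the helper is hypothetical; KVM's MMIO
emulation path fires the event with one of the KVM_TRACE_MMIO_* types):

	#include <trace/events/kvm.h>

	/* Hypothetical: record a completed MMIO read of @len bytes at @gpa. */
	static void note_mmio_read(u64 gpa, u64 val, int len)
	{
		/* renders as: "mmio read len <len> gpa 0x... val 0x..." */
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, gpa, val);
	}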
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
new file mode 100644
index 00000000000..6f3d6d14121
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
@@ -0,0 +1,34 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM lttng
3
4#if !defined(_TRACE_LTTNG_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_LTTNG_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(lttng_metadata,
10
11 TP_PROTO(const char *str),
12
13 TP_ARGS(str),
14
15 /*
16 * Not exactly a string: more a sequence of bytes (dynamic
17 * array) without the length. This is a dummy anyway: we only
18 * use this declaration to generate an event metadata entry.
19 */
20 TP_STRUCT__entry(
21 __string( str, str )
22 ),
23
24 TP_fast_assign(
25 tp_strcpy(str, str)
26 ),
27
28 TP_printk("")
29)
30
31#endif /* _TRACE_LTTNG_H */
32
33/* This part must be outside protection */
34#include "../../../probes/define_trace.h"
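Since lttng_metadata is a dummy event that exists only to generate a metadata
entry, the only plausible call-site shape is feeding it the metadata byte
sequence; a hedged sketch (hypothetical helper, not taken from this patch):

	#include <linux/tracepoint.h>

	/* Hypothetical: stream one chunk of metadata through the dummy event. */
	static void emit_metadata_chunk(const char *str)
	{
		/* payload is a raw byte sequence; see the header comment */
		trace_lttng_metadata(str);
	}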
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
new file mode 100644
index 00000000000..33f69213e42
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
@@ -0,0 +1,400 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM sched
3
4#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCHED_H
6
7#include <linux/sched.h>
8#include <linux/tracepoint.h>
9
10#ifndef _TRACE_SCHED_DEF_
11#define _TRACE_SCHED_DEF_
12
13static inline long __trace_sched_switch_state(struct task_struct *p)
14{
15 long state = p->state;
16
17#ifdef CONFIG_PREEMPT
18 /*
19 * For all intents and purposes a preempted task is a running task.
20 */
21 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
22 state = TASK_RUNNING;
23#endif
24
25 return state;
26}
27
28#endif /* _TRACE_SCHED_DEF_ */
29
30/*
31 * Tracepoint for calling kthread_stop, performed to end a kthread:
32 */
33TRACE_EVENT(sched_kthread_stop,
34
35 TP_PROTO(struct task_struct *t),
36
37 TP_ARGS(t),
38
39 TP_STRUCT__entry(
40 __array_text( char, comm, TASK_COMM_LEN )
41 __field( pid_t, tid )
42 ),
43
44 TP_fast_assign(
45 tp_memcpy(comm, t->comm, TASK_COMM_LEN)
46 tp_assign(tid, t->pid)
47 ),
48
49 TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
50)
51
52/*
53 * Tracepoint for the return value of the kthread stopping:
54 */
55TRACE_EVENT(sched_kthread_stop_ret,
56
57 TP_PROTO(int ret),
58
59 TP_ARGS(ret),
60
61 TP_STRUCT__entry(
62 __field( int, ret )
63 ),
64
65 TP_fast_assign(
66 tp_assign(ret, ret)
67 ),
68
69 TP_printk("ret=%d", __entry->ret)
70)
71
72/*
73 * Tracepoint for waking up a task:
74 */
75DECLARE_EVENT_CLASS(sched_wakeup_template,
76
77 TP_PROTO(struct task_struct *p, int success),
78
79 TP_ARGS(p, success),
80
81 TP_STRUCT__entry(
82 __array_text( char, comm, TASK_COMM_LEN )
83 __field( pid_t, tid )
84 __field( int, prio )
85 __field( int, success )
86 __field( int, target_cpu )
87 ),
88
89 TP_fast_assign(
90 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
91 tp_assign(tid, p->pid)
92 tp_assign(prio, p->prio)
93 tp_assign(success, success)
94 tp_assign(target_cpu, task_cpu(p))
95 ),
96
97 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
98 __entry->comm, __entry->tid, __entry->prio,
99 __entry->success, __entry->target_cpu)
100)
101
102DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
103 TP_PROTO(struct task_struct *p, int success),
104 TP_ARGS(p, success))
105
106/*
107 * Tracepoint for waking up a new task:
108 */
109DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
110 TP_PROTO(struct task_struct *p, int success),
111 TP_ARGS(p, success))
112
113/*
114 * Tracepoint for task switches, performed by the scheduler:
115 */
116TRACE_EVENT(sched_switch,
117
118 TP_PROTO(struct task_struct *prev,
119 struct task_struct *next),
120
121 TP_ARGS(prev, next),
122
123 TP_STRUCT__entry(
124 __array_text( char, prev_comm, TASK_COMM_LEN )
125 __field( pid_t, prev_tid )
126 __field( int, prev_prio )
127 __field( long, prev_state )
128 __array_text( char, next_comm, TASK_COMM_LEN )
129 __field( pid_t, next_tid )
130 __field( int, next_prio )
131 ),
132
133 TP_fast_assign(
134 tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
135 tp_assign(prev_tid, prev->pid)
136 tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
137 tp_assign(prev_state, __trace_sched_switch_state(prev))
138 tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
139 tp_assign(next_tid, next->pid)
140 tp_assign(next_prio, next->prio - MAX_RT_PRIO)
141 ),
142
143 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
144 __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
145 __entry->prev_state ?
146 __print_flags(__entry->prev_state, "|",
147 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
148 { 16, "Z" }, { 32, "X" }, { 64, "x" },
149 { 128, "W" }) : "R",
150 __entry->next_comm, __entry->next_tid, __entry->next_prio)
151)
152
153/*
154 * Tracepoint for a task being migrated:
155 */
156TRACE_EVENT(sched_migrate_task,
157
158 TP_PROTO(struct task_struct *p, int dest_cpu),
159
160 TP_ARGS(p, dest_cpu),
161
162 TP_STRUCT__entry(
163 __array_text( char, comm, TASK_COMM_LEN )
164 __field( pid_t, tid )
165 __field( int, prio )
166 __field( int, orig_cpu )
167 __field( int, dest_cpu )
168 ),
169
170 TP_fast_assign(
171 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
172 tp_assign(tid, p->pid)
173 tp_assign(prio, p->prio - MAX_RT_PRIO)
174 tp_assign(orig_cpu, task_cpu(p))
175 tp_assign(dest_cpu, dest_cpu)
176 ),
177
178 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
179 __entry->comm, __entry->tid, __entry->prio,
180 __entry->orig_cpu, __entry->dest_cpu)
181)
182
183DECLARE_EVENT_CLASS(sched_process_template,
184
185 TP_PROTO(struct task_struct *p),
186
187 TP_ARGS(p),
188
189 TP_STRUCT__entry(
190 __array_text( char, comm, TASK_COMM_LEN )
191 __field( pid_t, tid )
192 __field( int, prio )
193 ),
194
195 TP_fast_assign(
196 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
197 tp_assign(tid, p->pid)
198 tp_assign(prio, p->prio - MAX_RT_PRIO)
199 ),
200
201 TP_printk("comm=%s tid=%d prio=%d",
202 __entry->comm, __entry->tid, __entry->prio)
203)
204
205/*
206 * Tracepoint for freeing a task:
207 */
208DEFINE_EVENT(sched_process_template, sched_process_free,
209 TP_PROTO(struct task_struct *p),
210 TP_ARGS(p))
211
212
213/*
214 * Tracepoint for a task exiting:
215 */
216DEFINE_EVENT(sched_process_template, sched_process_exit,
217 TP_PROTO(struct task_struct *p),
218 TP_ARGS(p))
219
220/*
221 * Tracepoint for waiting on task to unschedule:
222 */
223DEFINE_EVENT(sched_process_template, sched_wait_task,
224 TP_PROTO(struct task_struct *p),
225 TP_ARGS(p))
226
227/*
228 * Tracepoint for a waiting task:
229 */
230TRACE_EVENT(sched_process_wait,
231
232 TP_PROTO(struct pid *pid),
233
234 TP_ARGS(pid),
235
236 TP_STRUCT__entry(
237 __array_text( char, comm, TASK_COMM_LEN )
238 __field( pid_t, tid )
239 __field( int, prio )
240 ),
241
242 TP_fast_assign(
243 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
244 tp_assign(tid, pid_nr(pid))
245 tp_assign(prio, current->prio - MAX_RT_PRIO)
246 ),
247
248 TP_printk("comm=%s tid=%d prio=%d",
249 __entry->comm, __entry->tid, __entry->prio)
250)
251
252/*
253 * Tracepoint for do_fork:
254 */
255TRACE_EVENT(sched_process_fork,
256
257 TP_PROTO(struct task_struct *parent, struct task_struct *child),
258
259 TP_ARGS(parent, child),
260
261 TP_STRUCT__entry(
262 __array_text( char, parent_comm, TASK_COMM_LEN )
263 __field( pid_t, parent_tid )
264 __array_text( char, child_comm, TASK_COMM_LEN )
265 __field( pid_t, child_tid )
266 ),
267
268 TP_fast_assign(
269 tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
270 tp_assign(parent_tid, parent->pid)
271 tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
272 tp_assign(child_tid, child->pid)
273 ),
274
275 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
276 __entry->parent_comm, __entry->parent_tid,
277 __entry->child_comm, __entry->child_tid)
278)
279
280/*
281 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
282 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
283 */
284DECLARE_EVENT_CLASS(sched_stat_template,
285
286 TP_PROTO(struct task_struct *tsk, u64 delay),
287
288 TP_ARGS(tsk, delay),
289
290 TP_STRUCT__entry(
291 __array_text( char, comm, TASK_COMM_LEN )
292 __field( pid_t, tid )
293 __field( u64, delay )
294 ),
295
296 TP_fast_assign(
297 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
298 tp_assign(tid, tsk->pid)
299 tp_assign(delay, delay)
300 )
301 TP_perf_assign(
302 __perf_count(delay)
303 ),
304
305 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
306 __entry->comm, __entry->tid,
307 (unsigned long long)__entry->delay)
308)
309
310
311/*
312 * Tracepoint for accounting wait time (time the task is runnable
313 * but not actually running due to scheduler contention).
314 */
315DEFINE_EVENT(sched_stat_template, sched_stat_wait,
316 TP_PROTO(struct task_struct *tsk, u64 delay),
317 TP_ARGS(tsk, delay))
318
319/*
320 * Tracepoint for accounting sleep time (time the task is not runnable,
321 * including iowait, see below).
322 */
323DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
324 TP_PROTO(struct task_struct *tsk, u64 delay),
325 TP_ARGS(tsk, delay))
326
327/*
328 * Tracepoint for accounting iowait time (time the task is not runnable
329 * due to waiting on IO to complete).
330 */
331DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
332 TP_PROTO(struct task_struct *tsk, u64 delay),
333 TP_ARGS(tsk, delay))
334
335/*
336 * Tracepoint for accounting runtime (time the task is executing
337 * on a CPU).
338 */
339TRACE_EVENT(sched_stat_runtime,
340
341 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
342
343 TP_ARGS(tsk, runtime, vruntime),
344
345 TP_STRUCT__entry(
346 __array_text( char, comm, TASK_COMM_LEN )
347 __field( pid_t, tid )
348 __field( u64, runtime )
349 __field( u64, vruntime )
350 ),
351
352 TP_fast_assign(
353 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
354 tp_assign(tid, tsk->pid)
355 tp_assign(runtime, runtime)
356 tp_assign(vruntime, vruntime)
357 )
358 TP_perf_assign(
359 __perf_count(runtime)
360 ),
361
362 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
363 __entry->comm, __entry->tid,
364 (unsigned long long)__entry->runtime,
365 (unsigned long long)__entry->vruntime)
366)
367
368/*
369 * Tracepoint for showing priority inheritance modifying a task's
370 * priority.
371 */
372TRACE_EVENT(sched_pi_setprio,
373
374 TP_PROTO(struct task_struct *tsk, int newprio),
375
376 TP_ARGS(tsk, newprio),
377
378 TP_STRUCT__entry(
379 __array_text( char, comm, TASK_COMM_LEN )
380 __field( pid_t, tid )
381 __field( int, oldprio )
382 __field( int, newprio )
383 ),
384
385 TP_fast_assign(
386 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
387 tp_assign(tid, tsk->pid)
388 tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
389 tp_assign(newprio, newprio - MAX_RT_PRIO)
390 ),
391
392 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
393 __entry->comm, __entry->tid,
394 __entry->oldprio, __entry->newprio)
395)
396
397#endif /* _TRACE_SCHED_H */
398
399/* This part must be outside protection */
400#include "../../../probes/define_trace.h"
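The prev_state flags table in sched_switch's TP_printk() maps task-state bits
to the familiar one-letter codes (S, D, T, ...). A standalone decoder that
mirrors exactly that table, runnable as plain userspace C (illustrative only):

	#include <stdio.h>

	static const char *decode_prev_state(long state)
	{
		static const struct { long bit; const char *name; } tbl[] = {
			{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
			{ 16, "Z" }, { 32, "X" }, { 64, "x" }, { 128, "W" },
		};
		static char buf[32];
		char *p = buf;
		unsigned int i;

		if (!state)
			return "R";		/* 0 == TASK_RUNNING */
		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
			if (!(state & tbl[i].bit))
				continue;
			if (p != buf)
				*p++ = '|';	/* same "|" separator as TP_printk */
			p += sprintf(p, "%s", tbl[i].name);
		}
		*p = '\0';
		return buf;
	}

	int main(void)
	{
		printf("%s\n", decode_prev_state(1));	/* S  (interruptible sleep) */
		printf("%s\n", decode_prev_state(0));	/* R  (running)             */
		printf("%s\n", decode_prev_state(3));	/* S|D                      */
		return 0;
	}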
diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
new file mode 100644
index 00000000000..a2bb9563563
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
@@ -0,0 +1,76 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
4
5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_EVENTS_SYSCALLS_H
7
8#include <linux/tracepoint.h>
9
10#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
11
12#ifndef _TRACE_SYSCALLS_DEF_
13#define _TRACE_SYSCALLS_DEF_
14
15#include <asm/ptrace.h>
16#include <asm/syscall.h>
17
18#endif /* _TRACE_SYSCALLS_DEF_ */
19
20TRACE_EVENT(sys_enter,
21
22 TP_PROTO(struct pt_regs *regs, long id),
23
24 TP_ARGS(regs, id),
25
26 TP_STRUCT__entry(
27 __field( long, id )
28 __array( unsigned long, args, 6 )
29 ),
30
31 TP_fast_assign(
32 tp_assign(id, id)
33 {
34 tp_memcpy(args,
35 ({
36 unsigned long args_copy[6];
37 syscall_get_arguments(current, regs,
38 0, 6, args_copy);
39 args_copy;
40 }), 6 * sizeof(unsigned long));
41 }
42 ),
43
44 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
45 __entry->id,
46 __entry->args[0], __entry->args[1], __entry->args[2],
47 __entry->args[3], __entry->args[4], __entry->args[5])
48)
49
50TRACE_EVENT(sys_exit,
51
52 TP_PROTO(struct pt_regs *regs, long ret),
53
54 TP_ARGS(regs, ret),
55
56 TP_STRUCT__entry(
57 __field( long, id )
58 __field( long, ret )
59 ),
60
61 TP_fast_assign(
62 tp_assign(id, syscall_get_nr(current, regs))
63 tp_assign(ret, ret)
64 ),
65
66 TP_printk("NR %ld = %ld",
67 __entry->id, __entry->ret)
68)
69
70#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
71
72#endif /* _TRACE_EVENTS_SYSCALLS_H */
73
74/* This part must be outside protection */
75#include "../../../probes/define_trace.h"
76
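The sys_enter TP_fast_assign() above copies all six syscall arguments through a
GCC statement expression; open-coded, the equivalent is a single
syscall_get_arguments() call, exactly as used inside the header (kernel-context
sketch, helper name hypothetical):

	#include <asm/syscall.h>

	/* Hypothetical: capture the syscall's six arguments on entry. */
	static void capture_sys_enter(struct pt_regs *regs, long id,
				      unsigned long args[6])
	{
		/* (task, regs, first_arg_index, nr_args, destination buffer) */
		syscall_get_arguments(current, regs, 0, 6, args);
	}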
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/block.h b/drivers/staging/lttng/instrumentation/events/mainline/block.h
new file mode 100644
index 00000000000..bf366547da2
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/mainline/block.h
@@ -0,0 +1,569 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM block
3
4#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_BLOCK_H
6
7#include <linux/blktrace_api.h>
8#include <linux/blkdev.h>
9#include <linux/tracepoint.h>
10
11DECLARE_EVENT_CLASS(block_rq_with_error,
12
13 TP_PROTO(struct request_queue *q, struct request *rq),
14
15 TP_ARGS(q, rq),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( sector_t, sector )
20 __field( unsigned int, nr_sector )
21 __field( int, errors )
22 __array( char, rwbs, 6 )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
29 0 : blk_rq_pos(rq);
30 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
31 0 : blk_rq_sectors(rq);
32 __entry->errors = rq->errors;
33
34 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
35 blk_dump_cmd(__get_str(cmd), rq);
36 ),
37
38 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
39 MAJOR(__entry->dev), MINOR(__entry->dev),
40 __entry->rwbs, __get_str(cmd),
41 (unsigned long long)__entry->sector,
42 __entry->nr_sector, __entry->errors)
43);
44
45/**
46 * block_rq_abort - abort block operation request
47 * @q: queue containing the block operation request
48 * @rq: block IO operation request
49 *
50 * Called immediately after pending block IO operation request @rq in
51 * queue @q is aborted. The fields in the operation request @rq
52 * can be examined to determine which device and sectors the pending
53 * operation would access.
54 */
55DEFINE_EVENT(block_rq_with_error, block_rq_abort,
56
57 TP_PROTO(struct request_queue *q, struct request *rq),
58
59 TP_ARGS(q, rq)
60);
61
62/**
63 * block_rq_requeue - place block IO request back on a queue
64 * @q: queue holding operation
65 * @rq: block IO operation request
66 *
67 * The block operation request @rq is being placed back into queue
68 * @q. For some reason the request was not completed and needs to be
69 * put back in the queue.
70 */
71DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
72
73 TP_PROTO(struct request_queue *q, struct request *rq),
74
75 TP_ARGS(q, rq)
76);
77
78/**
79 * block_rq_complete - block IO operation completed by device driver
80 * @q: queue containing the block operation request
81 * @rq: block operations request
82 *
83 * The block_rq_complete tracepoint event indicates that some portion
84 * of the operation request has been completed by the device driver. If
85 * the @rq->bio is %NULL, then there is absolutely no additional work to
86 * do for the request. If @rq->bio is non-NULL then there is
87 * additional work required to complete the request.
88 */
89DEFINE_EVENT(block_rq_with_error, block_rq_complete,
90
91 TP_PROTO(struct request_queue *q, struct request *rq),
92
93 TP_ARGS(q, rq)
94);
95
96DECLARE_EVENT_CLASS(block_rq,
97
98 TP_PROTO(struct request_queue *q, struct request *rq),
99
100 TP_ARGS(q, rq),
101
102 TP_STRUCT__entry(
103 __field( dev_t, dev )
104 __field( sector_t, sector )
105 __field( unsigned int, nr_sector )
106 __field( unsigned int, bytes )
107 __array( char, rwbs, 6 )
108 __array( char, comm, TASK_COMM_LEN )
109 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
110 ),
111
112 TP_fast_assign(
113 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
114 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
115 0 : blk_rq_pos(rq);
116 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
117 0 : blk_rq_sectors(rq);
118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
119 blk_rq_bytes(rq) : 0;
120
121 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
122 blk_dump_cmd(__get_str(cmd), rq);
123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
124 ),
125
126 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
127 MAJOR(__entry->dev), MINOR(__entry->dev),
128 __entry->rwbs, __entry->bytes, __get_str(cmd),
129 (unsigned long long)__entry->sector,
130 __entry->nr_sector, __entry->comm)
131);
132
133/**
134 * block_rq_insert - insert block operation request into queue
135 * @q: target queue
136 * @rq: block IO operation request
137 *
138 * Called immediately before block operation request @rq is inserted
139 * into queue @q. The fields in the operation request @rq struct can
140 * be examined to determine which device and sectors the pending
141 * operation would access.
142 */
143DEFINE_EVENT(block_rq, block_rq_insert,
144
145 TP_PROTO(struct request_queue *q, struct request *rq),
146
147 TP_ARGS(q, rq)
148);
149
150/**
151 * block_rq_issue - issue pending block IO request operation to device driver
152 * @q: queue holding operation
153 * @rq: block IO operation request
154 *
155 * Called when block operation request @rq from queue @q is sent to a
156 * device driver for processing.
157 */
158DEFINE_EVENT(block_rq, block_rq_issue,
159
160 TP_PROTO(struct request_queue *q, struct request *rq),
161
162 TP_ARGS(q, rq)
163);
164
165/**
166 * block_bio_bounce - used bounce buffer when processing block operation
167 * @q: queue holding the block operation
168 * @bio: block operation
169 *
170 * A bounce buffer was used to handle the block operation @bio in @q.
171 * This occurs when hardware limitations prevent a direct transfer of
172 * data between the @bio data memory area and the IO device. Use of a
173 * bounce buffer requires extra copying of data and decreases
174 * performance.
175 */
176TRACE_EVENT(block_bio_bounce,
177
178 TP_PROTO(struct request_queue *q, struct bio *bio),
179
180 TP_ARGS(q, bio),
181
182 TP_STRUCT__entry(
183 __field( dev_t, dev )
184 __field( sector_t, sector )
185 __field( unsigned int, nr_sector )
186 __array( char, rwbs, 6 )
187 __array( char, comm, TASK_COMM_LEN )
188 ),
189
190 TP_fast_assign(
191 __entry->dev = bio->bi_bdev ?
192 bio->bi_bdev->bd_dev : 0;
193 __entry->sector = bio->bi_sector;
194 __entry->nr_sector = bio->bi_size >> 9;
195 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
196 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
197 ),
198
199 TP_printk("%d,%d %s %llu + %u [%s]",
200 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
201 (unsigned long long)__entry->sector,
202 __entry->nr_sector, __entry->comm)
203);
204
205/**
206 * block_bio_complete - completed all work on the block operation
207 * @q: queue holding the block operation
208 * @bio: block operation completed
209 * @error: io error value
210 *
211 * This tracepoint indicates there is no further work to do on this
212 * block IO operation @bio.
213 */
214TRACE_EVENT(block_bio_complete,
215
216 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
217
218 TP_ARGS(q, bio, error),
219
220 TP_STRUCT__entry(
221 __field( dev_t, dev )
222 __field( sector_t, sector )
223 __field( unsigned, nr_sector )
224 __field( int, error )
225 __array( char, rwbs, 6 )
226 ),
227
228 TP_fast_assign(
229 __entry->dev = bio->bi_bdev->bd_dev;
230 __entry->sector = bio->bi_sector;
231 __entry->nr_sector = bio->bi_size >> 9;
232 __entry->error = error;
233 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
234 ),
235
236 TP_printk("%d,%d %s %llu + %u [%d]",
237 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
238 (unsigned long long)__entry->sector,
239 __entry->nr_sector, __entry->error)
240);
241
242DECLARE_EVENT_CLASS(block_bio,
243
244 TP_PROTO(struct request_queue *q, struct bio *bio),
245
246 TP_ARGS(q, bio),
247
248 TP_STRUCT__entry(
249 __field( dev_t, dev )
250 __field( sector_t, sector )
251 __field( unsigned int, nr_sector )
252 __array( char, rwbs, 6 )
253 __array( char, comm, TASK_COMM_LEN )
254 ),
255
256 TP_fast_assign(
257 __entry->dev = bio->bi_bdev->bd_dev;
258 __entry->sector = bio->bi_sector;
259 __entry->nr_sector = bio->bi_size >> 9;
260 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
261 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
262 ),
263
264 TP_printk("%d,%d %s %llu + %u [%s]",
265 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
266 (unsigned long long)__entry->sector,
267 __entry->nr_sector, __entry->comm)
268);
269
270/**
271 * block_bio_backmerge - merging block operation to the end of an existing operation
272 * @q: queue holding operation
273 * @bio: new block operation to merge
274 *
275 * Merging block request @bio to the end of an existing block request
276 * in queue @q.
277 */
278DEFINE_EVENT(block_bio, block_bio_backmerge,
279
280 TP_PROTO(struct request_queue *q, struct bio *bio),
281
282 TP_ARGS(q, bio)
283);
284
285/**
286 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
287 * @q: queue holding operation
288 * @bio: new block operation to merge
289 *
290 * Merging block IO operation @bio to the beginning of an existing block
291 * operation in queue @q.
292 */
293DEFINE_EVENT(block_bio, block_bio_frontmerge,
294
295 TP_PROTO(struct request_queue *q, struct bio *bio),
296
297 TP_ARGS(q, bio)
298);
299
300/**
301 * block_bio_queue - putting new block IO operation in queue
302 * @q: queue holding operation
303 * @bio: new block operation
304 *
305 * About to place the block IO operation @bio into queue @q.
306 */
307DEFINE_EVENT(block_bio, block_bio_queue,
308
309 TP_PROTO(struct request_queue *q, struct bio *bio),
310
311 TP_ARGS(q, bio)
312);
313
314DECLARE_EVENT_CLASS(block_get_rq,
315
316 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
317
318 TP_ARGS(q, bio, rw),
319
320 TP_STRUCT__entry(
321 __field( dev_t, dev )
322 __field( sector_t, sector )
323 __field( unsigned int, nr_sector )
324 __array( char, rwbs, 6 )
325 __array( char, comm, TASK_COMM_LEN )
326 ),
327
328 TP_fast_assign(
329 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
330 __entry->sector = bio ? bio->bi_sector : 0;
331 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
332 blk_fill_rwbs(__entry->rwbs,
333 bio ? bio->bi_rw : 0, __entry->nr_sector);
334 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
335 ),
336
337 TP_printk("%d,%d %s %llu + %u [%s]",
338 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
339 (unsigned long long)__entry->sector,
340 __entry->nr_sector, __entry->comm)
341);
342
343/**
344 * block_getrq - get a free request entry in queue for block IO operations
345 * @q: queue for operations
346 * @bio: pending block IO operation
347 * @rw: low bit indicates a read (%0) or a write (%1)
348 *
349 * A request struct for queue @q has been allocated to handle the
350 * block IO operation @bio.
351 */
352DEFINE_EVENT(block_get_rq, block_getrq,
353
354 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
355
356 TP_ARGS(q, bio, rw)
357);
358
359/**
360 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
361 * @q: queue for operation
362 * @bio: pending block IO operation
363 * @rw: low bit indicates a read (%0) or a write (%1)
364 *
365 * In the case where a request struct cannot be provided for queue @q,
366 * the process needs to wait for a request struct to become
367 * available. This tracepoint event is generated each time the
368 * process goes to sleep waiting for a request struct to become available.
369 */
370DEFINE_EVENT(block_get_rq, block_sleeprq,
371
372 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
373
374 TP_ARGS(q, bio, rw)
375);
376
377/**
378 * block_plug - keep block operation requests in the request queue
379 * @q: request queue to plug
380 *
381 * Plug the request queue @q. Do not allow block operation requests
382 * to be sent to the device driver. Instead, accumulate requests in
383 * the queue to improve throughput performance of the block device.
384 */
385TRACE_EVENT(block_plug,
386
387 TP_PROTO(struct request_queue *q),
388
389 TP_ARGS(q),
390
391 TP_STRUCT__entry(
392 __array( char, comm, TASK_COMM_LEN )
393 ),
394
395 TP_fast_assign(
396 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
397 ),
398
399 TP_printk("[%s]", __entry->comm)
400);
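
[Editor's note] Plugging is driven from the submission path; a rough sketch of the batching pattern, assuming the blk_start_plug()/blk_finish_plug() helpers of this kernel era:

	/* Sketch: batch several submissions under one plug (simplified). */
	static void submit_batch(void)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);	/* requests now accumulate per task */
		/* ... submit bios; block_plug fires when the queue is plugged ... */
		blk_finish_plug(&plug);	/* release the batch to the driver */
	}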
401
402DECLARE_EVENT_CLASS(block_unplug,
403
404 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
405
406 TP_ARGS(q, depth, explicit),
407
408 TP_STRUCT__entry(
409 __field( int, nr_rq )
410 __array( char, comm, TASK_COMM_LEN )
411 ),
412
413 TP_fast_assign(
414 __entry->nr_rq = depth;
415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
416 ),
417
418 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
419);
420
421/**
422 * block_unplug - release of block operation requests in the request queue
423 * @q: request queue to unplug
424 * @depth: number of requests just added to the queue
425 * @explicit: whether this was an explicit unplug, or one from schedule()
426 *
427 * Unplug request queue @q because the device driver is scheduled to
428 * work on elements in the request queue.
429 */
430DEFINE_EVENT(block_unplug, block_unplug,
431
432 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
433
434 TP_ARGS(q, depth, explicit)
435);
436
437/**
438 * block_split - split a single bio struct into two bio structs
439 * @q: queue containing the bio
440 * @bio: block operation being split
441 * @new_sector: The starting sector for the new bio
442 *
443 * The bio request @bio in request queue @q needs to be split into two
444 * bio requests. The newly created @bio request starts at
445 * @new_sector. This split may be required due to hardware limitations,
446 * such as an operation crossing device boundaries in a RAID system.
447 */
448TRACE_EVENT(block_split,
449
450 TP_PROTO(struct request_queue *q, struct bio *bio,
451 unsigned int new_sector),
452
453 TP_ARGS(q, bio, new_sector),
454
455 TP_STRUCT__entry(
456 __field( dev_t, dev )
457 __field( sector_t, sector )
458 __field( sector_t, new_sector )
459 __array( char, rwbs, 6 )
460 __array( char, comm, TASK_COMM_LEN )
461 ),
462
463 TP_fast_assign(
464 __entry->dev = bio->bi_bdev->bd_dev;
465 __entry->sector = bio->bi_sector;
466 __entry->new_sector = new_sector;
467 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
468 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
469 ),
470
471 TP_printk("%d,%d %s %llu / %llu [%s]",
472 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
473 (unsigned long long)__entry->sector,
474 (unsigned long long)__entry->new_sector,
475 __entry->comm)
476);
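
[Editor's note] A hedged sketch of a split call site, e.g. a RAID-style driver cutting a bio at a chunk boundary (helper name is hypothetical; only the tracepoint call matches the header above):

	/* Hypothetical: record the split before actually cutting @bio. */
	static void split_bio_at(struct request_queue *q, struct bio *bio,
				 unsigned int new_sector)
	{
		trace_block_split(q, bio, new_sector);
		/* ... perform the actual split, e.g. via bio_split() ... */
	}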
477
478/**
479 * block_bio_remap - map request for a logical device to the raw device
480 * @q: queue holding the operation
481 * @bio: revised operation
482 * @dev: device for the operation
483 * @from: original sector for the operation
484 *
485 * An operation for a logical device has been mapped to the
486 * raw block device.
487 */
488TRACE_EVENT(block_bio_remap,
489
490 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
491 sector_t from),
492
493 TP_ARGS(q, bio, dev, from),
494
495 TP_STRUCT__entry(
496 __field( dev_t, dev )
497 __field( sector_t, sector )
498 __field( unsigned int, nr_sector )
499 __field( dev_t, old_dev )
500 __field( sector_t, old_sector )
501 __array( char, rwbs, 6 )
502 ),
503
504 TP_fast_assign(
505 __entry->dev = bio->bi_bdev->bd_dev;
506 __entry->sector = bio->bi_sector;
507 __entry->nr_sector = bio->bi_size >> 9;
508 __entry->old_dev = dev;
509 __entry->old_sector = from;
510 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
511 ),
512
513 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
514 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
515 (unsigned long long)__entry->sector,
516 __entry->nr_sector,
517 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
518 (unsigned long long)__entry->old_sector)
519);
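
[Editor's note] A simplified illustration of how a stacking driver (dm/md style) would fire this after redirecting a bio; this is not the mainline remap code:

	/* Save the logical location, redirect, then record the remap. */
	static void remap_to_raw(struct request_queue *q, struct bio *bio,
				 struct block_device *raw, sector_t offset)
	{
		dev_t old_dev = bio->bi_bdev->bd_dev;
		sector_t old_sector = bio->bi_sector;

		bio->bi_bdev = raw;
		bio->bi_sector += offset;
		trace_block_bio_remap(q, bio, old_dev, old_sector);
	}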
520
521/**
522 * block_rq_remap - map request for a block operation request
523 * @q: queue holding the operation
524 * @rq: block IO operation request
525 * @dev: device for the operation
526 * @from: original sector for the operation
527 *
528 * The block operation request @rq in @q has been remapped. The block
529 * operation request @rq holds the current information and @from holds
530 * the original sector.
531 */
532TRACE_EVENT(block_rq_remap,
533
534 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
535 sector_t from),
536
537 TP_ARGS(q, rq, dev, from),
538
539 TP_STRUCT__entry(
540 __field( dev_t, dev )
541 __field( sector_t, sector )
542 __field( unsigned int, nr_sector )
543 __field( dev_t, old_dev )
544 __field( sector_t, old_sector )
545 __array( char, rwbs, 6 )
546 ),
547
548 TP_fast_assign(
549 __entry->dev = disk_devt(rq->rq_disk);
550 __entry->sector = blk_rq_pos(rq);
551 __entry->nr_sector = blk_rq_sectors(rq);
552 __entry->old_dev = dev;
553 __entry->old_sector = from;
554 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
555 ),
556
557 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
558 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
559 (unsigned long long)__entry->sector,
560 __entry->nr_sector,
561 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
562 (unsigned long long)__entry->old_sector)
563);
564
565#endif /* _TRACE_BLOCK_H */
566
567/* This part must be outside protection */
568#include <trace/define_trace.h>
569
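
[Editor's note] The trailing define_trace.h include is what turns this header into probe code on its second reading. With the mainline flavour of these headers, one compilation unit opts in via the standard convention:

	/* In exactly one .c file of the consuming module: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/block.h>	/* second pass emits the probe bodies */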
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/irq.h b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
new file mode 100644
index 00000000000..1c09820df58
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
@@ -0,0 +1,150 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM irq
3
4#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_IRQ_H
6
7#include <linux/tracepoint.h>
8
9struct irqaction;
10struct softirq_action;
11
12#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
13#define show_softirq_name(val) \
14 __print_symbolic(val, \
15 softirq_name(HI), \
16 softirq_name(TIMER), \
17 softirq_name(NET_TX), \
18 softirq_name(NET_RX), \
19 softirq_name(BLOCK), \
20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \
23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
25
26/**
27 * irq_handler_entry - called immediately before the irq action handler
28 * @irq: irq number
29 * @action: pointer to struct irqaction
30 *
31 * The struct irqaction pointed to by @action contains various
32 * information about the handler, including the device name,
33 * @action->name, and the device id, @action->dev_id. When used in
34 * conjunction with the irq_handler_exit tracepoint, we can figure
35 * out irq handler latencies.
36 */
37TRACE_EVENT(irq_handler_entry,
38
39 TP_PROTO(int irq, struct irqaction *action),
40
41 TP_ARGS(irq, action),
42
43 TP_STRUCT__entry(
44 __field( int, irq )
45 __string( name, action->name )
46 ),
47
48 TP_fast_assign(
49 __entry->irq = irq;
50 __assign_str(name, action->name);
51 ),
52
53 TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
54);
55
56/**
57 * irq_handler_exit - called immediately after the irq action handler returns
58 * @irq: irq number
59 * @action: pointer to struct irqaction
60 * @ret: return value
61 *
62 * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
63 * @action->handler successfully handled this irq. Otherwise, the irq might be
64 * a shared irq line, or the irq was not handled successfully. Can be used in
65 * conjunction with irq_handler_entry to understand irq handler latencies.
66 */
67TRACE_EVENT(irq_handler_exit,
68
69 TP_PROTO(int irq, struct irqaction *action, int ret),
70
71 TP_ARGS(irq, action, ret),
72
73 TP_STRUCT__entry(
74 __field( int, irq )
75 __field( int, ret )
76 ),
77
78 TP_fast_assign(
79 __entry->irq = irq;
80 __entry->ret = ret;
81 ),
82
83 TP_printk("irq=%d ret=%s",
84 __entry->irq, __entry->ret ? "handled" : "unhandled")
85);
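
[Editor's note] These two events bracket the handler invocation; a simplified view of the pairing in the generic IRQ flow (not the exact mainline function):

	/* Simplified: latency = exit timestamp - entry timestamp. */
	static irqreturn_t run_action(int irq, struct irqaction *action)
	{
		irqreturn_t res;

		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);
		return res;
	}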
86
87DECLARE_EVENT_CLASS(softirq,
88
89 TP_PROTO(unsigned int vec_nr),
90
91 TP_ARGS(vec_nr),
92
93 TP_STRUCT__entry(
94 __field( unsigned int, vec )
95 ),
96
97 TP_fast_assign(
98 __entry->vec = vec_nr;
99 ),
100
101 TP_printk("vec=%u [action=%s]", __entry->vec,
102 show_softirq_name(__entry->vec))
103);
104
105/**
106 * softirq_entry - called immediately before the softirq handler
107 * @vec_nr: softirq vector number
108 *
109 * When used in combination with the softirq_exit tracepoint
110 * we can determine the softirq handler runtime.
111 */
112DEFINE_EVENT(softirq, softirq_entry,
113
114 TP_PROTO(unsigned int vec_nr),
115
116 TP_ARGS(vec_nr)
117);
118
119/**
120 * softirq_exit - called immediately after the softirq handler returns
121 * @vec_nr: softirq vector number
122 *
123 * When used in combination with the softirq_entry tracepoint
124 * we can determine the softirq handler runtime.
125 */
126DEFINE_EVENT(softirq, softirq_exit,
127
128 TP_PROTO(unsigned int vec_nr),
129
130 TP_ARGS(vec_nr)
131);
132
133/**
134 * softirq_raise - called immediately when a softirq is raised
135 * @vec_nr: softirq vector number
136 *
137 * When used in combination with the softirq_entry tracepoint
138 * we can determine the softirq raise-to-run latency.
139 */
140DEFINE_EVENT(softirq, softirq_raise,
141
142 TP_PROTO(unsigned int vec_nr),
143
144 TP_ARGS(vec_nr)
145);
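
[Editor's note] Taken together, the three softirq events cover one vector's lifecycle; a simplified sketch of the per-vector dispatch that fires entry/exit (illustrative, not the literal __do_softirq() body):

	/* raise..entry gives wakeup latency; entry..exit gives runtime. */
	static void run_softirq(struct softirq_action *h, unsigned int vec_nr)
	{
		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
	}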
146
147#endif /* _TRACE_IRQ_H */
148
149/* This part must be outside protection */
150#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/kvm.h b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
new file mode 100644
index 00000000000..46e3cd8e197
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
@@ -0,0 +1,312 @@
1#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_MAIN_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8
9#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
10
11#define kvm_trace_exit_reason \
12 ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
13 ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
14 ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
15 ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
16 ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
17
18TRACE_EVENT(kvm_userspace_exit,
19 TP_PROTO(__u32 reason, int errno),
20 TP_ARGS(reason, errno),
21
22 TP_STRUCT__entry(
23 __field( __u32, reason )
24 __field( int, errno )
25 ),
26
27 TP_fast_assign(
28 __entry->reason = reason;
29 __entry->errno = errno;
30 ),
31
32 TP_printk("reason %s (%d)",
33 __entry->errno < 0 ?
34 (__entry->errno == -EINTR ? "restart" : "error") :
35 __print_symbolic(__entry->reason, kvm_trace_exit_reason),
36 __entry->errno < 0 ? -__entry->errno : __entry->reason)
37);
38
39#if defined(__KVM_HAVE_IOAPIC)
40TRACE_EVENT(kvm_set_irq,
41 TP_PROTO(unsigned int gsi, int level, int irq_source_id),
42 TP_ARGS(gsi, level, irq_source_id),
43
44 TP_STRUCT__entry(
45 __field( unsigned int, gsi )
46 __field( int, level )
47 __field( int, irq_source_id )
48 ),
49
50 TP_fast_assign(
51 __entry->gsi = gsi;
52 __entry->level = level;
53 __entry->irq_source_id = irq_source_id;
54 ),
55
56 TP_printk("gsi %u level %d source %d",
57 __entry->gsi, __entry->level, __entry->irq_source_id)
58);
59
60#define kvm_deliver_mode \
61 {0x0, "Fixed"}, \
62 {0x1, "LowPrio"}, \
63 {0x2, "SMI"}, \
64 {0x3, "Res3"}, \
65 {0x4, "NMI"}, \
66 {0x5, "INIT"}, \
67 {0x6, "SIPI"}, \
68 {0x7, "ExtINT"}
69
70TRACE_EVENT(kvm_ioapic_set_irq,
71 TP_PROTO(__u64 e, int pin, bool coalesced),
72 TP_ARGS(e, pin, coalesced),
73
74 TP_STRUCT__entry(
75 __field( __u64, e )
76 __field( int, pin )
77 __field( bool, coalesced )
78 ),
79
80 TP_fast_assign(
81 __entry->e = e;
82 __entry->pin = pin;
83 __entry->coalesced = coalesced;
84 ),
85
86 TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
87 __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
88 __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
89 (__entry->e & (1<<11)) ? "logical" : "physical",
90 (__entry->e & (1<<15)) ? "level" : "edge",
91 (__entry->e & (1<<16)) ? "|masked" : "",
92 __entry->coalesced ? " (coalesced)" : "")
93);
94
95TRACE_EVENT(kvm_msi_set_irq,
96 TP_PROTO(__u64 address, __u64 data),
97 TP_ARGS(address, data),
98
99 TP_STRUCT__entry(
100 __field( __u64, address )
101 __field( __u64, data )
102 ),
103
104 TP_fast_assign(
105 __entry->address = address;
106 __entry->data = data;
107 ),
108
109 TP_printk("dst %u vec %x (%s|%s|%s%s)",
110 (u8)(__entry->address >> 12), (u8)__entry->data,
111 __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
112 (__entry->address & (1<<2)) ? "logical" : "physical",
113 (__entry->data & (1<<15)) ? "level" : "edge",
114 (__entry->address & (1<<3)) ? "|rh" : "")
115);
116
117#define kvm_irqchips \
118 {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
119 {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
120 {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
121
122TRACE_EVENT(kvm_ack_irq,
123 TP_PROTO(unsigned int irqchip, unsigned int pin),
124 TP_ARGS(irqchip, pin),
125
126 TP_STRUCT__entry(
127 __field( unsigned int, irqchip )
128 __field( unsigned int, pin )
129 ),
130
131 TP_fast_assign(
132 __entry->irqchip = irqchip;
133 __entry->pin = pin;
134 ),
135
136 TP_printk("irqchip %s pin %u",
137 __print_symbolic(__entry->irqchip, kvm_irqchips),
138 __entry->pin)
139);
140
141
142
143#endif /* defined(__KVM_HAVE_IOAPIC) */
144
145#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
146#define KVM_TRACE_MMIO_READ 1
147#define KVM_TRACE_MMIO_WRITE 2
148
149#define kvm_trace_symbol_mmio \
150 { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
151 { KVM_TRACE_MMIO_READ, "read" }, \
152 { KVM_TRACE_MMIO_WRITE, "write" }
153
154TRACE_EVENT(kvm_mmio,
155 TP_PROTO(int type, int len, u64 gpa, u64 val),
156 TP_ARGS(type, len, gpa, val),
157
158 TP_STRUCT__entry(
159 __field( u32, type )
160 __field( u32, len )
161 __field( u64, gpa )
162 __field( u64, val )
163 ),
164
165 TP_fast_assign(
166 __entry->type = type;
167 __entry->len = len;
168 __entry->gpa = gpa;
169 __entry->val = val;
170 ),
171
172 TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
173 __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
174 __entry->len, __entry->gpa, __entry->val)
175);
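
[Editor's note] A hedged sketch of an emulation path recording an MMIO write with this event (function name is illustrative):

	/* Hypothetical MMIO-write emulation hook. */
	static void record_mmio_write(u64 gpa, const void *buf, int len)
	{
		u64 val = 0;

		memcpy(&val, buf, len < 8 ? len : 8);	/* clamp to the traced field */
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, gpa, val);
	}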
176
177#define kvm_fpu_load_symbol \
178 {0, "unload"}, \
179 {1, "load"}
180
181TRACE_EVENT(kvm_fpu,
182 TP_PROTO(int load),
183 TP_ARGS(load),
184
185 TP_STRUCT__entry(
186 __field( u32, load )
187 ),
188
189 TP_fast_assign(
190 __entry->load = load;
191 ),
192
193 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
194);
195
196TRACE_EVENT(kvm_age_page,
197 TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
198 TP_ARGS(hva, slot, ref),
199
200 TP_STRUCT__entry(
201 __field( u64, hva )
202 __field( u64, gfn )
203 __field( u8, referenced )
204 ),
205
206 TP_fast_assign(
207 __entry->hva = hva;
208 __entry->gfn =
209 slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
210 __entry->referenced = ref;
211 ),
212
213 TP_printk("hva %llx gfn %llx %s",
214 __entry->hva, __entry->gfn,
215 __entry->referenced ? "YOUNG" : "OLD")
216);
217
218#ifdef CONFIG_KVM_ASYNC_PF
219DECLARE_EVENT_CLASS(kvm_async_get_page_class,
220
221 TP_PROTO(u64 gva, u64 gfn),
222
223 TP_ARGS(gva, gfn),
224
225 TP_STRUCT__entry(
226 __field(__u64, gva)
227 __field(u64, gfn)
228 ),
229
230 TP_fast_assign(
231 __entry->gva = gva;
232 __entry->gfn = gfn;
233 ),
234
235 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
236);
237
238DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
239
240 TP_PROTO(u64 gva, u64 gfn),
241
242 TP_ARGS(gva, gfn)
243);
244
245DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
246
247 TP_PROTO(u64 gva, u64 gfn),
248
249 TP_ARGS(gva, gfn)
250);
251
252DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
253
254 TP_PROTO(u64 token, u64 gva),
255
256 TP_ARGS(token, gva),
257
258 TP_STRUCT__entry(
259 __field(__u64, token)
260 __field(__u64, gva)
261 ),
262
263 TP_fast_assign(
264 __entry->token = token;
265 __entry->gva = gva;
266 ),
267
268 TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
269
270);
271
272DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
273
274 TP_PROTO(u64 token, u64 gva),
275
276 TP_ARGS(token, gva)
277);
278
279DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
280
281 TP_PROTO(u64 token, u64 gva),
282
283 TP_ARGS(token, gva)
284);
285
286TRACE_EVENT(
287 kvm_async_pf_completed,
288 TP_PROTO(unsigned long address, struct page *page, u64 gva),
289 TP_ARGS(address, page, gva),
290
291 TP_STRUCT__entry(
292 __field(unsigned long, address)
293 __field(pfn_t, pfn)
294 __field(u64, gva)
295 ),
296
297 TP_fast_assign(
298 __entry->address = address;
299 __entry->pfn = page ? page_to_pfn(page) : 0;
300 __entry->gva = gva;
301 ),
302
303 TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
304 __entry->address, __entry->pfn)
305);
306
307#endif
308
309#endif /* _TRACE_KVM_MAIN_H */
310
311/* This part must be outside protection */
312#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/sched.h b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
new file mode 100644
index 00000000000..f6334782a59
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
@@ -0,0 +1,397 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM sched
3
4#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_SCHED_H
6
7#include <linux/sched.h>
8#include <linux/tracepoint.h>
9
10/*
11 * Tracepoint for calling kthread_stop, performed to end a kthread:
12 */
13TRACE_EVENT(sched_kthread_stop,
14
15 TP_PROTO(struct task_struct *t),
16
17 TP_ARGS(t),
18
19 TP_STRUCT__entry(
20 __array( char, comm, TASK_COMM_LEN )
21 __field( pid_t, pid )
22 ),
23
24 TP_fast_assign(
25 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
26 __entry->pid = t->pid;
27 ),
28
29 TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
30);
31
32/*
33 * Tracepoint for the return value of the kthread stopping:
34 */
35TRACE_EVENT(sched_kthread_stop_ret,
36
37 TP_PROTO(int ret),
38
39 TP_ARGS(ret),
40
41 TP_STRUCT__entry(
42 __field( int, ret )
43 ),
44
45 TP_fast_assign(
46 __entry->ret = ret;
47 ),
48
49 TP_printk("ret=%d", __entry->ret)
50);
51
52/*
53 * Tracepoint for waking up a task:
54 */
55DECLARE_EVENT_CLASS(sched_wakeup_template,
56
57 TP_PROTO(struct task_struct *p, int success),
58
59 TP_ARGS(p, success),
60
61 TP_STRUCT__entry(
62 __array( char, comm, TASK_COMM_LEN )
63 __field( pid_t, pid )
64 __field( int, prio )
65 __field( int, success )
66 __field( int, target_cpu )
67 ),
68
69 TP_fast_assign(
70 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
71 __entry->pid = p->pid;
72 __entry->prio = p->prio;
73 __entry->success = success;
74 __entry->target_cpu = task_cpu(p);
75 ),
76
77 TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
78 __entry->comm, __entry->pid, __entry->prio,
79 __entry->success, __entry->target_cpu)
80);
81
82DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
83 TP_PROTO(struct task_struct *p, int success),
84 TP_ARGS(p, success));
85
86/*
87 * Tracepoint for waking up a new task:
88 */
89DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
90 TP_PROTO(struct task_struct *p, int success),
91 TP_ARGS(p, success));
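
[Editor's note] Both wakeup events are fired from the wake-up path with the same signature; a minimal sketch (do_wakeup() is an assumed helper, not a mainline function):

	/* Hypothetical wrapper around the real wake-up logic. */
	static int wake_and_trace(struct task_struct *p)
	{
		int success = do_wakeup(p);	/* assumed helper */

		trace_sched_wakeup(p, success);
		return success;
	}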
92
93#ifdef CREATE_TRACE_POINTS
94static inline long __trace_sched_switch_state(struct task_struct *p)
95{
96 long state = p->state;
97
98#ifdef CONFIG_PREEMPT
99 /*
100 * For all intents and purposes a preempted task is a running task.
101 */
102 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
103 state = TASK_RUNNING;
104#endif
105
106 return state;
107}
108#endif
109
110/*
111 * Tracepoint for task switches, performed by the scheduler:
112 */
113TRACE_EVENT(sched_switch,
114
115 TP_PROTO(struct task_struct *prev,
116 struct task_struct *next),
117
118 TP_ARGS(prev, next),
119
120 TP_STRUCT__entry(
121 __array( char, prev_comm, TASK_COMM_LEN )
122 __field( pid_t, prev_pid )
123 __field( int, prev_prio )
124 __field( long, prev_state )
125 __array( char, next_comm, TASK_COMM_LEN )
126 __field( pid_t, next_pid )
127 __field( int, next_prio )
128 ),
129
130 TP_fast_assign(
131 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
132 __entry->prev_pid = prev->pid;
133 __entry->prev_prio = prev->prio;
134 __entry->prev_state = __trace_sched_switch_state(prev);
135 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
136 __entry->next_pid = next->pid;
137 __entry->next_prio = next->prio;
138 ),
139
140 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
141 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
142 __entry->prev_state ?
143 __print_flags(__entry->prev_state, "|",
144 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
145 { 16, "Z" }, { 32, "X" }, { 64, "x" },
146 { 128, "W" }) : "R",
147 __entry->next_comm, __entry->next_pid, __entry->next_prio)
148);
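
[Editor's note] As a reading aid, the single-letter prev_state flags above line up with the classic task states of this kernel generation (best-effort mapping):

	/*  1 TASK_INTERRUPTIBLE   "S"     2 TASK_UNINTERRUPTIBLE  "D"  */
	/*  4 __TASK_STOPPED       "T"     8 __TASK_TRACED         "t"  */
	/* 16 EXIT_ZOMBIE          "Z"    32 EXIT_DEAD             "X"  */
	/* 64 TASK_DEAD            "x"   128 TASK_WAKEKILL         "W"  */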
149
150/*
151 * Tracepoint for a task being migrated:
152 */
153TRACE_EVENT(sched_migrate_task,
154
155 TP_PROTO(struct task_struct *p, int dest_cpu),
156
157 TP_ARGS(p, dest_cpu),
158
159 TP_STRUCT__entry(
160 __array( char, comm, TASK_COMM_LEN )
161 __field( pid_t, pid )
162 __field( int, prio )
163 __field( int, orig_cpu )
164 __field( int, dest_cpu )
165 ),
166
167 TP_fast_assign(
168 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
169 __entry->pid = p->pid;
170 __entry->prio = p->prio;
171 __entry->orig_cpu = task_cpu(p);
172 __entry->dest_cpu = dest_cpu;
173 ),
174
175 TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
176 __entry->comm, __entry->pid, __entry->prio,
177 __entry->orig_cpu, __entry->dest_cpu)
178);
179
180DECLARE_EVENT_CLASS(sched_process_template,
181
182 TP_PROTO(struct task_struct *p),
183
184 TP_ARGS(p),
185
186 TP_STRUCT__entry(
187 __array( char, comm, TASK_COMM_LEN )
188 __field( pid_t, pid )
189 __field( int, prio )
190 ),
191
192 TP_fast_assign(
193 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
194 __entry->pid = p->pid;
195 __entry->prio = p->prio;
196 ),
197
198 TP_printk("comm=%s pid=%d prio=%d",
199 __entry->comm, __entry->pid, __entry->prio)
200);
201
202/*
203 * Tracepoint for freeing a task:
204 */
205DEFINE_EVENT(sched_process_template, sched_process_free,
206 TP_PROTO(struct task_struct *p),
207 TP_ARGS(p));
208
209
210/*
211 * Tracepoint for a task exiting:
212 */
213DEFINE_EVENT(sched_process_template, sched_process_exit,
214 TP_PROTO(struct task_struct *p),
215 TP_ARGS(p));
216
217/*
218 * Tracepoint for waiting on task to unschedule:
219 */
220DEFINE_EVENT(sched_process_template, sched_wait_task,
221 TP_PROTO(struct task_struct *p),
222 TP_ARGS(p));
223
224/*
225 * Tracepoint for a waiting task:
226 */
227TRACE_EVENT(sched_process_wait,
228
229 TP_PROTO(struct pid *pid),
230
231 TP_ARGS(pid),
232
233 TP_STRUCT__entry(
234 __array( char, comm, TASK_COMM_LEN )
235 __field( pid_t, pid )
236 __field( int, prio )
237 ),
238
239 TP_fast_assign(
240 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
241 __entry->pid = pid_nr(pid);
242 __entry->prio = current->prio;
243 ),
244
245 TP_printk("comm=%s pid=%d prio=%d",
246 __entry->comm, __entry->pid, __entry->prio)
247);
248
249/*
250 * Tracepoint for do_fork:
251 */
252TRACE_EVENT(sched_process_fork,
253
254 TP_PROTO(struct task_struct *parent, struct task_struct *child),
255
256 TP_ARGS(parent, child),
257
258 TP_STRUCT__entry(
259 __array( char, parent_comm, TASK_COMM_LEN )
260 __field( pid_t, parent_pid )
261 __array( char, child_comm, TASK_COMM_LEN )
262 __field( pid_t, child_pid )
263 ),
264
265 TP_fast_assign(
266 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
267 __entry->parent_pid = parent->pid;
268 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
269 __entry->child_pid = child->pid;
270 ),
271
272 TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
273 __entry->parent_comm, __entry->parent_pid,
274 __entry->child_comm, __entry->child_pid)
275);
276
277/*
278 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
279 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
280 */
281DECLARE_EVENT_CLASS(sched_stat_template,
282
283 TP_PROTO(struct task_struct *tsk, u64 delay),
284
285 TP_ARGS(tsk, delay),
286
287 TP_STRUCT__entry(
288 __array( char, comm, TASK_COMM_LEN )
289 __field( pid_t, pid )
290 __field( u64, delay )
291 ),
292
293 TP_fast_assign(
294 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
295 __entry->pid = tsk->pid;
296 __entry->delay = delay;
297 )
298 TP_perf_assign(
299 __perf_count(delay);
300 ),
301
302 TP_printk("comm=%s pid=%d delay=%Lu [ns]",
303 __entry->comm, __entry->pid,
304 (unsigned long long)__entry->delay)
305);
306
307
308/*
309 * Tracepoint for accounting wait time (time the task is runnable
310 * but not actually running due to scheduler contention).
311 */
312DEFINE_EVENT(sched_stat_template, sched_stat_wait,
313 TP_PROTO(struct task_struct *tsk, u64 delay),
314 TP_ARGS(tsk, delay));
315
316/*
317 * Tracepoint for accounting sleep time (time the task is not runnable,
318 * including iowait, see below).
319 */
320DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
321 TP_PROTO(struct task_struct *tsk, u64 delay),
322 TP_ARGS(tsk, delay));
323
324/*
325 * Tracepoint for accounting iowait time (time the task is not runnable
326 * due to waiting on IO to complete).
327 */
328DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
329 TP_PROTO(struct task_struct *tsk, u64 delay),
330 TP_ARGS(tsk, delay));
331
332/*
333 * Tracepoint for accounting runtime (time the task is executing
334 * on a CPU).
335 */
336TRACE_EVENT(sched_stat_runtime,
337
338 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
339
340 TP_ARGS(tsk, runtime, vruntime),
341
342 TP_STRUCT__entry(
343 __array( char, comm, TASK_COMM_LEN )
344 __field( pid_t, pid )
345 __field( u64, runtime )
346 __field( u64, vruntime )
347 ),
348
349 TP_fast_assign(
350 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
351 __entry->pid = tsk->pid;
352 __entry->runtime = runtime;
353 __entry->vruntime = vruntime;
354 )
355 TP_perf_assign(
356 __perf_count(runtime);
357 ),
358
359 TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
360 __entry->comm, __entry->pid,
361 (unsigned long long)__entry->runtime,
362 (unsigned long long)__entry->vruntime)
363);
364
365/*
366 * Tracepoint for showing priority inheritance modifying a task's
367 * priority.
368 */
369TRACE_EVENT(sched_pi_setprio,
370
371 TP_PROTO(struct task_struct *tsk, int newprio),
372
373 TP_ARGS(tsk, newprio),
374
375 TP_STRUCT__entry(
376 __array( char, comm, TASK_COMM_LEN )
377 __field( pid_t, pid )
378 __field( int, oldprio )
379 __field( int, newprio )
380 ),
381
382 TP_fast_assign(
383 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
384 __entry->pid = tsk->pid;
385 __entry->oldprio = tsk->prio;
386 __entry->newprio = newprio;
387 ),
388
389 TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
390 __entry->comm, __entry->pid,
391 __entry->oldprio, __entry->newprio)
392);
393
394#endif /* _TRACE_SCHED_H */
395
396/* This part must be outside protection */
397#include <trace/define_trace.h>
diff --git a/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
new file mode 100644
index 00000000000..5a4c04a75b3
--- /dev/null
+++ b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
@@ -0,0 +1,75 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
4
5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_EVENTS_SYSCALLS_H
7
8#include <linux/tracepoint.h>
9
10#include <asm/ptrace.h>
11#include <asm/syscall.h>
12
13
14#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
15
16extern void syscall_regfunc(void);
17extern void syscall_unregfunc(void);
18
19TRACE_EVENT_FN(sys_enter,
20
21 TP_PROTO(struct pt_regs *regs, long id),
22
23 TP_ARGS(regs, id),
24
25 TP_STRUCT__entry(
26 __field( long, id )
27 __array( unsigned long, args, 6 )
28 ),
29
30 TP_fast_assign(
31 __entry->id = id;
32 syscall_get_arguments(current, regs, 0, 6, __entry->args);
33 ),
34
35 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
36 __entry->id,
37 __entry->args[0], __entry->args[1], __entry->args[2],
38 __entry->args[3], __entry->args[4], __entry->args[5]),
39
40 syscall_regfunc, syscall_unregfunc
41);
42
43TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
44
45TRACE_EVENT_FN(sys_exit,
46
47 TP_PROTO(struct pt_regs *regs, long ret),
48
49 TP_ARGS(regs, ret),
50
51 TP_STRUCT__entry(
52 __field( long, id )
53 __field( long, ret )
54 ),
55
56 TP_fast_assign(
57 __entry->id = syscall_get_nr(current, regs);
58 __entry->ret = ret;
59 ),
60
61 TP_printk("NR %ld = %ld",
62 __entry->id, __entry->ret),
63
64 syscall_regfunc, syscall_unregfunc
65);
66
67TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
68
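
[Editor's note] Unlike plain TRACE_EVENT, the _FN variant wires in the reg/unreg callbacks declared above; roughly, attaching the first probe runs syscall_regfunc() and detaching the last runs syscall_unregfunc(). A simplified, illustrative view (the function name is hypothetical, not the generated code):

	/* Simplified enable path for the _FN events. */
	static int sys_enter_probe_attach(void)
	{
		syscall_regfunc();	/* flags tasks to take the traced syscall path */
		return 0;
	}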
69#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
70
71#endif /* _TRACE_EVENTS_SYSCALLS_H */
72
73/* This part must be outside protection */
74#include <trace/define_trace.h>
75