Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/define_trace.h   |   5
-rw-r--r--  include/trace/events/block.h   | 164
-rw-r--r--  include/trace/events/lock.h    |  55
-rw-r--r--  include/trace/events/module.h  |  18
-rw-r--r--  include/trace/events/napi.h    |  10
-rw-r--r--  include/trace/events/sched.h   |  32
-rw-r--r--  include/trace/events/signal.h  |  52
-rw-r--r--  include/trace/ftrace.h         |  92
-rw-r--r--  include/trace/syscall.h        |   8
9 files changed, 292 insertions(+), 144 deletions(-)
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 5acfb1eb4df9..1dfab5401511 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -65,6 +65,10 @@
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+/* Make all open coded DECLARE_TRACE nops */
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args)
+
 #ifdef CONFIG_EVENT_TRACING
 #include <trace/ftrace.h>
 #endif
@@ -75,6 +79,7 @@
 #undef DEFINE_EVENT
 #undef DEFINE_EVENT_PRINT
 #undef TRACE_HEADER_MULTI_READ
+#undef DECLARE_TRACE
 
 /* Only undef what we defined in this file */
 #ifdef UNDEF_TRACE_INCLUDE_FILE
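
For context, a minimal sketch (not part of this patch; header and event names are hypothetical) of a trace header that mixes a full TRACE_EVENT with an open-coded DECLARE_TRACE. With the change above, define_trace.h turns the DECLARE_TRACE into a nop on the re-read passes instead of tripping over it, and then undefines its own override on the way out:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM demo

	#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_DEMO_H

	#include <linux/tracepoint.h>

	/* Full-featured event: gets its ftrace/perf glue from <trace/ftrace.h> */
	TRACE_EVENT(demo_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value)
	);

	/* Open-coded tracepoint: a bare hook with no ring-buffer event.
	 * define_trace.h now #defines this away after the first pass. */
	DECLARE_TRACE(demo_hook,
		TP_PROTO(int value),
		TP_ARGS(value));

	#endif /* _TRACE_DEMO_H */

	/* This part must be outside protection */
	#include <trace/define_trace.h>
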
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 5fb72733331e..d870a918559c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
 		  __entry->nr_sector, __entry->errors)
 );
 
+/**
+ * block_rq_abort - abort block operation request
+ * @q: queue containing the block operation request
+ * @rq: block IO operation request
+ *
+ * Called immediately after pending block IO operation request @rq in
+ * queue @q is aborted. The fields in the operation request @rq
+ * can be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q. For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of the operation request has been completed by the device driver.
+ * If @rq->bio is %NULL, then there is absolutely no additional work
+ * to do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_complete,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_rq_insert - insert block operation request into queue
+ * @q: target queue
+ * @rq: block IO operation request
+ *
+ * Called immediately before block operation request @rq is inserted
+ * into queue @q. The fields in the operation request @rq struct can
+ * be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq, block_rq_insert,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_issue - issue pending block IO request operation to device driver
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq from queue @q is sent to a
+ * device driver for processing.
+ */
 DEFINE_EVENT(block_rq, block_rq_issue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @q: queue holding the block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
 TRACE_EVENT(block_bio_bounce,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
+ * @bio: block operation completed
+ *
+ * This tracepoint indicates there is no further work to do on this
+ * block IO operation @bio.
+ */
 TRACE_EVENT(block_bio_complete,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_backmerge - merging block operation to the end of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block request @bio to the end of an existing block request
+ * in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_backmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block IO operation @bio to the beginning of an existing block
+ * operation in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_frontmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_queue - putting new block IO operation in queue
+ * @q: queue holding operation
+ * @bio: new block operation
+ *
+ * About to place the block IO operation @bio into queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_queue,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_getrq - get a free request entry in queue for block IO operations
+ * @q: queue for operations
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * A request struct for queue @q has been allocated to handle the
+ * block IO operation @bio.
+ */
 DEFINE_EVENT(block_get_rq, block_getrq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
+ * @q: queue for operation
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * In the case where a request struct cannot be provided for queue @q
+ * the process needs to wait for a request struct to become
+ * available. This tracepoint event is generated each time the
+ * process goes to sleep waiting for a request struct to become available.
+ */
 DEFINE_EVENT(block_get_rq, block_sleeprq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_plug - keep operation requests in request queue
+ * @q: request queue to plug
+ *
+ * Plug the request queue @q. Do not allow block operation requests
+ * to be sent to the device driver. Instead, accumulate requests in
+ * the queue to improve throughput performance of the block device.
+ */
 TRACE_EVENT(block_plug,
 
 	TP_PROTO(struct request_queue *q),
@@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug,
 	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
 );
 
+/**
+ * block_unplug_timer - timed release of operation requests in queue to device driver
+ * @q: request queue to unplug
+ *
+ * Unplug the request queue @q because a timer expired and allow block
+ * operation requests to be sent to the device driver.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_timer,
 
 	TP_PROTO(struct request_queue *q),
@@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer,
 	TP_ARGS(q)
 );
 
+/**
+ * block_unplug_io - release of operation requests in request queue
+ * @q: request queue to unplug
+ *
+ * Unplug request queue @q because the device driver is scheduled to
+ * work on elements in the request queue.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
 	TP_PROTO(struct request_queue *q),
@@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io,
 	TP_ARGS(q)
 );
 
+/**
+ * block_split - split a single bio struct into two bio structs
+ * @q: queue containing the bio
+ * @bio: block operation being split
+ * @new_sector: The starting sector for the new bio
+ *
+ * The bio request @bio in request queue @q needs to be split into two
+ * bio requests. The newly created @bio request starts at
+ * @new_sector. This split may be required due to hardware limitations
+ * such as the operation crossing device boundaries in a RAID system.
+ */
 TRACE_EVENT(block_split,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio,
@@ -337,6 +480,16 @@ TRACE_EVENT(block_split,
 		  __entry->comm)
 );
 
+/**
+ * block_remap - map request for a partition to the raw device
+ * @q: queue holding the operation
+ * @bio: revised operation
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * An operation for a partition on a block device has been mapped to the
+ * raw block device.
+ */
 TRACE_EVENT(block_remap,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
@@ -370,6 +523,17 @@ TRACE_EVENT(block_remap,
 		  (unsigned long long)__entry->old_sector)
 );
 
+/**
+ * block_rq_remap - map request for a block operation request
+ * @q: queue holding the operation
+ * @rq: block IO operation request
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * The block operation request @rq in @q has been remapped. The block
+ * operation request @rq holds the current information and @from holds
+ * the original sector.
+ */
 TRACE_EVENT(block_rq_remap,
 
 	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
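
As the new kernel-doc stresses, the request fields can be examined from a probe. A minimal sketch of a module hooking block_rq_abort with the two-argument probe signature this kernel generation uses (no private data pointer); the probe body and names are illustrative only:

	#include <linux/module.h>
	#include <linux/blkdev.h>
	#include <trace/events/block.h>

	static void probe_rq_abort(struct request_queue *q, struct request *rq)
	{
		/* rq->rq_disk can be NULL for some requests; guard before use */
		if (rq->rq_disk)
			pr_info("abort on %s sector %llu\n",
				rq->rq_disk->disk_name,
				(unsigned long long)blk_rq_pos(rq));
	}

	static int __init rq_abort_probe_init(void)
	{
		return register_trace_block_rq_abort(probe_rq_abort);
	}

	static void __exit rq_abort_probe_exit(void)
	{
		unregister_trace_block_rq_abort(probe_rq_abort);
	}

	module_init(rq_abort_probe_init);
	module_exit(rq_abort_probe_exit);
	MODULE_LICENSE("GPL");
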
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index 5c1dcfc16c60..2821b86de63b 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -35,15 +35,15 @@ TRACE_EVENT(lock_acquire,
 		  __get_str(name))
 );
 
-TRACE_EVENT(lock_release,
+DECLARE_EVENT_CLASS(lock,
 
-	TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+	TP_PROTO(struct lockdep_map *lock, unsigned long ip),
 
-	TP_ARGS(lock, nested, ip),
+	TP_ARGS(lock, ip),
 
 	TP_STRUCT__entry(
-		__string(name, lock->name)
-		__field(void *, lockdep_addr)
+		__string(	name,	lock->name	)
+		__field(	void *,	lockdep_addr	)
 	),
 
 	TP_fast_assign(
@@ -51,51 +51,30 @@ TRACE_EVENT(lock_release,
 		__entry->lockdep_addr = lock;
 	),
 
-	TP_printk("%p %s",
-		  __entry->lockdep_addr, __get_str(name))
+	TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
 );
 
-#ifdef CONFIG_LOCK_STAT
-
-TRACE_EVENT(lock_contended,
+DEFINE_EVENT(lock, lock_release,
 
 	TP_PROTO(struct lockdep_map *lock, unsigned long ip),
 
-	TP_ARGS(lock, ip),
+	TP_ARGS(lock, ip)
+);
 
-	TP_STRUCT__entry(
-		__string(name, lock->name)
-		__field(void *, lockdep_addr)
-	),
+#ifdef CONFIG_LOCK_STAT
 
-	TP_fast_assign(
-		__assign_str(name, lock->name);
-		__entry->lockdep_addr = lock;
-	),
+DEFINE_EVENT(lock, lock_contended,
 
-	TP_printk("%p %s",
-		  __entry->lockdep_addr, __get_str(name))
-);
+	TP_PROTO(struct lockdep_map *lock, unsigned long ip),
 
-TRACE_EVENT(lock_acquired,
-	TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
+	TP_ARGS(lock, ip)
+);
 
-	TP_ARGS(lock, ip, waittime),
+DEFINE_EVENT(lock, lock_acquired,
 
-	TP_STRUCT__entry(
-		__string(name, lock->name)
-		__field(s64, wait_nsec)
-		__field(void *, lockdep_addr)
-	),
+	TP_PROTO(struct lockdep_map *lock, unsigned long ip),
 
-	TP_fast_assign(
-		__assign_str(name, lock->name);
-		__entry->wait_nsec = waittime;
-		__entry->lockdep_addr = lock;
-	),
-	TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
-		  __get_str(name),
-		  __entry->wait_nsec)
+	TP_ARGS(lock, ip)
 );
 
 #endif
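
The pattern driving this conversion is worth spelling out: DECLARE_EVENT_CLASS emits the TP_STRUCT__entry/TP_fast_assign/TP_printk machinery exactly once, and each DEFINE_EVENT stamps out a tracepoint that reuses it, repeating only the PROTO/ARGS stanza. A minimal sketch with hypothetical event names (this would live inside a TRACE_SYSTEM header like the ones in this diff):

	/* One class carries the record layout and print logic... */
	DECLARE_EVENT_CLASS(widget_template,

		TP_PROTO(const char *name, int id),

		TP_ARGS(name, id),

		TP_STRUCT__entry(
			__string(	name,	name	)
			__field(	int,	id	)
		),

		TP_fast_assign(
			__assign_str(name, name);
			__entry->id = id;
		),

		TP_printk("%s id=%d", __get_str(name), __entry->id)
	);

	/* ...and each additional instance costs only a PROTO/ARGS stanza. */
	DEFINE_EVENT(widget_template, widget_create,

		TP_PROTO(const char *name, int id),

		TP_ARGS(name, id)
	);

	DEFINE_EVENT(widget_template, widget_destroy,

		TP_PROTO(const char *name, int id),

		TP_ARGS(name, id)
	);
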
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 4b0f48ba16a6..c7bb2f0482fe 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
 	TP_printk("%s", __get_str(name))
 );
 
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
+
 DECLARE_EVENT_CLASS(module_refcnt,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt),
+	TP_ARGS(mod, ip),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	ip		)
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 	TP_fast_assign(
 		__entry->ip	= ip;
-		__entry->refcnt	= refcnt;
+		__entry->refcnt	= __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
 		__assign_str(name, mod->name);
 	),
 
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
 
 DEFINE_EVENT(module_refcnt, module_get,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
 
 DEFINE_EVENT(module_refcnt, module_put,
 
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
 
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
+#endif /* CONFIG_MODULE_UNLOAD */
 
 TRACE_EVENT(module_request,
 
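
Since the tracepoint now reads the per-cpu incs/decs counters itself, call sites no longer compute or pass a refcount. Roughly how a caller looks after this change; a simplified sketch of a try_module_get()-style helper, not the verbatim kernel code:

	static inline int my_try_module_get(struct module *module)
	{
		int ret = 1;

		if (module) {
			preempt_disable();
			if (likely(module_is_live(module))) {
				__this_cpu_inc(module->refptr->incs);
				/* no refcnt argument any more */
				trace_module_get(module, _RET_IP_);
			} else
				ret = 0;
			preempt_enable();
		}
		return ret;
	}
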
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index a8989c4547e7..188deca2f3c7 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -1,4 +1,7 @@
-#ifndef _TRACE_NAPI_H_
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM napi
+
+#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_NAPI_H_
 
 #include <linux/netdevice.h>
@@ -8,4 +11,7 @@ DECLARE_TRACE(napi_poll,
 	TP_PROTO(struct napi_struct *napi),
 	TP_ARGS(napi));
 
-#endif
+#endif /* _TRACE_NAPI_H_ */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
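
With napi.h converted to the standard TRACE_SYSTEM layout, its tracepoints are instantiated the same way as every other event header: exactly one compilation unit defines CREATE_TRACE_POINTS before the include (net/core/net-traces.c plays this role for the networking trace headers), and every other user simply includes the header and fires the stub. A sketch:

	/* in the one file that instantiates the tracepoints */
	#define CREATE_TRACE_POINTS
	#include <trace/events/napi.h>

	/* in any other file: just include the header and call the hook */
	#include <trace/events/napi.h>

	static void poll_one(struct napi_struct *napi)
	{
		trace_napi_poll(napi);
		/* ... */
	}
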
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index cfceb0b73e20..4f733ecea46e 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -51,15 +51,12 @@ TRACE_EVENT(sched_kthread_stop_ret,
 
 /*
  * Tracepoint for waiting on task to unschedule:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 TRACE_EVENT(sched_wait_task,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_PROTO(struct task_struct *p),
 
-	TP_ARGS(rq, p),
+	TP_ARGS(p),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -79,15 +76,12 @@ TRACE_EVENT(sched_wait_task,
 
 /*
  * Tracepoint for waking up a task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
 
-	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
+	TP_PROTO(struct task_struct *p, int success),
 
-	TP_ARGS(rq, p, success),
+	TP_ARGS(p, success),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -111,31 +105,25 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
 );
 
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for waking up a new task:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
 */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
-	     TP_ARGS(rq, p, success));
+	     TP_PROTO(struct task_struct *p, int success),
+	     TP_ARGS(p, success));
 
 /*
  * Tracepoint for task switches, performed by the scheduler:
- *
- * (NOTE: the 'rq' argument is not used by generic trace events,
- *        but used by the latency tracer plugin. )
 */
 TRACE_EVENT(sched_switch,
 
-	TP_PROTO(struct rq *rq, struct task_struct *prev,
+	TP_PROTO(struct task_struct *prev,
 		 struct task_struct *next),
 
-	TP_ARGS(rq, prev, next),
+	TP_ARGS(prev, next),
 
 	TP_STRUCT__entry(
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
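
Dropping the rq argument changes the prototype every attached probe must use; the latency tracer, previously the only consumer of rq, now recovers that context on its own. A sketch of a probe against the new sched_switch shape (names illustrative, registration following the register_trace_* convention of this tree):

	/* matches the new TP_PROTO: no struct rq argument */
	static void probe_sched_switch(struct task_struct *prev,
				       struct task_struct *next)
	{
		pr_debug("switch %s[%d] -> %s[%d]\n",
			 prev->comm, prev->pid, next->comm, next->pid);
	}

	/* attach/detach from module init/exit */
	register_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_switch(probe_sched_switch);
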
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index a510b75ac304..814566c99d29 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
 		  __entry->sa_handler, __entry->sa_flags)
 );
 
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
@@ -135,6 +124,24 @@ TRACE_EVENT(signal_overflow_fail,
 );
 
 /**
+ * signal_overflow_fail - called when the signal queue overflows
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * The kernel fails to generate the 'sig' signal with 'info' siginfo,
+ * because the siginfo queue has overflowed, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+
+	TP_PROTO(int sig, int group, struct siginfo *info),
+
+	TP_ARGS(sig, group, info)
+);
+
+/**
  * signal_lose_info - called when siginfo is lost
  * @sig: signal number
  * @group: signal to process group or not (bool)
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
  * 'group' is not 0 if the signal will be sent to a process group.
  * 'sig' is always one of non-RT signals.
  */
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
 
 	TP_PROTO(int sig, int group, struct siginfo *info),
 
-	TP_ARGS(sig, group, info),
-
-	TP_STRUCT__entry(
-		__field(	int,	sig	)
-		__field(	int,	group	)
-		__field(	int,	errno	)
-		__field(	int,	code	)
-	),
-
-	TP_fast_assign(
-		__entry->sig	= sig;
-		__entry->group	= group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		  __entry->sig, __entry->group, __entry->errno, __entry->code)
+	TP_ARGS(sig, group, info)
 );
+
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0804cd594803..16253db38d73 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -154,9 +154,11 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
@@ -401,18 +403,18 @@ static inline notrace int ftrace_get_offsets_##call(	\
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)		\
 								\
-static void ftrace_profile_##name(proto);			\
+static void perf_trace_##name(proto);				\
 								\
 static notrace int						\
-ftrace_profile_enable_##name(struct ftrace_event_call *unused)	\
+perf_trace_enable_##name(struct ftrace_event_call *unused)	\
 {								\
-	return register_trace_##name(ftrace_profile_##name);	\
+	return register_trace_##name(perf_trace_##name);	\
 }								\
 								\
 static notrace void						\
-ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
+perf_trace_disable_##name(struct ftrace_event_call *unused)	\
 {								\
-	unregister_trace_##name(ftrace_profile_##name);		\
+	unregister_trace_##name(perf_trace_##name);		\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -450,38 +452,38 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
+ *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *	           __array macros.
 *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,18 +505,20 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused)	\
 *	.raw_init		= trace_event_raw_init,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.define_fields		= ftrace_define_fields_<call>,
 * }
 *
 */
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call)				\
-	.profile_enable = ftrace_profile_enable_##call,		\
-	.profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call)					\
+	.perf_event_enable = perf_trace_enable_##call,		\
+	.perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
 		return;						\
 	entry = ring_buffer_event_data(event);			\
 								\
-								\
 	tstruct							\
 								\
 	{ assign; }						\
@@ -638,7 +643,7 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##template,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +662,18 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.print_fmt		= print_fmt_##call,			\
 	.define_fields		= ftrace_define_fields_##template,	\
-	_TRACE_PROFILE_INIT(call)					\
+	_TRACE_PERF_INIT(call)						\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
@@ -699,9 +704,9 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
- *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
- *		trace_buf = rcu_dereference(perf_trace_buf);
+ *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
@@ -757,8 +762,8 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 static notrace void							\
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
-			    proto)					\
+perf_trace_templ_##call(struct ftrace_event_call *event_call,		\
+			struct pt_regs *__regs, proto)			\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
@@ -773,10 +778,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
 		      "profile buffer not large enough"))		\
 		return;							\
-	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
+	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->id, &rctx, &irq_flags);	\
 	if (!entry)							\
 		return;							\
@@ -784,17 +789,22 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
 									\
 	{ assign; }							\
 									\
-	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
-			       __count, irq_flags);			\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+			      __count, irq_flags, __regs);		\
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
-static notrace void ftrace_profile_##call(proto)			\
+static notrace void perf_trace_##call(proto)				\
 {									\
 	struct ftrace_event_call *event_call = &event_##call;		\
-									\
-	ftrace_profile_templ_##template(event_call, args);		\
+	struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);	\
+									\
+	perf_fetch_caller_regs(__regs, 1);				\
+									\
+	perf_trace_templ_##template(event_call, __regs, args);		\
+									\
+	put_cpu_var(perf_trace_regs);					\
}
 
 #undef DEFINE_EVENT_PRINT
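
To make the new shape concrete, here is a hand-expanded sketch of what the DEFINE_EVENT perf glue above would generate for a hypothetical event my_event built on class my_template (this is illustrative, not literal preprocessor output). The per-cpu perf_trace_regs buffer plus the perf_fetch_caller_regs() snapshot are what let perf attribute the sample to the tracepoint's caller:

	static notrace void perf_trace_my_event(struct task_struct *p, int success)
	{
		struct ftrace_event_call *event_call = &event_my_event;
		struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);

		/* snapshot the caller's registers for perf callchains */
		perf_fetch_caller_regs(__regs, 1);

		perf_trace_templ_my_template(event_call, __regs, p, success);

		put_cpu_var(perf_trace_regs);
	}
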
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */