author     David S. Miller <davem@davemloft.net>   2009-06-15 06:02:23 -0400
committer  David S. Miller <davem@davemloft.net>   2009-06-15 06:02:23 -0400
commit     9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree       8d104ec2a459346b99413b0b77421ca7b9936c1a /include/trace
parent     ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent     45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
Documentation/feature-removal-schedule.txt
drivers/scsi/fcoe/fcoe.c
net/core/drop_monitor.c
net/core/net-traces.c
Diffstat (limited to 'include/trace')
-rw-r--r-- | include/trace/block.h                                                          |  76
-rw-r--r-- | include/trace/define_trace.h                                                   |  75
-rw-r--r-- | include/trace/events/block.h                                                   | 493
-rw-r--r-- | include/trace/events/irq.h                                                     | 145
-rw-r--r-- | include/trace/events/kmem.h                                                    | 231
-rw-r--r-- | include/trace/events/lockdep.h                                                 |  96
-rw-r--r-- | include/trace/events/napi.h (renamed from include/trace/napi.h)                |   0
-rw-r--r-- | include/trace/events/sched.h (renamed from include/trace/sched_event_types.h)  |  29
-rw-r--r-- | include/trace/events/skb.h                                                     |  40
-rw-r--r-- | include/trace/events/workqueue.h                                               | 100
-rw-r--r-- | include/trace/ftrace.h                                                         | 591
-rw-r--r-- | include/trace/irq.h                                                            |   9
-rw-r--r-- | include/trace/irq_event_types.h                                                |  55
-rw-r--r-- | include/trace/kmemtrace.h                                                      |  63
-rw-r--r-- | include/trace/lockdep.h                                                        |   9
-rw-r--r-- | include/trace/lockdep_event_types.h                                            |  44
-rw-r--r-- | include/trace/sched.h                                                          |   9
-rw-r--r-- | include/trace/skb.h                                                            |  11
-rw-r--r-- | include/trace/trace_event_types.h                                              |   5
-rw-r--r-- | include/trace/trace_events.h                                                   |   5
-rw-r--r-- | include/trace/workqueue.h                                                      |  25
21 files changed, 1790 insertions, 321 deletions
diff --git a/include/trace/block.h b/include/trace/block.h
deleted file mode 100644
index 25b7068b819e..000000000000
--- a/include/trace/block.h
+++ /dev/null
@@ -1,76 +0,0 @@
1 | #ifndef _TRACE_BLOCK_H | ||
2 | #define _TRACE_BLOCK_H | ||
3 | |||
4 | #include <linux/blkdev.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | DECLARE_TRACE(block_rq_abort, | ||
8 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
9 | TP_ARGS(q, rq)); | ||
10 | |||
11 | DECLARE_TRACE(block_rq_insert, | ||
12 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
13 | TP_ARGS(q, rq)); | ||
14 | |||
15 | DECLARE_TRACE(block_rq_issue, | ||
16 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
17 | TP_ARGS(q, rq)); | ||
18 | |||
19 | DECLARE_TRACE(block_rq_requeue, | ||
20 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
21 | TP_ARGS(q, rq)); | ||
22 | |||
23 | DECLARE_TRACE(block_rq_complete, | ||
24 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
25 | TP_ARGS(q, rq)); | ||
26 | |||
27 | DECLARE_TRACE(block_bio_bounce, | ||
28 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
29 | TP_ARGS(q, bio)); | ||
30 | |||
31 | DECLARE_TRACE(block_bio_complete, | ||
32 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
33 | TP_ARGS(q, bio)); | ||
34 | |||
35 | DECLARE_TRACE(block_bio_backmerge, | ||
36 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
37 | TP_ARGS(q, bio)); | ||
38 | |||
39 | DECLARE_TRACE(block_bio_frontmerge, | ||
40 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
41 | TP_ARGS(q, bio)); | ||
42 | |||
43 | DECLARE_TRACE(block_bio_queue, | ||
44 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
45 | TP_ARGS(q, bio)); | ||
46 | |||
47 | DECLARE_TRACE(block_getrq, | ||
48 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
49 | TP_ARGS(q, bio, rw)); | ||
50 | |||
51 | DECLARE_TRACE(block_sleeprq, | ||
52 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
53 | TP_ARGS(q, bio, rw)); | ||
54 | |||
55 | DECLARE_TRACE(block_plug, | ||
56 | TP_PROTO(struct request_queue *q), | ||
57 | TP_ARGS(q)); | ||
58 | |||
59 | DECLARE_TRACE(block_unplug_timer, | ||
60 | TP_PROTO(struct request_queue *q), | ||
61 | TP_ARGS(q)); | ||
62 | |||
63 | DECLARE_TRACE(block_unplug_io, | ||
64 | TP_PROTO(struct request_queue *q), | ||
65 | TP_ARGS(q)); | ||
66 | |||
67 | DECLARE_TRACE(block_split, | ||
68 | TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), | ||
69 | TP_ARGS(q, bio, pdu)); | ||
70 | |||
71 | DECLARE_TRACE(block_remap, | ||
72 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | ||
73 | sector_t from, sector_t to), | ||
74 | TP_ARGS(q, bio, dev, from, to)); | ||
75 | |||
76 | #endif | ||
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
new file mode 100644
index 000000000000..f7a7ae1e8f90
--- /dev/null
+++ b/include/trace/define_trace.h
@@ -0,0 +1,75 @@
1 | /* | ||
2 | * Trace files that want to automate creation of all tracepoints defined | ||
3 | * in their file should include this file. The following are macros that the | ||
4 | * trace file may define: | ||
5 | * | ||
6 | * TRACE_SYSTEM defines the system the tracepoint is for | ||
7 | * | ||
8 | * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h | ||
9 | * This macro may be defined to tell define_trace.h what file to include. | ||
10 | * Note, leave off the ".h". | ||
11 | * | ||
12 | * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace | ||
13 | * then this macro can define the path to use. Note, the path is relative to | ||
14 | * define_trace.h, not the file including it. Full path names for out of tree | ||
15 | * modules must be used. | ||
16 | */ | ||
17 | |||
18 | #ifdef CREATE_TRACE_POINTS | ||
19 | |||
20 | /* Prevent recursion */ | ||
21 | #undef CREATE_TRACE_POINTS | ||
22 | |||
23 | #include <linux/stringify.h> | ||
24 | |||
25 | #undef TRACE_EVENT | ||
26 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | ||
27 | DEFINE_TRACE(name) | ||
28 | |||
29 | #undef DECLARE_TRACE | ||
30 | #define DECLARE_TRACE(name, proto, args) \ | ||
31 | DEFINE_TRACE(name) | ||
32 | |||
33 | #undef TRACE_INCLUDE | ||
34 | #undef __TRACE_INCLUDE | ||
35 | |||
36 | #ifndef TRACE_INCLUDE_FILE | ||
37 | # define TRACE_INCLUDE_FILE TRACE_SYSTEM | ||
38 | # define UNDEF_TRACE_INCLUDE_FILE | ||
39 | #endif | ||
40 | |||
41 | #ifndef TRACE_INCLUDE_PATH | ||
42 | # define __TRACE_INCLUDE(system) <trace/events/system.h> | ||
43 | # define UNDEF_TRACE_INCLUDE_PATH | ||
44 | #else | ||
45 | # define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) | ||
46 | #endif | ||
47 | |||
48 | # define TRACE_INCLUDE(system) __TRACE_INCLUDE(system) | ||
49 | |||
50 | /* Let the trace headers be reread */ | ||
51 | #define TRACE_HEADER_MULTI_READ | ||
52 | |||
53 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
54 | |||
55 | #ifdef CONFIG_EVENT_TRACING | ||
56 | #include <trace/ftrace.h> | ||
57 | #endif | ||
58 | |||
59 | #undef TRACE_HEADER_MULTI_READ | ||
60 | |||
61 | /* Only undef what we defined in this file */ | ||
62 | #ifdef UNDEF_TRACE_INCLUDE_FILE | ||
63 | # undef TRACE_INCLUDE_FILE | ||
64 | # undef UNDEF_TRACE_INCLUDE_FILE | ||
65 | #endif | ||
66 | |||
67 | #ifdef UNDEF_TRACE_INCLUDE_PATH | ||
68 | # undef TRACE_INCLUDE_PATH | ||
69 | # undef UNDEF_TRACE_INCLUDE_PATH | ||
70 | #endif | ||
71 | |||
72 | /* We may be processing more files */ | ||
73 | #define CREATE_TRACE_POINTS | ||
74 | |||
75 | #endif /* CREATE_TRACE_POINTS */ | ||
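The header comment above spells out the contract for consumers of define_trace.h. As a minimal sketch of that contract (not part of this patch; "foo" is an invented example system name), exactly one .c file per subsystem instantiates its tracepoints like this:

/*
 * Hypothetical usage sketch, not taken from this diff.  The one file
 * that owns the tracepoints defines CREATE_TRACE_POINTS before
 * including its events header; define_trace.h, pulled in at the bottom
 * of that header, then expands every TRACE_EVENT()/DECLARE_TRACE()
 * into a DEFINE_TRACE().
 */
#define CREATE_TRACE_POINTS
#include <trace/events/foo.h>

/* Every other file includes <trace/events/foo.h> without
 * CREATE_TRACE_POINTS and simply calls trace_foo_bar(...). */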
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 000000000000..d6b05f42dd44
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,493 @@
1 | #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_BLOCK_H | ||
3 | |||
4 | #include <linux/blktrace_api.h> | ||
5 | #include <linux/blkdev.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM block | ||
10 | |||
11 | TRACE_EVENT(block_rq_abort, | ||
12 | |||
13 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
14 | |||
15 | TP_ARGS(q, rq), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __field( dev_t, dev ) | ||
19 | __field( sector_t, sector ) | ||
20 | __field( unsigned int, nr_sector ) | ||
21 | __field( int, errors ) | ||
22 | __array( char, rwbs, 6 ) | ||
23 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
24 | ), | ||
25 | |||
26 | TP_fast_assign( | ||
27 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
28 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
29 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
30 | __entry->errors = rq->errors; | ||
31 | |||
32 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
33 | blk_dump_cmd(__get_str(cmd), rq); | ||
34 | ), | ||
35 | |||
36 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
37 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
38 | __entry->rwbs, __get_str(cmd), | ||
39 | (unsigned long long)__entry->sector, | ||
40 | __entry->nr_sector, __entry->errors) | ||
41 | ); | ||
42 | |||
43 | TRACE_EVENT(block_rq_insert, | ||
44 | |||
45 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
46 | |||
47 | TP_ARGS(q, rq), | ||
48 | |||
49 | TP_STRUCT__entry( | ||
50 | __field( dev_t, dev ) | ||
51 | __field( sector_t, sector ) | ||
52 | __field( unsigned int, nr_sector ) | ||
53 | __field( unsigned int, bytes ) | ||
54 | __array( char, rwbs, 6 ) | ||
55 | __array( char, comm, TASK_COMM_LEN ) | ||
56 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
57 | ), | ||
58 | |||
59 | TP_fast_assign( | ||
60 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
61 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
62 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
63 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
64 | |||
65 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
66 | blk_dump_cmd(__get_str(cmd), rq); | ||
67 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
68 | ), | ||
69 | |||
70 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | ||
71 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
72 | __entry->rwbs, __entry->bytes, __get_str(cmd), | ||
73 | (unsigned long long)__entry->sector, | ||
74 | __entry->nr_sector, __entry->comm) | ||
75 | ); | ||
76 | |||
77 | TRACE_EVENT(block_rq_issue, | ||
78 | |||
79 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
80 | |||
81 | TP_ARGS(q, rq), | ||
82 | |||
83 | TP_STRUCT__entry( | ||
84 | __field( dev_t, dev ) | ||
85 | __field( sector_t, sector ) | ||
86 | __field( unsigned int, nr_sector ) | ||
87 | __field( unsigned int, bytes ) | ||
88 | __array( char, rwbs, 6 ) | ||
89 | __array( char, comm, TASK_COMM_LEN ) | ||
90 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
91 | ), | ||
92 | |||
93 | TP_fast_assign( | ||
94 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
95 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
96 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
97 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
98 | |||
99 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
100 | blk_dump_cmd(__get_str(cmd), rq); | ||
101 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
102 | ), | ||
103 | |||
104 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | ||
105 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
106 | __entry->rwbs, __entry->bytes, __get_str(cmd), | ||
107 | (unsigned long long)__entry->sector, | ||
108 | __entry->nr_sector, __entry->comm) | ||
109 | ); | ||
110 | |||
111 | TRACE_EVENT(block_rq_requeue, | ||
112 | |||
113 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
114 | |||
115 | TP_ARGS(q, rq), | ||
116 | |||
117 | TP_STRUCT__entry( | ||
118 | __field( dev_t, dev ) | ||
119 | __field( sector_t, sector ) | ||
120 | __field( unsigned int, nr_sector ) | ||
121 | __field( int, errors ) | ||
122 | __array( char, rwbs, 6 ) | ||
123 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
124 | ), | ||
125 | |||
126 | TP_fast_assign( | ||
127 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
128 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
129 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
130 | __entry->errors = rq->errors; | ||
131 | |||
132 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
133 | blk_dump_cmd(__get_str(cmd), rq); | ||
134 | ), | ||
135 | |||
136 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
137 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
138 | __entry->rwbs, __get_str(cmd), | ||
139 | (unsigned long long)__entry->sector, | ||
140 | __entry->nr_sector, __entry->errors) | ||
141 | ); | ||
142 | |||
143 | TRACE_EVENT(block_rq_complete, | ||
144 | |||
145 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
146 | |||
147 | TP_ARGS(q, rq), | ||
148 | |||
149 | TP_STRUCT__entry( | ||
150 | __field( dev_t, dev ) | ||
151 | __field( sector_t, sector ) | ||
152 | __field( unsigned int, nr_sector ) | ||
153 | __field( int, errors ) | ||
154 | __array( char, rwbs, 6 ) | ||
155 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
156 | ), | ||
157 | |||
158 | TP_fast_assign( | ||
159 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
160 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
161 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
162 | __entry->errors = rq->errors; | ||
163 | |||
164 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
165 | blk_dump_cmd(__get_str(cmd), rq); | ||
166 | ), | ||
167 | |||
168 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
169 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
170 | __entry->rwbs, __get_str(cmd), | ||
171 | (unsigned long long)__entry->sector, | ||
172 | __entry->nr_sector, __entry->errors) | ||
173 | ); | ||
174 | TRACE_EVENT(block_bio_bounce, | ||
175 | |||
176 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
177 | |||
178 | TP_ARGS(q, bio), | ||
179 | |||
180 | TP_STRUCT__entry( | ||
181 | __field( dev_t, dev ) | ||
182 | __field( sector_t, sector ) | ||
183 | __field( unsigned int, nr_sector ) | ||
184 | __array( char, rwbs, 6 ) | ||
185 | __array( char, comm, TASK_COMM_LEN ) | ||
186 | ), | ||
187 | |||
188 | TP_fast_assign( | ||
189 | __entry->dev = bio->bi_bdev->bd_dev; | ||
190 | __entry->sector = bio->bi_sector; | ||
191 | __entry->nr_sector = bio->bi_size >> 9; | ||
192 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
193 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
194 | ), | ||
195 | |||
196 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
197 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
198 | (unsigned long long)__entry->sector, | ||
199 | __entry->nr_sector, __entry->comm) | ||
200 | ); | ||
201 | |||
202 | TRACE_EVENT(block_bio_complete, | ||
203 | |||
204 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
205 | |||
206 | TP_ARGS(q, bio), | ||
207 | |||
208 | TP_STRUCT__entry( | ||
209 | __field( dev_t, dev ) | ||
210 | __field( sector_t, sector ) | ||
211 | __field( unsigned, nr_sector ) | ||
212 | __field( int, error ) | ||
213 | __array( char, rwbs, 6 ) | ||
214 | ), | ||
215 | |||
216 | TP_fast_assign( | ||
217 | __entry->dev = bio->bi_bdev->bd_dev; | ||
218 | __entry->sector = bio->bi_sector; | ||
219 | __entry->nr_sector = bio->bi_size >> 9; | ||
220 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
221 | ), | ||
222 | |||
223 | TP_printk("%d,%d %s %llu + %u [%d]", | ||
224 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
225 | (unsigned long long)__entry->sector, | ||
226 | __entry->nr_sector, __entry->error) | ||
227 | ); | ||
228 | |||
229 | TRACE_EVENT(block_bio_backmerge, | ||
230 | |||
231 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
232 | |||
233 | TP_ARGS(q, bio), | ||
234 | |||
235 | TP_STRUCT__entry( | ||
236 | __field( dev_t, dev ) | ||
237 | __field( sector_t, sector ) | ||
238 | __field( unsigned int, nr_sector ) | ||
239 | __array( char, rwbs, 6 ) | ||
240 | __array( char, comm, TASK_COMM_LEN ) | ||
241 | ), | ||
242 | |||
243 | TP_fast_assign( | ||
244 | __entry->dev = bio->bi_bdev->bd_dev; | ||
245 | __entry->sector = bio->bi_sector; | ||
246 | __entry->nr_sector = bio->bi_size >> 9; | ||
247 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
248 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
249 | ), | ||
250 | |||
251 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
252 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
253 | (unsigned long long)__entry->sector, | ||
254 | __entry->nr_sector, __entry->comm) | ||
255 | ); | ||
256 | |||
257 | TRACE_EVENT(block_bio_frontmerge, | ||
258 | |||
259 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
260 | |||
261 | TP_ARGS(q, bio), | ||
262 | |||
263 | TP_STRUCT__entry( | ||
264 | __field( dev_t, dev ) | ||
265 | __field( sector_t, sector ) | ||
266 | __field( unsigned, nr_sector ) | ||
267 | __array( char, rwbs, 6 ) | ||
268 | __array( char, comm, TASK_COMM_LEN ) | ||
269 | ), | ||
270 | |||
271 | TP_fast_assign( | ||
272 | __entry->dev = bio->bi_bdev->bd_dev; | ||
273 | __entry->sector = bio->bi_sector; | ||
274 | __entry->nr_sector = bio->bi_size >> 9; | ||
275 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
276 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
277 | ), | ||
278 | |||
279 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
280 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
281 | (unsigned long long)__entry->sector, | ||
282 | __entry->nr_sector, __entry->comm) | ||
283 | ); | ||
284 | |||
285 | TRACE_EVENT(block_bio_queue, | ||
286 | |||
287 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
288 | |||
289 | TP_ARGS(q, bio), | ||
290 | |||
291 | TP_STRUCT__entry( | ||
292 | __field( dev_t, dev ) | ||
293 | __field( sector_t, sector ) | ||
294 | __field( unsigned int, nr_sector ) | ||
295 | __array( char, rwbs, 6 ) | ||
296 | __array( char, comm, TASK_COMM_LEN ) | ||
297 | ), | ||
298 | |||
299 | TP_fast_assign( | ||
300 | __entry->dev = bio->bi_bdev->bd_dev; | ||
301 | __entry->sector = bio->bi_sector; | ||
302 | __entry->nr_sector = bio->bi_size >> 9; | ||
303 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
304 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
305 | ), | ||
306 | |||
307 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
308 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
309 | (unsigned long long)__entry->sector, | ||
310 | __entry->nr_sector, __entry->comm) | ||
311 | ); | ||
312 | |||
313 | TRACE_EVENT(block_getrq, | ||
314 | |||
315 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
316 | |||
317 | TP_ARGS(q, bio, rw), | ||
318 | |||
319 | TP_STRUCT__entry( | ||
320 | __field( dev_t, dev ) | ||
321 | __field( sector_t, sector ) | ||
322 | __field( unsigned int, nr_sector ) | ||
323 | __array( char, rwbs, 6 ) | ||
324 | __array( char, comm, TASK_COMM_LEN ) | ||
325 | ), | ||
326 | |||
327 | TP_fast_assign( | ||
328 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
329 | __entry->sector = bio ? bio->bi_sector : 0; | ||
330 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
331 | blk_fill_rwbs(__entry->rwbs, | ||
332 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
333 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
334 | ), | ||
335 | |||
336 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
337 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
338 | (unsigned long long)__entry->sector, | ||
339 | __entry->nr_sector, __entry->comm) | ||
340 | ); | ||
341 | |||
342 | TRACE_EVENT(block_sleeprq, | ||
343 | |||
344 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
345 | |||
346 | TP_ARGS(q, bio, rw), | ||
347 | |||
348 | TP_STRUCT__entry( | ||
349 | __field( dev_t, dev ) | ||
350 | __field( sector_t, sector ) | ||
351 | __field( unsigned int, nr_sector ) | ||
352 | __array( char, rwbs, 6 ) | ||
353 | __array( char, comm, TASK_COMM_LEN ) | ||
354 | ), | ||
355 | |||
356 | TP_fast_assign( | ||
357 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
358 | __entry->sector = bio ? bio->bi_sector : 0; | ||
359 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
360 | blk_fill_rwbs(__entry->rwbs, | ||
361 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
362 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
363 | ), | ||
364 | |||
365 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
366 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
367 | (unsigned long long)__entry->sector, | ||
368 | __entry->nr_sector, __entry->comm) | ||
369 | ); | ||
370 | |||
371 | TRACE_EVENT(block_plug, | ||
372 | |||
373 | TP_PROTO(struct request_queue *q), | ||
374 | |||
375 | TP_ARGS(q), | ||
376 | |||
377 | TP_STRUCT__entry( | ||
378 | __array( char, comm, TASK_COMM_LEN ) | ||
379 | ), | ||
380 | |||
381 | TP_fast_assign( | ||
382 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
383 | ), | ||
384 | |||
385 | TP_printk("[%s]", __entry->comm) | ||
386 | ); | ||
387 | |||
388 | TRACE_EVENT(block_unplug_timer, | ||
389 | |||
390 | TP_PROTO(struct request_queue *q), | ||
391 | |||
392 | TP_ARGS(q), | ||
393 | |||
394 | TP_STRUCT__entry( | ||
395 | __field( int, nr_rq ) | ||
396 | __array( char, comm, TASK_COMM_LEN ) | ||
397 | ), | ||
398 | |||
399 | TP_fast_assign( | ||
400 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
401 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
402 | ), | ||
403 | |||
404 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | ||
405 | ); | ||
406 | |||
407 | TRACE_EVENT(block_unplug_io, | ||
408 | |||
409 | TP_PROTO(struct request_queue *q), | ||
410 | |||
411 | TP_ARGS(q), | ||
412 | |||
413 | TP_STRUCT__entry( | ||
414 | __field( int, nr_rq ) | ||
415 | __array( char, comm, TASK_COMM_LEN ) | ||
416 | ), | ||
417 | |||
418 | TP_fast_assign( | ||
419 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
420 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
421 | ), | ||
422 | |||
423 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | ||
424 | ); | ||
425 | |||
426 | TRACE_EVENT(block_split, | ||
427 | |||
428 | TP_PROTO(struct request_queue *q, struct bio *bio, | ||
429 | unsigned int new_sector), | ||
430 | |||
431 | TP_ARGS(q, bio, new_sector), | ||
432 | |||
433 | TP_STRUCT__entry( | ||
434 | __field( dev_t, dev ) | ||
435 | __field( sector_t, sector ) | ||
436 | __field( sector_t, new_sector ) | ||
437 | __array( char, rwbs, 6 ) | ||
438 | __array( char, comm, TASK_COMM_LEN ) | ||
439 | ), | ||
440 | |||
441 | TP_fast_assign( | ||
442 | __entry->dev = bio->bi_bdev->bd_dev; | ||
443 | __entry->sector = bio->bi_sector; | ||
444 | __entry->new_sector = new_sector; | ||
445 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
446 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
447 | ), | ||
448 | |||
449 | TP_printk("%d,%d %s %llu / %llu [%s]", | ||
450 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
451 | (unsigned long long)__entry->sector, | ||
452 | (unsigned long long)__entry->new_sector, | ||
453 | __entry->comm) | ||
454 | ); | ||
455 | |||
456 | TRACE_EVENT(block_remap, | ||
457 | |||
458 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | ||
459 | sector_t from), | ||
460 | |||
461 | TP_ARGS(q, bio, dev, from), | ||
462 | |||
463 | TP_STRUCT__entry( | ||
464 | __field( dev_t, dev ) | ||
465 | __field( sector_t, sector ) | ||
466 | __field( unsigned int, nr_sector ) | ||
467 | __field( dev_t, old_dev ) | ||
468 | __field( sector_t, old_sector ) | ||
469 | __array( char, rwbs, 6 ) | ||
470 | ), | ||
471 | |||
472 | TP_fast_assign( | ||
473 | __entry->dev = bio->bi_bdev->bd_dev; | ||
474 | __entry->sector = bio->bi_sector; | ||
475 | __entry->nr_sector = bio->bi_size >> 9; | ||
476 | __entry->old_dev = dev; | ||
477 | __entry->old_sector = from; | ||
478 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
479 | ), | ||
480 | |||
481 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | ||
482 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
483 | (unsigned long long)__entry->sector, | ||
484 | __entry->nr_sector, | ||
485 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | ||
486 | (unsigned long long)__entry->old_sector) | ||
487 | ); | ||
488 | |||
489 | #endif /* _TRACE_BLOCK_H */ | ||
490 | |||
491 | /* This part must be outside protection */ | ||
492 | #include <trace/define_trace.h> | ||
493 | |||
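Each TRACE_EVENT(name, ...) above also generates a trace_<name>() function with the TP_PROTO() signature. A hedged sketch of what a call site looks like (the enclosing function is invented for illustration; the call form follows from the macro):

/* Illustration only -- not from this patch. */
static void example_queue_request(struct request_queue *q, struct request *rq)
{
	/* compiles to a near no-op unless the block_rq_insert event is enabled */
	trace_block_rq_insert(q, rq);

	/* ... the real queueing work would follow here ... */
}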
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 000000000000..b0c7ede55eb1
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,145 @@
1 | #if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_IRQ_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM irq | ||
9 | |||
10 | #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } | ||
11 | #define show_softirq_name(val) \ | ||
12 | __print_symbolic(val, \ | ||
13 | softirq_name(HI), \ | ||
14 | softirq_name(TIMER), \ | ||
15 | softirq_name(NET_TX), \ | ||
16 | softirq_name(NET_RX), \ | ||
17 | softirq_name(BLOCK), \ | ||
18 | softirq_name(TASKLET), \ | ||
19 | softirq_name(SCHED), \ | ||
20 | softirq_name(HRTIMER), \ | ||
21 | softirq_name(RCU)) | ||
22 | |||
23 | /** | ||
24 | * irq_handler_entry - called immediately before the irq action handler | ||
25 | * @irq: irq number | ||
26 | * @action: pointer to struct irqaction | ||
27 | * | ||
28 | * The struct irqaction pointed to by @action contains various | ||
29 | * information about the handler, including the device name, | ||
30 | * @action->name, and the device id, @action->dev_id. When used in | ||
31 | * conjunction with the irq_handler_exit tracepoint, we can figure | ||
32 | * out irq handler latencies. | ||
33 | */ | ||
34 | TRACE_EVENT(irq_handler_entry, | ||
35 | |||
36 | TP_PROTO(int irq, struct irqaction *action), | ||
37 | |||
38 | TP_ARGS(irq, action), | ||
39 | |||
40 | TP_STRUCT__entry( | ||
41 | __field( int, irq ) | ||
42 | __string( name, action->name ) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __entry->irq = irq; | ||
47 | __assign_str(name, action->name); | ||
48 | ), | ||
49 | |||
50 | TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) | ||
51 | ); | ||
52 | |||
53 | /** | ||
54 | * irq_handler_exit - called immediately after the irq action handler returns | ||
55 | * @irq: irq number | ||
56 | * @action: pointer to struct irqaction | ||
57 | * @ret: return value | ||
58 | * | ||
59 | * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding | ||
60 | * @action->handler scuccessully handled this irq. Otherwise, the irq might be | ||
61 | * a shared irq line, or the irq was not handled successfully. Can be used in | ||
62 | * conjunction with the irq_handler_entry to understand irq handler latencies. | ||
63 | */ | ||
64 | TRACE_EVENT(irq_handler_exit, | ||
65 | |||
66 | TP_PROTO(int irq, struct irqaction *action, int ret), | ||
67 | |||
68 | TP_ARGS(irq, action, ret), | ||
69 | |||
70 | TP_STRUCT__entry( | ||
71 | __field( int, irq ) | ||
72 | __field( int, ret ) | ||
73 | ), | ||
74 | |||
75 | TP_fast_assign( | ||
76 | __entry->irq = irq; | ||
77 | __entry->ret = ret; | ||
78 | ), | ||
79 | |||
80 | TP_printk("irq=%d return=%s", | ||
81 | __entry->irq, __entry->ret ? "handled" : "unhandled") | ||
82 | ); | ||
83 | |||
84 | /** | ||
85 | * softirq_entry - called immediately before the softirq handler | ||
86 | * @h: pointer to struct softirq_action | ||
87 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
88 | * | ||
89 | * The @h parameter contains a pointer to the struct softirq_action | ||
90 | * which has a pointer to the action handler that is called. By subtracting | ||
91 | * the @vec pointer from the @h pointer, we can determine the softirq | ||
92 | * number. Also, when used in combination with the softirq_exit tracepoint | ||
93 | * we can determine the softirq latency. | ||
94 | */ | ||
95 | TRACE_EVENT(softirq_entry, | ||
96 | |||
97 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
98 | |||
99 | TP_ARGS(h, vec), | ||
100 | |||
101 | TP_STRUCT__entry( | ||
102 | __field( int, vec ) | ||
103 | ), | ||
104 | |||
105 | TP_fast_assign( | ||
106 | __entry->vec = (int)(h - vec); | ||
107 | ), | ||
108 | |||
109 | TP_printk("softirq=%d action=%s", __entry->vec, | ||
110 | show_softirq_name(__entry->vec)) | ||
111 | ); | ||
112 | |||
113 | /** | ||
114 | * softirq_exit - called immediately after the softirq handler returns | ||
115 | * @h: pointer to struct softirq_action | ||
116 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
117 | * | ||
118 | * The @h parameter contains a pointer to the struct softirq_action | ||
119 | * that has handled the softirq. By subtracting the @vec pointer from | ||
120 | * the @h pointer, we can determine the softirq number. Also, when used in | ||
121 | * combination with the softirq_entry tracepoint we can determine the softirq | ||
122 | * latency. | ||
123 | */ | ||
124 | TRACE_EVENT(softirq_exit, | ||
125 | |||
126 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
127 | |||
128 | TP_ARGS(h, vec), | ||
129 | |||
130 | TP_STRUCT__entry( | ||
131 | __field( int, vec ) | ||
132 | ), | ||
133 | |||
134 | TP_fast_assign( | ||
135 | __entry->vec = (int)(h - vec); | ||
136 | ), | ||
137 | |||
138 | TP_printk("softirq=%d action=%s", __entry->vec, | ||
139 | show_softirq_name(__entry->vec)) | ||
140 | ); | ||
141 | |||
142 | #endif /* _TRACE_IRQ_H */ | ||
143 | |||
144 | /* This part must be outside protection */ | ||
145 | #include <trace/define_trace.h> | ||
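The kerneldoc for irq_handler_entry/exit above describes pairing the two events to measure per-handler latency. A simplified sketch of that pairing (the real call sites live in kernel/irq/handle.c; this function is illustrative only, not the kernel implementation):

static irqreturn_t example_run_irq_action(int irq, struct irqaction *action)
{
	irqreturn_t ret;

	trace_irq_handler_entry(irq, action);
	ret = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, ret);	/* exit - entry = handler latency */

	return ret;
}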
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 000000000000..9baba50d6512
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,231 @@
1 | #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_KMEM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM kmem | ||
9 | |||
10 | /* | ||
11 | * The order of these masks is important. Matching masks will be seen | ||
12 | * first and the left over flags will end up showing by themselves. | ||
13 | * | ||
14 | * For example, if we have GFP_KERNEL before GFP_USER we will get: | ||
15 | * | ||
16 | * GFP_KERNEL|GFP_HARDWALL | ||
17 | * | ||
18 | * Thus most bits set go first. | ||
19 | */ | ||
20 | #define show_gfp_flags(flags) \ | ||
21 | (flags) ? __print_flags(flags, "|", \ | ||
22 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ | ||
23 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ | ||
24 | {(unsigned long)GFP_USER, "GFP_USER"}, \ | ||
25 | {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ | ||
26 | {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ | ||
27 | {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ | ||
28 | {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ | ||
29 | {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ | ||
30 | {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ | ||
31 | {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \ | ||
32 | {(unsigned long)__GFP_IO, "GFP_IO"}, \ | ||
33 | {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ | ||
34 | {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ | ||
35 | {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ | ||
36 | {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ | ||
37 | {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ | ||
38 | {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ | ||
39 | {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ | ||
40 | {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ | ||
41 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | ||
42 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | ||
43 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | ||
44 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | ||
45 | ) : "GFP_NOWAIT" | ||
46 | |||
47 | TRACE_EVENT(kmalloc, | ||
48 | |||
49 | TP_PROTO(unsigned long call_site, | ||
50 | const void *ptr, | ||
51 | size_t bytes_req, | ||
52 | size_t bytes_alloc, | ||
53 | gfp_t gfp_flags), | ||
54 | |||
55 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
56 | |||
57 | TP_STRUCT__entry( | ||
58 | __field( unsigned long, call_site ) | ||
59 | __field( const void *, ptr ) | ||
60 | __field( size_t, bytes_req ) | ||
61 | __field( size_t, bytes_alloc ) | ||
62 | __field( gfp_t, gfp_flags ) | ||
63 | ), | ||
64 | |||
65 | TP_fast_assign( | ||
66 | __entry->call_site = call_site; | ||
67 | __entry->ptr = ptr; | ||
68 | __entry->bytes_req = bytes_req; | ||
69 | __entry->bytes_alloc = bytes_alloc; | ||
70 | __entry->gfp_flags = gfp_flags; | ||
71 | ), | ||
72 | |||
73 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", | ||
74 | __entry->call_site, | ||
75 | __entry->ptr, | ||
76 | __entry->bytes_req, | ||
77 | __entry->bytes_alloc, | ||
78 | show_gfp_flags(__entry->gfp_flags)) | ||
79 | ); | ||
80 | |||
81 | TRACE_EVENT(kmem_cache_alloc, | ||
82 | |||
83 | TP_PROTO(unsigned long call_site, | ||
84 | const void *ptr, | ||
85 | size_t bytes_req, | ||
86 | size_t bytes_alloc, | ||
87 | gfp_t gfp_flags), | ||
88 | |||
89 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
90 | |||
91 | TP_STRUCT__entry( | ||
92 | __field( unsigned long, call_site ) | ||
93 | __field( const void *, ptr ) | ||
94 | __field( size_t, bytes_req ) | ||
95 | __field( size_t, bytes_alloc ) | ||
96 | __field( gfp_t, gfp_flags ) | ||
97 | ), | ||
98 | |||
99 | TP_fast_assign( | ||
100 | __entry->call_site = call_site; | ||
101 | __entry->ptr = ptr; | ||
102 | __entry->bytes_req = bytes_req; | ||
103 | __entry->bytes_alloc = bytes_alloc; | ||
104 | __entry->gfp_flags = gfp_flags; | ||
105 | ), | ||
106 | |||
107 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", | ||
108 | __entry->call_site, | ||
109 | __entry->ptr, | ||
110 | __entry->bytes_req, | ||
111 | __entry->bytes_alloc, | ||
112 | show_gfp_flags(__entry->gfp_flags)) | ||
113 | ); | ||
114 | |||
115 | TRACE_EVENT(kmalloc_node, | ||
116 | |||
117 | TP_PROTO(unsigned long call_site, | ||
118 | const void *ptr, | ||
119 | size_t bytes_req, | ||
120 | size_t bytes_alloc, | ||
121 | gfp_t gfp_flags, | ||
122 | int node), | ||
123 | |||
124 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
125 | |||
126 | TP_STRUCT__entry( | ||
127 | __field( unsigned long, call_site ) | ||
128 | __field( const void *, ptr ) | ||
129 | __field( size_t, bytes_req ) | ||
130 | __field( size_t, bytes_alloc ) | ||
131 | __field( gfp_t, gfp_flags ) | ||
132 | __field( int, node ) | ||
133 | ), | ||
134 | |||
135 | TP_fast_assign( | ||
136 | __entry->call_site = call_site; | ||
137 | __entry->ptr = ptr; | ||
138 | __entry->bytes_req = bytes_req; | ||
139 | __entry->bytes_alloc = bytes_alloc; | ||
140 | __entry->gfp_flags = gfp_flags; | ||
141 | __entry->node = node; | ||
142 | ), | ||
143 | |||
144 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", | ||
145 | __entry->call_site, | ||
146 | __entry->ptr, | ||
147 | __entry->bytes_req, | ||
148 | __entry->bytes_alloc, | ||
149 | show_gfp_flags(__entry->gfp_flags), | ||
150 | __entry->node) | ||
151 | ); | ||
152 | |||
153 | TRACE_EVENT(kmem_cache_alloc_node, | ||
154 | |||
155 | TP_PROTO(unsigned long call_site, | ||
156 | const void *ptr, | ||
157 | size_t bytes_req, | ||
158 | size_t bytes_alloc, | ||
159 | gfp_t gfp_flags, | ||
160 | int node), | ||
161 | |||
162 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
163 | |||
164 | TP_STRUCT__entry( | ||
165 | __field( unsigned long, call_site ) | ||
166 | __field( const void *, ptr ) | ||
167 | __field( size_t, bytes_req ) | ||
168 | __field( size_t, bytes_alloc ) | ||
169 | __field( gfp_t, gfp_flags ) | ||
170 | __field( int, node ) | ||
171 | ), | ||
172 | |||
173 | TP_fast_assign( | ||
174 | __entry->call_site = call_site; | ||
175 | __entry->ptr = ptr; | ||
176 | __entry->bytes_req = bytes_req; | ||
177 | __entry->bytes_alloc = bytes_alloc; | ||
178 | __entry->gfp_flags = gfp_flags; | ||
179 | __entry->node = node; | ||
180 | ), | ||
181 | |||
182 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", | ||
183 | __entry->call_site, | ||
184 | __entry->ptr, | ||
185 | __entry->bytes_req, | ||
186 | __entry->bytes_alloc, | ||
187 | show_gfp_flags(__entry->gfp_flags), | ||
188 | __entry->node) | ||
189 | ); | ||
190 | |||
191 | TRACE_EVENT(kfree, | ||
192 | |||
193 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
194 | |||
195 | TP_ARGS(call_site, ptr), | ||
196 | |||
197 | TP_STRUCT__entry( | ||
198 | __field( unsigned long, call_site ) | ||
199 | __field( const void *, ptr ) | ||
200 | ), | ||
201 | |||
202 | TP_fast_assign( | ||
203 | __entry->call_site = call_site; | ||
204 | __entry->ptr = ptr; | ||
205 | ), | ||
206 | |||
207 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
208 | ); | ||
209 | |||
210 | TRACE_EVENT(kmem_cache_free, | ||
211 | |||
212 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
213 | |||
214 | TP_ARGS(call_site, ptr), | ||
215 | |||
216 | TP_STRUCT__entry( | ||
217 | __field( unsigned long, call_site ) | ||
218 | __field( const void *, ptr ) | ||
219 | ), | ||
220 | |||
221 | TP_fast_assign( | ||
222 | __entry->call_site = call_site; | ||
223 | __entry->ptr = ptr; | ||
224 | ), | ||
225 | |||
226 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
227 | ); | ||
228 | #endif /* _TRACE_KMEM_H */ | ||
229 | |||
230 | /* This part must be outside protection */ | ||
231 | #include <trace/define_trace.h> | ||
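The ordering comment at the top of this file matters because the flag printer emits each named mask whose bits are all present and then clears those bits, so composite masks such as GFP_KERNEL must be listed before their components. A simplified userspace re-implementation of that decomposition logic (an assumption-laden sketch mirroring what ftrace_print_flags_seq() does; it is not the kernel code itself):

#include <stdio.h>

struct flag_name { unsigned long mask; const char *name; };

/* Print every named mask whose bits are all set, consuming them as we go. */
static void show_flags(unsigned long flags, const struct flag_name *tbl, int n)
{
	const char *sep = "";
	int i;

	for (i = 0; i < n && flags; i++) {
		if ((flags & tbl[i].mask) != tbl[i].mask)
			continue;		/* not all bits of this mask are set */
		printf("%s%s", sep, tbl[i].name);
		sep = "|";
		flags &= ~tbl[i].mask;	/* leftover bits fall through to later, smaller masks */
	}
	printf("\n");
}

int main(void)
{
	const struct flag_name tbl[] = { { 0x3, "AB" }, { 0x1, "A" }, { 0x4, "C" } };

	show_flags(0x7, tbl, 3);	/* prints "AB|C" because the composite AB is listed first */
	return 0;
}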
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
new file mode 100644
index 000000000000..0e956c9dfd7e
--- /dev/null
+++ b/include/trace/events/lockdep.h
@@ -0,0 +1,96 @@
1 | #if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_LOCKDEP_H | ||
3 | |||
4 | #include <linux/lockdep.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM lockdep | ||
9 | |||
10 | #ifdef CONFIG_LOCKDEP | ||
11 | |||
12 | TRACE_EVENT(lock_acquire, | ||
13 | |||
14 | TP_PROTO(struct lockdep_map *lock, unsigned int subclass, | ||
15 | int trylock, int read, int check, | ||
16 | struct lockdep_map *next_lock, unsigned long ip), | ||
17 | |||
18 | TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), | ||
19 | |||
20 | TP_STRUCT__entry( | ||
21 | __field(unsigned int, flags) | ||
22 | __string(name, lock->name) | ||
23 | ), | ||
24 | |||
25 | TP_fast_assign( | ||
26 | __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); | ||
27 | __assign_str(name, lock->name); | ||
28 | ), | ||
29 | |||
30 | TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "", | ||
31 | (__entry->flags & 2) ? "read " : "", | ||
32 | __get_str(name)) | ||
33 | ); | ||
34 | |||
35 | TRACE_EVENT(lock_release, | ||
36 | |||
37 | TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), | ||
38 | |||
39 | TP_ARGS(lock, nested, ip), | ||
40 | |||
41 | TP_STRUCT__entry( | ||
42 | __string(name, lock->name) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __assign_str(name, lock->name); | ||
47 | ), | ||
48 | |||
49 | TP_printk("%s", __get_str(name)) | ||
50 | ); | ||
51 | |||
52 | #ifdef CONFIG_LOCK_STAT | ||
53 | |||
54 | TRACE_EVENT(lock_contended, | ||
55 | |||
56 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
57 | |||
58 | TP_ARGS(lock, ip), | ||
59 | |||
60 | TP_STRUCT__entry( | ||
61 | __string(name, lock->name) | ||
62 | ), | ||
63 | |||
64 | TP_fast_assign( | ||
65 | __assign_str(name, lock->name); | ||
66 | ), | ||
67 | |||
68 | TP_printk("%s", __get_str(name)) | ||
69 | ); | ||
70 | |||
71 | TRACE_EVENT(lock_acquired, | ||
72 | TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), | ||
73 | |||
74 | TP_ARGS(lock, ip, waittime), | ||
75 | |||
76 | TP_STRUCT__entry( | ||
77 | __string(name, lock->name) | ||
78 | __field(unsigned long, wait_usec) | ||
79 | __field(unsigned long, wait_nsec_rem) | ||
80 | ), | ||
81 | TP_fast_assign( | ||
82 | __assign_str(name, lock->name); | ||
83 | __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); | ||
84 | __entry->wait_usec = (unsigned long) waittime; | ||
85 | ), | ||
86 | TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec, | ||
87 | __entry->wait_nsec_rem) | ||
88 | ); | ||
89 | |||
90 | #endif | ||
91 | #endif | ||
92 | |||
93 | #endif /* _TRACE_LOCKDEP_H */ | ||
94 | |||
95 | /* This part must be outside protection */ | ||
96 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/napi.h b/include/trace/events/napi.h
index a8989c4547e7..a8989c4547e7 100644
--- a/include/trace/napi.h
+++ b/include/trace/events/napi.h
diff --git a/include/trace/sched_event_types.h b/include/trace/events/sched.h
index 63547dc1125f..24ab5bcff7b2 100644
--- a/include/trace/sched_event_types.h
+++ b/include/trace/events/sched.h
@@ -1,9 +1,8 @@
+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_H
 
-/* use <trace/sched.h> instead */
-#ifndef TRACE_EVENT
-# error Do not include this file directly.
-# error Unless you know what you are doing.
-#endif
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM sched
@@ -157,6 +156,7 @@ TRACE_EVENT(sched_switch,
 		__array(	char,	prev_comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	prev_pid			)
 		__field(	int,	prev_prio			)
+		__field(	long,	prev_state			)
 		__array(	char,	next_comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	next_pid			)
 		__field(	int,	next_prio			)
@@ -166,13 +166,19 @@ TRACE_EVENT(sched_switch,
 		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
 		__entry->prev_pid	= prev->pid;
 		__entry->prev_prio	= prev->prio;
+		__entry->prev_state	= prev->state;
 		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
 		__entry->next_pid	= next->pid;
 		__entry->next_prio	= next->prio;
 	),
 
-	TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+	TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
 		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+		__entry->prev_state ?
+		  __print_flags(__entry->prev_state, "|",
+				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
+				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
+				{ 128, "W" }) : "R",
 		__entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
 
@@ -181,9 +187,9 @@ TRACE_EVENT(sched_switch,
  */
 TRACE_EVENT(sched_migrate_task,
 
-	TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+	TP_PROTO(struct task_struct *p, int dest_cpu),
 
-	TP_ARGS(p, orig_cpu, dest_cpu),
+	TP_ARGS(p, dest_cpu),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -197,7 +203,7 @@ TRACE_EVENT(sched_migrate_task,
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid		= p->pid;
 		__entry->prio		= p->prio;
-		__entry->orig_cpu	= orig_cpu;
+		__entry->orig_cpu	= task_cpu(p);
 		__entry->dest_cpu	= dest_cpu;
 	),
 
@@ -334,4 +340,7 @@ TRACE_EVENT(sched_signal_send,
 		  __entry->sig, __entry->comm, __entry->pid)
 );
 
-#undef TRACE_SYSTEM
+#endif /* _TRACE_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 000000000000..1e8fabb57c06
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,40 @@
1 | #if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_SKB_H | ||
3 | |||
4 | #include <linux/skbuff.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM skb | ||
9 | |||
10 | /* | ||
11 | * Tracepoint for freeing an sk_buff: | ||
12 | */ | ||
13 | TRACE_EVENT(kfree_skb, | ||
14 | |||
15 | TP_PROTO(struct sk_buff *skb, void *location), | ||
16 | |||
17 | TP_ARGS(skb, location), | ||
18 | |||
19 | TP_STRUCT__entry( | ||
20 | __field( void *, skbaddr ) | ||
21 | __field( unsigned short, protocol ) | ||
22 | __field( void *, location ) | ||
23 | ), | ||
24 | |||
25 | TP_fast_assign( | ||
26 | __entry->skbaddr = skb; | ||
27 | if (skb) { | ||
28 | __entry->protocol = ntohs(skb->protocol); | ||
29 | } | ||
30 | __entry->location = location; | ||
31 | ), | ||
32 | |||
33 | TP_printk("skbaddr=%p protocol=%u location=%p", | ||
34 | __entry->skbaddr, __entry->protocol, __entry->location) | ||
35 | ); | ||
36 | |||
37 | #endif /* _TRACE_SKB_H */ | ||
38 | |||
39 | /* This part must be outside protection */ | ||
40 | #include <trace/define_trace.h> | ||
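The @location argument above is meant to carry the address of the code that freed the skb, so consumers such as the drop monitor (net/core/drop_monitor.c, listed in the conflicts above) can resolve it to a symbol. A hedged sketch of a call site (the real one lives in net/core/skbuff.c; this wrapper is invented for illustration):

/* Illustration only -- not taken from this patch. */
void example_drop_skb(struct sk_buff *skb)
{
	/* record who freed the skb: our caller's return address */
	trace_kfree_skb(skb, __builtin_return_address(0));

	/* ... the actual kfree_skb() free path would follow ... */
}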
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 000000000000..035f1bff288e
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,100 @@
1 | #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_WORKQUEUE_H | ||
3 | |||
4 | #include <linux/workqueue.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM workqueue | ||
10 | |||
11 | TRACE_EVENT(workqueue_insertion, | ||
12 | |||
13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
14 | |||
15 | TP_ARGS(wq_thread, work), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __array(char, thread_comm, TASK_COMM_LEN) | ||
19 | __field(pid_t, thread_pid) | ||
20 | __field(work_func_t, func) | ||
21 | ), | ||
22 | |||
23 | TP_fast_assign( | ||
24 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
25 | __entry->thread_pid = wq_thread->pid; | ||
26 | __entry->func = work->func; | ||
27 | ), | ||
28 | |||
29 | TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, | ||
30 | __entry->thread_pid, __entry->func) | ||
31 | ); | ||
32 | |||
33 | TRACE_EVENT(workqueue_execution, | ||
34 | |||
35 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
36 | |||
37 | TP_ARGS(wq_thread, work), | ||
38 | |||
39 | TP_STRUCT__entry( | ||
40 | __array(char, thread_comm, TASK_COMM_LEN) | ||
41 | __field(pid_t, thread_pid) | ||
42 | __field(work_func_t, func) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
47 | __entry->thread_pid = wq_thread->pid; | ||
48 | __entry->func = work->func; | ||
49 | ), | ||
50 | |||
51 | TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, | ||
52 | __entry->thread_pid, __entry->func) | ||
53 | ); | ||
54 | |||
55 | /* Trace the creation of one workqueue thread on a cpu */ | ||
56 | TRACE_EVENT(workqueue_creation, | ||
57 | |||
58 | TP_PROTO(struct task_struct *wq_thread, int cpu), | ||
59 | |||
60 | TP_ARGS(wq_thread, cpu), | ||
61 | |||
62 | TP_STRUCT__entry( | ||
63 | __array(char, thread_comm, TASK_COMM_LEN) | ||
64 | __field(pid_t, thread_pid) | ||
65 | __field(int, cpu) | ||
66 | ), | ||
67 | |||
68 | TP_fast_assign( | ||
69 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
70 | __entry->thread_pid = wq_thread->pid; | ||
71 | __entry->cpu = cpu; | ||
72 | ), | ||
73 | |||
74 | TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm, | ||
75 | __entry->thread_pid, __entry->cpu) | ||
76 | ); | ||
77 | |||
78 | TRACE_EVENT(workqueue_destruction, | ||
79 | |||
80 | TP_PROTO(struct task_struct *wq_thread), | ||
81 | |||
82 | TP_ARGS(wq_thread), | ||
83 | |||
84 | TP_STRUCT__entry( | ||
85 | __array(char, thread_comm, TASK_COMM_LEN) | ||
86 | __field(pid_t, thread_pid) | ||
87 | ), | ||
88 | |||
89 | TP_fast_assign( | ||
90 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
91 | __entry->thread_pid = wq_thread->pid; | ||
92 | ), | ||
93 | |||
94 | TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid) | ||
95 | ); | ||
96 | |||
97 | #endif /* _TRACE_WORKQUEUE_H */ | ||
98 | |||
99 | /* This part must be outside protection */ | ||
100 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
new file mode 100644
index 000000000000..1867553c61e5
--- /dev/null
+++ b/include/trace/ftrace.h
@@ -0,0 +1,591 @@
1 | /* | ||
2 | * Stage 1 of the trace events. | ||
3 | * | ||
4 | * Override the macros in <trace/trace_events.h> to include the following: | ||
5 | * | ||
6 | * struct ftrace_raw_<call> { | ||
7 | * struct trace_entry ent; | ||
8 | * <type> <item>; | ||
9 | * <type2> <item2>[<len>]; | ||
10 | * [...] | ||
11 | * }; | ||
12 | * | ||
13 | * The <type> <item> is created by the __field(type, item) macro or | ||
14 | * the __array(type2, item2, len) macro. | ||
15 | * We simply do "type item;", and that will create the fields | ||
16 | * in the structure. | ||
17 | */ | ||
18 | |||
19 | #include <linux/ftrace_event.h> | ||
20 | |||
21 | #undef __field | ||
22 | #define __field(type, item) type item; | ||
23 | |||
24 | #undef __array | ||
25 | #define __array(type, item, len) type item[len]; | ||
26 | |||
27 | #undef __dynamic_array | ||
28 | #define __dynamic_array(type, item, len) unsigned short __data_loc_##item; | ||
29 | |||
30 | #undef __string | ||
31 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
32 | |||
33 | #undef TP_STRUCT__entry | ||
34 | #define TP_STRUCT__entry(args...) args | ||
35 | |||
36 | #undef TRACE_EVENT | ||
37 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | ||
38 | struct ftrace_raw_##name { \ | ||
39 | struct trace_entry ent; \ | ||
40 | tstruct \ | ||
41 | char __data[0]; \ | ||
42 | }; \ | ||
43 | static struct ftrace_event_call event_##name | ||
44 | |||
45 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
46 | |||
47 | |||
48 | /* | ||
49 | * Stage 2 of the trace events. | ||
50 | * | ||
51 | * Include the following: | ||
52 | * | ||
53 | * struct ftrace_data_offsets_<call> { | ||
54 | * int <item1>; | ||
55 | * int <item2>; | ||
56 | * [...] | ||
57 | * }; | ||
58 | * | ||
59 | * The __dynamic_array() macro will create each int <item>, this is | ||
60 | * to keep the offset of each array from the beginning of the event. | ||
61 | */ | ||
62 | |||
63 | #undef __field | ||
64 | #define __field(type, item); | ||
65 | |||
66 | #undef __array | ||
67 | #define __array(type, item, len) | ||
68 | |||
69 | #undef __dynamic_array | ||
70 | #define __dynamic_array(type, item, len) int item; | ||
71 | |||
72 | #undef __string | ||
73 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
74 | |||
75 | #undef TRACE_EVENT | ||
76 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
77 | struct ftrace_data_offsets_##call { \ | ||
78 | tstruct; \ | ||
79 | }; | ||
80 | |||
81 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
82 | |||
83 | /* | ||
84 | * Setup the showing format of trace point. | ||
85 | * | ||
86 | * int | ||
87 | * ftrace_format_##call(struct trace_seq *s) | ||
88 | * { | ||
89 | * struct ftrace_raw_##call field; | ||
90 | * int ret; | ||
91 | * | ||
92 | * ret = trace_seq_printf(s, #type " " #item ";" | ||
93 | * " offset:%u; size:%u;\n", | ||
94 | * offsetof(struct ftrace_raw_##call, item), | ||
95 | * sizeof(field.type)); | ||
96 | * | ||
97 | * } | ||
98 | */ | ||
99 | |||
100 | #undef TP_STRUCT__entry | ||
101 | #define TP_STRUCT__entry(args...) args | ||
102 | |||
103 | #undef __field | ||
104 | #define __field(type, item) \ | ||
105 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | ||
106 | "offset:%u;\tsize:%u;\n", \ | ||
107 | (unsigned int)offsetof(typeof(field), item), \ | ||
108 | (unsigned int)sizeof(field.item)); \ | ||
109 | if (!ret) \ | ||
110 | return 0; | ||
111 | |||
112 | #undef __array | ||
113 | #define __array(type, item, len) \ | ||
114 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | ||
115 | "offset:%u;\tsize:%u;\n", \ | ||
116 | (unsigned int)offsetof(typeof(field), item), \ | ||
117 | (unsigned int)sizeof(field.item)); \ | ||
118 | if (!ret) \ | ||
119 | return 0; | ||
120 | |||
121 | #undef __dynamic_array | ||
122 | #define __dynamic_array(type, item, len) \ | ||
123 | ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ | ||
124 | "offset:%u;\tsize:%u;\n", \ | ||
125 | (unsigned int)offsetof(typeof(field), \ | ||
126 | __data_loc_##item), \ | ||
127 | (unsigned int)sizeof(field.__data_loc_##item)); \ | ||
128 | if (!ret) \ | ||
129 | return 0; | ||
130 | |||
131 | #undef __string | ||
132 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
133 | |||
134 | #undef __entry | ||
135 | #define __entry REC | ||
136 | |||
137 | #undef __print_symbolic | ||
138 | #undef __get_dynamic_array | ||
139 | #undef __get_str | ||
140 | |||
141 | #undef TP_printk | ||
142 | #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | ||
143 | |||
144 | #undef TP_fast_assign | ||
145 | #define TP_fast_assign(args...) args | ||
146 | |||
147 | #undef TRACE_EVENT | ||
148 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | ||
149 | static int \ | ||
150 | ftrace_format_##call(struct trace_seq *s) \ | ||
151 | { \ | ||
152 | struct ftrace_raw_##call field __attribute__((unused)); \ | ||
153 | int ret = 0; \ | ||
154 | \ | ||
155 | tstruct; \ | ||
156 | \ | ||
157 | trace_seq_printf(s, "\nprint fmt: " print); \ | ||
158 | \ | ||
159 | return ret; \ | ||
160 | } | ||
161 | |||
162 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
163 | |||
164 | /* | ||
165 | * Stage 3 of the trace events. | ||
166 | * | ||
167 | * Override the macros in <trace/trace_events.h> to include the following: | ||
168 | * | ||
169 | * enum print_line_t | ||
170 | * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) | ||
171 | * { | ||
172 | * struct trace_seq *s = &iter->seq; | ||
173 | * struct ftrace_raw_<call> *field; <-- defined in stage 1 | ||
174 | * struct trace_entry *entry; | ||
175 | * struct trace_seq *p; | ||
176 | * int ret; | ||
177 | * | ||
178 | * entry = iter->ent; | ||
179 | * | ||
180 | * if (entry->type != event_<call>.id) { | ||
181 | * WARN_ON_ONCE(1); | ||
182 | * return TRACE_TYPE_UNHANDLED; | ||
183 | * } | ||
184 | * | ||
185 | * field = (typeof(field))entry; | ||
186 | * | ||
187 | * p = get_cpu_var(ftrace_event_seq); | ||
188 | * trace_seq_init(p); | ||
189 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | ||
190 | * put_cpu(); | ||
191 | * if (!ret) | ||
192 | * return TRACE_TYPE_PARTIAL_LINE; | ||
193 | * | ||
194 | * return TRACE_TYPE_HANDLED; | ||
195 | * } | ||
196 | * | ||
197 | * This is the method used to print the raw event to the trace | ||
198 | * output format. Note, this is not needed if the data is read | ||
199 | * in binary. | ||
200 | */ | ||
201 | |||
202 | #undef __entry | ||
203 | #define __entry field | ||
204 | |||
205 | #undef TP_printk | ||
206 | #define TP_printk(fmt, args...) fmt "\n", args | ||
207 | |||
208 | #undef __get_dynamic_array | ||
209 | #define __get_dynamic_array(field) \ | ||
210 | ((void *)__entry + __entry->__data_loc_##field) | ||
211 | |||
212 | #undef __get_str | ||
213 | #define __get_str(field) (char *)__get_dynamic_array(field) | ||
214 | |||
215 | #undef __print_flags | ||
216 | #define __print_flags(flag, delim, flag_array...) \ | ||
217 | ({ \ | ||
218 | static const struct trace_print_flags flags[] = \ | ||
219 | { flag_array, { -1, NULL }}; \ | ||
220 | ftrace_print_flags_seq(p, delim, flag, flags); \ | ||
221 | }) | ||
222 | |||
223 | #undef __print_symbolic | ||
224 | #define __print_symbolic(value, symbol_array...) \ | ||
225 | ({ \ | ||
226 | static const struct trace_print_flags symbols[] = \ | ||
227 | { symbol_array, { -1, NULL }}; \ | ||
228 | ftrace_print_symbols_seq(p, value, symbols); \ | ||
229 | }) | ||
230 | |||
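Both helpers are intended to be used inside a TP_printk() format string. A minimal sketch, with made-up fields and lookup tables purely for illustration:

	TP_printk("flags=%s state=%s",
		  __print_flags(__entry->flags, "|",
				{ 1, "SYNC" }, { 2, "META" }),
		  __print_symbolic(__entry->state,
				   { 0, "idle" }, { 1, "busy" }))
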
231 | #undef TRACE_EVENT | ||
232 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
233 | enum print_line_t \ | ||
234 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | ||
235 | { \ | ||
236 | struct trace_seq *s = &iter->seq; \ | ||
237 | struct ftrace_raw_##call *field; \ | ||
238 | struct trace_entry *entry; \ | ||
239 | struct trace_seq *p; \ | ||
240 | int ret; \ | ||
241 | \ | ||
242 | entry = iter->ent; \ | ||
243 | \ | ||
244 | if (entry->type != event_##call.id) { \ | ||
245 | WARN_ON_ONCE(1); \ | ||
246 | return TRACE_TYPE_UNHANDLED; \ | ||
247 | } \ | ||
248 | \ | ||
249 | field = (typeof(field))entry; \ | ||
250 | \ | ||
251 | p = &get_cpu_var(ftrace_event_seq); \ | ||
252 | trace_seq_init(p); \ | ||
253 | ret = trace_seq_printf(s, #call ": " print); \ | ||
254 | put_cpu(); \ | ||
255 | if (!ret) \ | ||
256 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
257 | \ | ||
258 | return TRACE_TYPE_HANDLED; \ | ||
259 | } | ||
260 | |||
261 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
262 | |||
263 | #undef __field | ||
264 | #define __field(type, item) \ | ||
265 | ret = trace_define_field(event_call, #type, #item, \ | ||
266 | offsetof(typeof(field), item), \ | ||
267 | sizeof(field.item), is_signed_type(type)); \ | ||
268 | if (ret) \ | ||
269 | return ret; | ||
270 | |||
271 | #undef __array | ||
272 | #define __array(type, item, len) \ | ||
273 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | ||
274 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | ||
275 | offsetof(typeof(field), item), \ | ||
276 | sizeof(field.item), 0); \ | ||
277 | if (ret) \ | ||
278 | return ret; | ||
279 | |||
280 | #undef __dynamic_array | ||
281 | #define __dynamic_array(type, item, len) \ | ||
282 | ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ | ||
283 | offsetof(typeof(field), __data_loc_##item), \ | ||
284 | sizeof(field.__data_loc_##item), 0); | ||
285 | |||
286 | #undef __string | ||
287 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
288 | |||
289 | #undef TRACE_EVENT | ||
290 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | ||
291 | int \ | ||
292 | ftrace_define_fields_##call(void) \ | ||
293 | { \ | ||
294 | struct ftrace_raw_##call field; \ | ||
295 | struct ftrace_event_call *event_call = &event_##call; \ | ||
296 | int ret; \ | ||
297 | \ | ||
298 | __common_field(int, type, 1); \ | ||
299 | __common_field(unsigned char, flags, 0); \ | ||
300 | __common_field(unsigned char, preempt_count, 0); \ | ||
301 | __common_field(int, pid, 1); \ | ||
302 | __common_field(int, tgid, 1); \ | ||
303 | \ | ||
304 | tstruct; \ | ||
305 | \ | ||
306 | return ret; \ | ||
307 | } | ||
308 | |||
309 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
310 | |||
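For illustration, a hypothetical __field(int, irq) entry in TP_STRUCT__entry expands under the macros above to roughly:

	ret = trace_define_field(event_call, "int", "irq",
				 offsetof(typeof(field), irq),
				 sizeof(field.irq), is_signed_type(int));
	if (ret)
		return ret;
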
311 | /* | ||
312 | * Remember the offset of each dynamic array from the beginning of the event. | ||
313 | */ | ||
314 | |||
315 | #undef __entry | ||
316 | #define __entry entry | ||
317 | |||
318 | #undef __field | ||
319 | #define __field(type, item) | ||
320 | |||
321 | #undef __array | ||
322 | #define __array(type, item, len) | ||
323 | |||
324 | #undef __dynamic_array | ||
325 | #define __dynamic_array(type, item, len) \ | ||
326 | __data_offsets->item = __data_size + \ | ||
327 | offsetof(typeof(*entry), __data); \ | ||
328 | __data_size += (len) * sizeof(type); | ||
329 | |||
330 | #undef __string | ||
331 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ | ||
332 | |||
333 | #undef TRACE_EVENT | ||
334 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
335 | static inline int ftrace_get_offsets_##call( \ | ||
336 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | ||
337 | { \ | ||
338 | int __data_size = 0; \ | ||
339 | struct ftrace_raw_##call __maybe_unused *entry; \ | ||
340 | \ | ||
341 | tstruct; \ | ||
342 | \ | ||
343 | return __data_size; \ | ||
344 | } | ||
345 | |||
346 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
347 | |||
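As a sketch, a hypothetical __string(name, p->comm) field expands inside ftrace_get_offsets_<call>() to roughly the following, recording where the string will land behind the fixed-size part of the entry (the __data flexible member of the raw event structure) and growing the dynamic size accordingly:

	__data_offsets->name = __data_size +
			       offsetof(typeof(*entry), __data);
	__data_size += (strlen(p->comm) + 1) * sizeof(char);
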
348 | /* | ||
349 | * Stage 4 of the trace events. | ||
350 | * | ||
351 | * Override the macros in <trace/trace_events.h> to include the following: | ||
352 | * | ||
353 | * static void ftrace_event_<call>(proto) | ||
354 | * { | ||
355 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | ||
356 | * } | ||
357 | * | ||
358 | * static int ftrace_reg_event_<call>(void) | ||
359 | * { | ||
360 | * int ret; | ||
361 | * | ||
362 | * ret = register_trace_<call>(ftrace_event_<call>); | ||
363 | * if (ret) | ||
364 | * pr_info("event trace: Could not activate trace point " | ||
365 | * "probe to <call>"); | ||
366 | * return ret; | ||
367 | * } | ||
368 | * | ||
369 | * static void ftrace_unreg_event_<call>(void) | ||
370 | * { | ||
371 | * unregister_trace_<call>(ftrace_event_<call>); | ||
372 | * } | ||
373 | * | ||
374 | * | ||
375 | * For those macros defined with TRACE_EVENT: | ||
376 | * | ||
377 | * static struct ftrace_event_call event_<call>; | ||
378 | * | ||
379 | * static void ftrace_raw_event_<call>(proto) | ||
380 | * { | ||
381 | * struct ring_buffer_event *event; | ||
382 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | ||
383 | * unsigned long irq_flags; | ||
384 | * int pc; | ||
385 | * | ||
386 | * local_save_flags(irq_flags); | ||
387 | * pc = preempt_count(); | ||
388 | * | ||
389 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | ||
390 | * sizeof(struct ftrace_raw_<call>), | ||
391 | * irq_flags, pc); | ||
392 | * if (!event) | ||
393 | * return; | ||
394 | * entry = ring_buffer_event_data(event); | ||
395 | * | ||
396 | * <assign>; <-- Here we assign the entries by the __field and | ||
397 | * __array macros. | ||
398 | * | ||
399 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | ||
400 | * } | ||
401 | * | ||
402 | * static int ftrace_raw_reg_event_<call>(void) | ||
403 | * { | ||
404 | * int ret; | ||
405 | * | ||
406 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
407 | * if (ret) | ||
408 | * pr_info("event trace: Could not activate trace point " | ||
409 | * "probe to <call>"); | ||
410 | * return ret; | ||
411 | * } | ||
412 | * | ||
413 | * static void ftrace_raw_unreg_event_<call>(void) | ||
414 | * { | ||
415 | * unregister_trace_<call>(ftrace_raw_event_<call>); | ||
416 | * } | ||
417 | * | ||
418 | * static struct trace_event ftrace_event_type_<call> = { | ||
419 | * .trace = ftrace_raw_output_<call>, <-- stage 3 | ||
420 | * }; | ||
421 | * | ||
422 | * static int ftrace_raw_init_event_<call>(void) | ||
423 | * { | ||
424 | * int id; | ||
425 | * | ||
426 | * id = register_ftrace_event(&ftrace_event_type_<call>); | ||
427 | * if (!id) | ||
428 | * return -ENODEV; | ||
429 | * event_<call>.id = id; | ||
430 | * return 0; | ||
431 | * } | ||
432 | * | ||
433 | * static struct ftrace_event_call __used | ||
434 | * __attribute__((__aligned__(4))) | ||
435 | * __attribute__((section("_ftrace_events"))) event_<call> = { | ||
436 | * .name = "<call>", | ||
437 | * .system = "<system>", | ||
438 | * .raw_init = ftrace_raw_init_event_<call>, | ||
439 | * .regfunc = ftrace_raw_reg_event_<call>, | ||
440 | * .unregfunc = ftrace_raw_unreg_event_<call>, | ||
441 | * .show_format = ftrace_format_<call>, | ||
442 | * } | ||
443 | * | ||
444 | */ | ||
445 | |||
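To make the stages concrete, a hypothetical event definition that this machinery would expand might look like the following; the event name, arguments and fields are purely illustrative:

	TRACE_EVENT(foo_bar,

		TP_PROTO(const char *name, int value),

		TP_ARGS(name, value),

		TP_STRUCT__entry(
			__string(	name,	name	)
			__field(	int,	value	)
		),

		TP_fast_assign(
			__assign_str(name, name);
			__entry->value = value;
		),

		TP_printk("name=%s value=%d", __get_str(name), __entry->value)
	);

Each pass over this definition through TRACE_INCLUDE() then generates the format, output, field-definition, offset and registration code described above and below.
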
446 | #undef TP_FMT | ||
447 | #define TP_FMT(fmt, args...) fmt "\n", ##args | ||
448 | |||
449 | #ifdef CONFIG_EVENT_PROFILE | ||
450 | #define _TRACE_PROFILE(call, proto, args) \ | ||
451 | static void ftrace_profile_##call(proto) \ | ||
452 | { \ | ||
453 | extern void perf_tpcounter_event(int); \ | ||
454 | perf_tpcounter_event(event_##call.id); \ | ||
455 | } \ | ||
456 | \ | ||
457 | static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ | ||
458 | { \ | ||
459 | int ret = 0; \ | ||
460 | \ | ||
461 | if (!atomic_inc_return(&event_call->profile_count)) \ | ||
462 | ret = register_trace_##call(ftrace_profile_##call); \ | ||
463 | \ | ||
464 | return ret; \ | ||
465 | } \ | ||
466 | \ | ||
467 | static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | ||
468 | { \ | ||
469 | if (atomic_add_negative(-1, &event_call->profile_count)) \ | ||
470 | unregister_trace_##call(ftrace_profile_##call); \ | ||
471 | } | ||
472 | |||
473 | #define _TRACE_PROFILE_INIT(call) \ | ||
474 | .profile_count = ATOMIC_INIT(-1), \ | ||
475 | .profile_enable = ftrace_profile_enable_##call, \ | ||
476 | .profile_disable = ftrace_profile_disable_##call, | ||
477 | |||
478 | #else | ||
479 | #define _TRACE_PROFILE(call, proto, args) | ||
480 | #define _TRACE_PROFILE_INIT(call) | ||
481 | #endif | ||
482 | |||
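The profile hooks rely on a counter that starts at -1 so that only the first enable registers the probe and only the last disable removes it. A minimal sketch of the same idiom, with hypothetical register_probe()/unregister_probe() helpers standing in for register_trace_<call>()/unregister_trace_<call>():

	/* hypothetical counter, mirrors .profile_count = ATOMIC_INIT(-1) */
	static atomic_t example_count = ATOMIC_INIT(-1);

	static int example_enable(void)
	{
		/* -1 -> 0: first user, attach the probe */
		if (!atomic_inc_return(&example_count))
			return register_probe();
		return 0;
	}

	static void example_disable(void)
	{
		/* 0 -> -1: last user, detach the probe */
		if (atomic_add_negative(-1, &example_count))
			unregister_probe();
	}
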
483 | #undef __entry | ||
484 | #define __entry entry | ||
485 | |||
486 | #undef __field | ||
487 | #define __field(type, item) | ||
488 | |||
489 | #undef __array | ||
490 | #define __array(type, item, len) | ||
491 | |||
492 | #undef __dynamic_array | ||
493 | #define __dynamic_array(type, item, len) \ | ||
494 | __entry->__data_loc_##item = __data_offsets.item; | ||
495 | |||
496 | #undef __string | ||
497 | #define __string(item, src) __dynamic_array(char, item, -1) \ | ||
498 | |||
499 | #undef __assign_str | ||
500 | #define __assign_str(dst, src) \ | ||
501 | strcpy(__get_str(dst), src); | ||
502 | |||
503 | #undef TRACE_EVENT | ||
504 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
505 | _TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | ||
506 | \ | ||
507 | static struct ftrace_event_call event_##call; \ | ||
508 | \ | ||
509 | static void ftrace_raw_event_##call(proto) \ | ||
510 | { \ | ||
511 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | ||
512 | struct ftrace_event_call *event_call = &event_##call; \ | ||
513 | struct ring_buffer_event *event; \ | ||
514 | struct ftrace_raw_##call *entry; \ | ||
515 | unsigned long irq_flags; \ | ||
516 | int __data_size; \ | ||
517 | int pc; \ | ||
518 | \ | ||
519 | local_save_flags(irq_flags); \ | ||
520 | pc = preempt_count(); \ | ||
521 | \ | ||
522 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | ||
523 | \ | ||
524 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | ||
525 | sizeof(*entry) + __data_size, \ | ||
526 | irq_flags, pc); \ | ||
527 | if (!event) \ | ||
528 | return; \ | ||
529 | entry = ring_buffer_event_data(event); \ | ||
530 | \ | ||
531 | \ | ||
532 | tstruct \ | ||
533 | \ | ||
534 | { assign; } \ | ||
535 | \ | ||
536 | if (!filter_current_check_discard(event_call, entry, event)) \ | ||
537 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | ||
538 | } \ | ||
539 | \ | ||
540 | static int ftrace_raw_reg_event_##call(void) \ | ||
541 | { \ | ||
542 | int ret; \ | ||
543 | \ | ||
544 | ret = register_trace_##call(ftrace_raw_event_##call); \ | ||
545 | if (ret) \ | ||
546 | pr_info("event trace: Could not activate trace point " \ | ||
547 | "probe to " #call "\n"); \ | ||
548 | return ret; \ | ||
549 | } \ | ||
550 | \ | ||
551 | static void ftrace_raw_unreg_event_##call(void) \ | ||
552 | { \ | ||
553 | unregister_trace_##call(ftrace_raw_event_##call); \ | ||
554 | } \ | ||
555 | \ | ||
556 | static struct trace_event ftrace_event_type_##call = { \ | ||
557 | .trace = ftrace_raw_output_##call, \ | ||
558 | }; \ | ||
559 | \ | ||
560 | static int ftrace_raw_init_event_##call(void) \ | ||
561 | { \ | ||
562 | int id; \ | ||
563 | \ | ||
564 | id = register_ftrace_event(&ftrace_event_type_##call); \ | ||
565 | if (!id) \ | ||
566 | return -ENODEV; \ | ||
567 | event_##call.id = id; \ | ||
568 | INIT_LIST_HEAD(&event_##call.fields); \ | ||
569 | init_preds(&event_##call); \ | ||
570 | return 0; \ | ||
571 | } \ | ||
572 | \ | ||
573 | static struct ftrace_event_call __used \ | ||
574 | __attribute__((__aligned__(4))) \ | ||
575 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
576 | .name = #call, \ | ||
577 | .system = __stringify(TRACE_SYSTEM), \ | ||
578 | .event = &ftrace_event_type_##call, \ | ||
579 | .raw_init = ftrace_raw_init_event_##call, \ | ||
580 | .regfunc = ftrace_raw_reg_event_##call, \ | ||
581 | .unregfunc = ftrace_raw_unreg_event_##call, \ | ||
582 | .show_format = ftrace_format_##call, \ | ||
583 | .define_fields = ftrace_define_fields_##call, \ | ||
584 | _TRACE_PROFILE_INIT(call) \ | ||
585 | } | ||
586 | |||
587 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
588 | |||
589 | #undef _TRACE_PROFILE | ||
590 | #undef _TRACE_PROFILE_INIT | ||
591 | |||
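A subsystem event header of this era plugs into the machinery above roughly as follows; the "foo" system name and guard are hypothetical, and the exact boilerplate is dictated by <trace/define_trace.h>:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FOO_H

	#include <linux/tracepoint.h>

	/* TRACE_EVENT() definitions for the foo system go here */

	#endif /* _TRACE_FOO_H */

	/* This part must be outside the include guard */
	#include <trace/define_trace.h>

Exactly one .c file per subsystem defines CREATE_TRACE_POINTS before including such a header, so that the tracepoints themselves are instantiated only once.
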
diff --git a/include/trace/irq.h b/include/trace/irq.h deleted file mode 100644 index ff5d4495dc37..000000000000 --- a/include/trace/irq.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_IRQ_H | ||
2 | #define _TRACE_IRQ_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/irq_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h deleted file mode 100644 index 85964ebd47ec..000000000000 --- a/include/trace/irq_event_types.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | |||
2 | /* use <trace/irq.h> instead */ | ||
3 | #ifndef TRACE_FORMAT | ||
4 | # error Do not include this file directly. | ||
5 | # error Unless you know what you are doing. | ||
6 | #endif | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM irq | ||
10 | |||
11 | /* | ||
12 | * Tracepoint for entry of interrupt handler: | ||
13 | */ | ||
14 | TRACE_FORMAT(irq_handler_entry, | ||
15 | TP_PROTO(int irq, struct irqaction *action), | ||
16 | TP_ARGS(irq, action), | ||
17 | TP_FMT("irq=%d handler=%s", irq, action->name) | ||
18 | ); | ||
19 | |||
20 | /* | ||
21 | * Tracepoint for return of an interrupt handler: | ||
22 | */ | ||
23 | TRACE_EVENT(irq_handler_exit, | ||
24 | |||
25 | TP_PROTO(int irq, struct irqaction *action, int ret), | ||
26 | |||
27 | TP_ARGS(irq, action, ret), | ||
28 | |||
29 | TP_STRUCT__entry( | ||
30 | __field( int, irq ) | ||
31 | __field( int, ret ) | ||
32 | ), | ||
33 | |||
34 | TP_fast_assign( | ||
35 | __entry->irq = irq; | ||
36 | __entry->ret = ret; | ||
37 | ), | ||
38 | |||
39 | TP_printk("irq=%d return=%s", | ||
40 | __entry->irq, __entry->ret ? "handled" : "unhandled") | ||
41 | ); | ||
42 | |||
43 | TRACE_FORMAT(softirq_entry, | ||
44 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
45 | TP_ARGS(h, vec), | ||
46 | TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) | ||
47 | ); | ||
48 | |||
49 | TRACE_FORMAT(softirq_exit, | ||
50 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
51 | TP_ARGS(h, vec), | ||
52 | TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) | ||
53 | ); | ||
54 | |||
55 | #undef TRACE_SYSTEM | ||
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h deleted file mode 100644 index 28ee69f9cd46..000000000000 --- a/include/trace/kmemtrace.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/tracepoint.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | #ifdef CONFIG_KMEMTRACE | ||
16 | extern void kmemtrace_init(void); | ||
17 | #else | ||
18 | static inline void kmemtrace_init(void) | ||
19 | { | ||
20 | } | ||
21 | #endif | ||
22 | |||
23 | DECLARE_TRACE(kmalloc, | ||
24 | TP_PROTO(unsigned long call_site, | ||
25 | const void *ptr, | ||
26 | size_t bytes_req, | ||
27 | size_t bytes_alloc, | ||
28 | gfp_t gfp_flags), | ||
29 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
30 | DECLARE_TRACE(kmem_cache_alloc, | ||
31 | TP_PROTO(unsigned long call_site, | ||
32 | const void *ptr, | ||
33 | size_t bytes_req, | ||
34 | size_t bytes_alloc, | ||
35 | gfp_t gfp_flags), | ||
36 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
37 | DECLARE_TRACE(kmalloc_node, | ||
38 | TP_PROTO(unsigned long call_site, | ||
39 | const void *ptr, | ||
40 | size_t bytes_req, | ||
41 | size_t bytes_alloc, | ||
42 | gfp_t gfp_flags, | ||
43 | int node), | ||
44 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
45 | DECLARE_TRACE(kmem_cache_alloc_node, | ||
46 | TP_PROTO(unsigned long call_site, | ||
47 | const void *ptr, | ||
48 | size_t bytes_req, | ||
49 | size_t bytes_alloc, | ||
50 | gfp_t gfp_flags, | ||
51 | int node), | ||
52 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
53 | DECLARE_TRACE(kfree, | ||
54 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
55 | TP_ARGS(call_site, ptr)); | ||
56 | DECLARE_TRACE(kmem_cache_free, | ||
57 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
58 | TP_ARGS(call_site, ptr)); | ||
59 | |||
60 | #endif /* __KERNEL__ */ | ||
61 | |||
62 | #endif /* _LINUX_KMEMTRACE_H */ | ||
63 | |||
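For headers like this one that only DECLARE_TRACE() tracepoints, a consumer attaches a probe whose signature matches the TP_PROTO. A hedged sketch for the kfree tracepoint above, with a made-up probe and module init/exit pair:

	static void probe_kfree(unsigned long call_site, const void *ptr)
	{
		/* account the free here, e.g. bump a per-callsite counter */
	}

	static int __init kfree_probe_init(void)
	{
		return register_trace_kfree(probe_kfree);
	}

	static void __exit kfree_probe_exit(void)
	{
		unregister_trace_kfree(probe_kfree);
		tracepoint_synchronize_unregister();
	}
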
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h deleted file mode 100644 index 5ca67df87f2a..000000000000 --- a/include/trace/lockdep.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_LOCKDEP_H | ||
2 | #define _TRACE_LOCKDEP_H | ||
3 | |||
4 | #include <linux/lockdep.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/lockdep_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h deleted file mode 100644 index adccfcd2ec8f..000000000000 --- a/include/trace/lockdep_event_types.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | |||
2 | #ifndef TRACE_FORMAT | ||
3 | # error Do not include this file directly. | ||
4 | # error Unless you know what you are doing. | ||
5 | #endif | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM lock | ||
9 | |||
10 | #ifdef CONFIG_LOCKDEP | ||
11 | |||
12 | TRACE_FORMAT(lock_acquire, | ||
13 | TP_PROTO(struct lockdep_map *lock, unsigned int subclass, | ||
14 | int trylock, int read, int check, | ||
15 | struct lockdep_map *next_lock, unsigned long ip), | ||
16 | TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), | ||
17 | TP_FMT("%s%s%s", trylock ? "try " : "", | ||
18 | read ? "read " : "", lock->name) | ||
19 | ); | ||
20 | |||
21 | TRACE_FORMAT(lock_release, | ||
22 | TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), | ||
23 | TP_ARGS(lock, nested, ip), | ||
24 | TP_FMT("%s", lock->name) | ||
25 | ); | ||
26 | |||
27 | #ifdef CONFIG_LOCK_STAT | ||
28 | |||
29 | TRACE_FORMAT(lock_contended, | ||
30 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
31 | TP_ARGS(lock, ip), | ||
32 | TP_FMT("%s", lock->name) | ||
33 | ); | ||
34 | |||
35 | TRACE_FORMAT(lock_acquired, | ||
36 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
37 | TP_ARGS(lock, ip), | ||
38 | TP_FMT("%s", lock->name) | ||
39 | ); | ||
40 | |||
41 | #endif | ||
42 | #endif | ||
43 | |||
44 | #undef TRACE_SYSTEM | ||
diff --git a/include/trace/sched.h b/include/trace/sched.h deleted file mode 100644 index 4e372a1a29bf..000000000000 --- a/include/trace/sched.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_SCHED_H | ||
2 | #define _TRACE_SCHED_H | ||
3 | |||
4 | #include <linux/sched.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/sched_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/skb.h b/include/trace/skb.h deleted file mode 100644 index b66206d9be72..000000000000 --- a/include/trace/skb.h +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | #ifndef _TRACE_SKB_H_ | ||
2 | #define _TRACE_SKB_H_ | ||
3 | |||
4 | #include <linux/skbuff.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | DECLARE_TRACE(kfree_skb, | ||
8 | TP_PROTO(struct sk_buff *skb, void *location), | ||
9 | TP_ARGS(skb, location)); | ||
10 | |||
11 | #endif | ||
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h deleted file mode 100644 index df56f5694be6..000000000000 --- a/include/trace/trace_event_types.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | /* trace/<type>_event_types.h here */ | ||
2 | |||
3 | #include <trace/sched_event_types.h> | ||
4 | #include <trace/irq_event_types.h> | ||
5 | #include <trace/lockdep_event_types.h> | ||
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h deleted file mode 100644 index fd13750ca4ba..000000000000 --- a/include/trace/trace_events.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | /* trace/<type>.h here */ | ||
2 | |||
3 | #include <trace/sched.h> | ||
4 | #include <trace/irq.h> | ||
5 | #include <trace/lockdep.h> | ||
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h deleted file mode 100644 index 7626523deeba..000000000000 --- a/include/trace/workqueue.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | #ifndef __TRACE_WORKQUEUE_H | ||
2 | #define __TRACE_WORKQUEUE_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/workqueue.h> | ||
6 | #include <linux/sched.h> | ||
7 | |||
8 | DECLARE_TRACE(workqueue_insertion, | ||
9 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
10 | TP_ARGS(wq_thread, work)); | ||
11 | |||
12 | DECLARE_TRACE(workqueue_execution, | ||
13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
14 | TP_ARGS(wq_thread, work)); | ||
15 | |||
16 | /* Trace the creation of one workqueue thread on a cpu */ | ||
17 | DECLARE_TRACE(workqueue_creation, | ||
18 | TP_PROTO(struct task_struct *wq_thread, int cpu), | ||
19 | TP_ARGS(wq_thread, cpu)); | ||
20 | |||
21 | DECLARE_TRACE(workqueue_destruction, | ||
22 | TP_PROTO(struct task_struct *wq_thread), | ||
23 | TP_ARGS(wq_thread)); | ||
24 | |||
25 | #endif /* __TRACE_WORKQUEUE_H */ | ||