aboutsummaryrefslogtreecommitdiffstats
path: root/include/trace
diff options
context:
space:
mode:
authorLi Zefan <lizf@cn.fujitsu.com>2009-06-09 01:43:05 -0400
committerSteven Rostedt <rostedt@goodmis.org>2009-06-09 12:34:23 -0400
commit55782138e47d9baf2f7d3a7af9e7cf42adf72c56 (patch)
treec7ccabae20e27bbeb08b69a358e8b86c98d1d9f3 /include/trace
parentf57a8a1911342265e7acdc190333c4e9235a6632 (diff)
tracing/events: convert block trace points to TRACE_EVENT()
TRACE_EVENT is a more generic way to define tracepoints. Doing so adds these new capabilities to this tracepoint: - zero-copy and per-cpu splice() tracing - binary tracing without printf overhead - structured logging records exposed under /debug/tracing/events - trace events embedded in function tracer output and other plugins - user-defined, per tracepoint filter expressions ... Cons: - no dev_t info for the output of plug, unplug_timer and unplug_io events. no dev_t info for getrq and sleeprq events if bio == NULL. no dev_t info for rq_abort,...,rq_requeue events if rq->rq_disk == NULL. This is mainly because we can't get the device from a request queue. But this may change in the future. - A packet command is converted to a string in TP_assign, not TP_print. While blktrace does the conversion just before output. Since pc requests should be rather rare, this is not a big issue. - In blktrace, an event can have 2 different print formats, but a TRACE_EVENT has a unique format, which means we have some unused data in a trace entry. The overhead is minimized by using __dynamic_array() instead of __array(). I've benchmarked the ioctl blktrace vs the splice based TRACE_EVENT tracing: dd dd + ioctl blktrace dd + TRACE_EVENT (splice) 1 7.36s, 42.7 MB/s 7.50s, 42.0 MB/s 7.41s, 42.5 MB/s 2 7.43s, 42.3 MB/s 7.48s, 42.1 MB/s 7.43s, 42.4 MB/s 3 7.38s, 42.6 MB/s 7.45s, 42.2 MB/s 7.41s, 42.5 MB/s So the overhead of tracing is very small, and no regression when using those trace events vs blktrace. 
And the binary output of TRACE_EVENT is much smaller than blktrace: # ls -l -h -rw-r--r-- 1 root root 8.8M 06-09 13:24 sda.blktrace.0 -rw-r--r-- 1 root root 195K 06-09 13:24 sda.blktrace.1 -rw-r--r-- 1 root root 2.7M 06-09 13:25 trace_splice.out Following are some comparisons between TRACE_EVENT and blktrace: plug: kjournald-480 [000] 303.084981: block_plug: [kjournald] kjournald-480 [000] 303.084981: 8,0 P N [kjournald] unplug_io: kblockd/0-118 [000] 300.052973: block_unplug_io: [kblockd/0] 1 kblockd/0-118 [000] 300.052974: 8,0 U N [kblockd/0] 1 remap: kjournald-480 [000] 303.085042: block_remap: 8,0 W 102736992 + 8 <- (8,8) 33384 kjournald-480 [000] 303.085043: 8,0 A W 102736992 + 8 <- (8,8) 33384 bio_backmerge: kjournald-480 [000] 303.085086: block_bio_backmerge: 8,0 W 102737032 + 8 [kjournald] kjournald-480 [000] 303.085086: 8,0 M W 102737032 + 8 [kjournald] getrq: kjournald-480 [000] 303.084974: block_getrq: 8,0 W 102736984 + 8 [kjournald] kjournald-480 [000] 303.084975: 8,0 G W 102736984 + 8 [kjournald] bash-2066 [001] 1072.953770: 8,0 G N [bash] bash-2066 [001] 1072.953773: block_getrq: 0,0 N 0 + 0 [bash] rq_complete: konsole-2065 [001] 300.053184: block_rq_complete: 8,0 W () 103669040 + 16 [0] konsole-2065 [001] 300.053191: 8,0 C W 103669040 + 16 [0] ksoftirqd/1-7 [001] 1072.953811: 8,0 C N (5a 00 08 00 00 00 00 00 24 00) [0] ksoftirqd/1-7 [001] 1072.953813: block_rq_complete: 0,0 N (5a 00 08 00 00 00 00 00 24 00) 0 + 0 [0] rq_insert: kjournald-480 [000] 303.084985: block_rq_insert: 8,0 W 0 () 102736984 + 8 [kjournald] kjournald-480 [000] 303.084986: 8,0 I W 102736984 + 8 [kjournald] Changelog from v2 -> v3: - use the newly introduced __dynamic_array(). Changelog from v1 -> v2: - use __string() instead of __array() to minimize the memory required to store hex dump of rq->cmd(). - support large pc requests. - add missing blk_fill_rwbs_rq() in block_rq_requeue TRACE_EVENT. - some cleanups. 
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> LKML-Reference: <4A2DF669.5070905@cn.fujitsu.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'include/trace')
-rw-r--r--include/trace/block.h76
-rw-r--r--include/trace/events/block.h483
2 files changed, 483 insertions, 76 deletions
diff --git a/include/trace/block.h b/include/trace/block.h
deleted file mode 100644
index 5b12efa096b6..000000000000
--- a/include/trace/block.h
+++ /dev/null
@@ -1,76 +0,0 @@
1#ifndef _TRACE_BLOCK_H
2#define _TRACE_BLOCK_H
3
4#include <linux/blkdev.h>
5#include <linux/tracepoint.h>
6
7DECLARE_TRACE(block_rq_abort,
8 TP_PROTO(struct request_queue *q, struct request *rq),
9 TP_ARGS(q, rq));
10
11DECLARE_TRACE(block_rq_insert,
12 TP_PROTO(struct request_queue *q, struct request *rq),
13 TP_ARGS(q, rq));
14
15DECLARE_TRACE(block_rq_issue,
16 TP_PROTO(struct request_queue *q, struct request *rq),
17 TP_ARGS(q, rq));
18
19DECLARE_TRACE(block_rq_requeue,
20 TP_PROTO(struct request_queue *q, struct request *rq),
21 TP_ARGS(q, rq));
22
23DECLARE_TRACE(block_rq_complete,
24 TP_PROTO(struct request_queue *q, struct request *rq),
25 TP_ARGS(q, rq));
26
27DECLARE_TRACE(block_bio_bounce,
28 TP_PROTO(struct request_queue *q, struct bio *bio),
29 TP_ARGS(q, bio));
30
31DECLARE_TRACE(block_bio_complete,
32 TP_PROTO(struct request_queue *q, struct bio *bio),
33 TP_ARGS(q, bio));
34
35DECLARE_TRACE(block_bio_backmerge,
36 TP_PROTO(struct request_queue *q, struct bio *bio),
37 TP_ARGS(q, bio));
38
39DECLARE_TRACE(block_bio_frontmerge,
40 TP_PROTO(struct request_queue *q, struct bio *bio),
41 TP_ARGS(q, bio));
42
43DECLARE_TRACE(block_bio_queue,
44 TP_PROTO(struct request_queue *q, struct bio *bio),
45 TP_ARGS(q, bio));
46
47DECLARE_TRACE(block_getrq,
48 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
49 TP_ARGS(q, bio, rw));
50
51DECLARE_TRACE(block_sleeprq,
52 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
53 TP_ARGS(q, bio, rw));
54
55DECLARE_TRACE(block_plug,
56 TP_PROTO(struct request_queue *q),
57 TP_ARGS(q));
58
59DECLARE_TRACE(block_unplug_timer,
60 TP_PROTO(struct request_queue *q),
61 TP_ARGS(q));
62
63DECLARE_TRACE(block_unplug_io,
64 TP_PROTO(struct request_queue *q),
65 TP_ARGS(q));
66
67DECLARE_TRACE(block_split,
68 TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
69 TP_ARGS(q, bio, pdu));
70
71DECLARE_TRACE(block_remap,
72 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
73 sector_t from),
74 TP_ARGS(q, bio, dev, from));
75
76#endif
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 000000000000..a99d1e565bb0
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,483 @@
1#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_BLOCK_H
3
4#include <linux/blktrace_api.h>
5#include <linux/blkdev.h>
6#include <linux/tracepoint.h>
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM block
10
11TRACE_EVENT(block_rq_abort,
12
13 TP_PROTO(struct request_queue *q, struct request *rq),
14
15 TP_ARGS(q, rq),
16
17 TP_STRUCT__entry(
18 __field( dev_t, dev )
19 __field( sector_t, sector )
20 __field( unsigned int, nr_sector )
21 __field( int, errors )
22 __array( char, rwbs, 6 )
23 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
24 ),
25
26 TP_fast_assign(
27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
28 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
29 __entry->nr_sector = blk_pc_request(rq) ?
30 0 : rq->hard_nr_sectors;
31 __entry->errors = rq->errors;
32
33 blk_fill_rwbs_rq(__entry->rwbs, rq);
34 blk_dump_cmd(__get_str(cmd), rq);
35 ),
36
37 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
38 MAJOR(__entry->dev), MINOR(__entry->dev),
39 __entry->rwbs, __get_str(cmd),
40 __entry->sector, __entry->nr_sector, __entry->errors)
41);
42
43TRACE_EVENT(block_rq_insert,
44
45 TP_PROTO(struct request_queue *q, struct request *rq),
46
47 TP_ARGS(q, rq),
48
49 TP_STRUCT__entry(
50 __field( dev_t, dev )
51 __field( sector_t, sector )
52 __field( unsigned int, nr_sector )
53 __field( unsigned int, bytes )
54 __array( char, rwbs, 6 )
55 __array( char, comm, TASK_COMM_LEN )
56 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
57 ),
58
59 TP_fast_assign(
60 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
61 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
62 __entry->nr_sector = blk_pc_request(rq) ?
63 0 : rq->hard_nr_sectors;
64 __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
65
66 blk_fill_rwbs_rq(__entry->rwbs, rq);
67 blk_dump_cmd(__get_str(cmd), rq);
68 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
69 ),
70
71 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
72 MAJOR(__entry->dev), MINOR(__entry->dev),
73 __entry->rwbs, __entry->bytes, __get_str(cmd),
74 __entry->sector, __entry->nr_sector, __entry->comm)
75);
76
77TRACE_EVENT(block_rq_issue,
78
79 TP_PROTO(struct request_queue *q, struct request *rq),
80
81 TP_ARGS(q, rq),
82
83 TP_STRUCT__entry(
84 __field( dev_t, dev )
85 __field( sector_t, sector )
86 __field( unsigned int, nr_sector )
87 __field( unsigned int, bytes )
88 __array( char, rwbs, 6 )
89 __array( char, comm, TASK_COMM_LEN )
90 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
91 ),
92
93 TP_fast_assign(
94 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
95 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
96 __entry->nr_sector = blk_pc_request(rq) ?
97 0 : rq->hard_nr_sectors;
98 __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0;
99
100 blk_fill_rwbs_rq(__entry->rwbs, rq);
101 blk_dump_cmd(__get_str(cmd), rq);
102 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
103 ),
104
105 TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
106 MAJOR(__entry->dev), MINOR(__entry->dev),
107 __entry->rwbs, __entry->bytes, __get_str(cmd),
108 __entry->sector, __entry->nr_sector, __entry->comm)
109);
110
111TRACE_EVENT(block_rq_requeue,
112
113 TP_PROTO(struct request_queue *q, struct request *rq),
114
115 TP_ARGS(q, rq),
116
117 TP_STRUCT__entry(
118 __field( dev_t, dev )
119 __field( sector_t, sector )
120 __field( unsigned int, nr_sector )
121 __field( int, errors )
122 __array( char, rwbs, 6 )
123 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
124 ),
125
126 TP_fast_assign(
127 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
128 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
129 __entry->nr_sector = blk_pc_request(rq) ?
130 0 : rq->hard_nr_sectors;
131 __entry->errors = rq->errors;
132
133 blk_fill_rwbs_rq(__entry->rwbs, rq);
134 blk_dump_cmd(__get_str(cmd), rq);
135 ),
136
137 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
138 MAJOR(__entry->dev), MINOR(__entry->dev),
139 __entry->rwbs, __get_str(cmd),
140 __entry->sector, __entry->nr_sector, __entry->errors)
141);
142
143TRACE_EVENT(block_rq_complete,
144
145 TP_PROTO(struct request_queue *q, struct request *rq),
146
147 TP_ARGS(q, rq),
148
149 TP_STRUCT__entry(
150 __field( dev_t, dev )
151 __field( sector_t, sector )
152 __field( unsigned int, nr_sector )
153 __field( int, errors )
154 __array( char, rwbs, 6 )
155 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
156 ),
157
158 TP_fast_assign(
159 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
160 __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector;
161 __entry->nr_sector = blk_pc_request(rq) ?
162 0 : rq->hard_nr_sectors;
163 __entry->errors = rq->errors;
164
165 blk_fill_rwbs_rq(__entry->rwbs, rq);
166 blk_dump_cmd(__get_str(cmd), rq);
167 ),
168
169 TP_printk("%d,%d %s (%s) %llu + %u [%d]",
170 MAJOR(__entry->dev), MINOR(__entry->dev),
171 __entry->rwbs, __get_str(cmd),
172 __entry->sector, __entry->nr_sector, __entry->errors)
173);
174TRACE_EVENT(block_bio_bounce,
175
176 TP_PROTO(struct request_queue *q, struct bio *bio),
177
178 TP_ARGS(q, bio),
179
180 TP_STRUCT__entry(
181 __field( dev_t, dev )
182 __field( sector_t, sector )
183 __field( unsigned int, nr_sector )
184 __array( char, rwbs, 6 )
185 __array( char, comm, TASK_COMM_LEN )
186 ),
187
188 TP_fast_assign(
189 __entry->dev = bio->bi_bdev->bd_dev;
190 __entry->sector = bio->bi_sector;
191 __entry->nr_sector = bio->bi_size >> 9;
192 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
193 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
194 ),
195
196 TP_printk("%d,%d %s %llu + %u [%s]",
197 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
198 __entry->sector, __entry->nr_sector, __entry->comm)
199);
200
201TRACE_EVENT(block_bio_complete,
202
203 TP_PROTO(struct request_queue *q, struct bio *bio),
204
205 TP_ARGS(q, bio),
206
207 TP_STRUCT__entry(
208 __field( dev_t, dev )
209 __field( sector_t, sector )
210 __field( unsigned, nr_sector )
211 __field( int, error )
212 __array( char, rwbs, 6 )
213 ),
214
215 TP_fast_assign(
216 __entry->dev = bio->bi_bdev->bd_dev;
217 __entry->sector = bio->bi_sector;
218 __entry->nr_sector = bio->bi_size >> 9;
219 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
220 ),
221
222 TP_printk("%d,%d %s %llu + %u [%d]",
223 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
224 __entry->sector, __entry->nr_sector, __entry->error)
225);
226
227TRACE_EVENT(block_bio_backmerge,
228
229 TP_PROTO(struct request_queue *q, struct bio *bio),
230
231 TP_ARGS(q, bio),
232
233 TP_STRUCT__entry(
234 __field( dev_t, dev )
235 __field( sector_t, sector )
236 __field( unsigned int, nr_sector )
237 __array( char, rwbs, 6 )
238 __array( char, comm, TASK_COMM_LEN )
239 ),
240
241 TP_fast_assign(
242 __entry->dev = bio->bi_bdev->bd_dev;
243 __entry->sector = bio->bi_sector;
244 __entry->nr_sector = bio->bi_size >> 9;
245 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
246 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
247 ),
248
249 TP_printk("%d,%d %s %llu + %u [%s]",
250 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
251 __entry->sector, __entry->nr_sector, __entry->comm)
252);
253
254TRACE_EVENT(block_bio_frontmerge,
255
256 TP_PROTO(struct request_queue *q, struct bio *bio),
257
258 TP_ARGS(q, bio),
259
260 TP_STRUCT__entry(
261 __field( dev_t, dev )
262 __field( sector_t, sector )
263 __field( unsigned, nr_sector )
264 __array( char, rwbs, 6 )
265 __array( char, comm, TASK_COMM_LEN )
266 ),
267
268 TP_fast_assign(
269 __entry->dev = bio->bi_bdev->bd_dev;
270 __entry->sector = bio->bi_sector;
271 __entry->nr_sector = bio->bi_size >> 9;
272 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
273 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
274 ),
275
276 TP_printk("%d,%d %s %llu + %u [%s]",
277 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
278 __entry->sector, __entry->nr_sector, __entry->comm)
279);
280
281TRACE_EVENT(block_bio_queue,
282
283 TP_PROTO(struct request_queue *q, struct bio *bio),
284
285 TP_ARGS(q, bio),
286
287 TP_STRUCT__entry(
288 __field( dev_t, dev )
289 __field( sector_t, sector )
290 __field( unsigned int, nr_sector )
291 __array( char, rwbs, 6 )
292 __array( char, comm, TASK_COMM_LEN )
293 ),
294
295 TP_fast_assign(
296 __entry->dev = bio->bi_bdev->bd_dev;
297 __entry->sector = bio->bi_sector;
298 __entry->nr_sector = bio->bi_size >> 9;
299 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
300 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
301 ),
302
303 TP_printk("%d,%d %s %llu + %u [%s]",
304 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
305 __entry->sector, __entry->nr_sector, __entry->comm)
306);
307
308TRACE_EVENT(block_getrq,
309
310 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
311
312 TP_ARGS(q, bio, rw),
313
314 TP_STRUCT__entry(
315 __field( dev_t, dev )
316 __field( sector_t, sector )
317 __field( unsigned int, nr_sector )
318 __array( char, rwbs, 6 )
319 __array( char, comm, TASK_COMM_LEN )
320 ),
321
322 TP_fast_assign(
323 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
324 __entry->sector = bio ? bio->bi_sector : 0;
325 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
326 blk_fill_rwbs(__entry->rwbs,
327 bio ? bio->bi_rw : 0, __entry->nr_sector);
328 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
329 ),
330
331 TP_printk("%d,%d %s %llu + %u [%s]",
332 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
333 __entry->sector, __entry->nr_sector, __entry->comm)
334);
335
336TRACE_EVENT(block_sleeprq,
337
338 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
339
340 TP_ARGS(q, bio, rw),
341
342 TP_STRUCT__entry(
343 __field( dev_t, dev )
344 __field( sector_t, sector )
345 __field( unsigned int, nr_sector )
346 __array( char, rwbs, 6 )
347 __array( char, comm, TASK_COMM_LEN )
348 ),
349
350 TP_fast_assign(
351 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
352 __entry->sector = bio ? bio->bi_sector : 0;
353 __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
354 blk_fill_rwbs(__entry->rwbs,
355 bio ? bio->bi_rw : 0, __entry->nr_sector);
356 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
357 ),
358
359 TP_printk("%d,%d %s %llu + %u [%s]",
360 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
361 __entry->sector, __entry->nr_sector, __entry->comm)
362);
363
364TRACE_EVENT(block_plug,
365
366 TP_PROTO(struct request_queue *q),
367
368 TP_ARGS(q),
369
370 TP_STRUCT__entry(
371 __array( char, comm, TASK_COMM_LEN )
372 ),
373
374 TP_fast_assign(
375 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
376 ),
377
378 TP_printk("[%s]", __entry->comm)
379);
380
381TRACE_EVENT(block_unplug_timer,
382
383 TP_PROTO(struct request_queue *q),
384
385 TP_ARGS(q),
386
387 TP_STRUCT__entry(
388 __field( int, nr_rq )
389 __array( char, comm, TASK_COMM_LEN )
390 ),
391
392 TP_fast_assign(
393 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
394 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
395 ),
396
397 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
398);
399
400TRACE_EVENT(block_unplug_io,
401
402 TP_PROTO(struct request_queue *q),
403
404 TP_ARGS(q),
405
406 TP_STRUCT__entry(
407 __field( int, nr_rq )
408 __array( char, comm, TASK_COMM_LEN )
409 ),
410
411 TP_fast_assign(
412 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
413 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
414 ),
415
416 TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
417);
418
419TRACE_EVENT(block_split,
420
421 TP_PROTO(struct request_queue *q, struct bio *bio,
422 unsigned int new_sector),
423
424 TP_ARGS(q, bio, new_sector),
425
426 TP_STRUCT__entry(
427 __field( dev_t, dev )
428 __field( sector_t, sector )
429 __field( sector_t, new_sector )
430 __array( char, rwbs, 6 )
431 __array( char, comm, TASK_COMM_LEN )
432 ),
433
434 TP_fast_assign(
435 __entry->dev = bio->bi_bdev->bd_dev;
436 __entry->sector = bio->bi_sector;
437 __entry->new_sector = new_sector;
438 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
439 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
440 ),
441
442 TP_printk("%d,%d %s %llu / %llu [%s]",
443 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
444 __entry->sector, __entry->new_sector, __entry->comm)
445);
446
447TRACE_EVENT(block_remap,
448
449 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
450 sector_t from),
451
452 TP_ARGS(q, bio, dev, from),
453
454 TP_STRUCT__entry(
455 __field( dev_t, dev )
456 __field( sector_t, sector )
457 __field( unsigned int, nr_sector )
458 __field( dev_t, old_dev )
459 __field( sector_t, old_sector )
460 __array( char, rwbs, 6 )
461 ),
462
463 TP_fast_assign(
464 __entry->dev = bio->bi_bdev->bd_dev;
465 __entry->sector = bio->bi_sector;
466 __entry->nr_sector = bio->bi_size >> 9;
467 __entry->old_dev = dev;
468 __entry->old_sector = from;
469 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
470 ),
471
472 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
473 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
474 __entry->sector, __entry->nr_sector,
475 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
476 __entry->old_sector)
477);
478
479#endif /* _TRACE_BLOCK_H */
480
481/* This part must be outside protection */
482#include <trace/define_trace.h>
483