diff options
Diffstat (limited to 'include/trace/events/block.h')
-rw-r--r-- | include/trace/events/block.h | 366 |
1 files changed, 206 insertions, 160 deletions
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 00405b5f624a..d870a918559c 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/tracepoint.h> | 9 | #include <linux/tracepoint.h> |
10 | 10 | ||
11 | TRACE_EVENT(block_rq_abort, | 11 | DECLARE_EVENT_CLASS(block_rq_with_error, |
12 | 12 | ||
13 | TP_PROTO(struct request_queue *q, struct request *rq), | 13 | TP_PROTO(struct request_queue *q, struct request *rq), |
14 | 14 | ||
@@ -40,41 +40,58 @@ TRACE_EVENT(block_rq_abort, | |||
40 | __entry->nr_sector, __entry->errors) | 40 | __entry->nr_sector, __entry->errors) |
41 | ); | 41 | ); |
42 | 42 | ||
43 | TRACE_EVENT(block_rq_insert, | 43 | /** |
44 | * block_rq_abort - abort block operation request | ||
45 | * @q: queue containing the block operation request | ||
46 | * @rq: block IO operation request | ||
47 | * | ||
48 | * Called immediately after pending block IO operation request @rq in | ||
49 | * queue @q is aborted. The fields in the operation request @rq | ||
50 | * can be examined to determine which device and sectors the pending | ||
51 | * operation would access. | ||
52 | */ | ||
53 | DEFINE_EVENT(block_rq_with_error, block_rq_abort, | ||
44 | 54 | ||
45 | TP_PROTO(struct request_queue *q, struct request *rq), | 55 | TP_PROTO(struct request_queue *q, struct request *rq), |
46 | 56 | ||
47 | TP_ARGS(q, rq), | 57 | TP_ARGS(q, rq) |
58 | ); | ||
48 | 59 | ||
49 | TP_STRUCT__entry( | 60 | /** |
50 | __field( dev_t, dev ) | 61 | * block_rq_requeue - place block IO request back on a queue |
51 | __field( sector_t, sector ) | 62 | * @q: queue holding operation |
52 | __field( unsigned int, nr_sector ) | 63 | * @rq: block IO operation request |
53 | __field( unsigned int, bytes ) | 64 | * |
54 | __array( char, rwbs, 6 ) | 65 | * The block operation request @rq is being placed back into queue |
55 | __array( char, comm, TASK_COMM_LEN ) | 66 | * @q. For some reason the request was not completed and needs to be |
56 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | 67 | * put back in the queue. |
57 | ), | 68 | */ |
69 | DEFINE_EVENT(block_rq_with_error, block_rq_requeue, | ||
58 | 70 | ||
59 | TP_fast_assign( | 71 | TP_PROTO(struct request_queue *q, struct request *rq), |
60 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
61 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
62 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
63 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
64 | 72 | ||
65 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 73 | TP_ARGS(q, rq) |
66 | blk_dump_cmd(__get_str(cmd), rq); | 74 | ); |
67 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
68 | ), | ||
69 | 75 | ||
70 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | 76 | /** |
71 | MAJOR(__entry->dev), MINOR(__entry->dev), | 77 | * block_rq_complete - block IO operation completed by device driver |
72 | __entry->rwbs, __entry->bytes, __get_str(cmd), | 78 | * @q: queue containing the block operation request |
73 | (unsigned long long)__entry->sector, | 79 | * @rq: block operations request |
74 | __entry->nr_sector, __entry->comm) | 80 | * |
81 | * The block_rq_complete tracepoint event indicates that some portion | ||
82 | * of operation request has been completed by the device driver. If | ||
83 | * the @rq->bio is %NULL, then there is absolutely no additional work to | ||
84 | * do for the request. If @rq->bio is non-NULL then there is | ||
85 | * additional work required to complete the request. | ||
86 | */ | ||
87 | DEFINE_EVENT(block_rq_with_error, block_rq_complete, | ||
88 | |||
89 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
90 | |||
91 | TP_ARGS(q, rq) | ||
75 | ); | 92 | ); |
76 | 93 | ||
77 | TRACE_EVENT(block_rq_issue, | 94 | DECLARE_EVENT_CLASS(block_rq, |
78 | 95 | ||
79 | TP_PROTO(struct request_queue *q, struct request *rq), | 96 | TP_PROTO(struct request_queue *q, struct request *rq), |
80 | 97 | ||
@@ -86,7 +103,7 @@ TRACE_EVENT(block_rq_issue, | |||
86 | __field( unsigned int, nr_sector ) | 103 | __field( unsigned int, nr_sector ) |
87 | __field( unsigned int, bytes ) | 104 | __field( unsigned int, bytes ) |
88 | __array( char, rwbs, 6 ) | 105 | __array( char, rwbs, 6 ) |
89 | __array( char, comm, TASK_COMM_LEN ) | 106 | __array( char, comm, TASK_COMM_LEN ) |
90 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | 107 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) |
91 | ), | 108 | ), |
92 | 109 | ||
@@ -108,70 +125,49 @@ TRACE_EVENT(block_rq_issue, | |||
108 | __entry->nr_sector, __entry->comm) | 125 | __entry->nr_sector, __entry->comm) |
109 | ); | 126 | ); |
110 | 127 | ||
111 | TRACE_EVENT(block_rq_requeue, | 128 | /** |
129 | * block_rq_insert - insert block operation request into queue | ||
130 | * @q: target queue | ||
131 | * @rq: block IO operation request | ||
132 | * | ||
133 | * Called immediately before block operation request @rq is inserted | ||
134 | * into queue @q. The fields in the operation request @rq struct can | ||
135 | * be examined to determine which device and sectors the pending | ||
136 | * operation would access. | ||
137 | */ | ||
138 | DEFINE_EVENT(block_rq, block_rq_insert, | ||
112 | 139 | ||
113 | TP_PROTO(struct request_queue *q, struct request *rq), | 140 | TP_PROTO(struct request_queue *q, struct request *rq), |
114 | 141 | ||
115 | TP_ARGS(q, rq), | 142 | TP_ARGS(q, rq) |
116 | |||
117 | TP_STRUCT__entry( | ||
118 | __field( dev_t, dev ) | ||
119 | __field( sector_t, sector ) | ||
120 | __field( unsigned int, nr_sector ) | ||
121 | __field( int, errors ) | ||
122 | __array( char, rwbs, 6 ) | ||
123 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
124 | ), | ||
125 | |||
126 | TP_fast_assign( | ||
127 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
128 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
129 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
130 | __entry->errors = rq->errors; | ||
131 | |||
132 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
133 | blk_dump_cmd(__get_str(cmd), rq); | ||
134 | ), | ||
135 | |||
136 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
137 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
138 | __entry->rwbs, __get_str(cmd), | ||
139 | (unsigned long long)__entry->sector, | ||
140 | __entry->nr_sector, __entry->errors) | ||
141 | ); | 143 | ); |
142 | 144 | ||
143 | TRACE_EVENT(block_rq_complete, | 145 | /** |
146 | * block_rq_issue - issue pending block IO request operation to device driver | ||
147 | * @q: queue holding operation | ||
148 | * @rq: block IO operation request | ||
149 | * | ||
150 | * Called when block operation request @rq from queue @q is sent to a | ||
151 | * device driver for processing. | ||
152 | */ | ||
153 | DEFINE_EVENT(block_rq, block_rq_issue, | ||
144 | 154 | ||
145 | TP_PROTO(struct request_queue *q, struct request *rq), | 155 | TP_PROTO(struct request_queue *q, struct request *rq), |
146 | 156 | ||
147 | TP_ARGS(q, rq), | 157 | TP_ARGS(q, rq) |
148 | |||
149 | TP_STRUCT__entry( | ||
150 | __field( dev_t, dev ) | ||
151 | __field( sector_t, sector ) | ||
152 | __field( unsigned int, nr_sector ) | ||
153 | __field( int, errors ) | ||
154 | __array( char, rwbs, 6 ) | ||
155 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
156 | ), | ||
157 | |||
158 | TP_fast_assign( | ||
159 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
160 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
161 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
162 | __entry->errors = rq->errors; | ||
163 | |||
164 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
165 | blk_dump_cmd(__get_str(cmd), rq); | ||
166 | ), | ||
167 | |||
168 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
169 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
170 | __entry->rwbs, __get_str(cmd), | ||
171 | (unsigned long long)__entry->sector, | ||
172 | __entry->nr_sector, __entry->errors) | ||
173 | ); | 158 | ); |
174 | 159 | ||
160 | /** | ||
161 | * block_bio_bounce - used bounce buffer when processing block operation | ||
162 | * @q: queue holding the block operation | ||
163 | * @bio: block operation | ||
164 | * | ||
165 | * A bounce buffer was used to handle the block operation @bio in @q. | ||
166 | * This occurs when hardware limitations prevent a direct transfer of | ||
167 | * data between the @bio data memory area and the IO device. Use of a | ||
168 | * bounce buffer requires extra copying of data and decreases | ||
169 | * performance. | ||
170 | */ | ||
175 | TRACE_EVENT(block_bio_bounce, | 171 | TRACE_EVENT(block_bio_bounce, |
176 | 172 | ||
177 | TP_PROTO(struct request_queue *q, struct bio *bio), | 173 | TP_PROTO(struct request_queue *q, struct bio *bio), |
@@ -201,6 +197,14 @@ TRACE_EVENT(block_bio_bounce, | |||
201 | __entry->nr_sector, __entry->comm) | 197 | __entry->nr_sector, __entry->comm) |
202 | ); | 198 | ); |
203 | 199 | ||
200 | /** | ||
201 | * block_bio_complete - completed all work on the block operation | ||
202 | * @q: queue holding the block operation | ||
203 | * @bio: block operation completed | ||
204 | * | ||
205 | * This tracepoint indicates there is no further work to do on this | ||
206 | * block IO operation @bio. | ||
207 | */ | ||
204 | TRACE_EVENT(block_bio_complete, | 208 | TRACE_EVENT(block_bio_complete, |
205 | 209 | ||
206 | TP_PROTO(struct request_queue *q, struct bio *bio), | 210 | TP_PROTO(struct request_queue *q, struct bio *bio), |
@@ -228,7 +232,7 @@ TRACE_EVENT(block_bio_complete, | |||
228 | __entry->nr_sector, __entry->error) | 232 | __entry->nr_sector, __entry->error) |
229 | ); | 233 | ); |
230 | 234 | ||
231 | TRACE_EVENT(block_bio_backmerge, | 235 | DECLARE_EVENT_CLASS(block_bio, |
232 | 236 | ||
233 | TP_PROTO(struct request_queue *q, struct bio *bio), | 237 | TP_PROTO(struct request_queue *q, struct bio *bio), |
234 | 238 | ||
@@ -256,63 +260,51 @@ TRACE_EVENT(block_bio_backmerge, | |||
256 | __entry->nr_sector, __entry->comm) | 260 | __entry->nr_sector, __entry->comm) |
257 | ); | 261 | ); |
258 | 262 | ||
259 | TRACE_EVENT(block_bio_frontmerge, | 263 | /** |
264 | * block_bio_backmerge - merging block operation to the end of an existing operation | ||
265 | * @q: queue holding operation | ||
266 | * @bio: new block operation to merge | ||
267 | * | ||
268 | * Merging block request @bio to the end of an existing block request | ||
269 | * in queue @q. | ||
270 | */ | ||
271 | DEFINE_EVENT(block_bio, block_bio_backmerge, | ||
260 | 272 | ||
261 | TP_PROTO(struct request_queue *q, struct bio *bio), | 273 | TP_PROTO(struct request_queue *q, struct bio *bio), |
262 | 274 | ||
263 | TP_ARGS(q, bio), | 275 | TP_ARGS(q, bio) |
264 | |||
265 | TP_STRUCT__entry( | ||
266 | __field( dev_t, dev ) | ||
267 | __field( sector_t, sector ) | ||
268 | __field( unsigned, nr_sector ) | ||
269 | __array( char, rwbs, 6 ) | ||
270 | __array( char, comm, TASK_COMM_LEN ) | ||
271 | ), | ||
272 | |||
273 | TP_fast_assign( | ||
274 | __entry->dev = bio->bi_bdev->bd_dev; | ||
275 | __entry->sector = bio->bi_sector; | ||
276 | __entry->nr_sector = bio->bi_size >> 9; | ||
277 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
278 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
279 | ), | ||
280 | |||
281 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
282 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
283 | (unsigned long long)__entry->sector, | ||
284 | __entry->nr_sector, __entry->comm) | ||
285 | ); | 276 | ); |
286 | 277 | ||
287 | TRACE_EVENT(block_bio_queue, | 278 | /** |
279 | * block_bio_frontmerge - merging block operation to the beginning of an existing operation | ||
280 | * @q: queue holding operation | ||
281 | * @bio: new block operation to merge | ||
282 | * | ||
283 | * Merging block IO operation @bio to the beginning of an existing block | ||
284 | * operation in queue @q. | ||
285 | */ | ||
286 | DEFINE_EVENT(block_bio, block_bio_frontmerge, | ||
288 | 287 | ||
289 | TP_PROTO(struct request_queue *q, struct bio *bio), | 288 | TP_PROTO(struct request_queue *q, struct bio *bio), |
290 | 289 | ||
291 | TP_ARGS(q, bio), | 290 | TP_ARGS(q, bio) |
291 | ); | ||
292 | 292 | ||
293 | TP_STRUCT__entry( | 293 | /** |
294 | __field( dev_t, dev ) | 294 | * block_bio_queue - putting new block IO operation in queue |
295 | __field( sector_t, sector ) | 295 | * @q: queue holding operation |
296 | __field( unsigned int, nr_sector ) | 296 | * @bio: new block operation |
297 | __array( char, rwbs, 6 ) | 297 | * |
298 | __array( char, comm, TASK_COMM_LEN ) | 298 | * About to place the block IO operation @bio into queue @q. |
299 | ), | 299 | */ |
300 | DEFINE_EVENT(block_bio, block_bio_queue, | ||
300 | 301 | ||
301 | TP_fast_assign( | 302 | TP_PROTO(struct request_queue *q, struct bio *bio), |
302 | __entry->dev = bio->bi_bdev->bd_dev; | ||
303 | __entry->sector = bio->bi_sector; | ||
304 | __entry->nr_sector = bio->bi_size >> 9; | ||
305 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
306 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
307 | ), | ||
308 | 303 | ||
309 | TP_printk("%d,%d %s %llu + %u [%s]", | 304 | TP_ARGS(q, bio) |
310 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
311 | (unsigned long long)__entry->sector, | ||
312 | __entry->nr_sector, __entry->comm) | ||
313 | ); | 305 | ); |
314 | 306 | ||
315 | TRACE_EVENT(block_getrq, | 307 | DECLARE_EVENT_CLASS(block_get_rq, |
316 | 308 | ||
317 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 309 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
318 | 310 | ||
@@ -341,35 +333,48 @@ TRACE_EVENT(block_getrq, | |||
341 | __entry->nr_sector, __entry->comm) | 333 | __entry->nr_sector, __entry->comm) |
342 | ); | 334 | ); |
343 | 335 | ||
344 | TRACE_EVENT(block_sleeprq, | 336 | /** |
337 | * block_getrq - get a free request entry in queue for block IO operations | ||
338 | * @q: queue for operations | ||
339 | * @bio: pending block IO operation | ||
340 | * @rw: low bit indicates a read (%0) or a write (%1) | ||
341 | * | ||
342 | * A request struct for queue @q has been allocated to handle the | ||
343 | * block IO operation @bio. | ||
344 | */ | ||
345 | DEFINE_EVENT(block_get_rq, block_getrq, | ||
345 | 346 | ||
346 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | 347 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
347 | 348 | ||
348 | TP_ARGS(q, bio, rw), | 349 | TP_ARGS(q, bio, rw) |
350 | ); | ||
349 | 351 | ||
350 | TP_STRUCT__entry( | 352 | /** |
351 | __field( dev_t, dev ) | 353 | * block_sleeprq - waiting to get a free request entry in queue for block IO operation |
352 | __field( sector_t, sector ) | 354 | * @q: queue for operation |
353 | __field( unsigned int, nr_sector ) | 355 | * @bio: pending block IO operation |
354 | __array( char, rwbs, 6 ) | 356 | * @rw: low bit indicates a read (%0) or a write (%1) |
355 | __array( char, comm, TASK_COMM_LEN ) | 357 | * |
356 | ), | 358 | * In the case where a request struct cannot be provided for queue @q |
359 | * the process needs to wait for a request struct to become | ||
360 | * available. This tracepoint event is generated each time the | ||
361 | * process goes to sleep waiting for a request struct to become available. | ||
362 | */ | ||
363 | DEFINE_EVENT(block_get_rq, block_sleeprq, | ||
357 | 364 | ||
358 | TP_fast_assign( | 365 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), |
359 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
360 | __entry->sector = bio ? bio->bi_sector : 0; | ||
361 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
362 | blk_fill_rwbs(__entry->rwbs, | ||
363 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
364 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
365 | ), | ||
366 | 366 | ||
367 | TP_printk("%d,%d %s %llu + %u [%s]", | 367 | TP_ARGS(q, bio, rw) |
368 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
369 | (unsigned long long)__entry->sector, | ||
370 | __entry->nr_sector, __entry->comm) | ||
371 | ); | 368 | ); |
372 | 369 | ||
370 | /** | ||
371 | * block_plug - keep operations requests in request queue | ||
372 | * @q: request queue to plug | ||
373 | * | ||
374 | * Plug the request queue @q. Do not allow block operation requests | ||
375 | * to be sent to the device driver. Instead, accumulate requests in | ||
376 | * the queue to improve throughput performance of the block device. | ||
377 | */ | ||
373 | TRACE_EVENT(block_plug, | 378 | TRACE_EVENT(block_plug, |
374 | 379 | ||
375 | TP_PROTO(struct request_queue *q), | 380 | TP_PROTO(struct request_queue *q), |
@@ -387,7 +392,7 @@ TRACE_EVENT(block_plug, | |||
387 | TP_printk("[%s]", __entry->comm) | 392 | TP_printk("[%s]", __entry->comm) |
388 | ); | 393 | ); |
389 | 394 | ||
390 | TRACE_EVENT(block_unplug_timer, | 395 | DECLARE_EVENT_CLASS(block_unplug, |
391 | 396 | ||
392 | TP_PROTO(struct request_queue *q), | 397 | TP_PROTO(struct request_queue *q), |
393 | 398 | ||
@@ -406,25 +411,45 @@ TRACE_EVENT(block_unplug_timer, | |||
406 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | 411 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) |
407 | ); | 412 | ); |
408 | 413 | ||
409 | TRACE_EVENT(block_unplug_io, | 414 | /** |
415 | * block_unplug_timer - timed release of operations requests in queue to device driver | ||
416 | * @q: request queue to unplug | ||
417 | * | ||
418 | * Unplug the request queue @q because a timer expired and allow block | ||
419 | * operation requests to be sent to the device driver. | ||
420 | */ | ||
421 | DEFINE_EVENT(block_unplug, block_unplug_timer, | ||
410 | 422 | ||
411 | TP_PROTO(struct request_queue *q), | 423 | TP_PROTO(struct request_queue *q), |
412 | 424 | ||
413 | TP_ARGS(q), | 425 | TP_ARGS(q) |
426 | ); | ||
414 | 427 | ||
415 | TP_STRUCT__entry( | 428 | /** |
416 | __field( int, nr_rq ) | 429 | * block_unplug_io - release of operations requests in request queue |
417 | __array( char, comm, TASK_COMM_LEN ) | 430 | * @q: request queue to unplug |
418 | ), | 431 | * |
432 | * Unplug request queue @q because device driver is scheduled to work | ||
433 | * on elements in the request queue. | ||
434 | */ | ||
435 | DEFINE_EVENT(block_unplug, block_unplug_io, | ||
419 | 436 | ||
420 | TP_fast_assign( | 437 | TP_PROTO(struct request_queue *q), |
421 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
422 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
423 | ), | ||
424 | 438 | ||
425 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | 439 | TP_ARGS(q) |
426 | ); | 440 | ); |
427 | 441 | ||
442 | /** | ||
443 | * block_split - split a single bio struct into two bio structs | ||
444 | * @q: queue containing the bio | ||
445 | * @bio: block operation being split | ||
446 | * @new_sector: The starting sector for the new bio | ||
447 | * | ||
448 | * The bio request @bio in request queue @q needs to be split into two | ||
449 | * bio requests. The newly created @bio request starts at | ||
450 | * @new_sector. This split may be required due to hardware limitation | ||
451 | * such as operation crossing device boundaries in a RAID system. | ||
452 | */ | ||
428 | TRACE_EVENT(block_split, | 453 | TRACE_EVENT(block_split, |
429 | 454 | ||
430 | TP_PROTO(struct request_queue *q, struct bio *bio, | 455 | TP_PROTO(struct request_queue *q, struct bio *bio, |
@@ -455,6 +480,16 @@ TRACE_EVENT(block_split, | |||
455 | __entry->comm) | 480 | __entry->comm) |
456 | ); | 481 | ); |
457 | 482 | ||
483 | /** | ||
484 | * block_remap - map request for a partition to the raw device | ||
485 | * @q: queue holding the operation | ||
486 | * @bio: revised operation | ||
487 | * @dev: device for the operation | ||
488 | * @from: original sector for the operation | ||
489 | * | ||
490 | * An operation for a partition on a block device has been mapped to the | ||
491 | * raw block device. | ||
492 | */ | ||
458 | TRACE_EVENT(block_remap, | 493 | TRACE_EVENT(block_remap, |
459 | 494 | ||
460 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | 495 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, |
@@ -488,6 +523,17 @@ TRACE_EVENT(block_remap, | |||
488 | (unsigned long long)__entry->old_sector) | 523 | (unsigned long long)__entry->old_sector) |
489 | ); | 524 | ); |
490 | 525 | ||
526 | /** | ||
527 | * block_rq_remap - map request for a block operation request | ||
528 | * @q: queue holding the operation | ||
529 | * @rq: block IO operation request | ||
530 | * @dev: device for the operation | ||
531 | * @from: original sector for the operation | ||
532 | * | ||
533 | * The block operation request @rq in @q has been remapped. The block | ||
534 | * operation request @rq holds the current information and @from holds | ||
535 | * the original sector. | ||
536 | */ | ||
491 | TRACE_EVENT(block_rq_remap, | 537 | TRACE_EVENT(block_rq_remap, |
492 | 538 | ||
493 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, | 539 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, |