author:    Ingo Molnar <mingo@elte.hu>  2010-04-21 03:47:00 -0400
committer: Ingo Molnar <mingo@elte.hu>  2010-04-21 03:47:05 -0400
commit:    ac0053fd51d2bac09a7d4b4a59f6dac863bd4373
tree:      00c32e14428853f352fa3828d0131653ad6a7c69 /include/trace/events
parent:    b15c7b1cee119999e9eafcd602d24a595e77adac
parent:    01bf0b64579ead8a82e7cfc32ae44bc667e7ad0f
Merge commit 'v2.6.34-rc5' into tracing/core
Merge reason: pick up latest -rc's.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/trace/events')
-rw-r--r--  include/trace/events/block.h  164
1 file changed, 164 insertions, 0 deletions
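The 164 inserted lines are kernel-doc comments describing the block-layer tracepoints defined in this header; the full diff follows below. As background for how these events are normally consumed, here is a minimal userspace sketch — an illustration only, not part of the commit — that enables the block event subsystem through the tracing debugfs files and streams the resulting records. It assumes debugfs is mounted at /sys/kernel/debug, which may differ on your system.

```c
/*
 * Illustrative only -- not part of this commit.  Enable every event
 * under events/block/ and stream the trace output.  Assumes debugfs
 * is mounted at /sys/kernel/debug; run as root.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACING "/sys/kernel/debug/tracing"

static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	/* Turn on the whole block subsystem (block_rq_issue, block_bio_queue, ...). */
	write_file(TRACING "/events/block/enable", "1");

	/* trace_pipe blocks until events arrive, then streams them. */
	pipe = fopen(TRACING "/trace_pipe", "r");
	if (!pipe) {
		perror(TRACING "/trace_pipe");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);

	fclose(pipe);
	return 0;
}
```

The same effect can be had by writing to these files from a shell; the point is only that every tracepoint documented in the patch below appears under events/block/ once enabled.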
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 5fb72733331e..d870a918559c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
 		  __entry->nr_sector, __entry->errors)
 );
 
+/**
+ * block_rq_abort - abort block operation request
+ * @q: queue containing the block operation request
+ * @rq: block IO operation request
+ *
+ * Called immediately after pending block IO operation request @rq in
+ * queue @q is aborted. The fields in the operation request @rq
+ * can be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q. For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_complete,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_rq_insert - insert block operation request into queue
+ * @q: target queue
+ * @rq: block IO operation request
+ *
+ * Called immediately before block operation request @rq is inserted
+ * into queue @q. The fields in the operation request @rq struct can
+ * be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq, block_rq_insert,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_issue - issue pending block IO request operation to device driver
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq from queue @q is sent to a
+ * device driver for processing.
+ */
 DEFINE_EVENT(block_rq, block_rq_issue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @q: queue holding the block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device. Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
 TRACE_EVENT(block_bio_bounce,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
+ * @bio: block operation completed
+ *
+ * This tracepoint indicates there is no further work to do on this
+ * block IO operation @bio.
+ */
 TRACE_EVENT(block_bio_complete,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_backmerge - merging block operation to the end of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block request @bio to the end of an existing block request
+ * in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_backmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block IO operation @bio to the beginning of an existing block
+ * operation in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_frontmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_queue - putting new block IO operation in queue
+ * @q: queue holding operation
+ * @bio: new block operation
+ *
+ * About to place the block IO operation @bio into queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_queue,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_getrq - get a free request entry in queue for block IO operations
+ * @q: queue for operations
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * A request struct for queue @q has been allocated to handle the
+ * block IO operation @bio.
+ */
 DEFINE_EVENT(block_get_rq, block_getrq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
+ * @q: queue for operation
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * In the case where a request struct cannot be provided for queue @q
+ * the process needs to wait for a request struct to become
+ * available. This tracepoint event is generated each time the
+ * process goes to sleep waiting for a request struct to become available.
+ */
 DEFINE_EVENT(block_get_rq, block_sleeprq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_plug - keep operations requests in request queue
+ * @q: request queue to plug
+ *
+ * Plug the request queue @q. Do not allow block operation requests
+ * to be sent to the device driver. Instead, accumulate requests in
+ * the queue to improve throughput performance of the block device.
+ */
 TRACE_EVENT(block_plug,
 
 	TP_PROTO(struct request_queue *q),
@@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug,
 	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
 );
 
+/**
+ * block_unplug_timer - timed release of operations requests in queue to device driver
+ * @q: request queue to unplug
+ *
+ * Unplug the request queue @q because a timer expired and allow block
+ * operation requests to be sent to the device driver.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_timer,
 
 	TP_PROTO(struct request_queue *q),
@@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer,
 	TP_ARGS(q)
 );
 
+/**
+ * block_unplug_io - release of operations requests in request queue
+ * @q: request queue to unplug
+ *
+ * Unplug request queue @q because device driver is scheduled to work
+ * on elements in the request queue.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
 	TP_PROTO(struct request_queue *q),
@@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io,
 	TP_ARGS(q)
 );
 
+/**
+ * block_split - split a single bio struct into two bio structs
+ * @q: queue containing the bio
+ * @bio: block operation being split
+ * @new_sector: the starting sector for the new bio
+ *
+ * The bio request @bio in request queue @q needs to be split into two
+ * bio requests. The newly created @bio request starts at
+ * @new_sector. This split may be required due to hardware limitations
+ * such as an operation crossing device boundaries in a RAID system.
+ */
 TRACE_EVENT(block_split,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio,
@@ -337,6 +480,16 @@ TRACE_EVENT(block_split,
 		  __entry->comm)
 );
 
+/**
+ * block_remap - map request for a partition to the raw device
+ * @q: queue holding the operation
+ * @bio: revised operation
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * An operation for a partition on a block device has been mapped to the
+ * raw block device.
+ */
 TRACE_EVENT(block_remap,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
@@ -370,6 +523,17 @@ TRACE_EVENT(block_remap,
 		  (unsigned long long)__entry->old_sector)
 );
 
+/**
+ * block_rq_remap - map request for a block operation request
+ * @q: queue holding the operation
+ * @rq: block IO operation request
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * The block operation request @rq in @q has been remapped. The block
+ * operation request @rq holds the current information and @from holds
+ * the original sector.
+ */
 TRACE_EVENT(block_rq_remap,
 
 	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
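For kernel code that wants to consume these tracepoints directly rather than through the trace buffer, each TRACE_EVENT/DEFINE_EVENT above also generates register/unregister helpers. The sketch below hooks block_rq_issue from a module, much as blktrace does; it assumes the 2.6.34-era probe signature, which matches TP_PROTO exactly (later kernels insert a leading void *data argument), so treat it as an illustrative sketch rather than a drop-in module.

```c
/*
 * Illustrative sketch: attach a probe to the block_rq_issue tracepoint
 * documented in the patch above.  Assumes a 2.6.34-era kernel where the
 * probe signature matches TP_PROTO directly.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Called every time a request is handed to the device driver. */
static void probe_rq_issue(struct request_queue *q, struct request *rq)
{
	pr_info("block_rq_issue: dev %u:%u sector %llu\n",
		rq->rq_disk ? MAJOR(disk_devt(rq->rq_disk)) : 0,
		rq->rq_disk ? MINOR(disk_devt(rq->rq_disk)) : 0,
		(unsigned long long)blk_rq_pos(rq));
}

static int __init rq_issue_init(void)
{
	return register_trace_block_rq_issue(probe_rq_issue);
}

static void __exit rq_issue_exit(void)
{
	unregister_trace_block_rq_issue(probe_rq_issue);
	/* Make sure no probe is still running before the module goes away. */
	tracepoint_synchronize_unregister();
}

module_init(rq_issue_init);
module_exit(rq_issue_exit);
MODULE_LICENSE("GPL");
```

blktrace registers probes on these same tracepoints in-kernel, while the debugfs event files expose them to userspace, which is why the kernel-doc comments added by this patch serve both kinds of consumers.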