author:    Jens Axboe <axboe@fb.com>  2014-05-29 13:00:11 -0400
committer: Jens Axboe <axboe@fb.com>  2014-05-29 13:00:11 -0400
commit:    4b570521be54666e6ad7e5f47af92fd609fbd8b5 (patch)
tree:      fca8d28ad96f8857ea2f2c88b9301a38eeaf016b /block
parent:    05f1dd5315217398fc8d122bdee80f96a9f21274 (diff)
blk-mq: request initialization optimizations
We currently clear a lot more than we need to, so make that a bit
more clever. Make some of the init dependent on features, like
only setting start_time if we are going to use it.
Signed-off-by: Jens Axboe <axboe@fb.com>
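The second point is visible in the last hunk below: rq->start_time moves out of the per-request init path and behind blk_do_io_stat(). As a minimal userspace sketch of that pattern, with hypothetical names (struct req, account_start, io_stats_enabled), it might look like:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical request object: start_time is read only by the
 * accounting path, so the common init path never touches it. */
struct req {
	clock_t start_time;
};

static void account_start(const struct req *rq)
{
	printf("I/O started at tick %ld\n", (long)rq->start_time);
}

/* Pay for the timestamp only when the feature that reads it is on,
 * mirroring the blk_do_io_stat() gate in the patch. */
static void start_req(struct req *rq, bool io_stats_enabled)
{
	if (io_stats_enabled) {
		rq->start_time = clock();
		account_start(rq);
	}
}

int main(void)
{
	struct req rq;
	start_req(&rq, true);
	return 0;
}
```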
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c | 26
1 file changed, 9 insertions, 17 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f98d977fd150..6160128085fc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,19 +199,12 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->q = q;
 	rq->mq_ctx = ctx;
 	rq->cmd_flags |= rw_flags;
-	rq->cmd_type = 0;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
-	rq->__data_len = 0;
-	rq->__sector = (sector_t) -1;
-	rq->bio = NULL;
-	rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
 	rq->rq_disk = NULL;
 	rq->part = NULL;
-	rq->start_time = jiffies;
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
@@ -221,23 +214,16 @@
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
 #endif
-	rq->ioprio = 0;
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
-	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
-	rq->cmd_len = BLK_MAX_CDB;
 
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
 	rq->sense = NULL;
 
-	rq->deadline = 0;
 	INIT_LIST_HEAD(&rq->timeout_list);
-	rq->timeout = 0;
-	rq->retries = 0;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
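The clears dropped in the two hunks above are safe only if every consumer rewrites those fields before reading them; the bio-based submission path, for instance, presumably re-sets rq->bio, rq->biotail and the data length when the bio is attached. A rough sketch of that invariant for a recycled object (all names hypothetical):

```c
#include <stddef.h>

/* Hypothetical recycled object: buf and len are rewritten by
 * obj_prep() on every reuse, so obj_alloc() can skip clearing them,
 * just as the patch skips clearing fields the submit path always sets. */
struct obj {
	void  *buf;
	size_t len;
};

static struct obj pool[16];
static unsigned next;

static struct obj *obj_alloc(void)
{
	/* no memset: the single consumer below writes every data field */
	return &pool[next++ % 16];
}

static void obj_prep(struct obj *o, void *buf, size_t len)
{
	o->buf = buf;	/* every field written before first read */
	o->len = len;
}

int main(void)
{
	char data[64];
	struct obj *o = obj_alloc();

	obj_prep(o, data, sizeof(data));
	return o->len == sizeof(data) ? 0 : 1;
}
```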
@@ -449,8 +435,10 @@ static void blk_mq_start_request(struct request *rq, bool last)
 	 * complete. So be sure to clear complete again when we start
 	 * the request, otherwise we'll ignore the completion event.
 	 */
-	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
-	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 
 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
 		/*
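set_bit() and clear_bit() are atomic read-modify-writes, so doing them unconditionally dirties the flags cacheline even when the bit already holds the desired value; testing first turns the common case into a plain read. A rough userspace illustration with C11 atomics (not the kernel's bitops; flag names illustrative):

```c
#include <stdatomic.h>

#define REQ_STARTED	(1u << 0)
#define REQ_COMPLETE	(1u << 1)

/* C11 stand-in for rq->atomic_flags (illustrative only). */
static void mark_started(atomic_uint *flags)
{
	/* Cheap relaxed load first; only fall back to the atomic RMW
	 * (a locked instruction on x86) when the bit must change. */
	if (!(atomic_load_explicit(flags, memory_order_relaxed) & REQ_STARTED))
		atomic_fetch_or(flags, REQ_STARTED);

	if (atomic_load_explicit(flags, memory_order_relaxed) & REQ_COMPLETE)
		atomic_fetch_and(flags, ~REQ_COMPLETE);
}

int main(void)
{
	atomic_uint flags = REQ_COMPLETE;	/* left over from a prior use */

	mark_started(&flags);
	return ((flags & REQ_STARTED) && !(flags & REQ_COMPLETE)) ? 0 : 1;
}
```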
@@ -1112,7 +1100,11 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
-	blk_account_io_start(rq, 1);
+
+	if (blk_do_io_stat(rq)) {
+		rq->start_time = jiffies;
+		blk_account_io_start(rq, 1);
+	}
 }
 
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
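Note how rq->start_time resurfaces here rather than in blk_mq_rq_ctx_init(): when blk_do_io_stat() is false the field is presumably never consumed, since its readers sit behind the same accounting checks, so both the jiffies read and blk_account_io_start() are skipped on every allocation while the stats-enabled behavior stays unchanged.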