author     Christoph Hellwig <hch@lst.de>    2014-05-06 06:12:45 -0400
committer  Jens Axboe <axboe@fb.com>         2014-05-09 10:43:49 -0400
commit     af76e555e5e29e08eb8ac1f7878e23dbf0d6741f (patch)
tree       f583d3360eebfb37413f5e8e350c146bac591ada /block/blk-mq.c
parent     9fccfed8f0cad9b79575a87c45d6f5f6ee05bb66 (diff)
blk-mq: initialize struct request fields individually
This allows us to avoid a non-atomic memset over ->atomic_flags, as well as
to kill lots of duplicate initializations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
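
To illustrate the first point: ->atomic_flags is manipulated with atomic bit
operations (e.g. by the request timeout code), and the blanket memset() done
by blk_rq_init() wrote over that word with a plain store. The following is a
minimal user-space sketch of the two schemes, not kernel code: the struct
layout and the set_bit/clear_bit stand-ins (built on GCC's __atomic builtins)
are invented for illustration.

/*
 * Minimal user-space sketch, NOT kernel code: the field names echo
 * struct request, but the layout and helpers are invented stand-ins.
 */
#include <stdio.h>
#include <string.h>

#define REQ_ATOM_STARTED	1	/* illustrative bit number */

struct request_sketch {
	unsigned long atomic_flags;	/* only touched via atomic bit ops */
	int tag;			/* ordinary field */
	void *bio;			/* ordinary field */
};

/* Stand-ins for the kernel's set_bit()/clear_bit()/test_bit(). */
static void set_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int test_bit_atomic(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

/*
 * Old scheme: a blanket memset() zeroes atomic_flags with a plain,
 * non-atomic store, so a concurrent set_bit() on another CPU can be
 * lost (or can clobber the zeroing), depending on timing.
 */
static void init_old(struct request_sketch *rq)
{
	memset(rq, 0, sizeof(*rq));	/* racy plain store over atomic_flags */
}

/*
 * New scheme: initialize the ordinary fields one by one and leave
 * atomic_flags alone; stale bits are dropped on the free path with
 * an atomic clear_bit() instead.
 */
static void init_new(struct request_sketch *rq)
{
	rq->tag = -1;
	rq->bio = NULL;
	/* do not touch atomic_flags: it needs atomic ops */
}

int main(void)
{
	struct request_sketch rq = { 0 };

	init_old(&rq);
	init_new(&rq);
	set_bit_atomic(REQ_ATOM_STARTED, &rq.atomic_flags);
	printf("started=%d\n",
	       test_bit_atomic(REQ_ATOM_STARTED, &rq.atomic_flags));
	clear_bit_atomic(REQ_ATOM_STARTED, &rq.atomic_flags);
	return 0;
}

With the new scheme the alloc path never stores to atomic_flags at all,
which is why the free path in the last hunk below grows an explicit
clear_bit(REQ_ATOM_STARTED, ...).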
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  47
1 file changed, 45 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3fdb097ebe5e..492f49f96459 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -82,9 +82,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
 	tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
 	if (tag != BLK_MQ_TAG_FAIL) {
 		rq = hctx->tags->rqs[tag];
-		blk_rq_init(hctx->queue, rq);
 		rq->tag = tag;
-
 		return rq;
 	}
 
@@ -187,10 +185,54 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 
+	INIT_LIST_HEAD(&rq->queuelist);
+	/* csd/requeue_work/fifo_time is initialized before use */
+	rq->q = q;
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
+	rq->cmd_type = 0;
+	/* do not touch atomic flags, it needs atomic ops against the timer */
+	rq->cpu = -1;
+	rq->__data_len = 0;
+	rq->__sector = (sector_t) -1;
+	rq->bio = NULL;
+	rq->biotail = NULL;
+	INIT_HLIST_NODE(&rq->hash);
+	RB_CLEAR_NODE(&rq->rb_node);
+	memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
+	rq->rq_disk = NULL;
+	rq->part = NULL;
 	rq->start_time = jiffies;
+#ifdef CONFIG_BLK_CGROUP
+	rq->rl = NULL;
 	set_start_time_ns(rq);
+	rq->io_start_time_ns = 0;
+#endif
+	rq->nr_phys_segments = 0;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	rq->nr_integrity_segments = 0;
+#endif
+	rq->ioprio = 0;
+	rq->special = NULL;
+	/* tag was already set */
+	rq->errors = 0;
+	memset(rq->__cmd, 0, sizeof(rq->__cmd));
+	rq->cmd = rq->__cmd;
+	rq->cmd_len = BLK_MAX_CDB;
+
+	rq->extra_len = 0;
+	rq->sense_len = 0;
+	rq->resid_len = 0;
+	rq->sense = NULL;
+
+	rq->deadline = 0;
+	INIT_LIST_HEAD(&rq->timeout_list);
+	rq->timeout = 0;
+	rq->retries = 0;
+	rq->end_io = NULL;
+	rq->end_io_data = NULL;
+	rq->next_rq = NULL;
+
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
@@ -258,6 +300,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
+	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx->tags, tag);
 	blk_mq_queue_exit(q);
 }
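
A side note on one line in the middle hunk: memset(&rq->flush, 0,
max(sizeof(rq->flush), sizeof(rq->elv))) relies on flush and elv sharing
storage (they sit in a union inside struct request), so zeroing from the
union's start for the size of the larger member clears both views at once.
A small user-space sketch of that idiom follows; the union members and the
simplified max() are invented, only the idiom mirrors the kernel's.

/* User-space sketch of the max-of-sizes memset idiom; the union
 * members below are invented, only the idiom mirrors the kernel's. */
#include <assert.h>
#include <string.h>

#define max(a, b) ((a) > (b) ? (a) : (b))	/* simplified kernel max() */

struct overlay {
	union {				/* two views of the same storage */
		struct { int icq; void *priv[2]; } elv;
		struct { unsigned int seq; void *list[2]; } flush;
	};
};

int main(void)
{
	struct overlay o;

	o.elv.priv[0] = (void *)&o;	/* dirty one view of the union... */
	/* ...then zero for the larger of the two sizes: both views clear. */
	memset(&o.flush, 0, max(sizeof(o.flush), sizeof(o.elv)));
	assert(o.elv.priv[0] == NULL);	/* all-zero bytes, null on common ABIs */
	return 0;
}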