diff options
author | Ming Lei <ming.lei@canonical.com> | 2014-09-25 11:23:42 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-09-25 17:22:38 -0400 |
commit | 7ddab5de5b80d3111f9e6765714e728b2c4f1c07 (patch) | |
tree | 35c64aea8aa76af4a375102458f39417473b545a /block | |
parent | 3c09676c12b1dabf84acbb5849bfc54acadaf092 (diff) |
block: avoid to use q->flush_rq directly
This patch tries to use a local variable to access the flush request,
so that we can convert to per-queue flush machinery a bit easier.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-flush.c | 17 |
1 file changed, 9 insertions, 8 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c index a49ffbdcfcdc..caf44756d329 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -223,7 +223,7 @@ static void flush_end_io(struct request *flush_rq, int error) | |||
223 | 223 | ||
224 | if (q->mq_ops) { | 224 | if (q->mq_ops) { |
225 | spin_lock_irqsave(&q->mq_flush_lock, flags); | 225 | spin_lock_irqsave(&q->mq_flush_lock, flags); |
226 | q->flush_rq->tag = -1; | 226 | flush_rq->tag = -1; |
227 | } | 227 | } |
228 | 228 | ||
229 | running = &q->flush_queue[q->flush_running_idx]; | 229 | running = &q->flush_queue[q->flush_running_idx]; |
@@ -281,6 +281,7 @@ static bool blk_kick_flush(struct request_queue *q) | |||
281 | struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; | 281 | struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; |
282 | struct request *first_rq = | 282 | struct request *first_rq = |
283 | list_first_entry(pending, struct request, flush.list); | 283 | list_first_entry(pending, struct request, flush.list); |
284 | struct request *flush_rq = q->flush_rq; | ||
284 | 285 | ||
285 | /* C1 described at the top of this file */ | 286 | /* C1 described at the top of this file */ |
286 | if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending)) | 287 | if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending)) |
@@ -298,16 +299,16 @@ static bool blk_kick_flush(struct request_queue *q) | |||
298 | */ | 299 | */ |
299 | q->flush_pending_idx ^= 1; | 300 | q->flush_pending_idx ^= 1; |
300 | 301 | ||
301 | blk_rq_init(q, q->flush_rq); | 302 | blk_rq_init(q, flush_rq); |
302 | if (q->mq_ops) | 303 | if (q->mq_ops) |
303 | blk_mq_clone_flush_request(q->flush_rq, first_rq); | 304 | blk_mq_clone_flush_request(flush_rq, first_rq); |
304 | 305 | ||
305 | q->flush_rq->cmd_type = REQ_TYPE_FS; | 306 | flush_rq->cmd_type = REQ_TYPE_FS; |
306 | q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; | 307 | flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; |
307 | q->flush_rq->rq_disk = first_rq->rq_disk; | 308 | flush_rq->rq_disk = first_rq->rq_disk; |
308 | q->flush_rq->end_io = flush_end_io; | 309 | flush_rq->end_io = flush_end_io; |
309 | 310 | ||
310 | return blk_flush_queue_rq(q->flush_rq, false); | 311 | return blk_flush_queue_rq(flush_rq, false); |
311 | } | 312 | } |
312 | 313 | ||
313 | static void flush_data_end_io(struct request *rq, int error) | 314 | static void flush_data_end_io(struct request *rq, int error) |