author		Christoph Hellwig <hch@lst.de>	2017-01-31 10:57:31 -0500
committer	Jens Axboe <axboe@fb.com>	2017-01-31 16:00:44 -0500
commit		aebf526b53aea164508730427597d45f3e06b376 (patch)
tree		98ab726d0f7feb610feee9830246c900c6919eea /block/blk-map.c
parent		2f5a8e80f79dc82e00f4cca557dc9ceaf064b450 (diff)
block: fold cmd_type into the REQ_OP_ space
Instead of keeping two levels of indirection for request types, fold it
all into the operations. The little caveat here is that previously
cmd_type only applied to struct request, while the request and bio op
fields were set to plain REQ_OP_READ/WRITE even for passthrough
operations.

Instead this patch adds new REQ_OP_* for SCSI passthrough and driver
private requests, although it has to add two for each so that we
can communicate the data in/out nature of the request.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
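
For orientation before the diff: blk-map.c only consumes the new scheme. The op codes themselves are added elsewhere in the patch, in include/linux/blk_types.h, with one IN and one OUT code per passthrough category, so the op value encodes the data direction. The following abbreviated sketch shows those additions together with the helpers the patch introduces in blkdev.h; the struct request and req_op() stand-ins are simplified here so the snippet compiles outside the kernel:

#include <stdbool.h>

/* Stand-ins so this sketch compiles on its own; in the kernel,
 * struct request, REQ_OP_MASK and req_op() come from linux/blkdev.h
 * and linux/blk_types.h. */
struct request { unsigned int cmd_flags; };
#define REQ_OP_MASK     0xff
#define req_op(rq)      ((rq)->cmd_flags & REQ_OP_MASK)

/* New passthrough op codes, paired IN/OUT so the op itself carries
 * the data direction (values as merged upstream in this commit). */
enum req_opf {
        REQ_OP_READ             = 0,
        REQ_OP_WRITE            = 1,
        /* ... regular filesystem ops elided ... */

        /* SCSI passthrough using struct scsi_request */
        REQ_OP_SCSI_IN          = 32,
        REQ_OP_SCSI_OUT         = 33,
        /* driver private requests */
        REQ_OP_DRV_IN           = 34,
        REQ_OP_DRV_OUT          = 35,
};

/* With cmd_type gone, the old rq->cmd_type != REQ_TYPE_FS test
 * becomes a plain check on the op value. */
static inline bool blk_rq_is_scsi(struct request *rq)
{
        return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
}

static inline bool blk_rq_is_private(struct request *rq)
{
        return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}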
Diffstat (limited to 'block/blk-map.c')
-rw-r--r--	block/blk-map.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 0acb6640ead7..2f18c2a0be1b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,8 +16,6 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
         if (!rq->bio) {
-                rq->cmd_flags &= REQ_OP_MASK;
-                rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
                 blk_rq_bio_prep(rq->q, rq, bio);
         } else {
                 if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -62,6 +60,9 @@ static int __blk_rq_map_user_iov(struct request *rq,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
+        bio->bi_opf &= ~REQ_OP_MASK;
+        bio->bi_opf |= req_op(rq);
+
         if (map_data && map_data->null_mapped)
                 bio_set_flag(bio, BIO_NULL_MAPPED);
 
@@ -90,7 +91,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
 }
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:          request queue where request should be inserted
  * @rq:         request to map data to
  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
@@ -199,7 +200,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for passthrough requests
  * @q:          request queue where request should be inserted
  * @rq:          request to fill
  * @kbuf:       the kernel buffer
@@ -234,8 +235,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
-        if (!reading)
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+        bio->bi_opf &= ~REQ_OP_MASK;
+        bio->bi_opf |= req_op(rq);
 
         if (do_copy)
                 rq->rq_flags |= RQF_COPY_USER;
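
Both bi_opf hunks above use the same two-line idiom: clear the op bits of the bio's bi_opf (the low bits selected by REQ_OP_MASK) and copy in the request's op, leaving the bio's remaining flag bits untouched. That is also why blk_rq_map_kern() can drop the old direction guess, if (!reading) bio_set_op_attrs(bio, REQ_OP_WRITE, 0): the request's op now encodes both passthrough-ness and data direction, so it is simply propagated to the bio. Below is a standalone toy version of the idiom in userspace C; the types are simplified stand-ins and bio_copy_op() is a hypothetical name, since the kernel open-codes these two lines:

#include <stdio.h>

/* Simplified stand-ins for definitions from include/linux/blk_types.h;
 * in this era of the kernel, REQ_OP_BITS is 8, so the low byte of
 * bi_opf holds the operation and the higher bits hold flags. */
#define REQ_OP_BITS     8
#define REQ_OP_MASK     ((1u << REQ_OP_BITS) - 1)

enum { REQ_OP_READ = 0, REQ_OP_SCSI_OUT = 33 };

/* Hypothetical helper wrapping the idiom the patch open-codes:
 * make the bio carry the request's op, keep every non-op flag bit. */
static void bio_copy_op(unsigned int *bi_opf, unsigned int rq_op)
{
        *bi_opf &= ~REQ_OP_MASK;        /* drop whatever op the bio had */
        *bi_opf |= rq_op;               /* inherit the request's op */
}

int main(void)
{
        unsigned int flag = 1u << 18;           /* some non-op flag bit */
        unsigned int bi_opf = REQ_OP_READ | flag;

        bio_copy_op(&bi_opf, REQ_OP_SCSI_OUT);
        printf("op=%u flag_preserved=%d\n",
               bi_opf & REQ_OP_MASK, !!(bi_opf & flag));  /* op=33 flag_preserved=1 */
        return 0;
}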