author     Christoph Hellwig <hch@lst.de>    2017-01-31 10:57:31 -0500
committer  Jens Axboe <axboe@fb.com>         2017-01-31 16:00:44 -0500
commit     aebf526b53aea164508730427597d45f3e06b376 (patch)
tree       98ab726d0f7feb610feee9830246c900c6919eea /block
parent     2f5a8e80f79dc82e00f4cca557dc9ceaf064b450 (diff)
block: fold cmd_type into the REQ_OP_ space
Instead of keeping two levels of indirection for request types, fold it all into the operations. The little caveat here is that previously cmd_type only applied to struct request, while the request and bio op fields were set to plain REQ_OP_READ/WRITE even for passthrough operations. Instead this patch adds new REQ_OP_* values for SCSI passthrough and driver-private requests, although it has to add two of each so that the data in/out direction of the request is still communicated.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
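To make the new convention concrete, here is a hedged sketch (not part of the patch) of what a SCSI passthrough caller looks like after the fold. It mirrors the sg_io() change below; the helper name sg_get_passthrough_rq() is hypothetical:

/* Sketch only; assumes the REQ_OP_SCSI_IN/OUT values this patch adds. */
static struct request *sg_get_passthrough_rq(struct request_queue *q,
                                             bool writing)
{
        /*
         * The op now carries everything cmd_type == REQ_TYPE_BLOCK_PC
         * used to: this is SCSI passthrough, and the _OUT/_IN suffix
         * encodes the data direction (out == data to the device).
         */
        return blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
                               GFP_KERNEL);
}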
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c             10
-rw-r--r--  block/blk-core.c         6
-rw-r--r--  block/blk-flush.c        1
-rw-r--r--  block/blk-map.c         13
-rw-r--r--  block/blk-mq-debugfs.c   4
-rw-r--r--  block/bsg.c             17
-rw-r--r--  block/scsi_ioctl.c       9
7 files changed, 24 insertions, 36 deletions
diff --git a/block/bio.c b/block/bio.c
index 2b375020fc49..9a2dd7145e83 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1227,9 +1227,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         if (!bio)
                 goto out_bmd;
 
-        if (iter->type & WRITE)
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
         ret = 0;
 
         if (map_data) {
@@ -1394,12 +1391,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 
         kfree(pages);
 
-        /*
-         * set data direction, and check if mapped pages need bouncing
-         */
-        if (iter->type & WRITE)
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
         bio_set_flag(bio, BIO_USER_MAPPED);
 
         /*
@@ -1590,7 +1581,6 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                 bio->bi_private = data;
         } else {
                 bio->bi_end_io = bio_copy_kern_endio;
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
         }
 
         return bio;
diff --git a/block/blk-core.c b/block/blk-core.c
index 44431086e4e7..3266daaa343f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -158,8 +158,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
-        printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
-                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+                rq->rq_disk ? rq->rq_disk->disk_name : "?",
                 (unsigned long long) rq->cmd_flags);
 
         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
@@ -1593,7 +1593,6 @@ out:
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-        req->cmd_type = REQ_TYPE_FS;
         if (bio->bi_opf & REQ_RAHEAD)
                 req->cmd_flags |= REQ_FAILFAST_MASK;
 
@@ -2983,7 +2982,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
         dst->cpu = src->cpu;
-        dst->cmd_type = src->cmd_type;
         dst->__sector = blk_rq_pos(src);
         dst->__data_len = blk_rq_bytes(src);
         dst->nr_phys_segments = src->nr_phys_segments;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 0a0358e48b76..968162579234 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -327,7 +327,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
                 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
         }
 
-        flush_rq->cmd_type = REQ_TYPE_FS;
         flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
         flush_rq->rq_flags |= RQF_FLUSH_SEQ;
         flush_rq->rq_disk = first_rq->rq_disk;
diff --git a/block/blk-map.c b/block/blk-map.c
index 0acb6640ead7..2f18c2a0be1b 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,8 +16,6 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
         if (!rq->bio) {
-                rq->cmd_flags &= REQ_OP_MASK;
-                rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
                 blk_rq_bio_prep(rq->q, rq, bio);
         } else {
                 if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -62,6 +60,9 @@ static int __blk_rq_map_user_iov(struct request *rq,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
+        bio->bi_opf &= ~REQ_OP_MASK;
+        bio->bi_opf |= req_op(rq);
+
         if (map_data && map_data->null_mapped)
                 bio_set_flag(bio, BIO_NULL_MAPPED);
 
@@ -90,7 +91,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
 }
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:          request queue where request should be inserted
  * @rq:         request to map data to
  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
@@ -199,7 +200,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for passthrough requests
  * @q:          request queue where request should be inserted
  * @rq:         request to fill
  * @kbuf:       the kernel buffer
@@ -234,8 +235,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
-        if (!reading)
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+        bio->bi_opf &= ~REQ_OP_MASK;
+        bio->bi_opf |= req_op(rq);
 
         if (do_copy)
                 rq->rq_flags |= RQF_COPY_USER;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 5cd2b435a9f5..1e2a4a2ff623 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -88,8 +88,8 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 {
         struct request *rq = list_entry_rq(v);
 
-        seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
-                   rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags,
+        seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
+                   rq, rq->cmd_flags, (unsigned int)rq->rq_flags,
                    rq->tag, rq->internal_tag);
         return 0;
 }
diff --git a/block/bsg.c b/block/bsg.c
index e34c3320956c..a9a8b8e0446f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -177,7 +177,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
 {
         int ret = 0;
 
@@ -198,7 +198,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
                 ret = -EINVAL;
         }
 
-        *rw = hdr->dout_xfer_len ? WRITE : READ;
+        *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
         return ret;
 }
 
@@ -210,8 +210,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 {
         struct request_queue *q = bd->queue;
         struct request *rq, *next_rq = NULL;
-        int ret, rw;
-        unsigned int dxfer_len;
+        int ret;
+        unsigned int op, dxfer_len;
         void __user *dxferp = NULL;
         struct bsg_class_device *bcd = &q->bsg_dev;
 
@@ -226,14 +226,14 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
                 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
                 hdr->din_xfer_len);
 
-        ret = bsg_validate_sgv4_hdr(hdr, &rw);
+        ret = bsg_validate_sgv4_hdr(hdr, &op);
         if (ret)
                 return ERR_PTR(ret);
 
         /*
          * map scatter-gather elements separately and string them to request
          */
-        rq = blk_get_request(q, rw, GFP_KERNEL);
+        rq = blk_get_request(q, op, GFP_KERNEL);
         if (IS_ERR(rq))
                 return rq;
         scsi_req_init(rq);
@@ -242,20 +242,19 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
         if (ret)
                 goto out;
 
-        if (rw == WRITE && hdr->din_xfer_len) {
+        if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
                 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
                         ret = -EOPNOTSUPP;
                         goto out;
                 }
 
-                next_rq = blk_get_request(q, READ, GFP_KERNEL);
+                next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
                 if (IS_ERR(next_rq)) {
                         ret = PTR_ERR(next_rq);
                         next_rq = NULL;
                         goto out;
                 }
                 rq->next_rq = next_rq;
-                next_rq->cmd_type = rq->cmd_type;
 
                 dxferp = (void __user *)(unsigned long)hdr->din_xferp;
                 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 7edf44f25e08..2a2fc768b27a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,7 +321,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                 at_head = 1;
 
         ret = -ENOMEM;
-        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+        rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+                        GFP_KERNEL);
         if (IS_ERR(rq))
                 return PTR_ERR(rq);
         req = scsi_req(rq);
@@ -448,7 +449,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 
         }
 
-        rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
+        rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+                        __GFP_RECLAIM);
         if (IS_ERR(rq)) {
                 err = PTR_ERR(rq);
                 goto error_free_buffer;
@@ -537,7 +539,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
         struct request *rq;
         int err;
 
-        rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
+        rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
         if (IS_ERR(rq))
                 return PTR_ERR(rq);
         scsi_req_init(rq);
@@ -745,7 +747,6 @@ void scsi_req_init(struct request *rq)
 {
         struct scsi_request *req = scsi_req(rq);
 
-        rq->cmd_type = REQ_TYPE_BLOCK_PC;
         memset(req->__cmd, 0, sizeof(req->__cmd));
         req->cmd = req->__cmd;
         req->cmd_len = BLK_MAX_CDB;
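As a final hedged sketch (again not part of the patch): with cmd_type folded into the op space, a consumer that previously had to look at rq->cmd_type can express the same question purely in terms of the op, via the req_op() accessor used above:

/* Sketch only; req_op() masks the REQ_OP_* value out of rq->cmd_flags. */
static bool rq_is_scsi_passthrough(struct request *rq)
{
        return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
}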