author     Christoph Hellwig <hch@lst.de>   2016-10-20 09:12:13 -0400
committer  Jens Axboe <axboe@fb.com>        2016-10-28 10:45:17 -0400
commit     e806402130c9c494e22c73ae9ead4e79d2a5811c (patch)
tree       bac59e1eb3f1b5945409bd0780a4824e9b8383f8
parent     8d2bbd4c8236e9e38e6b36ac9e2c54fdcfe5b335 (diff)
block: split out request-only flags into a new namespace
A lot of the REQ_* flags are only used on struct requests, and only of
use to the block layer and a few drivers that dig into struct request
internals.

This patch adds a new req_flags_t rq_flags field to struct request for
them, and thus dramatically shrinks the number of common requests.  It
also removes the unfortunate situation where we have to fit the fields
from the same enum into 32 bits for struct bio and 64 bits for struct
request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
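For orientation: after this patch, the bio-shared cmd_flags field keeps the op and common REQ_* flags, while a new request-only rq_flags field holds the RQF_* flags. The include/linux/blkdev.h hunk is not part of this excerpt, so the sketch below is a simplified, compilable illustration of the shape of the split rather than the verbatim header; the RQF_* names are the ones used in the hunks below, but the struct name and bit positions here are illustrative only.

	/* Simplified sketch of the split; not the verbatim kernel header. */
	typedef unsigned int req_flags_t;

	#define RQF_SORTED	((req_flags_t)(1u << 0))   /* request went through the I/O scheduler */
	#define RQF_STARTED	((req_flags_t)(1u << 1))   /* driver has started processing it */
	#define RQF_QUIET	((req_flags_t)(1u << 11))  /* suppress completion error messages */

	struct request_sketch {            /* stand-in for struct request */
		unsigned int cmd_flags;    /* REQ_OP_* and common REQ_* flags, shared with bios */
		req_flags_t rq_flags;      /* request-only RQF_* flags */
	};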
-rw-r--r--   Documentation/block/biodoc.txt                 |  2
-rw-r--r--   block/blk-core.c                               | 71
-rw-r--r--   block/blk-exec.c                               |  2
-rw-r--r--   block/blk-flush.c                              |  9
-rw-r--r--   block/blk-map.c                                |  4
-rw-r--r--   block/blk-merge.c                              |  8
-rw-r--r--   block/blk-mq.c                                 | 19
-rw-r--r--   block/blk-tag.c                                |  6
-rw-r--r--   block/blk.h                                    |  4
-rw-r--r--   block/elevator.c                               | 32
-rw-r--r--   drivers/block/pktcdvd.c                        |  2
-rw-r--r--   drivers/ide/ide-atapi.c                        |  6
-rw-r--r--   drivers/ide/ide-cd.c                           | 46
-rw-r--r--   drivers/ide/ide-cd.h                           |  2
-rw-r--r--   drivers/ide/ide-cd_ioctl.c                     |  6
-rw-r--r--   drivers/ide/ide-io.c                           |  6
-rw-r--r--   drivers/ide/ide-pm.c                           |  4
-rw-r--r--   drivers/md/dm-rq.c                             | 12
-rw-r--r--   drivers/memstick/core/ms_block.c               |  2
-rw-r--r--   drivers/memstick/core/mspro_block.c            |  2
-rw-r--r--   drivers/mmc/card/block.c                       |  4
-rw-r--r--   drivers/mmc/card/queue.c                       |  4
-rw-r--r--   drivers/nvme/host/pci.c                        |  4
-rw-r--r--   drivers/scsi/device_handler/scsi_dh_alua.c     |  8
-rw-r--r--   drivers/scsi/device_handler/scsi_dh_emc.c      |  2
-rw-r--r--   drivers/scsi/device_handler/scsi_dh_hp_sw.c    |  2
-rw-r--r--   drivers/scsi/device_handler/scsi_dh_rdac.c     |  2
-rw-r--r--   drivers/scsi/osd/osd_initiator.c               |  2
-rw-r--r--   drivers/scsi/osst.c                            |  2
-rw-r--r--   drivers/scsi/scsi_error.c                      |  2
-rw-r--r--   drivers/scsi/scsi_lib.c                        | 75
-rw-r--r--   drivers/scsi/sd.c                              |  6
-rw-r--r--   drivers/scsi/sd_zbc.c                          |  2
-rw-r--r--   drivers/scsi/st.c                              |  2
-rw-r--r--   drivers/scsi/ufs/ufshcd.c                      |  6
-rw-r--r--   include/linux/blk_types.h                      | 39
-rw-r--r--   include/linux/blkdev.h                         | 49
-rw-r--r--   include/scsi/scsi_device.h                     |  4
38 files changed, 242 insertions(+), 218 deletions(-)
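As the hunks below show, the driver-side conversion is mechanical: every test or update of a request-only flag moves from rq->cmd_flags with a REQ_* constant to rq->rq_flags with the matching RQF_* constant. A minimal before/after sketch follows; quiet_fail_request() is a hypothetical helper for illustration, not a function added by this patch.

	#include <linux/blkdev.h>

	/* Hypothetical helper showing the conversion pattern used throughout this patch. */
	static void quiet_fail_request(struct request *rq)
	{
		/* before this patch: rq->cmd_flags |= REQ_QUIET | REQ_FAILED; */
		rq->rq_flags |= RQF_QUIET | RQF_FAILED;	/* request-only flags now live in rq_flags */
	}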
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 918e1e0d0e78..6acea160298c 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -348,7 +348,7 @@ Drivers can now specify a request prepare function (q->prep_rq_fn) that the
 block layer would invoke to pre-build device commands for a given request,
 or perform other preparatory processing for the request. This is routine is
 called by elv_next_request(), i.e. typically just before servicing a request.
-(The prepare function would not be called for requests that have REQ_DONTPREP
+(The prepare function would not be called for requests that have RQF_DONTPREP
 enabled)
 
 Aside:
diff --git a/block/blk-core.c b/block/blk-core.c
index e4eda5d2aa56..fd416651a676 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -145,13 +145,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	if (error)
 		bio->bi_error = error;
 
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
+	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
 
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
 		bio_endio(bio);
 }
 
@@ -899,7 +899,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_list *rl, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_ELVPRIV) {
+	if (rq->rq_flags & RQF_ELVPRIV) {
 		elv_put_request(rl->q, rq);
 		if (rq->elv.icq)
 			put_io_context(rq->elv.icq->ioc);
@@ -961,14 +961,14 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, int op, unsigned int flags)
+static void freed_request(struct request_list *rl, bool sync,
+		req_flags_t rq_flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(op, flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
-	if (flags & REQ_ELVPRIV)
+	if (rq_flags & RQF_ELVPRIV)
 		q->nr_rqs_elvpriv--;
 
 	__freed_request(rl, sync);
@@ -1079,6 +1079,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 	struct io_cq *icq = NULL;
 	const bool is_sync = rw_is_sync(op, op_flags) != 0;
 	int may_queue;
+	req_flags_t rq_flags = RQF_ALLOCED;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
@@ -1127,7 +1128,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 
 	/*
 	 * Decide whether the new request will be managed by elevator. If
-	 * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
+	 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
 	 * prevent the current elevator from being destroyed until the new
 	 * request is freed. This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
@@ -1136,14 +1137,14 @@ static struct request *__get_request(struct request_list *rl, int op,
 	 * it will be created after releasing queue_lock.
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		op_flags |= REQ_ELVPRIV;
+		rq_flags |= RQF_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
+		rq_flags |= RQF_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
@@ -1153,10 +1154,11 @@ static struct request *__get_request(struct request_list *rl, int op,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
+	req_set_op_attrs(rq, op, op_flags);
+	rq->rq_flags = rq_flags;
 
 	/* init elvpriv */
-	if (op_flags & REQ_ELVPRIV) {
+	if (rq_flags & RQF_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
 			if (ioc)
 				icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1195,7 +1197,7 @@ fail_elvpriv:
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
 			   __func__, dev_name(q->backing_dev_info.dev));
 
-	rq->cmd_flags &= ~REQ_ELVPRIV;
+	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
 
 	spin_lock_irq(q->queue_lock);
@@ -1212,7 +1214,7 @@ fail_alloc:
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(rl, op, op_flags);
+	freed_request(rl, is_sync, rq_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1347,7 +1349,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	blk_clear_rq_complete(rq);
 	trace_block_rq_requeue(q, rq);
 
-	if (rq->cmd_flags & REQ_QUEUED)
+	if (rq->rq_flags & RQF_QUEUED)
 		blk_queue_end_tag(q, rq);
 
 	BUG_ON(blk_queued_rq(rq));
@@ -1409,7 +1411,7 @@ EXPORT_SYMBOL_GPL(part_round_stats);
 #ifdef CONFIG_PM
 static void blk_pm_put_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
 		pm_runtime_mark_last_busy(rq->q->dev);
 }
 #else
@@ -1421,6 +1423,8 @@ static inline void blk_pm_put_request(struct request *rq) {}
  */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
+	req_flags_t rq_flags = req->rq_flags;
+
 	if (unlikely(!q))
 		return;
 
@@ -1440,16 +1444,15 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
 	 */
-	if (req->cmd_flags & REQ_ALLOCED) {
-		unsigned int flags = req->cmd_flags;
-		int op = req_op(req);
+	if (rq_flags & RQF_ALLOCED) {
 		struct request_list *rl = blk_rq_rl(req);
+		bool sync = rw_is_sync(req_op(req), req->cmd_flags);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
-		freed_request(rl, op, flags);
+		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
 	}
 }
@@ -2214,7 +2217,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	unsigned int bytes = 0;
 	struct bio *bio;
 
-	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+	if (!(rq->rq_flags & RQF_MIXED_MERGE))
 		return blk_rq_bytes(rq);
 
 	/*
@@ -2257,7 +2260,7 @@ void blk_account_io_done(struct request *req)
 	 * normal IO on queueing nor completion. Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
+	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
@@ -2285,7 +2288,7 @@ static struct request *blk_pm_peek_request(struct request_queue *q,
 					    struct request *rq)
 {
 	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
-	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
 		return NULL;
 	else
 		return rq;
@@ -2361,13 +2364,13 @@ struct request *blk_peek_request(struct request_queue *q)
 		if (!rq)
 			break;
 
-		if (!(rq->cmd_flags & REQ_STARTED)) {
+		if (!(rq->rq_flags & RQF_STARTED)) {
 			/*
 			 * This is the first time the device driver
 			 * sees this request (possibly after
 			 * requeueing). Notify IO scheduler.
 			 */
-			if (rq->cmd_flags & REQ_SORTED)
+			if (rq->rq_flags & RQF_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*
@@ -2375,7 +2378,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->cmd_flags |= REQ_STARTED;
+			rq->rq_flags |= RQF_STARTED;
 			trace_block_rq_issue(q, rq);
 		}
 
@@ -2384,7 +2387,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}
 
-		if (rq->cmd_flags & REQ_DONTPREP)
+		if (rq->rq_flags & RQF_DONTPREP)
 			break;
 
 		if (q->dma_drain_size && blk_rq_bytes(rq)) {
@@ -2407,11 +2410,11 @@ struct request *blk_peek_request(struct request_queue *q)
 			/*
 			 * the request may have been (partially) prepped.
 			 * we need to keep this request in the front to
-			 * avoid resource deadlock. REQ_STARTED will
+			 * avoid resource deadlock. RQF_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
 			if (q->dma_drain_size && blk_rq_bytes(rq) &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
+			    !(rq->rq_flags & RQF_DONTPREP)) {
 				/*
 				 * remove the space for the drain we added
 				 * so that we don't add it again
@@ -2424,7 +2427,7 @@ struct request *blk_peek_request(struct request_queue *q)
 		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
 			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
 
-			rq->cmd_flags |= REQ_QUIET;
+			rq->rq_flags |= RQF_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
@@ -2561,7 +2564,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->errors = 0;
 
 	if (error && req->cmd_type == REQ_TYPE_FS &&
-	    !(req->cmd_flags & REQ_QUIET)) {
+	    !(req->rq_flags & RQF_QUIET)) {
 		char *error_type;
 
 		switch (error) {
@@ -2634,7 +2637,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
-	if (req->cmd_flags & REQ_MIXED_MERGE) {
+	if (req->rq_flags & RQF_MIXED_MERGE) {
 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
 	}
@@ -2687,7 +2690,7 @@ void blk_unprep_request(struct request *req)
 {
 	struct request_queue *q = req->q;
 
-	req->cmd_flags &= ~REQ_DONTPREP;
+	req->rq_flags &= ~RQF_DONTPREP;
 	if (q->unprep_rq_fn)
 		q->unprep_rq_fn(q, req);
 }
@@ -2698,7 +2701,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
  */
 void blk_finish_request(struct request *req, int error)
 {
-	if (req->cmd_flags & REQ_QUEUED)
+	if (req->rq_flags & RQF_QUEUED)
 		blk_queue_end_tag(req->q, req);
 
 	BUG_ON(blk_queued_rq(req));
@@ -2708,7 +2711,7 @@ void blk_finish_request(struct request *req, int error)
 
 	blk_delete_timer(req);
 
-	if (req->cmd_flags & REQ_DONTPREP)
+	if (req->rq_flags & RQF_DONTPREP)
 		blk_unprep_request(req);
 
 	blk_account_io_done(req);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7ea04325d02f..3ecb00a6cf45 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_dying(q))) {
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 		rq->errors = -ENXIO;
 		__blk_end_request_all(rq, rq->errors);
 		spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3990b9cfbda5 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -56,7 +56,7 @@
  * Once while executing DATA and again after the whole sequence is
  * complete. The first completion updates the contained bio but doesn't
  * finish it so that the bio submitter is notified only after the whole
- * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
+ * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
  * req_bio_endio().
  *
  * The above peculiarity requires that each FLUSH/FUA request has only one
@@ -127,7 +127,7 @@ static void blk_flush_restore_request(struct request *rq)
 	rq->bio = rq->biotail;
 
 	/* make @rq a normal request */
-	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+	rq->rq_flags &= ~RQF_FLUSH_SEQ;
 	rq->end_io = rq->flush.saved_end_io;
 }
 
@@ -330,7 +330,8 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
+	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
+	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
 
@@ -433,7 +434,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	memset(&rq->flush, 0, sizeof(rq->flush));
 	INIT_LIST_HEAD(&rq->flush.list);
-	rq->cmd_flags |= REQ_FLUSH_SEQ;
+	rq->rq_flags |= RQF_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 	if (q->mq_ops) {
 		rq->end_io = mq_flush_data_end_io;
diff --git a/block/blk-map.c b/block/blk-map.c
index b8657fa8dc9a..2c5ae5fef473 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -135,7 +135,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	} while (iov_iter_count(&i));
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;
 	return 0;
 
 unmap_rq:
@@ -232,7 +232,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 	if (do_copy)
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;
 
 	ret = blk_rq_append_bio(rq, bio);
 	if (unlikely(ret)) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2642e5fc8b69..fda6a12fc776 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -456,7 +456,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	if (rq->bio)
 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
 
-	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
 	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
 		unsigned int pad_len =
 			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
@@ -634,7 +634,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 	struct bio *bio;
 
-	if (rq->cmd_flags & REQ_MIXED_MERGE)
+	if (rq->rq_flags & RQF_MIXED_MERGE)
 		return;
 
 	/*
@@ -647,7 +647,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
 			(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 		bio->bi_opf |= ff;
 	}
-	rq->cmd_flags |= REQ_MIXED_MERGE;
+	rq->rq_flags |= RQF_MIXED_MERGE;
 }
 
 static void blk_account_io_merge(struct request *req)
@@ -709,7 +709,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	 * makes sure that all involved bios have mixable attributes
 	 * set properly.
 	 */
-	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
 		blk_rq_set_mixed_merge(req);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d74a74a9f9ef..b49c6658eb05 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -142,14 +142,13 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 			       struct request *rq, int op,
 			       unsigned int op_flags)
 {
-	if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
-
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
 	req_set_op_attrs(rq, op, op_flags);
+	if (blk_queue_io_stat(q))
+		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -198,7 +197,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 	rq = data->hctx->tags->rqs[tag];
 
 	if (blk_mq_tag_busy(data->hctx)) {
-		rq->cmd_flags = REQ_MQ_INFLIGHT;
+		rq->rq_flags = RQF_MQ_INFLIGHT;
 		atomic_inc(&data->hctx->nr_active);
 	}
 
@@ -298,9 +297,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
-	rq->cmd_flags = 0;
+	rq->rq_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, ctx, tag);
@@ -489,10 +488,10 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
 
 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+		if (!(rq->rq_flags & RQF_SOFTBARRIER))
 			continue;
 
-		rq->cmd_flags &= ~REQ_SOFTBARRIER;
+		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
 		blk_mq_insert_request(rq, true, false, false);
 	}
@@ -519,11 +518,11 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 	 * We abuse this flag that is otherwise used by the I/O scheduler to
 	 * request head insertation from the workqueue.
 	 */
-	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
 	spin_lock_irqsave(&q->requeue_lock, flags);
 	if (at_head) {
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->requeue_list);
 	} else {
 		list_add_tail(&rq->queuelist, &q->requeue_list);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index f0344e6939d5..bae1decb6ec3 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -270,7 +270,7 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
-	rq->cmd_flags &= ~REQ_QUEUED;
+	rq->rq_flags &= ~RQF_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -316,7 +316,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	unsigned max_depth;
 	int tag;
 
-	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __func__, rq,
@@ -371,7 +371,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 */
 
 	bqt->next_tag = (tag + 1) % bqt->max_depth;
-	rq->cmd_flags |= REQ_QUEUED;
+	rq->rq_flags |= RQF_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);
diff --git a/block/blk.h b/block/blk.h
index 74444c49078f..aa132dea598c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -130,7 +130,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
+#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
 void blk_insert_flush(struct request *rq);
 
@@ -247,7 +247,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
 static inline int blk_do_io_stat(struct request *rq)
 {
 	return rq->rq_disk &&
-	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->rq_flags & RQF_IO_STAT) &&
 		(rq->cmd_type == REQ_TYPE_FS);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a56fd7..ac80f89a0842 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -245,7 +245,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
-	rq->cmd_flags &= ~REQ_HASHED;
+	rq->rq_flags &= ~RQF_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -260,7 +260,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
-	rq->cmd_flags |= REQ_HASHED;
+	rq->rq_flags |= RQF_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -352,7 +352,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
-	int stop_flags;
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
@@ -362,7 +361,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -370,7 +368,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
-		if (pos->cmd_flags & stop_flags)
+		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
 			break;
 		if (blk_rq_pos(rq) >= boundary) {
 			if (blk_rq_pos(pos) < boundary)
@@ -510,7 +508,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
-	const int next_sorted = next->cmd_flags & REQ_SORTED;
+	const int next_sorted = next->rq_flags & RQF_SORTED;
 
 	if (next_sorted && e->type->ops.elevator_merge_req_fn)
 		e->type->ops.elevator_merge_req_fn(q, rq, next);
@@ -537,13 +535,13 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 #ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		rq->q->nr_pending--;
 }
 
 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 {
-	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
 	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 		pm_request_resume(q->dev);
 }
@@ -563,11 +561,11 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (rq->cmd_flags & REQ_SORTED)
+		if (rq->rq_flags & RQF_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 
-	rq->cmd_flags &= ~REQ_STARTED;
+	rq->rq_flags &= ~RQF_STARTED;
 
 	blk_pm_requeue_request(rq);
 
@@ -597,13 +595,13 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
 	rq->q = q;
 
-	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+	if (rq->rq_flags & RQF_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
 		    (where == ELEVATOR_INSERT_SORT ||
 		     where == ELEVATOR_INSERT_SORT_MERGE))
 		where = ELEVATOR_INSERT_BACK;
@@ -611,12 +609,12 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -642,7 +640,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
-		rq->cmd_flags |= REQ_SORTED;
+		rq->rq_flags |= RQF_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -659,7 +657,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_FLUSH:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
 	default:
@@ -735,7 +733,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if ((rq->cmd_flags & REQ_SORTED) &&
+		if ((rq->rq_flags & RQF_SORTED) &&
 		    e->type->ops.elevator_completed_req_fn)
 			e->type->ops.elevator_completed_req_fn(q, rq);
 	}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 90fa4ac149db..7cf795e0fc8d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -721,7 +721,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
 	rq->timeout = 60*HZ;
 	if (cgc->quiet)
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 
 	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
 	if (rq->errors)
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 05352f490d60..f90ea221f7f2 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -211,7 +211,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 	sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
 	sense_rq->cmd[4] = cmd_len;
 	sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
-	sense_rq->cmd_flags |= REQ_PREEMPT;
+	sense_rq->rq_flags |= RQF_PREEMPT;
 
 	if (drive->media == ide_tape)
 		sense_rq->cmd[13] = REQ_IDETAPE_PC1;
@@ -295,7 +295,7 @@ int ide_cd_expiry(ide_drive_t *drive)
 		wait = ATAPI_WAIT_PC;
 		break;
 	default:
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			printk(KERN_INFO PFX "cmd 0x%x timed out\n",
 			       rq->cmd[0]);
 		wait = 0;
@@ -375,7 +375,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
 	}
 
 	if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 
 	return 1;
 }
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad296ed..9cbd217bc0c9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -98,7 +98,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
 	struct request_sense *sense = &drive->sense_data;
 	int log = 0;
 
-	if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
+	if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
 		return 0;
 
 	ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
@@ -291,7 +291,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * (probably while trying to recover from a former error).
 		 * Just give up.
 		 */
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		return 2;
 	}
 
@@ -311,7 +311,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 			cdrom_saw_media_change(drive);
 
 		if (rq->cmd_type == REQ_TYPE_FS &&
-		    !(rq->cmd_flags & REQ_QUIET))
+		    !(rq->rq_flags & RQF_QUIET))
 			printk(KERN_ERR PFX "%s: tray open\n",
 			       drive->name);
 		}
@@ -346,7 +346,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in retrying after an illegal request or data
 		 * protect error.
 		 */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "command error", stat);
 		do_end_request = 1;
 		break;
@@ -355,14 +355,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in re-trying a zillion times on a bad sector.
 		 * If we got here the error is not correctable.
 		 */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "media error "
 					"(bad sector)", stat);
 		do_end_request = 1;
 		break;
 	case BLANK_CHECK:
 		/* disk appears blank? */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "media error (blank)",
 					stat);
 		do_end_request = 1;
@@ -380,7 +380,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 	}
 
 	if (rq->cmd_type != REQ_TYPE_FS) {
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		do_end_request = 1;
 	}
 
@@ -422,19 +422,19 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
 int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		    int write, void *buffer, unsigned *bufflen,
 		    struct request_sense *sense, int timeout,
-		    unsigned int cmd_flags)
+		    req_flags_t rq_flags)
 {
 	struct cdrom_info *info = drive->driver_data;
 	struct request_sense local_sense;
 	int retries = 10;
-	unsigned int flags = 0;
+	req_flags_t flags = 0;
 
 	if (!sense)
 		sense = &local_sense;
 
 	ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
-		      "cmd_flags: 0x%x",
-		      cmd[0], write, timeout, cmd_flags);
+		      "rq_flags: 0x%x",
+		      cmd[0], write, timeout, rq_flags);
 
 	/* start of retry loop */
 	do {
@@ -446,7 +446,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		memcpy(rq->cmd, cmd, BLK_MAX_CDB);
 		rq->cmd_type = REQ_TYPE_ATA_PC;
 		rq->sense = sense;
-		rq->cmd_flags |= cmd_flags;
+		rq->rq_flags |= rq_flags;
 		rq->timeout = timeout;
 		if (buffer) {
 			error = blk_rq_map_kern(drive->queue, rq, buffer,
@@ -462,14 +462,14 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		if (buffer)
 			*bufflen = rq->resid_len;
 
-		flags = rq->cmd_flags;
+		flags = rq->rq_flags;
 		blk_put_request(rq);
 
 		/*
 		 * FIXME: we should probably abort/retry or something in case of
 		 * failure.
 		 */
-		if (flags & REQ_FAILED) {
+		if (flags & RQF_FAILED) {
 			/*
 			 * The request failed. Retry if it was due to a unit
 			 * attention status (usually means media was changed).
@@ -494,10 +494,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		}
 
 		/* end of retry loop */
-	} while ((flags & REQ_FAILED) && retries >= 0);
+	} while ((flags & RQF_FAILED) && retries >= 0);
 
 	/* return an error if the command failed */
-	return (flags & REQ_FAILED) ? -EIO : 0;
+	return (flags & RQF_FAILED) ? -EIO : 0;
 }
 
 /*
@@ -589,7 +589,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 				"(%u bytes)\n", drive->name, __func__,
 				cmd->nleft);
 			if (!write)
-				rq->cmd_flags |= REQ_FAILED;
+				rq->rq_flags |= RQF_FAILED;
 			uptodate = 0;
 		}
 	} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
@@ -607,7 +607,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 		}
 
 		if (!uptodate)
-			rq->cmd_flags |= REQ_FAILED;
+			rq->rq_flags |= RQF_FAILED;
 		}
 		goto out_end;
 	}
@@ -745,9 +745,9 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 		      rq->cmd[0], rq->cmd_type);
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 	else
-		rq->cmd_flags &= ~REQ_FAILED;
+		rq->rq_flags &= ~RQF_FAILED;
 
 	drive->dma = 0;
 
@@ -867,7 +867,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 	 */
 	cmd[7] = cdi->sanyo_slot % 3;
 
-	return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
+	return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
 }
 
 static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -890,7 +890,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
 	cmd[0] = GPCMD_READ_CDVD_CAPACITY;
 
 	stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
-			       REQ_QUIET);
+			       RQF_QUIET);
 	if (stat)
 		return stat;
 
@@ -943,7 +943,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
 	if (msf_flag)
 		cmd[1] = 2;
 
-	return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET);
+	return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
 }
 
 /* Try to read the entire TOC for the disk into our internal buffer. */
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1efc936f5b66..eea60c986c4f 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -101,7 +101,7 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
 
 /* ide-cd.c functions used by ide-cd_ioctl.c */
 int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
-		    unsigned *, struct request_sense *, int, unsigned int);
+		    unsigned *, struct request_sense *, int, req_flags_t);
 int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
 int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
 void ide_cdrom_update_speed(ide_drive_t *, u8 *);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 5887a7a09e37..f085e3a2e1d6 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -305,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 
 	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
-	rq->cmd_flags = REQ_QUIET;
+	rq->rq_flags = RQF_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
 	blk_put_request(rq);
 	/*
@@ -449,7 +449,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 			struct packet_command *cgc)
 {
 	ide_drive_t *drive = cdi->handle;
-	unsigned int flags = 0;
+	req_flags_t flags = 0;
 	unsigned len = cgc->buflen;
 
 	if (cgc->timeout <= 0)
@@ -463,7 +463,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 		memset(cgc->sense, 0, sizeof(struct request_sense));
 
 	if (cgc->quiet)
-		flags |= REQ_QUIET;
+		flags |= RQF_QUIET;
 
 	cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
 				    cgc->data_direction == CGC_DATA_WRITE,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 669ea1e45795..6360bbd37efe 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 
-	BUG_ON(!(rq->cmd_flags & REQ_STARTED));
+	BUG_ON(!(rq->rq_flags & RQF_STARTED));
 
 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
@@ -316,7 +316,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 
 	/* bail early if we've exceeded max_failures */
 	if (drive->max_failures && (drive->failures > drive->max_failures)) {
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		goto kill_rq;
 	}
 
@@ -539,7 +539,7 @@ repeat:
 	 */
 	if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
 	    ata_pm_request(rq) == 0 &&
-	    (rq->cmd_flags & REQ_PREEMPT) == 0) {
+	    (rq->rq_flags & RQF_PREEMPT) == 0) {
 		/* there should be no pending command at this point */
 		ide_unlock_port(hwif);
 		goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index e34af488693a..a015acdffb39 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -53,7 +53,7 @@ static int ide_pm_execute_rq(struct request *rq)
 
 	spin_lock_irq(q->queue_lock);
 	if (unlikely(blk_queue_dying(q))) {
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 		rq->errors = -ENXIO;
 		__blk_end_request_all(rq, rq->errors);
 		spin_unlock_irq(q->queue_lock);
@@ -90,7 +90,7 @@ int generic_ide_resume(struct device *dev)
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
-	rq->cmd_flags |= REQ_PREEMPT;
+	rq->rq_flags |= RQF_PREEMPT;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_RESUME;
 	rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dc75bea0d541..f76cc36b8546 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -313,7 +313,7 @@ static void dm_unprep_request(struct request *rq)
313 313
314 if (!rq->q->mq_ops) { 314 if (!rq->q->mq_ops) {
315 rq->special = NULL; 315 rq->special = NULL;
316 rq->cmd_flags &= ~REQ_DONTPREP; 316 rq->rq_flags &= ~RQF_DONTPREP;
317 } 317 }
318 318
319 if (clone) 319 if (clone)
@@ -431,7 +431,7 @@ static void dm_softirq_done(struct request *rq)
431 return; 431 return;
432 } 432 }
433 433
434 if (rq->cmd_flags & REQ_FAILED) 434 if (rq->rq_flags & RQF_FAILED)
435 mapped = false; 435 mapped = false;
436 436
437 dm_done(clone, tio->error, mapped); 437 dm_done(clone, tio->error, mapped);
@@ -460,7 +460,7 @@ static void dm_complete_request(struct request *rq, int error)
460 */ 460 */
461static void dm_kill_unmapped_request(struct request *rq, int error) 461static void dm_kill_unmapped_request(struct request *rq, int error)
462{ 462{
463 rq->cmd_flags |= REQ_FAILED; 463 rq->rq_flags |= RQF_FAILED;
464 dm_complete_request(rq, error); 464 dm_complete_request(rq, error);
465} 465}
466 466
@@ -476,7 +476,7 @@ static void end_clone_request(struct request *clone, int error)
476 * For just cleaning up the information of the queue in which 476 * For just cleaning up the information of the queue in which
477 * the clone was dispatched. 477 * the clone was dispatched.
478 * The clone is *NOT* freed actually here because it is alloced 478 * The clone is *NOT* freed actually here because it is alloced
479 * from dm own mempool (REQ_ALLOCED isn't set). 479 * from dm own mempool (RQF_ALLOCED isn't set).
480 */ 480 */
481 __blk_put_request(clone->q, clone); 481 __blk_put_request(clone->q, clone);
482 } 482 }
@@ -497,7 +497,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
497 int r; 497 int r;
498 498
499 if (blk_queue_io_stat(clone->q)) 499 if (blk_queue_io_stat(clone->q))
500 clone->cmd_flags |= REQ_IO_STAT; 500 clone->rq_flags |= RQF_IO_STAT;
501 501
502 clone->start_time = jiffies; 502 clone->start_time = jiffies;
503 r = blk_insert_cloned_request(clone->q, clone); 503 r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +633,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
633 return BLKPREP_DEFER; 633 return BLKPREP_DEFER;
634 634
635 rq->special = tio; 635 rq->special = tio;
636 rq->cmd_flags |= REQ_DONTPREP; 636 rq->rq_flags |= RQF_DONTPREP;
637 637
638 return BLKPREP_OK; 638 return BLKPREP_OK;
639} 639}
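The dm-rq changes keep the existing error-handling shape: a request that could not be mapped is tagged RQF_FAILED and then pushed through the normal completion path, where the flag tells dm_softirq_done() to treat it as unmapped. A rough standalone sketch of that flow (local stand-ins, not the real dm code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;
#define RQF_FAILED ((req_flags_t)(1 << 10))   /* bit value from blkdev.h below */

struct request { req_flags_t rq_flags; };     /* reduced stand-in */

/* completion side: a request flagged RQF_FAILED is finished as "not mapped" */
static void softirq_done(const struct request *rq)
{
	bool mapped = !(rq->rq_flags & RQF_FAILED);
	printf("complete request, mapped=%d\n", mapped);
}

/* error side: mark the request failed, then let the normal completion run */
static void kill_unmapped_request(struct request *rq)
{
	rq->rq_flags |= RQF_FAILED;
	softirq_done(rq);
}

int main(void)
{
	struct request ok = { 0 }, bad = { 0 };

	softirq_done(&ok);            /* mapped=1 */
	kill_unmapped_request(&bad);  /* mapped=0 */
	return 0;
}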
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index aacf584f2a42..f3512404bc52 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2006,7 +2006,7 @@ static int msb_prepare_req(struct request_queue *q, struct request *req)
2006 blk_dump_rq_flags(req, "MS unsupported request"); 2006 blk_dump_rq_flags(req, "MS unsupported request");
2007 return BLKPREP_KILL; 2007 return BLKPREP_KILL;
2008 } 2008 }
2009 req->cmd_flags |= REQ_DONTPREP; 2009 req->rq_flags |= RQF_DONTPREP;
2010 return BLKPREP_OK; 2010 return BLKPREP_OK;
2011} 2011}
2012 2012
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c1472275fe57..fa0746d182ff 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -834,7 +834,7 @@ static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
834 return BLKPREP_KILL; 834 return BLKPREP_KILL;
835 } 835 }
836 836
837 req->cmd_flags |= REQ_DONTPREP; 837 req->rq_flags |= RQF_DONTPREP;
838 838
839 return BLKPREP_OK; 839 return BLKPREP_OK;
840} 840}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..f8190dd4a35c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2117,7 +2117,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2117 mmc_blk_abort_packed_req(mq_rq); 2117 mmc_blk_abort_packed_req(mq_rq);
2118 } else { 2118 } else {
2119 if (mmc_card_removed(card)) 2119 if (mmc_card_removed(card))
2120 req->cmd_flags |= REQ_QUIET; 2120 req->rq_flags |= RQF_QUIET;
2121 while (ret) 2121 while (ret)
2122 ret = blk_end_request(req, -EIO, 2122 ret = blk_end_request(req, -EIO,
2123 blk_rq_cur_bytes(req)); 2123 blk_rq_cur_bytes(req));
@@ -2126,7 +2126,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
2126 start_new_req: 2126 start_new_req:
2127 if (rqc) { 2127 if (rqc) {
2128 if (mmc_card_removed(card)) { 2128 if (mmc_card_removed(card)) {
2129 rqc->cmd_flags |= REQ_QUIET; 2129 rqc->rq_flags |= RQF_QUIET;
2130 blk_end_request_all(rqc, -EIO); 2130 blk_end_request_all(rqc, -EIO);
2131 } else { 2131 } else {
2132 /* 2132 /*
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8037f73a109a..8a67f1c2ce21 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -44,7 +44,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
44 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) 44 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
45 return BLKPREP_KILL; 45 return BLKPREP_KILL;
46 46
47 req->cmd_flags |= REQ_DONTPREP; 47 req->rq_flags |= RQF_DONTPREP;
48 48
49 return BLKPREP_OK; 49 return BLKPREP_OK;
50} 50}
@@ -120,7 +120,7 @@ static void mmc_request_fn(struct request_queue *q)
120 120
121 if (!mq) { 121 if (!mq) {
122 while ((req = blk_fetch_request(q)) != NULL) { 122 while ((req = blk_fetch_request(q)) != NULL) {
123 req->cmd_flags |= REQ_QUIET; 123 req->rq_flags |= RQF_QUIET;
124 __blk_end_request_all(req, -EIO); 124 __blk_end_request_all(req, -EIO);
125 } 125 }
126 return; 126 return;
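All of these legacy ->prep_rq_fn hooks follow the same pattern: reject requests they cannot handle with BLKPREP_KILL, otherwise set RQF_DONTPREP so the block layer skips prepare on any later pass, and return BLKPREP_OK. A compact standalone sketch of that control flow (types and constants are local stand-ins, not the mmc/memstick drivers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;
#define RQF_DONTPREP ((req_flags_t)(1 << 7))   /* bit value from blkdev.h below */

enum { BLKPREP_OK, BLKPREP_KILL, BLKPREP_DEFER };

struct request { req_flags_t rq_flags; };      /* reduced stand-in */

/* reject what we cannot handle, otherwise mark the request as prepared */
static int prep_request(struct request *req, bool supported)
{
	if (!supported)
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;   /* don't call prepare again */
	return BLKPREP_OK;
}

int main(void)
{
	struct request req = { 0 };

	printf("prep: %d, dontprep set: %d\n",
	       prep_request(&req, true),
	       !!(req.rq_flags & RQF_DONTPREP));
	return 0;
}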
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0955e9d22020 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -323,9 +323,9 @@ static int nvme_init_iod(struct request *rq, unsigned size,
323 iod->nents = 0; 323 iod->nents = 0;
324 iod->length = size; 324 iod->length = size;
325 325
326 if (!(rq->cmd_flags & REQ_DONTPREP)) { 326 if (!(rq->rq_flags & RQF_DONTPREP)) {
327 rq->retries = 0; 327 rq->retries = 0;
328 rq->cmd_flags |= REQ_DONTPREP; 328 rq->rq_flags |= RQF_DONTPREP;
329 } 329 }
330 return 0; 330 return 0;
331} 331}
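In the NVMe case RQF_DONTPREP doubles as a "first pass" marker: per-request state such as the retry counter is initialised only once, so a requeued request does not lose it. A small standalone sketch of that idempotent-init idiom (local stand-ins, not the driver itself):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;
#define RQF_DONTPREP ((req_flags_t)(1 << 7))

struct request {                 /* reduced stand-in */
	req_flags_t rq_flags;
	int retries;
};

/* per-request state is initialised only on the first pass; a requeued
 * request keeps its retry count because RQF_DONTPREP is already set */
static void init_iod(struct request *rq)
{
	if (!(rq->rq_flags & RQF_DONTPREP)) {
		rq->retries = 0;
		rq->rq_flags |= RQF_DONTPREP;
	}
}

int main(void)
{
	struct request rq = { .rq_flags = 0, .retries = 0 };

	init_iod(&rq);
	rq.retries = 2;      /* pretend the request was retried ... */
	init_iod(&rq);       /* ... and requeued: retries must survive */
	printf("retries after requeue: %d\n", rq.retries);   /* prints 2 */
	return 0;
}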
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 241829e59668..05813a420188 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -154,7 +154,8 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
154 return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, 154 return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
155 buff, bufflen, sshdr, 155 buff, bufflen, sshdr,
156 ALUA_FAILOVER_TIMEOUT * HZ, 156 ALUA_FAILOVER_TIMEOUT * HZ,
157 ALUA_FAILOVER_RETRIES, NULL, req_flags); 157 ALUA_FAILOVER_RETRIES, NULL,
158 req_flags, 0);
158} 159}
159 160
160/* 161/*
@@ -187,7 +188,8 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
187 return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, 188 return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
188 stpg_data, stpg_len, 189 stpg_data, stpg_len,
189 sshdr, ALUA_FAILOVER_TIMEOUT * HZ, 190 sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
190 ALUA_FAILOVER_RETRIES, NULL, req_flags); 191 ALUA_FAILOVER_RETRIES, NULL,
192 req_flags, 0);
191} 193}
192 194
193static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, 195static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
@@ -1063,7 +1065,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
1063 state != SCSI_ACCESS_STATE_ACTIVE && 1065 state != SCSI_ACCESS_STATE_ACTIVE &&
1064 state != SCSI_ACCESS_STATE_LBA) { 1066 state != SCSI_ACCESS_STATE_LBA) {
1065 ret = BLKPREP_KILL; 1067 ret = BLKPREP_KILL;
1066 req->cmd_flags |= REQ_QUIET; 1068 req->rq_flags |= RQF_QUIET;
1067 } 1069 }
1068 return ret; 1070 return ret;
1069 1071
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 375d81850f15..5b80746980b8 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -452,7 +452,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
452 452
453 if (h->lun_state != CLARIION_LUN_OWNED) { 453 if (h->lun_state != CLARIION_LUN_OWNED) {
454 ret = BLKPREP_KILL; 454 ret = BLKPREP_KILL;
455 req->cmd_flags |= REQ_QUIET; 455 req->rq_flags |= RQF_QUIET;
456 } 456 }
457 return ret; 457 return ret;
458 458
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9406d5f4a3d3..308e87195dc1 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -266,7 +266,7 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
266 266
267 if (h->path_state != HP_SW_PATH_ACTIVE) { 267 if (h->path_state != HP_SW_PATH_ACTIVE) {
268 ret = BLKPREP_KILL; 268 ret = BLKPREP_KILL;
269 req->cmd_flags |= REQ_QUIET; 269 req->rq_flags |= RQF_QUIET;
270 } 270 }
271 return ret; 271 return ret;
272 272
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 06fbd0b0c68a..00d9c326158e 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -724,7 +724,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
724 724
725 if (h->state != RDAC_STATE_ACTIVE) { 725 if (h->state != RDAC_STATE_ACTIVE) {
726 ret = BLKPREP_KILL; 726 ret = BLKPREP_KILL;
727 req->cmd_flags |= REQ_QUIET; 727 req->rq_flags |= RQF_QUIET;
728 } 728 }
729 return ret; 729 return ret;
730 730
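The four SCSI device handlers share one idea: when the path is unusable the request is killed, but RQF_QUIET is set first so the completion path does not log the expected failure. Roughly this shape, as a standalone sketch with local stand-ins for the kernel pieces:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;
#define RQF_QUIET ((req_flags_t)(1 << 11))   /* bit value from blkdev.h below */

enum { BLKPREP_OK, BLKPREP_KILL };

struct request { req_flags_t rq_flags; };    /* reduced stand-in */

/* prep: a dead path kills the request but marks it quiet first */
static int handler_prep(struct request *req, int path_usable)
{
	if (!path_usable) {
		req->rq_flags |= RQF_QUIET;
		return BLKPREP_KILL;
	}
	return BLKPREP_OK;
}

/* completion: only noisy failures are reported */
static void complete_request(const struct request *req, int error)
{
	if (error && !(req->rq_flags & RQF_QUIET))
		fprintf(stderr, "I/O error %d\n", error);
}

int main(void)
{
	struct request req = { 0 };

	if (handler_prep(&req, 0) == BLKPREP_KILL)
		complete_request(&req, -5);   /* -EIO, but nothing is printed */
	return 0;
}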
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 2f2a9910e30e..ef99f62831fb 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1595,7 +1595,7 @@ static int _init_blk_request(struct osd_request *or,
1595 } 1595 }
1596 1596
1597 or->request = req; 1597 or->request = req;
1598 req->cmd_flags |= REQ_QUIET; 1598 req->rq_flags |= RQF_QUIET;
1599 1599
1600 req->timeout = or->timeout; 1600 req->timeout = or->timeout;
1601 req->retries = or->retries; 1601 req->retries = or->retries;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 5033223f6287..a2960f5d98ec 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
368 return DRIVER_ERROR << 24; 368 return DRIVER_ERROR << 24;
369 369
370 blk_rq_set_block_pc(req); 370 blk_rq_set_block_pc(req);
371 req->cmd_flags |= REQ_QUIET; 371 req->rq_flags |= RQF_QUIET;
372 372
373 SRpnt->bio = NULL; 373 SRpnt->bio = NULL;
374 374
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106a6adbd6f1..996e134d79fa 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1988,7 +1988,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
1988 1988
1989 req->cmd_len = COMMAND_SIZE(req->cmd[0]); 1989 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
1990 1990
1991 req->cmd_flags |= REQ_QUIET; 1991 req->rq_flags |= RQF_QUIET;
1992 req->timeout = 10 * HZ; 1992 req->timeout = 10 * HZ;
1993 req->retries = 5; 1993 req->retries = 5;
1994 1994
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2cca9cffc63f..8c52622ac257 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -163,26 +163,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
163{ 163{
164 __scsi_queue_insert(cmd, reason, 1); 164 __scsi_queue_insert(cmd, reason, 1);
165} 165}
166/** 166
167 * scsi_execute - insert request and wait for the result 167static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
168 * @sdev: scsi device
169 * @cmd: scsi command
170 * @data_direction: data direction
171 * @buffer: data buffer
172 * @bufflen: len of buffer
173 * @sense: optional sense buffer
174 * @timeout: request timeout in seconds
175 * @retries: number of times to retry request
176 * @flags: or into request flags;
177 * @resid: optional residual length
178 *
179 * returns the req->errors value which is the scsi_cmnd result
180 * field.
181 */
182int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
183 int data_direction, void *buffer, unsigned bufflen, 168 int data_direction, void *buffer, unsigned bufflen,
184 unsigned char *sense, int timeout, int retries, u64 flags, 169 unsigned char *sense, int timeout, int retries, u64 flags,
185 int *resid) 170 req_flags_t rq_flags, int *resid)
186{ 171{
187 struct request *req; 172 struct request *req;
188 int write = (data_direction == DMA_TO_DEVICE); 173 int write = (data_direction == DMA_TO_DEVICE);
@@ -203,7 +188,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
203 req->sense_len = 0; 188 req->sense_len = 0;
204 req->retries = retries; 189 req->retries = retries;
205 req->timeout = timeout; 190 req->timeout = timeout;
206 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 191 req->cmd_flags |= flags;
192 req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
207 193
208 /* 194 /*
209 * head injection *required* here otherwise quiesce won't work 195 * head injection *required* here otherwise quiesce won't work
@@ -227,12 +213,37 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
227 213
228 return ret; 214 return ret;
229} 215}
216
217/**
218 * scsi_execute - insert request and wait for the result
219 * @sdev: scsi device
220 * @cmd: scsi command
221 * @data_direction: data direction
222 * @buffer: data buffer
223 * @bufflen: len of buffer
224 * @sense: optional sense buffer
225 * @timeout: request timeout in seconds
226 * @retries: number of times to retry request
227 * @flags: or into request flags;
228 * @resid: optional residual length
229 *
230 * returns the req->errors value which is the scsi_cmnd result
231 * field.
232 */
233int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
234 int data_direction, void *buffer, unsigned bufflen,
235 unsigned char *sense, int timeout, int retries, u64 flags,
236 int *resid)
237{
238 return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
239 timeout, retries, flags, 0, resid);
240}
230EXPORT_SYMBOL(scsi_execute); 241EXPORT_SYMBOL(scsi_execute);
231 242
232int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, 243int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
233 int data_direction, void *buffer, unsigned bufflen, 244 int data_direction, void *buffer, unsigned bufflen,
234 struct scsi_sense_hdr *sshdr, int timeout, int retries, 245 struct scsi_sense_hdr *sshdr, int timeout, int retries,
235 int *resid, u64 flags) 246 int *resid, u64 flags, req_flags_t rq_flags)
236{ 247{
237 char *sense = NULL; 248 char *sense = NULL;
238 int result; 249 int result;
@@ -242,8 +253,8 @@ int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
242 if (!sense) 253 if (!sense)
243 return DRIVER_ERROR << 24; 254 return DRIVER_ERROR << 24;
244 } 255 }
245 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 256 result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
246 sense, timeout, retries, flags, resid); 257 sense, timeout, retries, flags, rq_flags, resid);
247 if (sshdr) 258 if (sshdr)
248 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 259 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
249 260
@@ -813,7 +824,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
813 */ 824 */
814 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) 825 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
815 ; 826 ;
816 else if (!(req->cmd_flags & REQ_QUIET)) 827 else if (!(req->rq_flags & RQF_QUIET))
817 scsi_print_sense(cmd); 828 scsi_print_sense(cmd);
818 result = 0; 829 result = 0;
819 /* BLOCK_PC may have set error */ 830 /* BLOCK_PC may have set error */
@@ -943,7 +954,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
943 switch (action) { 954 switch (action) {
944 case ACTION_FAIL: 955 case ACTION_FAIL:
945 /* Give up and fail the remainder of the request */ 956 /* Give up and fail the remainder of the request */
946 if (!(req->cmd_flags & REQ_QUIET)) { 957 if (!(req->rq_flags & RQF_QUIET)) {
947 static DEFINE_RATELIMIT_STATE(_rs, 958 static DEFINE_RATELIMIT_STATE(_rs,
948 DEFAULT_RATELIMIT_INTERVAL, 959 DEFAULT_RATELIMIT_INTERVAL,
949 DEFAULT_RATELIMIT_BURST); 960 DEFAULT_RATELIMIT_BURST);
@@ -972,7 +983,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
972 * A new command will be prepared and issued. 983 * A new command will be prepared and issued.
973 */ 984 */
974 if (q->mq_ops) { 985 if (q->mq_ops) {
975 cmd->request->cmd_flags &= ~REQ_DONTPREP; 986 cmd->request->rq_flags &= ~RQF_DONTPREP;
976 scsi_mq_uninit_cmd(cmd); 987 scsi_mq_uninit_cmd(cmd);
977 scsi_mq_requeue_cmd(cmd); 988 scsi_mq_requeue_cmd(cmd);
978 } else { 989 } else {
@@ -1234,7 +1245,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1234 /* 1245 /*
1235 * If the devices is blocked we defer normal commands. 1246 * If the devices is blocked we defer normal commands.
1236 */ 1247 */
1237 if (!(req->cmd_flags & REQ_PREEMPT)) 1248 if (!(req->rq_flags & RQF_PREEMPT))
1238 ret = BLKPREP_DEFER; 1249 ret = BLKPREP_DEFER;
1239 break; 1250 break;
1240 default: 1251 default:
@@ -1243,7 +1254,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1243 * special commands. In particular any user initiated 1254 * special commands. In particular any user initiated
1244 * command is not allowed. 1255 * command is not allowed.
1245 */ 1256 */
1246 if (!(req->cmd_flags & REQ_PREEMPT)) 1257 if (!(req->rq_flags & RQF_PREEMPT))
1247 ret = BLKPREP_KILL; 1258 ret = BLKPREP_KILL;
1248 break; 1259 break;
1249 } 1260 }
@@ -1279,7 +1290,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1279 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1290 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1280 break; 1291 break;
1281 default: 1292 default:
1282 req->cmd_flags |= REQ_DONTPREP; 1293 req->rq_flags |= RQF_DONTPREP;
1283 } 1294 }
1284 1295
1285 return ret; 1296 return ret;
@@ -1736,7 +1747,7 @@ static void scsi_request_fn(struct request_queue *q)
1736 * we add the dev to the starved list so it eventually gets 1747 * we add the dev to the starved list so it eventually gets
1737 * a run when a tag is freed. 1748 * a run when a tag is freed.
1738 */ 1749 */
1739 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { 1750 if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
1740 spin_lock_irq(shost->host_lock); 1751 spin_lock_irq(shost->host_lock);
1741 if (list_empty(&sdev->starved_entry)) 1752 if (list_empty(&sdev->starved_entry))
1742 list_add_tail(&sdev->starved_entry, 1753 list_add_tail(&sdev->starved_entry,
@@ -1903,11 +1914,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
1903 goto out_dec_target_busy; 1914 goto out_dec_target_busy;
1904 1915
1905 1916
1906 if (!(req->cmd_flags & REQ_DONTPREP)) { 1917 if (!(req->rq_flags & RQF_DONTPREP)) {
1907 ret = prep_to_mq(scsi_mq_prep_fn(req)); 1918 ret = prep_to_mq(scsi_mq_prep_fn(req));
1908 if (ret) 1919 if (ret)
1909 goto out_dec_host_busy; 1920 goto out_dec_host_busy;
1910 req->cmd_flags |= REQ_DONTPREP; 1921 req->rq_flags |= RQF_DONTPREP;
1911 } else { 1922 } else {
1912 blk_mq_start_request(req); 1923 blk_mq_start_request(req);
1913 } 1924 }
@@ -1952,7 +1963,7 @@ out:
1952 * we hit an error, as we will never see this command 1963 * we hit an error, as we will never see this command
1953 * again. 1964 * again.
1954 */ 1965 */
1955 if (req->cmd_flags & REQ_DONTPREP) 1966 if (req->rq_flags & RQF_DONTPREP)
1956 scsi_mq_uninit_cmd(cmd); 1967 scsi_mq_uninit_cmd(cmd);
1957 break; 1968 break;
1958 default: 1969 default:
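The scsi_lib.c refactor is a thin-wrapper pattern: the exported scsi_execute() keeps its old prototype and forwards to a new internal __scsi_execute() that additionally takes the request-only flags, so existing callers are untouched while scsi_execute_req_flags() can pass RQF_* bits separately from the u64 cmd_flags. A minimal user-space sketch of the same shape (the names below are local stand-ins, not the kernel symbols):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;

/* core helper: takes both the shared flags and the request-only flags */
static int core_execute(uint64_t flags, req_flags_t rq_flags)
{
	printf("cmd_flags=%#llx rq_flags=%#x\n",
	       (unsigned long long)flags, (unsigned)rq_flags);
	return 0;
}

/* old entry point keeps its signature and passes no request-only flags */
static int execute(uint64_t flags)
{
	return core_execute(flags, 0);
}

/* the *_flags variant grows a req_flags_t parameter instead of abusing u64 */
static int execute_flags(uint64_t flags, req_flags_t rq_flags)
{
	return core_execute(flags, rq_flags);
}

int main(void)
{
	execute(0);                 /* legacy caller, unchanged */
	execute_flags(0, 1u << 15); /* e.g. an RQF_PM-style request-only bit */
	return 0;
}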
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b9618ffca829..cef1f78031d4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1520,7 +1520,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
1520 */ 1520 */
1521 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, 1521 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
1522 &sshdr, timeout, SD_MAX_RETRIES, 1522 &sshdr, timeout, SD_MAX_RETRIES,
1523 NULL, REQ_PM); 1523 NULL, 0, RQF_PM);
1524 if (res == 0) 1524 if (res == 0)
1525 break; 1525 break;
1526 } 1526 }
@@ -1879,7 +1879,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1879 1879
1880 good_bytes = 0; 1880 good_bytes = 0;
1881 req->__data_len = blk_rq_bytes(req); 1881 req->__data_len = blk_rq_bytes(req);
1882 req->cmd_flags |= REQ_QUIET; 1882 req->rq_flags |= RQF_QUIET;
1883 } 1883 }
1884 } 1884 }
1885 } 1885 }
@@ -3278,7 +3278,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
3278 return -ENODEV; 3278 return -ENODEV;
3279 3279
3280 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 3280 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
3281 SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); 3281 SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
3282 if (res) { 3282 if (res) {
3283 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3283 sd_print_result(sdkp, "Start/Stop Unit failed", res);
3284 if (driver_byte(res) & DRIVER_SENSE) 3284 if (driver_byte(res) & DRIVER_SENSE)
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index d5b3bd915d9e..394ab490919c 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -348,7 +348,7 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
348 * this case, so be quiet about the error. 348 * this case, so be quiet about the error.
349 */ 349 */
350 if (req_op(rq) == REQ_OP_ZONE_RESET) 350 if (req_op(rq) == REQ_OP_ZONE_RESET)
351 rq->cmd_flags |= REQ_QUIET; 351 rq->rq_flags |= RQF_QUIET;
352 break; 352 break;
353 case 0x21: 353 case 0x21:
354 /* 354 /*
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..3bc46a4abd43 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -546,7 +546,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
546 return DRIVER_ERROR << 24; 546 return DRIVER_ERROR << 24;
547 547
548 blk_rq_set_block_pc(req); 548 blk_rq_set_block_pc(req);
549 req->cmd_flags |= REQ_QUIET; 549 req->rq_flags |= RQF_QUIET;
550 550
551 mdata->null_mapped = 1; 551 mdata->null_mapped = 1;
552 552
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 05c745663c10..cf549871c1ee 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5590,7 +5590,7 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
5590 5590
5591 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, 5591 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
5592 SCSI_SENSE_BUFFERSIZE, NULL, 5592 SCSI_SENSE_BUFFERSIZE, NULL,
5593 msecs_to_jiffies(1000), 3, NULL, REQ_PM); 5593 msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
5594 if (ret) 5594 if (ret)
5595 pr_err("%s: failed with err %d\n", __func__, ret); 5595 pr_err("%s: failed with err %d\n", __func__, ret);
5596 5596
@@ -5652,11 +5652,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
5652 5652
5653 /* 5653 /*
5654 * Current function would be generally called from the power management 5654 * Current function would be generally called from the power management
5655 * callbacks hence set the REQ_PM flag so that it doesn't resume the 5655 * callbacks hence set the RQF_PM flag so that it doesn't resume the
5656 * already suspended childs. 5656 * already suspended childs.
5657 */ 5657 */
5658 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 5658 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
5659 START_STOP_TIMEOUT, 0, NULL, REQ_PM); 5659 START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
5660 if (ret) { 5660 if (ret) {
5661 sdev_printk(KERN_WARNING, sdp, 5661 sdev_printk(KERN_WARNING, sdp,
5662 "START_STOP failed for power mode: %d, result %x\n", 5662 "START_STOP failed for power mode: %d, result %x\n",
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 6df722de2e22..ec69a8fe3b29 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -167,26 +167,6 @@ enum rq_flag_bits {
167 __REQ_PREFLUSH, /* request for cache flush */ 167 __REQ_PREFLUSH, /* request for cache flush */
168 __REQ_RAHEAD, /* read ahead, can fail anytime */ 168 __REQ_RAHEAD, /* read ahead, can fail anytime */
169 169
170 /* request only flags */
171 __REQ_SORTED, /* elevator knows about this request */
172 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
173 __REQ_STARTED, /* drive already may have started this one */
174 __REQ_DONTPREP, /* don't call prep for this one */
175 __REQ_QUEUED, /* uses queueing */
176 __REQ_ELVPRIV, /* elevator private data attached */
177 __REQ_FAILED, /* set if the request failed */
178 __REQ_QUIET, /* don't worry about errors */
179 __REQ_PREEMPT, /* set for "ide_preempt" requests and also
180 for requests for which the SCSI "quiesce"
181 state must be ignored. */
182 __REQ_ALLOCED, /* request came from our alloc pool */
183 __REQ_COPY_USER, /* contains copies of user pages */
184 __REQ_FLUSH_SEQ, /* request for flush sequence */
185 __REQ_IO_STAT, /* account I/O stat */
186 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
187 __REQ_PM, /* runtime pm request */
188 __REQ_HASHED, /* on IO scheduler merge hash */
189 __REQ_MQ_INFLIGHT, /* track inflight for MQ */
190 __REQ_NR_BITS, /* stops here */ 170 __REQ_NR_BITS, /* stops here */
191}; 171};
192 172
@@ -208,29 +188,12 @@ enum rq_flag_bits {
208 188
209/* This mask is used for both bio and request merge checking */ 189/* This mask is used for both bio and request merge checking */
210#define REQ_NOMERGE_FLAGS \ 190#define REQ_NOMERGE_FLAGS \
211 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) 191 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
212 192
213#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 193#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
214#define REQ_SORTED (1ULL << __REQ_SORTED)
215#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
216#define REQ_FUA (1ULL << __REQ_FUA) 194#define REQ_FUA (1ULL << __REQ_FUA)
217#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 195#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
218#define REQ_STARTED (1ULL << __REQ_STARTED)
219#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
220#define REQ_QUEUED (1ULL << __REQ_QUEUED)
221#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
222#define REQ_FAILED (1ULL << __REQ_FAILED)
223#define REQ_QUIET (1ULL << __REQ_QUIET)
224#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
225#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
226#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
227#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 196#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
228#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
229#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
230#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
231#define REQ_PM (1ULL << __REQ_PM)
232#define REQ_HASHED (1ULL << __REQ_HASHED)
233#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
234 197
235enum req_op { 198enum req_op {
236 REQ_OP_READ, 199 REQ_OP_READ,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 90097dd8b8ed..b4415feac679 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -78,6 +78,50 @@ enum rq_cmd_type_bits {
78 REQ_TYPE_DRV_PRIV, /* driver defined types from here */ 78 REQ_TYPE_DRV_PRIV, /* driver defined types from here */
79}; 79};
80 80
81/*
82 * request flags */
83typedef __u32 __bitwise req_flags_t;
84
85/* elevator knows about this request */
86#define RQF_SORTED ((__force req_flags_t)(1 << 0))
87/* drive already may have started this one */
88#define RQF_STARTED ((__force req_flags_t)(1 << 1))
89/* uses tagged queueing */
90#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
91/* may not be passed by ioscheduler */
92#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
93/* request for flush sequence */
94#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
95/* merge of different types, fail separately */
96#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
97/* track inflight for MQ */
98#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
99/* don't call prep for this one */
100#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
101/* set for "ide_preempt" requests and also for requests for which the SCSI
102 "quiesce" state must be ignored. */
103#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
104/* contains copies of user pages */
105#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
106/* vaguely specified driver internal error. Ignored by the block layer */
107#define RQF_FAILED ((__force req_flags_t)(1 << 10))
108/* don't warn about errors */
109#define RQF_QUIET ((__force req_flags_t)(1 << 11))
110/* elevator private data attached */
111#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
112/* account I/O stat */
113#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
114/* request came from our alloc pool */
115#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
116/* runtime pm request */
117#define RQF_PM ((__force req_flags_t)(1 << 15))
118/* on IO scheduler merge hash */
119#define RQF_HASHED ((__force req_flags_t)(1 << 16))
120
121/* flags that prevent us from merging requests: */
122#define RQF_NOMERGE_FLAGS \
123 (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ)
124
81#define BLK_MAX_CDB 16 125#define BLK_MAX_CDB 16
82 126
83/* 127/*
@@ -99,6 +143,7 @@ struct request {
99 int cpu; 143 int cpu;
100 unsigned cmd_type; 144 unsigned cmd_type;
101 u64 cmd_flags; 145 u64 cmd_flags;
146 req_flags_t rq_flags;
102 unsigned long atomic_flags; 147 unsigned long atomic_flags;
103 148
104 /* the following two fields are internal, NEVER access directly */ 149 /* the following two fields are internal, NEVER access directly */
@@ -648,7 +693,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
648 REQ_FAILFAST_DRIVER)) 693 REQ_FAILFAST_DRIVER))
649 694
650#define blk_account_rq(rq) \ 695#define blk_account_rq(rq) \
651 (((rq)->cmd_flags & REQ_STARTED) && \ 696 (((rq)->rq_flags & RQF_STARTED) && \
652 ((rq)->cmd_type == REQ_TYPE_FS)) 697 ((rq)->cmd_type == REQ_TYPE_FS))
653 698
654#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 699#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
@@ -740,6 +785,8 @@ static inline bool rq_mergeable(struct request *rq)
740 785
741 if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 786 if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
742 return false; 787 return false;
788 if (rq->rq_flags & RQF_NOMERGE_FLAGS)
789 return false;
743 790
744 return true; 791 return true;
745} 792}
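The net effect of the header changes is easiest to see in isolation: cmd_flags keeps the REQ_* bits shared with bios, the new rq_flags word carries request-only RQF_* state, and rq_mergeable() now has to consult both no-merge masks. A standalone sketch of that split (user-space C with local stand-ins for struct request and the REQ_* mask, not the kernel headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;            /* mirrors the new __u32 __bitwise type */

#define RQF_STARTED       ((req_flags_t)(1 << 1))
#define RQF_SOFTBARRIER   ((req_flags_t)(1 << 3))
#define RQF_FLUSH_SEQ     ((req_flags_t)(1 << 4))
#define RQF_NOMERGE_FLAGS (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ)

/* stand-in for REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA; not the real bit layout */
#define REQ_NOMERGE_FLAGS (0x7ull)

struct request {                          /* reduced stand-in for the kernel struct */
	uint64_t    cmd_flags;            /* shared bio/request REQ_* semantics */
	req_flags_t rq_flags;             /* request-only RQF_* state */
};

static bool rq_mergeable(const struct request *rq)
{
	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)   /* new, separate check */
		return false;
	return true;
}

int main(void)
{
	struct request rq = { .cmd_flags = 0, .rq_flags = RQF_STARTED };

	printf("mergeable: %d\n", (int)rq_mergeable(&rq));   /* prints 0 */
	return 0;
}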
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 8a9563144890..8990e580b278 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -414,14 +414,14 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
414extern int scsi_execute_req_flags(struct scsi_device *sdev, 414extern int scsi_execute_req_flags(struct scsi_device *sdev,
415 const unsigned char *cmd, int data_direction, void *buffer, 415 const unsigned char *cmd, int data_direction, void *buffer,
416 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 416 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
417 int retries, int *resid, u64 flags); 417 int retries, int *resid, u64 flags, req_flags_t rq_flags);
418static inline int scsi_execute_req(struct scsi_device *sdev, 418static inline int scsi_execute_req(struct scsi_device *sdev,
419 const unsigned char *cmd, int data_direction, void *buffer, 419 const unsigned char *cmd, int data_direction, void *buffer,
420 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 420 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
421 int retries, int *resid) 421 int retries, int *resid)
422{ 422{
423 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, 423 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
424 bufflen, sshdr, timeout, retries, resid, 0); 424 bufflen, sshdr, timeout, retries, resid, 0, 0);
425} 425}
426extern void sdev_disable_disk_events(struct scsi_device *sdev); 426extern void sdev_disable_disk_events(struct scsi_device *sdev);
427extern void sdev_enable_disk_events(struct scsi_device *sdev); 427extern void sdev_enable_disk_events(struct scsi_device *sdev);