author	Mike Christie <mchristi@redhat.com>	2016-06-05 15:32:11 -0400
committer	Jens Axboe <axboe@fb.com>	2016-06-07 15:41:38 -0400
commit	e6a40b096e284ee11374807eaaab6fd21a3fbabb (patch)
tree	41c8d380184824bdc952133339c6ead998ca2752 /block
parent	4993b77d3ff1d3a887804af2434b7b3dfac4c210 (diff)
block: prepare request creation/destruction code to use REQ_OPs
This patch prepares *_get_request/*_put_request and freed_request to use separate variables for the operation and flags. In the next patches the struct request users will be converted, as was done for bios where the op and flags are set separately.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
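As a rough stand-alone illustration of the split this patch prepares (the constants and helpers below are simplified stand-ins, not the kernel's definitions), the idea is that the operation and the modifier flags are carried separately and only OR'd back together where a combined value is still wanted, e.g. for the rw_is_sync() check:

	#include <stdio.h>

	/* Simplified stand-ins for REQ_OP_READ/REQ_OP_WRITE and REQ_SYNC;
	 * the real kernel values, types and helpers differ. */
	enum { OP_READ = 0, OP_WRITE = 1 };
	#define FLAG_SYNC	(1U << 4)

	/* Mirrors the shape of rw_is_sync(): reads and sync writes are sync. */
	static int rw_is_sync(unsigned int combined)
	{
		return (combined & 1) == OP_READ || (combined & FLAG_SYNC);
	}

	/* After the patch, callers pass op and flags separately and the helper
	 * recombines them only where needed, as freed_request() now does. */
	static void freed_request(int op, unsigned int flags)
	{
		printf("op=%d flags=%#x -> sync=%d\n",
		       op, flags, rw_is_sync(op | flags));
	}

	int main(void)
	{
		freed_request(OP_WRITE, 0);		/* async write */
		freed_request(OP_WRITE, FLAG_SYNC);	/* sync write */
		freed_request(OP_READ, 0);		/* reads count as sync */
		return 0;
	}

The callers in the diff below follow the same pattern, e.g. get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO) in blk_queue_bio().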
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	54
1 file changed, 29 insertions, 25 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c45254ccb01..a68dc0709299 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(flags);
+	int sync = rw_is_sync(op | flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-				     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+				     int op_flags, struct bio *bio,
+				     gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, rw_flags);
+	may_queue = elv_may_queue(q, op | op_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	/*
 	 * Decide whether the new request will be managed by elevator. If
-	 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+	 * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
 	 * prevent the current elevator from being destroyed until the new
 	 * request is freed. This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	 * it will be created after releasing queue_lock.
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		rw_flags |= REQ_ELVPRIV;
+		op_flags |= REQ_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
 
 	/* init elvpriv */
-	if (rw_flags & REQ_ELVPRIV) {
+	if (op_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
 			if (ioc)
 				icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1178,7 +1180,7 @@ out:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, op);
 	return rq;
 
 fail_elvpriv:
@@ -1208,7 +1210,7 @@ fail_alloc:
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(rl, rw_flags);
+	freed_request(rl, op, op_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ rq_starved:
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1240,18 @@ rq_starved:
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+				   int op_flags, struct bio *bio,
+				   gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, rw_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1260,7 +1264,7 @@ retry:
 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				  TASK_UNINTERRUPTIBLE);
 
-	trace_block_sleeprq(q, bio, rw_flags & 1);
+	trace_block_sleeprq(q, bio, op);
 
 	spin_unlock_irq(q->queue_lock);
 	io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, 0, NULL, gfp_mask);
 	if (IS_ERR(rq))
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
 		unsigned int flags = req->cmd_flags;
+		int op = req_op(req);
 		struct request_list *rl = blk_rq_rl(req);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
-		freed_request(rl, flags);
+		freed_request(rl, op, flags);
 		blk_put_rl(rl);
 	}
 }
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
-	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
 
@@ -1772,7 +1777,6 @@ get_rq:
 	 * but we need to set it earlier to expose the sync flag to the
 	 * rq allocator and io schedulers.
 	 */
-	rw_flags = bio_data_dir(bio);
 	if (sync)
 		rw_flags |= REQ_SYNC;
 
@@ -1780,7 +1784,7 @@ get_rq:
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request(q, rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);