Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e6c05a97ee2b..3a78b00edd71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -642,7 +642,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	if (rq->cmd_flags & REQ_ELVPRIV) {
 		elv_put_request(q, rq);
 		if (rq->elv.icq)
-			put_io_context(rq->elv.icq->ioc, q);
+			put_io_context(rq->elv.icq->ioc);
 	}
 
 	mempool_free(rq, q->rq.rq_pool);
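Note: the only change in this hunk is the put_io_context() signature; the caller now simply drops its io_context reference, without the request_queue argument that was previously threaded through for locking purposes. As a rough userspace model of the call's intent (stub type and field names are illustrative only; the kernel version is atomic and may defer the actual teardown):

#include <stdlib.h>

struct io_context {
	int nr_refs;			/* stand-in for the kernel's atomic refcount */
};

/* Dropping the last reference releases the context. */
static void put_io_context(struct io_context *ioc)
{
	if (ioc && --ioc->nr_refs == 0)
		free(ioc);		/* the kernel frees via its own deferred paths */
}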
@@ -872,13 +872,15 @@ retry:
 	spin_unlock_irq(q->queue_lock);
 
 	/* create icq if missing */
-	if (unlikely(et->icq_cache && !icq))
+	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
 		icq = ioc_create_icq(q, gfp_mask);
+		if (!icq)
+			goto fail_icq;
+	}
 
-	/* rqs are guaranteed to have icq on elv_set_request() if requested */
-	if (likely(!et->icq_cache || icq))
-		rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
 
+fail_icq:
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
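This hunk changes the failure semantics: an icq is only created when the request actually needs elevator-private data (REQ_ELVPRIV), and a failed icq allocation now jumps straight to the error path with rq still NULL. That is why the old "!et->icq_cache || icq" guard in front of blk_alloc_request() could be dropped. A self-contained sketch of the resulting control flow (stub types and hypothetical helper names, not the kernel code):

#include <stddef.h>

#define REQ_ELVPRIV	0x1		/* stand-in flag value */

struct io_cq { int unused; };
struct request { struct io_cq *icq; };

static struct io_cq *create_icq_stub(void) { return NULL; }	/* pretend failure */
static struct request *alloc_rq_stub(struct io_cq *icq) { (void)icq; return NULL; }

static struct request *get_request_sketch(unsigned int rw_flags, int has_icq_cache)
{
	struct io_cq *icq = NULL;
	struct request *rq = NULL;

	/* create icq only when elevator-private data is requested */
	if ((rw_flags & REQ_ELVPRIV) && has_icq_cache) {
		icq = create_icq_stub();
		if (!icq)
			goto fail_icq;	/* rq stays NULL */
	}

	rq = alloc_rq_stub(icq);	/* reached only with a usable icq, or none needed */

fail_icq:
	if (!rq) {
		/* undo accounting and fail, as in the original error path */
	}
	return rq;
}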
@@ -1210,7 +1212,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1241,7 +1242,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	drive_stat_acct(req, 0);
-	elv_bio_merged(q, req, bio);
 	return true;
 }
 
@@ -1255,13 +1255,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * on %current's plugged list. Returns %true if merge was successful,
  * otherwise %false.
  *
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock. Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while may have elvpriv data, is not
+ * added on the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside queue lock. Only check basic
+ * merging parameters without querying the elevator.
  */
 static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			       unsigned int *request_count)
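For context on the rewritten comment: plugging batches IO on a per-task list and flushes it to the queue later, so a plug merge happens before the request is ever visible to the elevator. The issuer-side pattern looks roughly like this (kernel-style sketch of the era's plugging API, not code from this file):

struct blk_plug plug;

blk_start_plug(&plug);		/* subsequent submissions collect on current's plug list */
submit_bio(WRITE, bio1);
submit_bio(WRITE, bio2);	/* attempt_plug_merge() may fold this into bio1's request */
blk_finish_plug(&plug);		/* flush the batched requests to the queue */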
@@ -1280,10 +1279,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		(*request_count)++;
 
-		if (rq->q != q)
+		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = elv_try_merge(rq, bio);
+		el_ret = blk_try_merge(rq, bio);
 		if (el_ret == ELEVATOR_BACK_MERGE) {
 			ret = bio_attempt_back_merge(q, rq, bio);
 			if (ret)
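The switch from elv_try_merge() to blk_rq_merge_ok() plus blk_try_merge() is what makes the lockless path safe: both helpers look only at the request and bio themselves. blk_rq_merge_ok() vets general mergeability, and blk_try_merge() then picks the merge direction from sector adjacency, roughly as follows (paraphrased sketch, not a verbatim quote of blk-merge.c):

/* Back merge: bio begins exactly where the request ends.
 * Front merge: bio ends exactly where the request begins.
 * No elevator state is consulted.
 */
static int try_merge_sketch(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}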
@@ -1345,12 +1344,14 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	if (el_ret == ELEVATOR_BACK_MERGE) {
 		if (bio_attempt_back_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_back_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
 		}
 	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
 		if (bio_attempt_front_merge(q, req, bio)) {
+			elv_bio_merged(q, req, bio);
 			if (!attempt_front_merge(q, req))
 				elv_merged_request(q, req, el_ret);
 			goto out_unlock;
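Net effect of the last three hunks: bio_attempt_back_merge() and bio_attempt_front_merge() are shared with the lockless plug-merge path, so they no longer call elv_bio_merged() themselves; blk_queue_bio(), which holds q->queue_lock across elv_merge() and the merge attempt, now issues the callback instead. The elevator is therefore only ever notified about merges performed under the queue lock. Condensed from the surrounding code (abridged, error paths and the front-merge branch omitted):

spin_lock_irq(q->queue_lock);

el_ret = elv_merge(q, &req, bio);
if (el_ret == ELEVATOR_BACK_MERGE && bio_attempt_back_merge(q, req, bio)) {
	elv_bio_merged(q, req, bio);	/* safe: queue lock held */
	if (!attempt_back_merge(q, req))
		elv_merged_request(q, req, el_ret);
}

spin_unlock_irq(q->queue_lock);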