author     Linus Torvalds <torvalds@linux-foundation.org>  2015-11-10 20:23:49 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-10 20:23:49 -0500
commit     3419b45039c6b799c974a8019361c045e7ca232c (patch)
tree       36a63602036cc50f34fadcbd5d5d8fca94e44297 /block/blk-mq.c
parent     01504f5e9e071f1dde1062e3be15f54d4555308f (diff)
parent     c1c534609fe8a859f9c8108a5591e6e8a97e34d1 (diff)
Merge branch 'for-4.4/io-poll' of git://git.kernel.dk/linux-block
Pull block IO poll support from Jens Axboe:
 "Various groups have been doing experimentation around IO polling for
  (really) fast devices. The code has been reviewed and has been sitting
  on the side for a few releases, but this is now good enough for
  coordinated benchmarking and further experimentation.

  Currently O_DIRECT sync read/write are supported. A framework is in the
  works that allows scalable stats tracking so we can auto-tune this. And
  we'll add libaio support as well soon. For now, it's an opt-in feature
  for test purposes"

* 'for-4.4/io-poll' of git://git.kernel.dk/linux-block:
  direct-io: be sure to assign dio->bio_bdev for both paths
  directio: add block polling support
  NVMe: add blk polling support
  block: add block polling support
  blk-mq: return tag/queue combo in the make_request_fn handlers
  block: change ->make_request_fn() and users to return a queue cookie
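As a rough illustration of the opt-in usage described in the pull message, the sketch below issues a synchronous O_DIRECT read from userspace, which is the path this series can complete via polling. The device path and the io_poll sysfs knob mentioned in the comments are illustrative assumptions for a test setup, not part of this diff.

/* Minimal sketch: sync O_DIRECT read exercising the polled completion path. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/nvme0n1";	/* example device */
	void *buf;
	ssize_t ret;
	int fd;

	/* O_DIRECT requires an aligned buffer; 4096 covers common block sizes */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	/* Polling is opt-in, e.g. echo 1 > /sys/block/<dev>/queue/io_poll (assumed knob) */
	fd = open(dev, O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A synchronous O_DIRECT read; with polling enabled, completion is polled */
	ret = pread(fd, buf, 4096, 0);
	if (ret < 0)
		perror("pread");
	else
		printf("read %zd bytes\n", ret);

	close(fd);
	free(buf);
	return ret == 4096 ? 0 : 1;
}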
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--   block/blk-mq.c   59
1 file changed, 36 insertions(+), 23 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 694f8703f83c..86bd5b25288e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1198,7 +1198,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	return rq;
 }
 
-static int blk_mq_direct_issue_request(struct request *rq)
+static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 {
 	int ret;
 	struct request_queue *q = rq->q;
@@ -1209,6 +1209,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
 		.list = NULL,
 		.last = 1
 	};
+	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
 
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other
@@ -1216,18 +1217,21 @@ static int blk_mq_direct_issue_request(struct request *rq)
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK)
+	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+		*cookie = new_cookie;
 		return 0;
-	else {
-		__blk_mq_requeue_request(rq);
+	}
 
-		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-			return 0;
-		}
-		return -1;
+	__blk_mq_requeue_request(rq);
+
+	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+		*cookie = BLK_QC_T_NONE;
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+		return 0;
 	}
+
+	return -1;
 }
 
 /*
@@ -1235,7 +1239,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
  * but will attempt to bypass the hctx queueing if we can go straight to
  * hardware for SYNC IO.
  */
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1244,12 +1248,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
@@ -1257,13 +1262,15 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count,
 					   &same_queue_rq))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
+
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1302,11 +1309,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
-			return;
-		if (!blk_mq_direct_issue_request(old_rq))
-			return;
+			goto done;
+		if (!blk_mq_direct_issue_request(old_rq, &cookie))
+			goto done;
 		blk_mq_insert_request(old_rq, false, true, true);
-		return;
+		goto done;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,13 +1327,15 @@ run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
+done:
+	return cookie;
 }
 
 /*
  * Single hardware queue variant. This will attempt to use any per-process
  * plug for merging and IO deferral.
  */
-static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1334,23 +1343,26 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
 	struct request *rq;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
+
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1374,7 +1386,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		}
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 		blk_mq_put_ctx(data.ctx);
-		return;
+		return cookie;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1389,6 +1401,7 @@ run_queue:
 	}
 
 	blk_mq_put_ctx(data.ctx);
+	return cookie;
 }
 
 /*