 arch/m68k/emu/nfblock.c                     |  3
 arch/powerpc/sysdev/axonram.c               |  5
 arch/xtensa/platforms/iss/simdisk.c         |  3
 block/blk-core.c                            | 67
 block/blk-mq-sysfs.c                        | 10
 block/blk-mq.c                              | 59
 block/blk-sysfs.c                           | 35
 drivers/block/brd.c                         |  5
 drivers/block/drbd/drbd_int.h               |  2
 drivers/block/drbd/drbd_req.c               |  3
 drivers/block/null_blk.c                    |  3
 drivers/block/pktcdvd.c                     |  9
 drivers/block/ps3vram.c                     |  6
 drivers/block/rsxx/dev.c                    |  5
 drivers/block/umem.c                        |  4
 drivers/block/zram/zram_drv.c               |  5
 drivers/lightnvm/rrpc.c                     |  9
 drivers/md/bcache/request.c                 | 11
 drivers/md/dm.c                             |  6
 drivers/md/md.c                             |  8
 drivers/nvdimm/blk.c                        |  3
 drivers/nvdimm/btt.c                        |  3
 drivers/nvdimm/pmem.c                       |  3
 drivers/nvme/host/pci.c                     | 32
 drivers/s390/block/dcssblk.c                |  8
 drivers/s390/block/xpram.c                  |  5
 drivers/staging/lustre/lustre/llite/lloop.c |  5
 fs/direct-io.c                              | 14
 include/linux/blk-mq.h                      | 10
 include/linux/blk_types.h                   | 24
 include/linux/blkdev.h                      |  7
 include/linux/fs.h                          |  2
 include/linux/lightnvm.h                    |  2
 33 files changed, 286 insertions(+), 90 deletions(-)
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index f2a00c591bf7..e9110b9b8bcd 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
 	struct gendisk *disk;
 };
 
-static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec bvec;
@@ -77,6 +77,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 		sec += len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
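The hunk above is the template for the long tail of driver conversions below: every bio-based ->make_request_fn() gains the blk_qc_t return type, and drivers that have no way to poll for a specific I/O simply return BLK_QC_T_NONE on every exit path. Only the blk-mq paths in block/blk-mq.c (and the NVMe driver underneath them) hand back a cookie that actually identifies an in-flight request.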
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index d2b79bc336c1..7a399b4d60a0 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -103,7 +103,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static void
+static blk_qc_t
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -120,7 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_for_each_segment(vec, bio, iter) {
 		if (unlikely(phys_mem + vec.bv_len > phys_end)) {
 			bio_io_error(bio);
-			return;
+			return BLK_QC_T_NONE;
 		}
 
 		user_mem = page_address(vec.bv_page) + vec.bv_offset;
@@ -133,6 +133,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 		transfered += vec.bv_len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 /**
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index fa84ca990caa..3c3ace2c46b6 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -101,7 +101,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 	spin_unlock(&dev->lock);
 }
 
-static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct simdisk *dev = q->queuedata;
 	struct bio_vec bvec;
@@ -119,6 +119,7 @@ static void simdisk_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int simdisk_open(struct block_device *bdev, fmode_t mode)
diff --git a/block/blk-core.c b/block/blk-core.c
index 590cca21c24a..2bbf08cd2900 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -809,7 +809,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
@@ -1678,7 +1678,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1698,7 +1698,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio->bi_error = -EIO;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
@@ -1713,7 +1713,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	if (!blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
@@ -1791,6 +1791,8 @@ get_rq:
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 /*
@@ -1996,12 +1998,13 @@ end_io:
  * a lower device by calling into generic_make_request recursively, which
  * means the bio should NOT be touched after the call to ->make_request_fn.
  */
-void generic_make_request(struct bio *bio)
+blk_qc_t generic_make_request(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack;
+	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
-		return;
+		goto out;
 
 	/*
 	 * We only want one ->make_request_fn to be active at a time, else
@@ -2015,7 +2018,7 @@ void generic_make_request(struct bio *bio)
 	 */
 	if (current->bio_list) {
 		bio_list_add(current->bio_list, bio);
-		return;
+		goto out;
 	}
 
 	/* following loop may be a bit non-obvious, and so deserves some
@@ -2040,7 +2043,7 @@ void generic_make_request(struct bio *bio)
 
 		if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
 
-			q->make_request_fn(q, bio);
+			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
 
@@ -2053,6 +2056,9 @@ void generic_make_request(struct bio *bio)
 		}
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
+
+out:
+	return ret;
 }
 EXPORT_SYMBOL(generic_make_request);
 
@@ -2066,7 +2072,7 @@ EXPORT_SYMBOL(generic_make_request);
  * interfaces; @bio must be presetup and ready for I/O.
  *
  */
-void submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(int rw, struct bio *bio)
 {
 	bio->bi_rw |= rw;
 
@@ -2100,7 +2106,7 @@ void submit_bio(int rw, struct bio *bio)
 		}
 	}
 
-	generic_make_request(bio);
+	return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
 
@@ -3306,6 +3312,47 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
+bool blk_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct blk_plug *plug;
+	long state;
+
+	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		return false;
+
+	plug = current->plug;
+	if (plug)
+		blk_flush_plug_list(plug, false);
+
+	state = current->state;
+	while (!need_resched()) {
+		unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
+		struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
+		int ret;
+
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
+		if (ret > 0) {
+			hctx->poll_success++;
+			set_current_state(TASK_RUNNING);
+			return true;
+		}
+
+		if (signal_pending_state(state, current))
+			set_current_state(TASK_RUNNING);
+
+		if (current->state == TASK_RUNNING)
+			return true;
+		if (ret < 0)
+			break;
+		cpu_relax();
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_PM
 /**
  * blk_pm_runtime_init - Block layer runtime PM initialization routine
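blk_poll() above is the consumer of the cookie that generic_make_request() and submit_bio() now return: given a cookie, it spins on the matching hardware queue until that tag completes, the task needs rescheduling, or a signal arrives. A minimal sketch of a synchronous submitter using the pair (modelled on the fs/direct-io.c hunk further down, not code from this patch; "done" is a hypothetical flag that the bio's ->bi_end_io handler would set):

	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = submit_bio(READ, bio);

	while (!READ_ONCE(done)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		if (!blk_poll(q, cookie))	/* false: queue can't be polled */
			io_schedule();		/* fall back to sleeping */
	}
	__set_current_state(TASK_RUNNING);

Note that blk_poll() itself checks for a blk-mq queue with a ->poll hook and an enabled QUEUE_FLAG_POLL, so a caller can pass any cookie unconditionally and still get the sleeping behaviour on unsupported queues.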
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6f57a110289c..1cf18784c5cf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -174,6 +174,11 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
 	return ret;
 }
 
+static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+	return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
+}
+
 static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
 					   char *page)
 {
@@ -295,6 +300,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
 	.attr = {.name = "cpu_list", .mode = S_IRUGO },
 	.show = blk_mq_hw_sysfs_cpus_show,
 };
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
+	.attr = {.name = "io_poll", .mode = S_IRUGO },
+	.show = blk_mq_hw_sysfs_poll_show,
+};
 
 static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_queued.attr,
@@ -304,6 +313,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_tags.attr,
 	&blk_mq_hw_sysfs_cpus.attr,
 	&blk_mq_hw_sysfs_active.attr,
+	&blk_mq_hw_sysfs_poll.attr,
 	NULL,
 };
 
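With this attribute wired up, each hardware context exports its polling hit rate (the format string above yields e.g. "invoked=120, success=118") through the per-hctx mq/<n>/io_poll file in the queue's sysfs directory; a success count far below the invoked count suggests the poll loop is mostly spinning on still-incomplete commands.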
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 694f8703f83c..86bd5b25288e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1198,7 +1198,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	return rq;
 }
 
-static int blk_mq_direct_issue_request(struct request *rq)
+static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 {
 	int ret;
 	struct request_queue *q = rq->q;
@@ -1209,6 +1209,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
 		.list = NULL,
 		.last = 1
 	};
+	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
 
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other
@@ -1216,18 +1217,21 @@ static int blk_mq_direct_issue_request(struct request *rq)
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK)
+	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+		*cookie = new_cookie;
 		return 0;
-	else {
-		__blk_mq_requeue_request(rq);
+	}
 
-		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-			return 0;
-		}
-		return -1;
+	__blk_mq_requeue_request(rq);
+
+	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+		*cookie = BLK_QC_T_NONE;
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+		return 0;
 	}
+
+	return -1;
 }
 
 /*
@@ -1235,7 +1239,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
  * but will attempt to bypass the hctx queueing if we can go straight to
  * hardware for SYNC IO.
  */
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1244,12 +1248,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
@@ -1257,13 +1262,15 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count,
 					   &same_queue_rq))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
+
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1302,11 +1309,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
-			return;
-		if (!blk_mq_direct_issue_request(old_rq))
-			return;
+			goto done;
+		if (!blk_mq_direct_issue_request(old_rq, &cookie))
+			goto done;
 		blk_mq_insert_request(old_rq, false, true, true);
-		return;
+		goto done;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,13 +1327,15 @@ run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
+done:
+	return cookie;
 }
 
 /*
  * Single hardware queue variant. This will attempt to use any per-process
  * plug for merging and IO deferral.
  */
-static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1334,23 +1343,26 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
 	struct request *rq;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
+
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1374,7 +1386,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		}
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 		blk_mq_put_ctx(data.ctx);
-		return;
+		return cookie;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1389,6 +1401,7 @@ run_queue:
 	}
 
 	blk_mq_put_ctx(data.ctx);
+	return cookie;
 }
 
 /*
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 31849e328b45..565b8dac5782 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -317,6 +317,34 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
+static ssize_t queue_poll_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
+}
+
+static ssize_t queue_poll_store(struct request_queue *q, const char *page,
+				size_t count)
+{
+	unsigned long poll_on;
+	ssize_t ret;
+
+	if (!q->mq_ops || !q->mq_ops->poll)
+		return -EINVAL;
+
+	ret = queue_var_store(&poll_on, page, count);
+	if (ret < 0)
+		return ret;
+
+	spin_lock_irq(q->queue_lock);
+	if (poll_on)
+		queue_flag_set(QUEUE_FLAG_POLL, q);
+	else
+		queue_flag_clear(QUEUE_FLAG_POLL, q);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -442,6 +470,12 @@ static struct queue_sysfs_entry queue_random_entry = {
 	.store = queue_store_random,
 };
 
+static struct queue_sysfs_entry queue_poll_entry = {
+	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_poll_show,
+	.store = queue_poll_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -466,6 +500,7 @@ static struct attribute *default_attrs[] = {
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
 	&queue_random_entry.attr,
+	&queue_poll_entry.attr,
 	NULL,
 };
 
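Polling is therefore opt-in per queue: reading /sys/block/<dev>/queue/io_poll reports whether QUEUE_FLAG_POLL is set, writing 0 or 1 toggles it, and the store handler rejects writes with -EINVAL for any queue that is not blk-mq with a ->poll hook.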
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b9794aeeb878..c9f9c30d6467 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -323,7 +323,7 @@ out:
 	return err;
 }
 
-static void brd_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -358,9 +358,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
 io_error:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 015c6e91b756..e66d453a5f2b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1448,7 +1448,7 @@ extern int proc_details;
 /* drbd_req */
 extern void do_submit(struct work_struct *ws);
 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
-extern void drbd_make_request(struct request_queue *q, struct bio *bio);
+extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 211592682169..3ae2c0086563 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1494,7 +1494,7 @@ void do_submit(struct work_struct *ws)
 	}
 }
 
-void drbd_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
 	unsigned long start_jif;
@@ -1510,6 +1510,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 
 	inc_ap_bio(device);
 	__drbd_make_request(device, bio, start_jif);
+	return BLK_QC_T_NONE;
 }
 
 void request_timer_fn(unsigned long data)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 1c9e4fe5aa44..6255d1c4bba4 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -321,7 +321,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
 	return &nullb->queues[index];
 }
 
-static void null_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	struct nullb *nullb = q->queuedata;
 	struct nullb_queue *nq = nullb_to_queue(nullb);
@@ -331,6 +331,7 @@ static void null_queue_bio(struct request_queue *q, struct bio *bio)
 	cmd->bio = bio;
 
 	null_handle_cmd(cmd);
+	return BLK_QC_T_NONE;
 }
 
 static int null_rq_prep_fn(struct request_queue *q, struct request *req)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2f477d45d6cf..d06c62eccdf0 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2441,7 +2441,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 	}
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2467,7 +2467,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	 */
 	if (bio_data_dir(bio) == READ) {
 		pkt_make_request_read(pd, bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2499,13 +2499,12 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		pkt_make_request_write(q, split);
 	} while (split != bio);
 
-	return;
+	return BLK_QC_T_NONE;
 end_io:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
-
-
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	struct request_queue *q = pd->disk->queue;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index d89fcac59515..56847fcda086 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -598,7 +598,7 @@ out:
 	return next;
 }
 
-static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct ps3_system_bus_device *dev = q->queuedata;
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -614,11 +614,13 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 	spin_unlock_irq(&priv->lock);
 
 	if (busy)
-		return;
+		return BLK_QC_T_NONE;
 
 	do {
 		bio = ps3vram_do_bio(dev, bio);
 	} while (bio);
+
+	return BLK_QC_T_NONE;
 }
 
 static int ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 3163e4cdc2cc..e1b8b7061d2f 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -145,7 +145,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
 	}
 }
 
-static void rsxx_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct rsxx_cardinfo *card = q->queuedata;
 	struct rsxx_bio_meta *bio_meta;
@@ -199,7 +199,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (st)
 		goto queue_err;
 
-	return;
+	return BLK_QC_T_NONE;
 
 queue_err:
 	kmem_cache_free(bio_meta_pool, bio_meta);
@@ -207,6 +207,7 @@ req_err:
 	if (st)
 		bio->bi_error = st;
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 /*----------------- Device Setup -------------------*/
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 04d65790a886..7939b9f87441 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -524,7 +524,7 @@ static int mm_check_plugged(struct cardinfo *card)
 	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
 }
 
-static void mm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
@@ -541,7 +541,7 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 	activate(card);
 	spin_unlock_irq(&card->lock);
 
-	return;
+	return BLK_QC_T_NONE;
 }
 
 static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 81a557c33a1f..47915d736f8d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -894,7 +894,7 @@ out:
 /*
  * Handler function for all zram I/O requests.
  */
-static void zram_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
@@ -911,11 +911,12 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 
 	__zram_make_request(zram, bio);
 	zram_meta_put(zram);
-	return;
+	return BLK_QC_T_NONE;
 put_zram:
 	zram_meta_put(zram);
 error:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static void zram_slot_free_notify(struct block_device *bdev,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 64a888a5e9b3..7ba64c87ba1c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -803,7 +803,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	return NVM_IO_OK;
 }
 
-static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
+static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 {
 	struct rrpc *rrpc = q->queuedata;
 	struct nvm_rq *rqd;
@@ -811,21 +811,21 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		rrpc_discard(rrpc, bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
 	if (!rqd) {
 		pr_err_ratelimited("rrpc: not able to queue bio.");
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	memset(rqd, 0, sizeof(struct nvm_rq));
 
 	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
 	switch (err) {
 	case NVM_IO_OK:
-		return;
+		return BLK_QC_T_NONE;
 	case NVM_IO_ERR:
 		bio_io_error(bio);
 		break;
@@ -841,6 +841,7 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	}
 
 	mempool_free(rqd, rrpc->rq_pool);
+	return BLK_QC_T_NONE;
 }
 
 static void rrpc_requeue(struct work_struct *work)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 8e9877b04637..25fa8445bb24 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -958,7 +958,8 @@ static void cached_dev_nodata(struct closure *cl)
 
 /* Cached devices - read & write stuff */
 
-static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t cached_dev_make_request(struct request_queue *q,
+					struct bio *bio)
 {
 	struct search *s;
 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
@@ -997,6 +998,8 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		else
 			generic_make_request(bio);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1070,7 +1073,8 @@ static void flash_dev_nodata(struct closure *cl)
 	continue_at(cl, search_free, NULL);
 }
 
-static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t flash_dev_make_request(struct request_queue *q,
+				       struct bio *bio)
 {
 	struct search *s;
 	struct closure *cl;
@@ -1093,7 +1097,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		continue_at_nobarrier(&s->cl,
 				      flash_dev_nodata,
 				      bcache_wq);
-		return;
+		return BLK_QC_T_NONE;
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1109,6 +1113,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	continue_at(cl, search_free, NULL);
+	return BLK_QC_T_NONE;
 }
 
 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32440ad5f684..6e15f3565892 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1755,7 +1755,7 @@ static void __split_and_process_bio(struct mapped_device *md,
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static void dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1774,12 +1774,12 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	__split_and_process_bio(md, map, bio);
 	dm_put_live_table(md, srcu_idx);
-	return;
+	return BLK_QC_T_NONE;
 }
 
 int dm_request_based(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3f9a514b5b9d..807095f4c793 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -250,7 +250,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static void md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
@@ -262,13 +262,13 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
 			bio->bi_error = -EROFS;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
@@ -302,6 +302,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
+
+	return BLK_QC_T_NONE;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 0df77cb07df6..91a336ea8c4f 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -161,7 +161,7 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
 	return err;
 }
 
-static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct gendisk *disk = bdev->bd_disk;
@@ -208,6 +208,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
 
  out:
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index eae93ab8ffcd..efb2c1ceef98 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1150,7 +1150,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
 	return ret;
 }
 
-static void btt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct btt *btt = q->queuedata;
@@ -1198,6 +1198,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 349f03e7ed06..012e0649f1ac 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -64,7 +64,7 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	kunmap_atomic(mem);
 }
 
-static void pmem_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
 	bool do_acct;
 	unsigned long start;
@@ -84,6 +84,7 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
 	wmb_pmem();
 
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 97b6640a3745..3dfc28875cc3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -90,7 +90,7 @@ static struct class *nvme_class;
 
 static int __nvme_reset(struct nvme_dev *dev);
 static int nvme_reset(struct nvme_dev *dev);
-static int nvme_process_cq(struct nvme_queue *nvmeq);
+static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_dead_ctrl(struct nvme_dev *dev);
 
 struct async_cmd_info {
@@ -935,7 +935,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int nvme_process_cq(struct nvme_queue *nvmeq)
+static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 {
 	u16 head, phase;
 
@@ -953,6 +953,8 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 			head = 0;
 			phase = !phase;
 		}
+		if (tag && *tag == cqe.command_id)
+			*tag = -1;
 		ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
 		fn(nvmeq, ctx, &cqe);
 	}
@@ -964,14 +966,18 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 	 * a big problem.
 	 */
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
-		return 0;
+		return;
 
 	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 	nvmeq->cq_head = head;
 	nvmeq->cq_phase = phase;
 
 	nvmeq->cqe_seen = 1;
-	return 1;
+}
+
+static void nvme_process_cq(struct nvme_queue *nvmeq)
+{
+	__nvme_process_cq(nvmeq, NULL);
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -995,6 +1001,23 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+	struct nvme_queue *nvmeq = hctx->driver_data;
+
+	if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
+	    nvmeq->cq_phase) {
+		spin_lock_irq(&nvmeq->q_lock);
+		__nvme_process_cq(nvmeq, &tag);
+		spin_unlock_irq(&nvmeq->q_lock);
+
+		if (tag == -1)
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Returns 0 on success. If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -1656,6 +1679,7 @@ static struct blk_mq_ops nvme_mq_ops = {
 	.init_hctx	= nvme_init_hctx,
 	.init_request	= nvme_init_request,
 	.timeout	= nvme_timeout,
+	.poll		= nvme_poll,
 };
 
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5ed44fe21380..94a8f4ab57bc 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,8 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static blk_qc_t dcssblk_make_request(struct request_queue *q,
+				     struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
 			 void __pmem **kaddr, unsigned long *pfn);
 
@@ -815,7 +816,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
 	up_write(&dcssblk_devices_sem);
 }
 
-static void
+static blk_qc_t
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
@@ -874,9 +875,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 		bytes_done += bvec.bv_len;
 	}
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
fail:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static long
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 02871f1db562..288f59a4147b 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
 /*
  * Block device make request function.
  */
-static void xpram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec bvec;
@@ -223,9 +223,10 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
 		}
 	}
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
fail:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index e6974c36276d..fed50d538a41 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -333,7 +333,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 	return count;
 }
 
-static void loop_make_request(struct request_queue *q, struct bio *old_bio)
+static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -364,9 +364,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);
-	return;
+	return BLK_QC_T_NONE;
err:
 	bio_io_error(old_bio);
+	return BLK_QC_T_NONE;
 }
 
 static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 18e7554cf94c..cb5337d8c273 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -109,6 +109,8 @@ struct dio_submit {
 struct dio {
 	int flags;			/* doesn't change */
 	int rw;
+	blk_qc_t bio_cookie;
+	struct block_device *bio_bdev;
 	struct inode *inode;
 	loff_t i_size;			/* i_size when submitted */
 	dio_iodone_t *end_io;		/* IO completion function */
@@ -397,11 +399,14 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 	if (dio->is_async && dio->rw == READ && dio->should_dirty)
 		bio_set_pages_dirty(bio);
 
-	if (sdio->submit_io)
+	dio->bio_bdev = bio->bi_bdev;
+
+	if (sdio->submit_io) {
 		sdio->submit_io(dio->rw, bio, dio->inode,
 				sdio->logical_offset_in_bio);
-	else
-		submit_bio(dio->rw, bio);
+		dio->bio_cookie = BLK_QC_T_NONE;
+	} else
+		dio->bio_cookie = submit_bio(dio->rw, bio);
 
 	sdio->bio = NULL;
 	sdio->boundary = 0;
@@ -440,7 +445,8 @@ static struct bio *dio_await_one(struct dio *dio)
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		dio->waiter = current;
 		spin_unlock_irqrestore(&dio->bio_lock, flags);
-		io_schedule();
+		if (!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
+			io_schedule();
 		/* wake up sets us TASK_RUNNING */
 		spin_lock_irqsave(&dio->bio_lock, flags);
 		dio->waiter = NULL;
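This is the consumer that motivates the plumbing above: dio remembers the device and cookie of the last bio it submitted, and dio_await_one() offers them to blk_poll() before sleeping. On a polled queue the waiting task spins on the completion queue and skips the interrupt-plus-context-switch latency entirely; everywhere else blk_poll() returns false and the old io_schedule() path runs unchanged.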
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 83cc9d4e5455..daf17d70aeca 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -59,6 +59,9 @@ struct blk_mq_hw_ctx {
 
 	struct blk_mq_cpu_notifier	cpu_notifier;
 	struct kobject		kobj;
+
+	unsigned long		poll_invoked;
+	unsigned long		poll_success;
 };
 
 struct blk_mq_tag_set {
@@ -97,6 +100,8 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
 
 struct blk_mq_ops {
 	/*
@@ -114,6 +119,11 @@ struct blk_mq_ops {
 	 */
 	timeout_fn		*timeout;
 
+	/*
+	 * Called to poll for completion of a specific tag.
+	 */
+	poll_fn			*poll;
+
 	softirq_done_fn		*complete;
 
 	/*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e8130138f29d..641e5a3ed58c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -244,4 +244,28 @@ enum rq_flag_bits {
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 #define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
 
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE	-1U
+#define BLK_QC_T_SHIFT	16
+
+static inline bool blk_qc_t_valid(blk_qc_t cookie)
+{
+	return cookie != BLK_QC_T_NONE;
+}
+
+static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
+{
+	return tag | (queue_num << BLK_QC_T_SHIFT);
+}
+
+static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+{
+	return cookie >> BLK_QC_T_SHIFT;
+}
+
+static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+{
+	return cookie & 0xffff;
+}
+
 #endif /* __LINUX_BLK_TYPES_H */
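The cookie layout is deliberately trivial so blk_poll() can recover the hardware queue and tag without any lookup: the queue number sits above bit 16 and the tag below it, which quietly assumes tags fit in 16 bits. A standalone illustration of the helpers (not part of the patch):

	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	/* tag 42 on hctx 3 */

	BUG_ON(!blk_qc_t_valid(cookie));
	BUG_ON(blk_qc_t_to_queue_num(cookie) != 3);	/* cookie >> 16 */
	BUG_ON(blk_qc_t_to_tag(cookie) != 42);		/* cookie & 0xffff */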
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d045ca8487af..3fe27f8d91f0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -209,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
 struct blk_queue_ctx;
 
 typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -487,6 +487,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -761,7 +762,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
+extern blk_qc_t generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
@@ -814,6 +815,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;	/* this is never NULL */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9a1cb8c605e0..6230eb2a9cca 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2613,7 +2613,7 @@ static inline void remove_inode_hash(struct inode *inode)
 extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
-extern void submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
 #endif
 extern int set_blocksize(struct block_device *, int);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5ebd70d12f35..69c9057e1ab8 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -426,7 +426,7 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
 	return ppa;
 }
 
-typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
+typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
 typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
 typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);