author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 20:23:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 20:23:49 -0500
commit		3419b45039c6b799c974a8019361c045e7ca232c (patch)
tree		36a63602036cc50f34fadcbd5d5d8fca94e44297 /drivers/md
parent		01504f5e9e071f1dde1062e3be15f54d4555308f (diff)
parent		c1c534609fe8a859f9c8108a5591e6e8a97e34d1 (diff)
Merge branch 'for-4.4/io-poll' of git://git.kernel.dk/linux-block
Pull block IO poll support from Jens Axboe:
 "Various groups have been doing experimentation around IO polling for
  (really) fast devices.  The code has been reviewed and has been sitting
  on the side for a few releases, but this is now good enough for
  coordinated benchmarking and further experimentation.

  Currently O_DIRECT sync read/write are supported.  A framework is in
  the works that allows scalable stats tracking so we can auto-tune
  this.  And we'll add libaio support as well soon.  For now, it's an
  opt-in feature for test purposes"

* 'for-4.4/io-poll' of git://git.kernel.dk/linux-block:
  direct-io: be sure to assign dio->bio_bdev for both paths
  directio: add block polling support
  NVMe: add blk polling support
  block: add block polling support
  blk-mq: return tag/queue combo in the make_request_fn handlers
  block: change ->make_request_fn() and users to return a queue cookie
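The driver-visible change throughout this diff is the ->make_request_fn() signature: it now returns a blk_qc_t cookie instead of void, so the submission path has a handle it can hand back to callers that want to poll for completion. A minimal sketch of what a bio-based driver without polling support looks like after this series (example_make_request is a hypothetical stand-in, not a function from this merge):

#include <linux/blkdev.h>

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	/* Remap or queue the bio exactly as before the API change. */
	generic_make_request(bio);

	/* Bio-based drivers that cannot be polled return the "no cookie" value. */
	return BLK_QC_T_NONE;
}

On the submission side, submit_bio()/generic_make_request() now propagate this cookie upward, which is how the O_DIRECT sync path can hand it to blk_poll() on the owning queue; the md, dm and bcache conversions below simply return BLK_QC_T_NONE because they do not support polling yet.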
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/request.c	11
-rw-r--r--	drivers/md/dm.c			 6
-rw-r--r--	drivers/md/md.c			 8
3 files changed, 16 insertions, 9 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 8e9877b04637..25fa8445bb24 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -958,7 +958,8 @@ static void cached_dev_nodata(struct closure *cl)
 
 /* Cached devices - read & write stuff */
 
-static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t cached_dev_make_request(struct request_queue *q,
+					struct bio *bio)
 {
 	struct search *s;
 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
@@ -997,6 +998,8 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		else
 			generic_make_request(bio);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1070,7 +1073,8 @@ static void flash_dev_nodata(struct closure *cl)
 	continue_at(cl, search_free, NULL);
 }
 
-static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t flash_dev_make_request(struct request_queue *q,
+				       struct bio *bio)
 {
 	struct search *s;
 	struct closure *cl;
@@ -1093,7 +1097,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		continue_at_nobarrier(&s->cl,
 				      flash_dev_nodata,
 				      bcache_wq);
-		return;
+		return BLK_QC_T_NONE;
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1109,6 +1113,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	continue_at(cl, search_free, NULL);
+	return BLK_QC_T_NONE;
 }
 
 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32440ad5f684..6e15f3565892 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1755,7 +1755,7 @@ static void __split_and_process_bio(struct mapped_device *md,
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static void dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1774,12 +1774,12 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	__split_and_process_bio(md, map, bio);
 	dm_put_live_table(md, srcu_idx);
-	return;
+	return BLK_QC_T_NONE;
 }
 
 int dm_request_based(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3f9a514b5b9d..807095f4c793 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -250,7 +250,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static void md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
@@ -262,13 +262,13 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
 			bio->bi_error = -EROFS;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
@@ -302,6 +302,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
+
+	return BLK_QC_T_NONE;
 }
 
 /* mddev_suspend makes sure no new requests are submitted