author    Linus Torvalds <torvalds@linux-foundation.org>    2019-01-05 21:29:13 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-01-05 21:29:13 -0500
commit    d7252d0d36375fe8c544098469a21d03fa267a55 (patch)
tree      0912357cdfa9ab2031d342d27b7e828b158c8bd7 /drivers
parent    0fe4e2d5cd931ad2ff99d61cfdd5c6dc0c3ec60b (diff)
parent    59f75fd0e31532bdcf65f754516cac2954d5ddc7 (diff)
Merge tag 'for-linus-20190104' of git://git.kernel.dk/linux-block
Pull block updates and fixes from Jens Axboe:

 - Pulled in MD changes that Shaohua had queued up for 4.21.
   Unfortunately we lost Shaohua late 2018; I'm sending these in on his
   behalf.

 - In conjunction with the above, I added a CREDITS entry for Shaohua.

 - sunvdc queue restart fix (Ming)

* tag 'for-linus-20190104' of git://git.kernel.dk/linux-block:
  Add CREDITS entry for Shaohua Li
  block: sunvdc: don't run hw queue synchronously from irq context
  md: fix raid10 hang issue caused by barrier
  raid10: refactor common wait code from regular read/write request
  md: remvoe redundant condition check
  lib/raid6: add option to skip algo benchmarking
  lib/raid6: sort algos in rough performance order
  lib/raid6: check for assembler SSSE3 support
  lib/raid6: avoid __attribute_const__ redefinition
  lib/raid6: add missing include for raid6test
  md: remove set but not used variable 'bi_rdev'
Diffstat (limited to 'drivers')
 drivers/block/sunvdc.c |  2
 drivers/md/md.c        | 14
 drivers/md/raid10.c    | 76
 3 files changed, 34 insertions(+), 58 deletions(-)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0ff27e2d98c4..26937ba28f78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -181,7 +181,7 @@ static void vdc_blk_queue_start(struct vdc_port *port)
 	 * allocated a disk.
 	 */
 	if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
-		blk_mq_start_hw_queues(port->disk->queue);
+		blk_mq_start_stopped_hw_queues(port->disk->queue, true);
 }
 
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
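Note on the sunvdc hunk: vdc_blk_queue_start() can be reached from irq context, and blk_mq_start_stopped_hw_queues(queue, true) restarts the stopped hardware queues asynchronously (the actual queue run is deferred to a worker) instead of running them synchronously in the caller's context. Below is a minimal userspace C analogy of that deferral pattern, not kernel code; the names irq_handler, worker, and queue_pending are illustrative only.

/*
 * Userspace analogy: the "irq" path only marks work and signals,
 * the way the async queue restart only kicks kblockd; the heavy
 * work runs later in a context where blocking is allowed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static bool queue_pending;

/* "irq context": must not block or do heavy work, only signal. */
static void irq_handler(void)
{
	pthread_mutex_lock(&lock);
	queue_pending = true;           /* cheap: just mark work pending */
	pthread_cond_signal(&kick);     /* async kick, like async=true   */
	pthread_mutex_unlock(&lock);
}

/* "worker" (kblockd-like): runs the queue where blocking is fine. */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!queue_pending)
		pthread_cond_wait(&kick, &lock);
	queue_pending = false;
	pthread_mutex_unlock(&lock);
	printf("queue run in worker context\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	irq_handler();
	pthread_join(t, NULL);
	return 0;
}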
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9a0a1e0934d5..fd4af4de03b4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2147,14 +2147,12 @@ EXPORT_SYMBOL(md_integrity_register);
  */
 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
-	struct blk_integrity *bi_rdev;
 	struct blk_integrity *bi_mddev;
 	char name[BDEVNAME_SIZE];
 
 	if (!mddev->gendisk)
 		return 0;
 
-	bi_rdev = bdev_get_integrity(rdev->bdev);
 	bi_mddev = blk_get_integrity(mddev->gendisk);
 
 	if (!bi_mddev) /* nothing to do */
@@ -5693,14 +5691,10 @@ int md_run(struct mddev *mddev)
 	return 0;
 
 abort:
-	if (mddev->flush_bio_pool) {
-		mempool_destroy(mddev->flush_bio_pool);
-		mddev->flush_bio_pool = NULL;
-	}
-	if (mddev->flush_pool){
-		mempool_destroy(mddev->flush_pool);
-		mddev->flush_pool = NULL;
-	}
+	mempool_destroy(mddev->flush_bio_pool);
+	mddev->flush_bio_pool = NULL;
+	mempool_destroy(mddev->flush_pool);
+	mddev->flush_pool = NULL;
 
 	return err;
 }
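Both md.c hunks are cleanups: the unused bi_rdev variable is dropped, and the error path in md_run() loses its if guards because mempool_destroy() is NULL-safe (like kfree()), so calling it unconditionally and then clearing the pointer is enough. The sketch below shows the same idiom in plain userspace C; pool_destroy, struct pool, and struct ctx are made-up names, assumed only to mirror a NULL-tolerant destroy helper.

/* Userspace sketch: a destroy helper that tolerates NULL lets error
 * paths skip the redundant "if (ptr)" checks entirely. */
#include <stdlib.h>

struct pool { void *mem; };

static void pool_destroy(struct pool *p)
{
	if (!p)                  /* NULL-safe, like mempool_destroy(NULL) */
		return;
	free(p->mem);
	free(p);
}

struct ctx {
	struct pool *flush_bio_pool;
	struct pool *flush_pool;
};

static int setup_fail_path(struct ctx *c)
{
	/* error path: no "if (c->flush_pool)" guards needed */
	pool_destroy(c->flush_bio_pool);
	c->flush_bio_pool = NULL;
	pool_destroy(c->flush_pool);
	c->flush_pool = NULL;
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	return setup_fail_path(&c) == -1 ? 0 : 1;
}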
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b98e746e7fc4..abb5d382f64d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1124,6 +1124,29 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 	kfree(plug);
 }
 
+/*
+ * 1. Register the new request and wait if the reconstruction thread has put
+ * up a bar for new requests. Continue immediately if no resync is active
+ * currently.
+ * 2. If IO spans the reshape position. Need to wait for reshape to pass.
+ */
+static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+				 struct bio *bio, sector_t sectors)
+{
+	wait_barrier(conf);
+	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+		raid10_log(conf->mddev, "wait reshape");
+		allow_barrier(conf);
+		wait_event(conf->wait_barrier,
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
+		wait_barrier(conf);
+	}
+}
+
 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 				 struct r10bio *r10_bio)
 {
@@ -1132,7 +1155,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	const int op = bio_op(bio);
 	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
 	int max_sectors;
-	sector_t sectors;
 	struct md_rdev *rdev;
 	char b[BDEVNAME_SIZE];
 	int slot = r10_bio->read_slot;
@@ -1166,30 +1188,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		}
 		rcu_read_unlock();
 	}
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
-
-	sectors = r10_bio->sectors;
-	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_iter.bi_sector < conf->reshape_progress &&
-	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-		/*
-		 * IO spans the reshape position. Need to wait for reshape to
-		 * pass
-		 */
-		raid10_log(conf->mddev, "wait reshape");
-		allow_barrier(conf);
-		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
-			   conf->reshape_progress >= bio->bi_iter.bi_sector +
-			   sectors);
-		wait_barrier(conf);
-	}
 
+	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (!rdev) {
 		if (err_rdev) {
@@ -1209,7 +1209,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 		r10_bio->sectors = max_sectors;
@@ -1332,30 +1334,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 		finish_wait(&conf->wait_barrier, &w);
 	}
 
-	/*
-	 * Register the new request and wait if the reconstruction
-	 * thread has put up a bar for new requests.
-	 * Continue immediately if no resync is active currently.
-	 */
-	wait_barrier(conf);
-
 	sectors = r10_bio->sectors;
-	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_iter.bi_sector < conf->reshape_progress &&
-	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-		/*
-		 * IO spans the reshape position. Need to wait for reshape to
-		 * pass
-		 */
-		raid10_log(conf->mddev, "wait reshape");
-		allow_barrier(conf);
-		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
-			   conf->reshape_progress >= bio->bi_iter.bi_sector +
-			   sectors);
-		wait_barrier(conf);
-	}
-
+	regular_request_wait(mddev, conf, bio, sectors);
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    (mddev->reshape_backwards
 	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1514,7 +1494,9 @@ retry_write:
 		struct bio *split = bio_split(bio, r10_bio->sectors,
 					      GFP_NOIO, &conf->bio_split);
 		bio_chain(split, bio);
+		allow_barrier(conf);
 		generic_make_request(bio);
+		wait_barrier(conf);
 		bio = split;
 		r10_bio->master_bio = bio;
 	}
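The allow_barrier()/wait_barrier() pairs added around generic_make_request() in both the read and write split paths are the actual hang fix: the remainder of a split bio was previously resubmitted while the submitter still held the barrier, so a pending raise_barrier() from resync/reshape could wait forever on requests that in turn could not make progress. Dropping the barrier across the nested submission and re-taking it afterwards breaks that cycle. The sketch below models only the fixed pattern with a userspace reader/writer lock; it is an analogy, not the kernel's barrier implementation, and handle_request and submit_split are invented names for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t barrier = PTHREAD_RWLOCK_INITIALIZER;

static void handle_request(int depth);

/* Fixed pattern: release the shared hold around the nested submission,
 * mirroring allow_barrier()/wait_barrier() around generic_make_request(). */
static void submit_split(int depth)
{
	pthread_rwlock_unlock(&barrier);    /* allow_barrier()            */
	handle_request(depth);              /* generic_make_request(bio)  */
	pthread_rwlock_rdlock(&barrier);    /* wait_barrier()             */
}

static void handle_request(int depth)
{
	pthread_rwlock_rdlock(&barrier);    /* wait_barrier()             */
	if (depth > 0)
		submit_split(depth - 1);    /* bio was split; resubmit rest */
	pthread_rwlock_unlock(&barrier);    /* allow_barrier() at I/O end */
}

int main(void)
{
	handle_request(3);
	puts("no stall with the allow/wait pairing around resubmission");
	return 0;
}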