author    Linus Torvalds <torvalds@linux-foundation.org>  2018-05-05 02:41:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-05-05 02:41:44 -0400
commit    2f50037a1c687ac928bbd47b6eb959b39f748ada (patch)
tree      3b1e97aa8b1e4605822f9736588914809711092c /drivers
parent    2e171ffcdf62a90ea7a0192728f81c1ac288de50 (diff)
parent    b8b784958eccbf8f51ebeee65282ca3fd59ea391 (diff)
Merge tag 'for-linus-20180504' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into this release. This contains:

   - Set of bcache fixes from Coly, fixing regression in patches that
     went into this series.

   - Set of NVMe fixes by way of Keith.

   - Set of bdi related fixes, one from Jan and two from Tetsuo Handa,
     fixing various issues around device addition/removal.

   - Two block inflight fixes from Omar, fixing issues around the
     transition to using tags for blk-mq inflight accounting that we
     did a few releases ago"

* tag 'for-linus-20180504' of git://git.kernel.dk/linux-block:
  bdi: Fix oops in wb_workfn()
  nvmet: switch loopback target state to connecting when resetting
  nvme/multipath: Fix multipath disabled naming collisions
  nvme/multipath: Disable runtime writable enabling parameter
  nvme: Set integrity flag for user passthrough commands
  nvme: fix potential memory leak in option parsing
  bdi: Fix use after free bug in debugfs_remove()
  bdi: wake up concurrent wb_shutdown() callers.
  bcache: use pr_info() to inform duplicated CACHE_SET_IO_DISABLE set
  bcache: set dc->io_disable to true in conditional_stop_bcache_device()
  bcache: add wait_for_kthread_stop() in bch_allocator_thread()
  bcache: count backing device I/O error for writeback I/O
  bcache: set CACHE_SET_IO_DISABLE in bch_cached_dev_error()
  bcache: store disk name in struct cache and struct cached_dev
  blk-mq: fix sysfs inflight counter
  blk-mq: count allocated but not started requests in iostats inflight
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/bcache/alloc.c	 5
-rw-r--r--	drivers/md/bcache/bcache.h	 4
-rw-r--r--	drivers/md/bcache/debug.c	 3
-rw-r--r--	drivers/md/bcache/io.c		 8
-rw-r--r--	drivers/md/bcache/request.c	 5
-rw-r--r--	drivers/md/bcache/super.c	75
-rw-r--r--	drivers/md/bcache/writeback.c	 4
-rw-r--r--	drivers/nvme/host/core.c	27
-rw-r--r--	drivers/nvme/host/fabrics.c	 6
-rw-r--r--	drivers/nvme/host/multipath.c	24
-rw-r--r--	drivers/nvme/host/nvme.h	12
-rw-r--r--	drivers/nvme/target/loop.c	 6
12 files changed, 117 insertions(+), 62 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 004cc3cc6123..7fa2631b422c 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -290,7 +290,7 @@ do { \
 		if (kthread_should_stop() ||				\
 		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
 			set_current_state(TASK_RUNNING);		\
-			return 0;					\
+			goto out;					\
 		}							\
 									\
 		schedule();						\
@@ -378,6 +378,9 @@ retry_invalidate:
 			bch_prio_write(ca);
 		}
 	}
+out:
+	wait_for_kthread_stop();
+	return 0;
 }
 
 /* Allocation */
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d338b7086013..3a0cfb237af9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -392,6 +392,8 @@ struct cached_dev {
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 	atomic_t		io_errors;
 	unsigned		error_limit;
+
+	char			backing_dev_name[BDEVNAME_SIZE];
 };
 
 enum alloc_reserve {
@@ -464,6 +466,8 @@ struct cache {
 	atomic_long_t		meta_sectors_written;
 	atomic_long_t		btree_sectors_written;
 	atomic_long_t		sectors_written;
+
+	char			cache_dev_name[BDEVNAME_SIZE];
 };
 
 struct gc_stat {
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 028f7b386e01..4e63c6f6c04d 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b)
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
-	char name[BDEVNAME_SIZE];
 	struct bio *check;
 	struct bio_vec bv, cbv;
 	struct bvec_iter iter, citer = { 0 };
@@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 					bv.bv_len),
 				 dc->disk.c,
 				 "verify failed at dev %s sector %llu",
-				 bdevname(dc->bdev, name),
+				 dc->backing_dev_name,
 				 (uint64_t) bio->bi_iter.bi_sector);
 
 	kunmap_atomic(p1);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 7fac97ae036e..2ddf8515e6a5 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 {
-	char buf[BDEVNAME_SIZE];
 	unsigned errors;
 
 	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 	errors = atomic_add_return(1, &dc->io_errors);
 	if (errors < dc->error_limit)
 		pr_err("%s: IO error on backing device, unrecoverable",
-			bio_devname(bio, buf));
+			dc->backing_dev_name);
 	else
 		bch_cached_dev_error(dc);
 }
@@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca,
 	}
 
 	if (error) {
-		char buf[BDEVNAME_SIZE];
 		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
 						    &ca->io_errors);
 		errors >>= IO_ERROR_SHIFT;
 
 		if (errors < ca->set->error_limit)
 			pr_err("%s: IO error on %s%s",
-			       bdevname(ca->bdev, buf), m,
+			       ca->cache_dev_name, m,
 			       is_read ? ", recovering." : ".");
 		else
 			bch_cache_set_error(ca->set,
 					    "%s: too many IO errors %s",
-					    bdevname(ca->bdev, buf), m);
+					    ca->cache_dev_name, m);
 	}
 }
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a65e3365eeb9..8e3e8655ed63 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio)
 		 */
 		if (unlikely(s->iop.writeback &&
 			     bio->bi_opf & REQ_PREFLUSH)) {
-			char buf[BDEVNAME_SIZE];
-
-			bio_devname(bio, buf);
 			pr_err("Can't flush %s: returned bi_status %i",
-				buf, bio->bi_status);
+				dc->backing_dev_name, bio->bi_status);
 		} else {
 			/* set to orig_bio->bi_status in bio_complete() */
 			s->iop.status = bio->bi_status;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index d90d9e59ca00..3dea06b41d43 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
-	char buf[BDEVNAME_SIZE];
 	struct closure cl;
 	closure_init_stack(&cl);
 
@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
 	mutex_unlock(&bch_register_lock);
 
-	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+	pr_info("Caching disabled for %s", dc->backing_dev_name);
 
 	/* Drop ref we took in cached_dev_detach() */
 	closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
-	char buf[BDEVNAME_SIZE];
 	struct cached_dev *exist_dc, *t;
 
-	bdevname(dc->bdev, buf);
-
 	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
 	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
-		pr_err("Can't attach %s: already attached", buf);
+		pr_err("Can't attach %s: already attached",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-		pr_err("Can't attach %s: shutting down", buf);
+		pr_err("Can't attach %s: shutting down",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (dc->sb.block_size < c->sb.block_size) {
 		/* Will die */
 		pr_err("Couldn't attach %s: block size less than set's block size",
-		       buf);
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
 		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
 			pr_err("Tried to attach %s but duplicate UUID already attached",
-				buf);
+				dc->backing_dev_name);
 
 			return -EINVAL;
 		}
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 
 	if (!u) {
 		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-			pr_err("Couldn't find uuid for %s in set", buf);
+			pr_err("Couldn't find uuid for %s in set",
+			       dc->backing_dev_name);
 			return -ENOENT;
 		}
 
 		u = uuid_find_empty(c);
 		if (!u) {
-			pr_err("Not caching %s, no room for UUID", buf);
+			pr_err("Not caching %s, no room for UUID",
+			       dc->backing_dev_name);
 			return -EINVAL;
 		}
 	}
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	up_write(&dc->writeback_lock);
 
 	pr_info("Caching %s as %s on set %pU",
-		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+		dc->backing_dev_name,
+		dc->disk.disk->disk_name,
 		dc->disk.c->sb.set_uuid);
 	return 0;
 }
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 			   struct block_device *bdev,
 			   struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = "cannot allocate memory";
 	struct cache_set *c;
 
+	bdevname(bdev, dc->backing_dev_name);
 	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
+
 	if (cached_dev_init(dc, sb->block_size << 9))
 		goto err;
1242 1244
@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
 		goto err;
 
-	pr_info("registered backing device %s", bdevname(bdev, name));
+	pr_info("registered backing device %s", dc->backing_dev_name);
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", dc->backing_dev_name, err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 
 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
+	struct cache_set *c;
 
 	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
 		return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
 	smp_mb();
 
 	pr_err("stop %s: too many IO errors on backing device %s\n",
-		dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+		dc->disk.disk->disk_name, dc->backing_dev_name);
+
+	/*
+	 * If the cached device is still attached to a cache set,
+	 * even dc->io_disable is true and no more I/O requests
+	 * accepted, cache device internal I/O (writeback scan or
+	 * garbage collection) may still prevent bcache device from
+	 * being stopped. So here CACHE_SET_IO_DISABLE should be
+	 * set to c->flags too, to make the internal I/O to cache
+	 * device rejected and stopped immediately.
+	 * If c is NULL, that means the bcache device is not attached
+	 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
+	 */
+	c = dc->disk.c;
+	if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	bcache_device_stop(&dc->disk);
 	return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
 		return false;
 
 	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-		pr_warn("CACHE_SET_IO_DISABLE already set");
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	/* XXX: we can be called from atomic context
 	acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
 		 */
 		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
 			d->disk->disk_name);
+		/*
+		 * There might be a small time gap that cache set is
+		 * released but bcache device is not. Inside this time
+		 * gap, regular I/O requests will directly go into
+		 * backing device as no cache set attached to. This
+		 * behavior may also introduce potential inconsistence
+		 * data in writeback mode while cache is dirty.
+		 * Therefore before calling bcache_device_stop() due
+		 * to a broken cache device, dc->io_disable should be
+		 * explicitly set to true.
+		 */
+		dc->io_disable = true;
+		/* make others know io_disable is true earlier */
+		smp_mb();
 		bcache_device_stop(d);
 	} else {
 		/*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
 static int register_cache(struct cache_sb *sb, struct page *sb_page,
 				struct block_device *bdev, struct cache *ca)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
-	bdevname(bdev, name);
-
+	bdevname(bdev, ca->cache_dev_name);
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", name);
+	pr_info("registered cache device %s", ca->cache_dev_name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error %s: %s", name, err);
+		pr_notice("error %s: %s", ca->cache_dev_name, err);
 
 	return ret;
 }
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4a9547cdcdc5..ad45ebe1a74b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (bio->bi_status)
+	if (bio->bi_status) {
 		SET_KEY_DIRTY(&w->key, false);
+		bch_count_backing_io_errors(io->dc, bio);
+	}
 
 	closure_put(&io->cl);
 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9df4f71e58ca..a3771c5729f5 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -764,6 +764,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 				ret = PTR_ERR(meta);
 				goto out_unmap;
 			}
+			req->cmd_flags |= REQ_INTEGRITY;
 		}
 	}
 
@@ -2997,31 +2998,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (nvme_init_ns_head(ns, nsid, id))
 		goto out_free_id;
 	nvme_setup_streams_ns(ctrl, ns);
-
-#ifdef CONFIG_NVME_MULTIPATH
-	/*
-	 * If multipathing is enabled we need to always use the subsystem
-	 * instance number for numbering our devices to avoid conflicts
-	 * between subsystems that have multiple controllers and thus use
-	 * the multipath-aware subsystem node and those that have a single
-	 * controller and use the controller node directly.
-	 */
-	if (ns->head->disk) {
-		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
-				ctrl->cntlid, ns->head->instance);
-		flags = GENHD_FL_HIDDEN;
-	} else {
-		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
-				ns->head->instance);
-	}
-#else
-	/*
-	 * But without the multipath code enabled, multiple controller per
-	 * subsystems are visible as devices and thus we cannot use the
-	 * subsystem instance.
-	 */
-	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
 
 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
 		if (nvme_nvm_register(ns, disk_name, node)) {
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 124c458806df..7ae732a77fe8 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->transport);
 			opts->transport = p;
 			break;
 		case NVMF_OPT_NQN:
@@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->subsysnqn);
 			opts->subsysnqn = p;
 			nqnlen = strlen(opts->subsysnqn);
 			if (nqnlen >= NVMF_NQN_SIZE) {
@@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->traddr);
 			opts->traddr = p;
 			break;
 		case NVMF_OPT_TRSVCID:
@@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->trsvcid);
 			opts->trsvcid = p;
 			break;
 		case NVMF_OPT_QUEUE_SIZE:
@@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			nvmf_host_put(opts->host);
 			opts->host = nvmf_host_add(p);
 			kfree(p);
 			if (!opts->host) {
@@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->host_traddr);
 			opts->host_traddr = p;
 			break;
 		case NVMF_OPT_HOST_ID:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 956e0b8e9c4d..d7b664ae5923 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -15,10 +15,32 @@
 #include "nvme.h"
 
 static bool multipath = true;
-module_param(multipath, bool, 0644);
+module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+			struct nvme_ctrl *ctrl, int *flags)
+{
+	if (!multipath) {
+		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+	} else if (ns->head->disk) {
+		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+				ctrl->cntlid, ns->head->instance);
+		*flags = GENHD_FL_HIDDEN;
+	} else {
+		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+				ns->head->instance);
+	}
+}
+
 void nvme_failover_req(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 061fecfd44f5..7ded7a51c430 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -436,6 +436,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
 bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -461,6 +463,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 }
 
 #else
+/*
+ * Without the multipath code enabled, multiple controller per subsystems are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+				      struct nvme_ctrl *ctrl, int *flags)
+{
+	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
 static inline void nvme_failover_req(struct request *req)
 {
 }
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 31fdfba556a8..27a8561c0cb9 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_loop_shutdown_ctrl(ctrl);
 
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	ret = nvme_loop_configure_admin_queue(ctrl);
 	if (ret)
 		goto out_disable;