diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-03-10 11:48:01 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-03-10 11:48:01 -0500 |
commit | 91a262096ee8fa0262b65f5aad32c4c17f088da7 (patch) | |
tree | 3b5a600dbf16d2191e983f9a00dce43bc22e8975 /drivers | |
parent | b3b25b1d9e104352b8272488ab94145fe84c4261 (diff) | |
parent | 1d037577c323e5090ce281e96bc313ab2eee5be2 (diff) |
Merge tag 'for-linus-20180309' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- a xen-blkfront fix from Bhavesh, addressing a multiqueue issue when
detaching/re-attaching
- a few important NVMe fixes, including a revert for a sysfs fix that
caused some user space confusion
- two bcache fixes by way of Michael Lyle
- a loop regression fix, addressing lost writes on DAX
* tag 'for-linus-20180309' of git://git.kernel.dk/linux-block:
loop: Fix lost writes caused by missing flag
nvme_fc: rework sqsize handling
nvme-fabrics: Ignore nr_io_queues option for discovery controllers
xen-blkfront: move negotiate_mq to cover all cases of new VBDs
Revert "nvme: create 'slaves' and 'holders' entries for hidden controllers"
bcache: don't attach backing with duplicate UUID
bcache: fix crashes in duplicate cache device register
nvme: pci: pass max vectors as num_possible_cpus() to pci_alloc_irq_vectors
nvme-pci: Fix EEH failure on ppc
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/block/loop.c | 2 | ||||
-rw-r--r-- | drivers/block/xen-blkfront.c | 17 | ||||
-rw-r--r-- | drivers/md/bcache/super.c | 27 | ||||
-rw-r--r-- | drivers/nvme/host/core.c | 2 | ||||
-rw-r--r-- | drivers/nvme/host/fabrics.c | 5 | ||||
-rw-r--r-- | drivers/nvme/host/fc.c | 27 | ||||
-rw-r--r-- | drivers/nvme/host/multipath.c | 30 | ||||
-rw-r--r-- | drivers/nvme/host/nvme.h | 8 | ||||
-rw-r--r-- | drivers/nvme/host/pci.c | 15 |
9 files changed, 60 insertions, 73 deletions
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 87855b5123a6..ee62d2d517bf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) | |||
266 | struct iov_iter i; | 266 | struct iov_iter i; |
267 | ssize_t bw; | 267 | ssize_t bw; |
268 | 268 | ||
269 | iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); | 269 | iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); |
270 | 270 | ||
271 | file_start_write(file); | 271 | file_start_write(file); |
272 | bw = vfs_iter_write(file, &i, ppos, 0); | 272 | bw = vfs_iter_write(file, &i, ppos, 0); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..92ec1bbece51 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); | |||
262 | 262 | ||
263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); | 263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); |
264 | static void blkfront_gather_backend_features(struct blkfront_info *info); | 264 | static void blkfront_gather_backend_features(struct blkfront_info *info); |
265 | static int negotiate_mq(struct blkfront_info *info); | ||
265 | 266 | ||
266 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) | 267 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) |
267 | { | 268 | { |
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, | |||
1774 | unsigned int i, max_page_order; | 1775 | unsigned int i, max_page_order; |
1775 | unsigned int ring_page_order; | 1776 | unsigned int ring_page_order; |
1776 | 1777 | ||
1778 | if (!info) | ||
1779 | return -ENODEV; | ||
1780 | |||
1777 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, | 1781 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, |
1778 | "max-ring-page-order", 0); | 1782 | "max-ring-page-order", 0); |
1779 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); | 1783 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); |
1780 | info->nr_ring_pages = 1 << ring_page_order; | 1784 | info->nr_ring_pages = 1 << ring_page_order; |
1781 | 1785 | ||
1786 | err = negotiate_mq(info); | ||
1787 | if (err) | ||
1788 | goto destroy_blkring; | ||
1789 | |||
1782 | for (i = 0; i < info->nr_rings; i++) { | 1790 | for (i = 0; i < info->nr_rings; i++) { |
1783 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | 1791 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; |
1784 | 1792 | ||
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1978 | } | 1986 | } |
1979 | 1987 | ||
1980 | info->xbdev = dev; | 1988 | info->xbdev = dev; |
1981 | err = negotiate_mq(info); | ||
1982 | if (err) { | ||
1983 | kfree(info); | ||
1984 | return err; | ||
1985 | } | ||
1986 | 1989 | ||
1987 | mutex_init(&info->mutex); | 1990 | mutex_init(&info->mutex); |
1988 | info->vdevice = vdevice; | 1991 | info->vdevice = vdevice; |
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
2099 | 2102 | ||
2100 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 2103 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
2101 | 2104 | ||
2102 | err = negotiate_mq(info); | ||
2103 | if (err) | ||
2104 | return err; | ||
2105 | |||
2106 | err = talk_to_blkback(dev, info); | 2105 | err = talk_to_blkback(dev, info); |
2107 | if (!err) | 2106 | if (!err) |
2108 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); | 2107 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4d1d8dfb2d2a..f2273143b3cb 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
963 | uint32_t rtime = cpu_to_le32(get_seconds()); | 963 | uint32_t rtime = cpu_to_le32(get_seconds()); |
964 | struct uuid_entry *u; | 964 | struct uuid_entry *u; |
965 | char buf[BDEVNAME_SIZE]; | 965 | char buf[BDEVNAME_SIZE]; |
966 | struct cached_dev *exist_dc, *t; | ||
966 | 967 | ||
967 | bdevname(dc->bdev, buf); | 968 | bdevname(dc->bdev, buf); |
968 | 969 | ||
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
987 | return -EINVAL; | 988 | return -EINVAL; |
988 | } | 989 | } |
989 | 990 | ||
991 | /* Check whether already attached */ | ||
992 | list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { | ||
993 | if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { | ||
994 | pr_err("Tried to attach %s but duplicate UUID already attached", | ||
995 | buf); | ||
996 | |||
997 | return -EINVAL; | ||
998 | } | ||
999 | } | ||
1000 | |||
990 | u = uuid_find(c, dc->sb.uuid); | 1001 | u = uuid_find(c, dc->sb.uuid); |
991 | 1002 | ||
992 | if (u && | 1003 | if (u && |
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, | |||
1204 | 1215 | ||
1205 | return; | 1216 | return; |
1206 | err: | 1217 | err: |
1207 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1218 | pr_notice("error %s: %s", bdevname(bdev, name), err); |
1208 | bcache_device_stop(&dc->disk); | 1219 | bcache_device_stop(&dc->disk); |
1209 | } | 1220 | } |
1210 | 1221 | ||
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1883 | const char *err = NULL; /* must be set for any error case */ | 1894 | const char *err = NULL; /* must be set for any error case */ |
1884 | int ret = 0; | 1895 | int ret = 0; |
1885 | 1896 | ||
1897 | bdevname(bdev, name); | ||
1898 | |||
1886 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1899 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
1887 | ca->bdev = bdev; | 1900 | ca->bdev = bdev; |
1888 | ca->bdev->bd_holder = ca; | 1901 | ca->bdev->bd_holder = ca; |
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1891 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; | 1904 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; |
1892 | get_page(sb_page); | 1905 | get_page(sb_page); |
1893 | 1906 | ||
1894 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) | 1907 | if (blk_queue_discard(bdev_get_queue(bdev))) |
1895 | ca->discard = CACHE_DISCARD(&ca->sb); | 1908 | ca->discard = CACHE_DISCARD(&ca->sb); |
1896 | 1909 | ||
1897 | ret = cache_alloc(ca); | 1910 | ret = cache_alloc(ca); |
1898 | if (ret != 0) { | 1911 | if (ret != 0) { |
1912 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | ||
1899 | if (ret == -ENOMEM) | 1913 | if (ret == -ENOMEM) |
1900 | err = "cache_alloc(): -ENOMEM"; | 1914 | err = "cache_alloc(): -ENOMEM"; |
1901 | else | 1915 | else |
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1918 | goto out; | 1932 | goto out; |
1919 | } | 1933 | } |
1920 | 1934 | ||
1921 | pr_info("registered cache device %s", bdevname(bdev, name)); | 1935 | pr_info("registered cache device %s", name); |
1922 | 1936 | ||
1923 | out: | 1937 | out: |
1924 | kobject_put(&ca->kobj); | 1938 | kobject_put(&ca->kobj); |
1925 | 1939 | ||
1926 | err: | 1940 | err: |
1927 | if (err) | 1941 | if (err) |
1928 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1942 | pr_notice("error %s: %s", name, err); |
1929 | 1943 | ||
1930 | return ret; | 1944 | return ret; |
1931 | } | 1945 | } |
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2014 | if (err) | 2028 | if (err) |
2015 | goto err_close; | 2029 | goto err_close; |
2016 | 2030 | ||
2031 | err = "failed to register device"; | ||
2017 | if (SB_IS_BDEV(sb)) { | 2032 | if (SB_IS_BDEV(sb)) { |
2018 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); | 2033 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
2019 | if (!dc) | 2034 | if (!dc) |
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2028 | goto err_close; | 2043 | goto err_close; |
2029 | 2044 | ||
2030 | if (register_cache(sb, sb_page, bdev, ca) != 0) | 2045 | if (register_cache(sb, sb_page, bdev, ca) != 0) |
2031 | goto err_close; | 2046 | goto err; |
2032 | } | 2047 | } |
2033 | out: | 2048 | out: |
2034 | if (sb_page) | 2049 | if (sb_page) |
@@ -2041,7 +2056,7 @@ out: | |||
2041 | err_close: | 2056 | err_close: |
2042 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | 2057 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
2043 | err: | 2058 | err: |
2044 | pr_info("error opening %s: %s", path, err); | 2059 | pr_info("error %s: %s", path, err); |
2045 | ret = -EINVAL; | 2060 | ret = -EINVAL; |
2046 | goto out; | 2061 | goto out; |
2047 | } | 2062 | } |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 817e5e2766da..7aeca5db7916 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
3033 | ns->disk->disk_name); | 3033 | ns->disk->disk_name); |
3034 | 3034 | ||
3035 | nvme_mpath_add_disk(ns->head); | 3035 | nvme_mpath_add_disk(ns->head); |
3036 | nvme_mpath_add_disk_links(ns); | ||
3037 | return; | 3036 | return; |
3038 | out_unlink_ns: | 3037 | out_unlink_ns: |
3039 | mutex_lock(&ctrl->subsys->lock); | 3038 | mutex_lock(&ctrl->subsys->lock); |
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
3053 | return; | 3052 | return; |
3054 | 3053 | ||
3055 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { | 3054 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
3056 | nvme_mpath_remove_disk_links(ns); | ||
3057 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, | 3055 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
3058 | &nvme_ns_id_attr_group); | 3056 | &nvme_ns_id_attr_group); |
3059 | if (ns->ndev) | 3057 | if (ns->ndev) |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index a1c58e35075e..8f0f34d06d46 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
650 | ret = -EINVAL; | 650 | ret = -EINVAL; |
651 | goto out; | 651 | goto out; |
652 | } | 652 | } |
653 | if (opts->discovery_nqn) { | ||
654 | pr_debug("Ignoring nr_io_queues value for discovery controller\n"); | ||
655 | break; | ||
656 | } | ||
657 | |||
653 | opts->nr_io_queues = min_t(unsigned int, | 658 | opts->nr_io_queues = min_t(unsigned int, |
654 | num_online_cpus(), token); | 659 | num_online_cpus(), token); |
655 | break; | 660 | break; |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 7f51f8414b97..1dc1387b7134 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, | |||
1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); | 1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); |
1207 | 1207 | ||
1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize); | 1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); |
1210 | /* Linux supports only Dynamic controllers */ | 1210 | /* Linux supports only Dynamic controllers */ |
1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); | 1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); |
1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); | 1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); |
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); | 1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); |
1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); | 1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); |
1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize); | 1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); |
1325 | 1325 | ||
1326 | lsop->queue = queue; | 1326 | lsop->queue = queue; |
1327 | lsreq->rqstaddr = conn_rqst; | 1327 | lsreq->rqstaddr = conn_rqst; |
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2481 | goto out_free_tag_set; | 2481 | goto out_free_tag_set; |
2482 | } | 2482 | } |
2483 | 2483 | ||
2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2485 | if (ret) | 2485 | if (ret) |
2486 | goto out_cleanup_blk_queue; | 2486 | goto out_cleanup_blk_queue; |
2487 | 2487 | ||
2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2489 | if (ret) | 2489 | if (ret) |
2490 | goto out_delete_hw_queues; | 2490 | goto out_delete_hw_queues; |
2491 | 2491 | ||
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2532 | if (ret) | 2532 | if (ret) |
2533 | goto out_free_io_queues; | 2533 | goto out_free_io_queues; |
2534 | 2534 | ||
2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2536 | if (ret) | 2536 | if (ret) |
2537 | goto out_free_io_queues; | 2537 | goto out_free_io_queues; |
2538 | 2538 | ||
2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2540 | if (ret) | 2540 | if (ret) |
2541 | goto out_delete_hw_queues; | 2541 | goto out_delete_hw_queues; |
2542 | 2542 | ||
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2632 | nvme_fc_init_queue(ctrl, 0); | 2632 | nvme_fc_init_queue(ctrl, 0); |
2633 | 2633 | ||
2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, | 2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, |
2635 | NVME_AQ_BLK_MQ_DEPTH); | 2635 | NVME_AQ_DEPTH); |
2636 | if (ret) | 2636 | if (ret) |
2637 | goto out_free_queue; | 2637 | goto out_free_queue; |
2638 | 2638 | ||
2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | 2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], |
2640 | NVME_AQ_BLK_MQ_DEPTH, | 2640 | NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); |
2641 | (NVME_AQ_BLK_MQ_DEPTH / 4)); | ||
2642 | if (ret) | 2641 | if (ret) |
2643 | goto out_delete_hw_queue; | 2642 | goto out_delete_hw_queue; |
2644 | 2643 | ||
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2666 | } | 2665 | } |
2667 | 2666 | ||
2668 | ctrl->ctrl.sqsize = | 2667 | ctrl->ctrl.sqsize = |
2669 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); | 2668 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); |
2670 | 2669 | ||
2671 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); | 2670 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); |
2672 | if (ret) | 2671 | if (ret) |
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2699 | opts->queue_size = ctrl->ctrl.maxcmd; | 2698 | opts->queue_size = ctrl->ctrl.maxcmd; |
2700 | } | 2699 | } |
2701 | 2700 | ||
2701 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { | ||
2702 | /* warn if sqsize is lower than queue_size */ | ||
2703 | dev_warn(ctrl->ctrl.device, | ||
2704 | "queue_size %zu > ctrl sqsize %u, clamping down\n", | ||
2705 | opts->queue_size, ctrl->ctrl.sqsize + 1); | ||
2706 | opts->queue_size = ctrl->ctrl.sqsize + 1; | ||
2707 | } | ||
2708 | |||
2702 | ret = nvme_fc_init_aen_ops(ctrl); | 2709 | ret = nvme_fc_init_aen_ops(ctrl); |
2703 | if (ret) | 2710 | if (ret) |
2704 | goto out_term_aen_ops; | 2711 | goto out_term_aen_ops; |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b7e5c6db4d92..060f69e03427 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
210 | mutex_unlock(&head->subsys->lock); | 210 | mutex_unlock(&head->subsys->lock); |
211 | } | 211 | } |
212 | 212 | ||
213 | void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
214 | { | ||
215 | struct kobject *slave_disk_kobj, *holder_disk_kobj; | ||
216 | |||
217 | if (!ns->head->disk) | ||
218 | return; | ||
219 | |||
220 | slave_disk_kobj = &disk_to_dev(ns->disk)->kobj; | ||
221 | if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj, | ||
222 | kobject_name(slave_disk_kobj))) | ||
223 | return; | ||
224 | |||
225 | holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj; | ||
226 | if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj, | ||
227 | kobject_name(holder_disk_kobj))) | ||
228 | sysfs_remove_link(ns->head->disk->slave_dir, | ||
229 | kobject_name(slave_disk_kobj)); | ||
230 | } | ||
231 | |||
232 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 213 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
233 | { | 214 | { |
234 | if (!head->disk) | 215 | if (!head->disk) |
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) | |||
243 | blk_cleanup_queue(head->disk->queue); | 224 | blk_cleanup_queue(head->disk->queue); |
244 | put_disk(head->disk); | 225 | put_disk(head->disk); |
245 | } | 226 | } |
246 | |||
247 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
248 | { | ||
249 | if (!ns->head->disk) | ||
250 | return; | ||
251 | |||
252 | sysfs_remove_link(ns->disk->part0.holder_dir, | ||
253 | kobject_name(&disk_to_dev(ns->head->disk)->kobj)); | ||
254 | sysfs_remove_link(ns->head->disk->slave_dir, | ||
255 | kobject_name(&disk_to_dev(ns->disk)->kobj)); | ||
256 | } | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0521e4707d1c..d733b14ede9d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error); | |||
410 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); | 410 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); |
411 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); | 411 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); |
412 | void nvme_mpath_add_disk(struct nvme_ns_head *head); | 412 | void nvme_mpath_add_disk(struct nvme_ns_head *head); |
413 | void nvme_mpath_add_disk_links(struct nvme_ns *ns); | ||
414 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); | 413 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); |
415 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns); | ||
416 | 414 | ||
417 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 415 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
418 | { | 416 | { |
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
454 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 452 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
455 | { | 453 | { |
456 | } | 454 | } |
457 | static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
458 | { | ||
459 | } | ||
460 | static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
461 | { | ||
462 | } | ||
463 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 455 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
464 | { | 456 | { |
465 | } | 457 | } |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5933a5c732e8..b6f43b738f03 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | |||
1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) | 1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) |
1154 | return false; | 1154 | return false; |
1155 | 1155 | ||
1156 | /* If PCI error recovery process is happening, we cannot reset or | ||
1157 | * the recovery mechanism will surely fail. | ||
1158 | */ | ||
1159 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1160 | return false; | ||
1161 | |||
1162 | return true; | 1156 | return true; |
1163 | } | 1157 | } |
1164 | 1158 | ||
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
1189 | struct nvme_command cmd; | 1183 | struct nvme_command cmd; |
1190 | u32 csts = readl(dev->bar + NVME_REG_CSTS); | 1184 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
1191 | 1185 | ||
1186 | /* If PCI error recovery process is happening, we cannot reset or | ||
1187 | * the recovery mechanism will surely fail. | ||
1188 | */ | ||
1189 | mb(); | ||
1190 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1191 | return BLK_EH_RESET_TIMER; | ||
1192 | |||
1192 | /* | 1193 | /* |
1193 | * Reset immediately if the controller is failed | 1194 | * Reset immediately if the controller is failed |
1194 | */ | 1195 | */ |
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1913 | int result, nr_io_queues; | 1914 | int result, nr_io_queues; |
1914 | unsigned long size; | 1915 | unsigned long size; |
1915 | 1916 | ||
1916 | nr_io_queues = num_present_cpus(); | 1917 | nr_io_queues = num_possible_cpus(); |
1917 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); | 1918 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
1918 | if (result < 0) | 1919 | if (result < 0) |
1919 | return result; | 1920 | return result; |