author     Sagi Grimberg <sagi@grimberg.me>    2018-07-09 05:49:06 -0400
committer  Christoph Hellwig <hch@lst.de>      2018-07-24 09:55:49 -0400
commit     c66e2998c8ca4d5da85d4915612dca29e054ad21 (patch)
tree       951eb6ca331becae77f5401ba576ad881d11da4c /drivers/nvme
parent     90140624e8face94207003ac9a9d2a329b309d68 (diff)
nvme-rdma: centralize controller setup sequence
Centralize the controller setup sequence into a single routine that
correctly cleans up after failures, instead of having multiple
appearances of it in several flows (create, reset, reconnect).

One thing we also gain here is that the sanity/boundary checks now run
when connecting back to a dynamic controller as well.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/rdma.c  130
1 file changed, 53 insertions(+), 77 deletions(-)
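
The shape of the change is easier to see outside the diff: create, reset, and reconnect previously each open-coded admin-queue setup, sanity checks, I/O-queue setup, and error unwind, and all three now funnel into nvme_rdma_setup_ctrl(). The following is a minimal, self-contained sketch of that pattern; the stub functions are hypothetical stand-ins for the real nvme_rdma_* helpers, so it illustrates the structure rather than the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the real nvme_rdma_* helpers. */
static int configure_admin_queue(bool new_ctrl) { return 0; }
static int configure_io_queues(bool new_ctrl) { return 0; }
static void destroy_io_queues(bool new_ctrl) { }
static void stop_admin_queue(void) { }
static void destroy_admin_queue(bool new_ctrl) { }
static bool sanity_checks_ok(void) { return true; }
static bool go_live(void) { return true; }

/*
 * One setup routine shared by create/reset/reconnect.  Each failure
 * path unwinds exactly what has been set up so far -- the goto-ladder
 * idiom the patch introduces in nvme_rdma_setup_ctrl().
 */
static int setup_ctrl(bool new_ctrl, int queue_count)
{
        int ret;

        ret = configure_admin_queue(new_ctrl);
        if (ret)
                return ret;                     /* nothing to unwind yet */

        if (!sanity_checks_ok()) {
                ret = -1;                       /* -EINVAL in the kernel */
                goto destroy_admin;
        }

        if (queue_count > 1) {
                ret = configure_io_queues(new_ctrl);
                if (ret)
                        goto destroy_admin;
        }

        if (!go_live()) {                       /* state change failed */
                ret = -1;
                goto destroy_io;
        }
        return 0;

destroy_io:
        if (queue_count > 1)
                destroy_io_queues(new_ctrl);    /* falls through below */
destroy_admin:
        stop_admin_queue();
        destroy_admin_queue(new_ctrl);
        return ret;
}

int main(void)
{
        /* create passes new=true; reset and reconnect pass new=false */
        printf("create:    %d\n", setup_ctrl(true, 4));
        printf("reconnect: %d\n", setup_ctrl(false, 4));
        return 0;
}

The key property is that destroy_io falls through into destroy_admin, so a failure after the I/O queues exist tears down both, while an earlier failure tears down only the admin queue.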
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2b683b8d4763..c22125c5661b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -917,24 +917,44 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
         }
 }
 
-static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 {
-        struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
-                        struct nvme_rdma_ctrl, reconnect_work);
+        int ret = -EINVAL;
         bool changed;
-        int ret;
-
-        ++ctrl->ctrl.nr_reconnects;
 
-        ret = nvme_rdma_configure_admin_queue(ctrl, false);
+        ret = nvme_rdma_configure_admin_queue(ctrl, new);
         if (ret)
-                goto requeue;
+                return ret;
+
+        if (ctrl->ctrl.icdoff) {
+                dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+                goto destroy_admin;
+        }
+
+        if (!(ctrl->ctrl.sgls & (1 << 2))) {
+                dev_err(ctrl->ctrl.device,
+                        "Mandatory keyed sgls are not supported!\n");
+                goto destroy_admin;
+        }
+
+        if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
+                dev_warn(ctrl->ctrl.device,
+                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
+                        ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+        }
+
+        if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+                dev_warn(ctrl->ctrl.device,
+                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                        ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+                ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+        }
 
         if (ctrl->ctrl.sgls & (1 << 20))
                 ctrl->use_inline_data = true;
 
         if (ctrl->ctrl.queue_count > 1) {
-                ret = nvme_rdma_configure_io_queues(ctrl, false);
+                ret = nvme_rdma_configure_io_queues(ctrl, new);
                 if (ret)
                         goto destroy_admin;
         }
@@ -943,10 +963,31 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
         if (!changed) {
                 /* state change failure is ok if we're in DELETING state */
                 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
-                return;
+                ret = -EINVAL;
+                goto destroy_io;
         }
 
         nvme_start_ctrl(&ctrl->ctrl);
+        return 0;
+
+destroy_io:
+        if (ctrl->ctrl.queue_count > 1)
+                nvme_rdma_destroy_io_queues(ctrl, new);
+destroy_admin:
+        nvme_rdma_stop_queue(&ctrl->queues[0]);
+        nvme_rdma_destroy_admin_queue(ctrl, new);
+        return ret;
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+        struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+                        struct nvme_rdma_ctrl, reconnect_work);
+
+        ++ctrl->ctrl.nr_reconnects;
+
+        if (nvme_rdma_setup_ctrl(ctrl, false))
+                goto requeue;
 
         dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
                 ctrl->ctrl.nr_reconnects);
@@ -955,9 +996,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
         return;
 
-destroy_admin:
-        nvme_rdma_stop_queue(&ctrl->queues[0]);
-        nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
         dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
                         ctrl->ctrl.nr_reconnects);
@@ -1786,8 +1824,6 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
         struct nvme_rdma_ctrl *ctrl =
                 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
-        int ret;
-        bool changed;
 
         nvme_stop_ctrl(&ctrl->ctrl);
         nvme_rdma_shutdown_ctrl(ctrl, false);
@@ -1798,25 +1834,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
                 return;
         }
 
-        ret = nvme_rdma_configure_admin_queue(ctrl, false);
-        if (ret)
+        if (nvme_rdma_setup_ctrl(ctrl, false))
                 goto out_fail;
 
-        if (ctrl->ctrl.queue_count > 1) {
-                ret = nvme_rdma_configure_io_queues(ctrl, false);
-                if (ret)
-                        goto out_fail;
-        }
-
-        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
-        if (!changed) {
-                /* state change failure is ok if we're in DELETING state */
-                WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
-                return;
-        }
-
-        nvme_start_ctrl(&ctrl->ctrl);
-
         return;
 
 out_fail:
@@ -1979,49 +1999,10 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
         WARN_ON_ONCE(!changed);
 
-        ret = nvme_rdma_configure_admin_queue(ctrl, true);
+        ret = nvme_rdma_setup_ctrl(ctrl, true);
         if (ret)
                 goto out_uninit_ctrl;
 
-        /* sanity check icdoff */
-        if (ctrl->ctrl.icdoff) {
-                dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
-                ret = -EINVAL;
-                goto out_remove_admin_queue;
-        }
-
-        /* sanity check keyed sgls */
-        if (!(ctrl->ctrl.sgls & (1 << 2))) {
-                dev_err(ctrl->ctrl.device,
-                        "Mandatory keyed sgls are not supported!\n");
-                ret = -EINVAL;
-                goto out_remove_admin_queue;
-        }
-
-        /* only warn if argument is too large here, will clamp later */
-        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-                dev_warn(ctrl->ctrl.device,
-                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
-                        opts->queue_size, ctrl->ctrl.sqsize + 1);
-        }
-
-        /* warn if maxcmd is lower than sqsize+1 */
-        if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
-                dev_warn(ctrl->ctrl.device,
-                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
-                        ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
-                ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
-        }
-
-        if (opts->nr_io_queues) {
-                ret = nvme_rdma_configure_io_queues(ctrl, true);
-                if (ret)
-                        goto out_remove_admin_queue;
-        }
-
-        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
-        WARN_ON_ONCE(!changed);
-
         dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
                 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
 
@@ -2031,13 +2012,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
         mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-        nvme_start_ctrl(&ctrl->ctrl);
-
         return &ctrl->ctrl;
 
-out_remove_admin_queue:
-        nvme_rdma_stop_queue(&ctrl->queues[0]);
-        nvme_rdma_destroy_admin_queue(ctrl, true);
 out_uninit_ctrl:
         nvme_uninit_ctrl(&ctrl->ctrl);
         nvme_put_ctrl(&ctrl->ctrl);
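
One practical consequence, as the commit message notes, is that the icdoff/keyed-SGL sanity checks and the queue-size clamping now also run on reconnects to dynamic controllers. The checks are plain arithmetic on controller capabilities, so they can be exercised standalone; in the sketch below the struct and its fields merely mirror the values the diff consults and are not the kernel's API.

#include <stdio.h>

/* Illustrative capability snapshot; fields mirror what the diff checks. */
struct caps {
        unsigned int icdoff;        /* in-capsule data offset */
        unsigned int sgls;          /* SGL support bitmask */
        unsigned int sqsize;        /* 0's-based submission queue size */
        unsigned int maxcmd;        /* max outstanding commands */
        size_t queue_size;          /* user-requested queue depth */
};

static int validate_and_clamp(struct caps *c)
{
        if (c->icdoff) {
                fprintf(stderr, "icdoff is not supported!\n");
                return -1;
        }
        if (!(c->sgls & (1 << 2))) {
                fprintf(stderr, "Mandatory keyed sgls are not supported!\n");
                return -1;
        }
        /* warn only; the clamp below bounds the effective depth */
        if (c->queue_size > c->sqsize + 1)
                fprintf(stderr, "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        c->queue_size, c->sqsize + 1);
        if (c->sqsize + 1 > c->maxcmd) {
                fprintf(stderr, "sqsize %u > ctrl maxcmd %u, clamping down\n",
                        c->sqsize + 1, c->maxcmd);
                c->sqsize = c->maxcmd - 1;      /* clamp; sqsize is 0's-based */
        }
        return 0;
}

int main(void)
{
        /* a controller advertising keyed SGLs but a small maxcmd */
        struct caps c = { .icdoff = 0, .sgls = 1 << 2,
                          .sqsize = 127, .maxcmd = 64, .queue_size = 128 };

        if (validate_and_clamp(&c) == 0)
                printf("effective queue depth: %u\n", c.sqsize + 1);
        return 0;
}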