author	Bart Van Assche <bart.vanassche@sandisk.com>	2016-02-11 14:08:53 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-02-29 17:12:36 -0500
commit	aaf45bd83eba804adfa5c4ff5b17ca0c88884c6f
tree	5bf8a5c34114da1c315d7ba59c422346d5879713
parent	8628991fbe6a9086189f55f0b33dee7f25108ecc
IB/srpt: Detect session shutdown reliably
The Last WQE Reached event is only generated after one or more work requests
have been queued on the QP associated with a session. Since session shutdown
can start before any work requests have been queued, use a zero-length RDMA
write to wait until a QP has been drained. Additionally, rework the code for
closing and disconnecting a session.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Alex Estrin <alex.estrin@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
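For readers unfamiliar with the trick the description refers to: a signaled zero-length RDMA WRITE completes only after every work request posted before it on the same send queue, so its completion can be used to detect that a QP has been drained even when no other work requests were ever posted, which is exactly the case the Last WQE Reached event does not cover. Below is a minimal, illustrative sketch of that pattern, independent of the srpt code in the diff; the drain_ctx/drain_done names are made up for this example.

/* Illustrative only: wait for a QP to drain via a signaled zero-length write. */
#include <rdma/ib_verbs.h>
#include <linux/completion.h>

struct drain_ctx {
	struct ib_cqe cqe;		/* lets the completion handler find this context */
	struct completion done;
};

static void drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct drain_ctx *ctx = container_of(wc->wr_cqe, struct drain_ctx, cqe);

	/* Called for both successful and flushed completions. */
	complete(&ctx->done);
}

static int post_drain_wr(struct ib_qp *qp, struct drain_ctx *ctx)
{
	struct ib_send_wr wr = {}, *bad_wr;

	init_completion(&ctx->done);
	ctx->cqe.done = drain_done;

	/* IB spec C9-88: the R_Key of a zero-length RDMA WRITE is not validated. */
	wr.opcode = IB_WR_RDMA_WRITE;
	wr.wr_cqe = &ctx->cqe;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, &bad_wr);
}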
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c  282
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h   18
2 files changed, 150 insertions(+), 150 deletions(-)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 33bd408c5eea..0881ae902ba0 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -92,10 +92,11 @@ MODULE_PARM_DESC(srpt_service_guid,
 
 static struct ib_client srpt_client;
 static void srpt_release_cmd(struct se_cmd *se_cmd);
-static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static void srpt_free_ch(struct kref *kref);
 static int srpt_queue_status(struct se_cmd *cmd);
 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc);
 
 /*
  * The only allowed channel state changes are those that change the channel
@@ -175,6 +176,23 @@ static void srpt_srq_event(struct ib_event *event, void *ctx)
 	pr_info("SRQ event %d\n", event->event);
 }
 
+static const char *get_ch_state_name(enum rdma_ch_state s)
+{
+	switch (s) {
+	case CH_CONNECTING:
+		return "connecting";
+	case CH_LIVE:
+		return "live";
+	case CH_DISCONNECTING:
+		return "disconnecting";
+	case CH_DRAINING:
+		return "draining";
+	case CH_DISCONNECTED:
+		return "disconnected";
+	}
+	return "???";
+}
+
 /**
  * srpt_qp_event() - QP event callback function.
  */
@@ -188,11 +206,9 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 		ib_cm_notify(ch->cm_id, event->event);
 		break;
 	case IB_EVENT_QP_LAST_WQE_REACHED:
-		if (srpt_set_ch_state(ch, CH_RELEASING))
-			srpt_release_channel(ch);
-		else
-			pr_debug("%s: state %d - ignored LAST_WQE.\n",
-				 ch->sess_name, ch->state);
+		pr_debug("%s-%d, state %s: received Last WQE event.\n",
+			 ch->sess_name, ch->qp->qp_num,
+			 get_ch_state_name(ch->state));
 		break;
 	default:
 		pr_err("received unrecognized IB QP event %d\n", event->event);
@@ -795,6 +811,37 @@ out:
 }
 
 /**
+ * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ *
+ * A quote from the InfiniBand specification: C9-88: For an HCA responder
+ * using Reliable Connection service, for each zero-length RDMA READ or WRITE
+ * request, the R_Key shall not be validated, even if the request includes
+ * Immediate data.
+ */
+static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+{
+	struct ib_send_wr wr, *bad_wr;
+
+	memset(&wr, 0, sizeof(wr));
+	wr.opcode = IB_WR_RDMA_WRITE;
+	wr.wr_cqe = &ch->zw_cqe;
+	wr.send_flags = IB_SEND_SIGNALED;
+	return ib_post_send(ch->qp, &wr, &bad_wr);
+}
+
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct srpt_rdma_ch *ch = cq->cq_context;
+
+	WARN(wc->status == IB_WC_SUCCESS, "%s-%d: QP not in error state\n",
+	     ch->sess_name, ch->qp->qp_num);
+	if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+		schedule_work(&ch->release_work);
+	else
+		WARN_ONCE("%s-%d\n", ch->sess_name, ch->qp->qp_num);
+}
+
+/**
  * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
  * @ioctx: Pointer to the I/O context associated with the request.
  * @srp_cmd: Pointer to the SRP_CMD request data.
@@ -1816,110 +1863,102 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
 }
 
 /**
- * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ * srpt_close_ch() - Close an RDMA channel.
  *
- * Reset the QP and make sure all resources associated with the channel will
- * be deallocated at an appropriate time.
+ * Make sure all resources associated with the channel will be deallocated at
+ * an appropriate time.
  *
- * Note: The caller must hold ch->sport->sdev->spinlock.
+ * Returns true if and only if the channel state has been modified into
+ * CH_DRAINING.
  */
-static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+static bool srpt_close_ch(struct srpt_rdma_ch *ch)
 {
-	enum rdma_ch_state prev_state;
-	unsigned long flags;
+	int ret;
 
-	spin_lock_irqsave(&ch->spinlock, flags);
-	prev_state = ch->state;
-	switch (prev_state) {
-	case CH_CONNECTING:
-	case CH_LIVE:
-		ch->state = CH_DISCONNECTING;
-		break;
-	default:
-		break;
+	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+		pr_debug("%s-%d: already closed\n", ch->sess_name,
+			 ch->qp->qp_num);
+		return false;
 	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
 
-	switch (prev_state) {
-	case CH_CONNECTING:
-		ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
-			       NULL, 0);
-		/* fall through */
-	case CH_LIVE:
-		if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
-			pr_err("sending CM DREQ failed.\n");
-		break;
-	case CH_DISCONNECTING:
-		break;
-	case CH_DRAINING:
-	case CH_RELEASING:
-		break;
-	}
-}
+	kref_get(&ch->kref);
 
-/**
- * srpt_close_ch() - Close an RDMA channel.
- */
-static void srpt_close_ch(struct srpt_rdma_ch *ch)
-{
-	struct srpt_device *sdev = ch->sport->sdev;
+	ret = srpt_ch_qp_err(ch);
+	if (ret < 0)
+		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
+		       ch->sess_name, ch->qp->qp_num, ret);
 
-	mutex_lock(&sdev->mutex);
-	__srpt_close_ch(ch);
-	mutex_unlock(&sdev->mutex);
-}
+	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+		 ch->qp->qp_num);
+	ret = srpt_zerolength_write(ch);
+	if (ret < 0) {
+		pr_err("%s-%d: queuing zero-length write failed: %d\n",
+		       ch->sess_name, ch->qp->qp_num, ret);
+		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+			schedule_work(&ch->release_work);
+		else
+			WARN_ON_ONCE(true);
+	}
 
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-	return 1;
+	kref_put(&ch->kref, srpt_free_ch);
+
+	return true;
 }
 
-/**
- * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
- * @cm_id: Pointer to the CM ID of the channel to be drained.
- *
- * Note: Must be called from inside srpt_cm_handler to avoid a race between
- * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
- * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
- * waits until all target sessions for the associated IB device have been
- * unregistered and target session registration involves a call to
- * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
- * this function has finished).
+/*
+ * Change the channel state into CH_DISCONNECTING. If a channel has not yet
+ * reached the connected state, close it. If a channel is in the connected
+ * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
+ * the responsibility of the caller to ensure that this function is not
+ * invoked concurrently with the code that accepts a connection. This means
+ * that this function must either be invoked from inside a CM callback
+ * function or that it must be invoked with the srpt_port.mutex held.
  */
-static void srpt_drain_channel(struct srpt_rdma_ch *ch)
+static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
 {
 	int ret;
-	bool do_reset = false;
 
-	WARN_ON_ONCE(irqs_disabled());
+	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
+		return -ENOTCONN;
+
+	ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+	if (ret < 0)
+		ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
+
+	if (ret < 0 && srpt_close_ch(ch))
+		ret = 0;
+
+	return ret;
+}
 
-	do_reset = srpt_set_ch_state(ch, CH_DRAINING);
+static void __srpt_close_all_ch(struct srpt_device *sdev)
+{
+	struct srpt_rdma_ch *ch;
 
-	if (do_reset) {
-		if (ch->sess)
-			srpt_shutdown_session(ch->sess);
+	lockdep_assert_held(&sdev->mutex);
 
-		ret = srpt_ch_qp_err(ch);
-		if (ret < 0)
-			pr_err("Setting queue pair in error state"
-			       " failed: %d\n", ret);
+	list_for_each_entry(ch, &sdev->rch_list, list) {
+		if (srpt_disconnect_ch(ch) >= 0)
+			pr_info("Closing channel %s-%d because target %s has been disabled\n",
+				ch->sess_name, ch->qp->qp_num,
+				sdev->device->name);
+		srpt_close_ch(ch);
 	}
 }
 
 /**
- * srpt_release_channel() - Release channel resources.
- *
- * Schedules the actual release because:
- * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
- *   trigger a deadlock.
- * - It is not safe to call TCM transport_* functions from interrupt context.
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
  */
-static void srpt_release_channel(struct srpt_rdma_ch *ch)
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+	return 1;
+}
+
+static void srpt_free_ch(struct kref *kref)
 {
-	schedule_work(&ch->release_work);
+	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+
+	kfree(ch);
 }
 
 static void srpt_release_channel_work(struct work_struct *w)
@@ -1961,7 +2000,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 
 	wake_up(&sdev->ch_releaseQ);
 
-	kfree(ch);
+	kref_put(&ch->kref, srpt_free_ch);
 }
 
 /**
@@ -2046,17 +2085,10 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 		    && param->port == ch->sport->port
 		    && param->listen_id == ch->sport->sdev->cm_id
 		    && ch->cm_id) {
-			if (ch->state != CH_CONNECTING
-			    && ch->state != CH_LIVE)
+			if (srpt_disconnect_ch(ch) < 0)
 				continue;
-
-			/* found an existing channel */
-			pr_debug("Found existing channel %s"
-				 " cm_id= %p state= %d\n",
-				 ch->sess_name, ch->cm_id, ch->state);
-
-			__srpt_close_ch(ch);
-
+			pr_info("Relogin - closed existing channel %s\n",
+				ch->sess_name);
 			rsp->rsp_flags =
 				SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
 		}
@@ -2087,6 +2119,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 		goto reject;
 	}
 
+	kref_init(&ch->kref);
+	ch->zw_cqe.done = srpt_zerolength_write_done;
 	INIT_WORK(&ch->release_work, srpt_release_channel_work);
 	memcpy(ch->i_port_id, req->initiator_port_id, 16);
 	memcpy(ch->t_port_id, req->target_port_id, 16);
@@ -2214,7 +2248,7 @@ try_again:
 	goto out;
 
 release_channel:
-	srpt_set_ch_state(ch, CH_RELEASING);
+	srpt_disconnect_ch(ch);
 	transport_deregister_session_configfs(ch->sess);
 	transport_deregister_session(ch->sess);
 	ch->sess = NULL;
@@ -2263,7 +2297,6 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
 		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
 		"; private data" : "", priv ? priv : " (?)");
 	kfree(priv);
-	srpt_drain_channel(ch);
 }
 
 /**
@@ -2292,40 +2325,6 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
 }
 
 /**
- * srpt_cm_dreq_recv() - Process reception of a DREQ message.
- */
-static void srpt_cm_dreq_recv(struct srpt_rdma_ch *ch)
-{
-	unsigned long flags;
-	bool send_drep = false;
-
-	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
-		 ch->state);
-
-	spin_lock_irqsave(&ch->spinlock, flags);
-	switch (ch->state) {
-	case CH_CONNECTING:
-	case CH_LIVE:
-		send_drep = true;
-		ch->state = CH_DISCONNECTING;
-		break;
-	case CH_DISCONNECTING:
-	case CH_DRAINING:
-	case CH_RELEASING:
-		WARN(true, "unexpected channel state %d\n", ch->state);
-		break;
-	}
-	spin_unlock_irqrestore(&ch->spinlock, flags);
-
-	if (send_drep) {
-		if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
-			pr_err("Sending IB DREP failed.\n");
-		pr_info("Received DREQ and sent DREP for session %s.\n",
-			ch->sess_name);
-	}
-}
-
-/**
  * srpt_cm_handler() - IB connection manager callback function.
  *
  * A non-zero return value will cause the caller destroy the CM ID.
@@ -2356,22 +2355,21 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		srpt_cm_rtu_recv(ch);
 		break;
 	case IB_CM_DREQ_RECEIVED:
-		srpt_cm_dreq_recv(ch);
+		srpt_disconnect_ch(ch);
 		break;
 	case IB_CM_DREP_RECEIVED:
 		pr_info("Received CM DREP message for ch %s-%d.\n",
 			ch->sess_name, ch->qp->qp_num);
-		srpt_drain_channel(ch);
+		srpt_close_ch(ch);
 		break;
 	case IB_CM_TIMEWAIT_EXIT:
 		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
 			ch->sess_name, ch->qp->qp_num);
-		srpt_drain_channel(ch);
+		srpt_close_ch(ch);
 		break;
 	case IB_CM_REP_ERROR:
 		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
 			ch->qp->qp_num);
-		srpt_drain_channel(ch);
 		break;
 	case IB_CM_DREQ_ERROR:
 		pr_info("Received CM DREQ ERROR event.\n");
@@ -2511,7 +2509,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
 		break;
 	case CH_DISCONNECTING:
 	case CH_DRAINING:
-	case CH_RELEASING:
+	case CH_DISCONNECTED:
 		pr_debug("cmd with tag %lld: channel disconnecting\n",
 			 ioctx->cmd.tag);
 		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
@@ -2657,16 +2655,16 @@ static void srpt_refresh_port_work(struct work_struct *work)
  */
 static int srpt_release_sdev(struct srpt_device *sdev)
 {
-	struct srpt_rdma_ch *ch, *tmp_ch;
-	int res;
+	int i, res;
 
 	WARN_ON_ONCE(irqs_disabled());
 
 	BUG_ON(!sdev);
 
 	mutex_lock(&sdev->mutex);
-	list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
-		__srpt_close_ch(ch);
+	for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
+		sdev->port[i].enabled = false;
+	__srpt_close_all_ch(sdev);
 	mutex_unlock(&sdev->mutex);
 
 	res = wait_event_interruptible(sdev->ch_releaseQ,
@@ -2963,7 +2961,7 @@ static void srpt_close_session(struct se_session *se_sess)
 	BUG_ON(ch->release_done);
 	ch->release_done = &release_done;
 	wait = !list_empty(&ch->list);
-	__srpt_close_ch(ch);
+	srpt_disconnect_ch(ch);
 	mutex_unlock(&sdev->mutex);
 
 	if (!wait)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 5883295a7b97..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -218,20 +218,20 @@ struct srpt_send_ioctx {
 
 /**
  * enum rdma_ch_state - SRP channel state.
  * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
  * @CH_LIVE: QP is in RTS state.
- * @CH_DISCONNECTING: DREQ has been received; waiting for DREP
- *                    or DREQ has been send and waiting for DREP
- *                    or .
- * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
- * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
+ *                    been received.
+ * @CH_DRAINING: DREP has been received or waiting for DREP timed out
+ *               and last work request has been queued.
+ * @CH_DISCONNECTED: Last completion has been received.
  */
 enum rdma_ch_state {
 	CH_CONNECTING,
 	CH_LIVE,
 	CH_DISCONNECTING,
 	CH_DRAINING,
-	CH_RELEASING
+	CH_DISCONNECTED,
 };
 
 /**
@@ -267,6 +267,8 @@ struct srpt_rdma_ch {
 	struct ib_cm_id *cm_id;
 	struct ib_qp *qp;
 	struct ib_cq *cq;
+	struct ib_cqe zw_cqe;
+	struct kref kref;
 	int rq_size;
 	u32 rsp_size;
 	atomic_t sq_wr_avail;
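A side note on the lifetime change visible in the hunks above: channel memory is no longer freed directly by the release work. Instead a kref is initialized when the channel is created, srpt_close_ch() holds a temporary reference while it moves the QP into the error state and posts the zero-length write, and whichever path drops the last reference triggers srpt_free_ch(). A stand-alone sketch of that ownership pattern follows; the demo_* names are illustrative and not part of the driver.

/* Illustrative only: kref-based object lifetime across two teardown paths. */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_ch {
	struct kref kref;
	/* ... channel state ... */
};

static void demo_free_ch(struct kref *kref)
{
	kfree(container_of(kref, struct demo_ch, kref));
}

static struct demo_ch *demo_create_ch(void)
{
	struct demo_ch *ch = kzalloc(sizeof(*ch), GFP_KERNEL);

	if (ch)
		kref_init(&ch->kref);	/* reference owned by the release work */
	return ch;
}

static void demo_close_ch(struct demo_ch *ch)
{
	kref_get(&ch->kref);		/* keep ch alive across the close path */
	/* ... move the QP to the error state, post the zero-length write ... */
	kref_put(&ch->kref, demo_free_ch);
}

static void demo_release_work(struct demo_ch *ch)
{
	/* runs after the zero-length write has completed */
	kref_put(&ch->kref, demo_free_ch);	/* may free ch here */
}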