 net/sunrpc/xprtrdma/transport.c | 54 ++++++++++++++++++------------------------------------
 net/sunrpc/xprtrdma/verbs.c     | 42 +++++++++++++++++++++++++++++++++---------
 net/sunrpc/xprtrdma/xprt_rdma.h |  4 +---
 3 files changed, 52 insertions(+), 48 deletions(-)
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 289d13cad638..d7c4255e9d5d 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -225,51 +225,35 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
 	}
 }
 
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-	schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
-void
-rpcrdma_connect_worker(struct work_struct *work)
+/**
+ * xprt_rdma_connect_worker - establish connection in the background
+ * @work: worker thread context
+ *
+ * Requester holds the xprt's send lock to prevent activity on this
+ * transport while a fresh connection is being established. RPC tasks
+ * sleep on the xprt's pending queue waiting for connect to complete.
+ */
+static void
+xprt_rdma_connect_worker(struct work_struct *work)
 {
-	struct rpcrdma_ep *ep =
-		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-	struct rpcrdma_xprt *r_xprt =
-		container_of(ep, struct rpcrdma_xprt, rx_ep);
+	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
+						   rx_connect_worker.work);
 	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+	int rc;
 
-	spin_lock_bh(&xprt->transport_lock);
-	if (ep->rep_connected > 0) {
+	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
+	xprt_clear_connecting(xprt);
+	if (r_xprt->rx_ep.rep_connected > 0) {
 		if (!xprt_test_and_set_connected(xprt)) {
 			xprt->stat.connect_count++;
 			xprt->stat.connect_time += (long)jiffies -
 						   xprt->stat.connect_start;
-			xprt_wake_pending_tasks(xprt, 0);
+			xprt_wake_pending_tasks(xprt, -EAGAIN);
 		}
 	} else {
 		if (xprt_test_and_clear_connected(xprt))
-			xprt_wake_pending_tasks(xprt, -ENOTCONN);
+			xprt_wake_pending_tasks(xprt, rc);
 	}
-	spin_unlock_bh(&xprt->transport_lock);
-}
-
-static void
-xprt_rdma_connect_worker(struct work_struct *work)
-{
-	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
-						   rx_connect_worker.work);
-	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-	int rc = 0;
-
-	xprt_clear_connected(xprt);
-
-	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
-	if (rc)
-		xprt_wake_pending_tasks(xprt, rc);
-
-	xprt_clear_connecting(xprt);
 }
 
 static void
@@ -302,8 +286,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
 	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
-	xprt_clear_connected(xprt);
-
 	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
 	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
 	rpcrdma_ia_close(&r_xprt->rx_ia);
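
Note (not part of the patch): the transport.c hunks above collapse the old two-worker scheme, where rpcrdma_conn_func() scheduled rpcrdma_connect_worker(), into a single connect worker that runs rpcrdma_ep_connect() and then updates the xprt state itself. For readers unfamiliar with the workqueue idiom this relies on, here is a minimal, self-contained sketch of a delayed_work embedded in a private structure: how it is initialized, scheduled, recovered with container_of() in the handler, and flushed at teardown. All demo_* names are hypothetical and stand in for the real rpcrdma structures.

/*
 * Illustrative sketch only -- not part of this patch. Generic
 * delayed_work pattern that xprt_rdma_connect_worker and
 * rpcrdma_disconnect_worker are built on. All demo_* names are
 * hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_transport {
	struct delayed_work	connect_worker;
	int			connected;
};

/* Always runs in process context on a kernel workqueue. */
static void demo_connect_worker(struct work_struct *work)
{
	/* delayed_work embeds a work_struct named "work", so the
	 * containing object is recovered via the ".work" member.
	 */
	struct demo_transport *t =
		container_of(work, struct demo_transport,
			     connect_worker.work);

	/* ... attempt the connection here, then publish the result ... */
	t->connected = 1;
}

static void demo_transport_init(struct demo_transport *t)
{
	t->connected = 0;
	INIT_DELAYED_WORK(&t->connect_worker, demo_connect_worker);
}

static void demo_transport_connect(struct demo_transport *t)
{
	/* Kick off the connect attempt without blocking the caller. */
	schedule_delayed_work(&t->connect_worker, 0);
}

static void demo_transport_destroy(struct demo_transport *t)
{
	/* Mirrors the cancel_delayed_work_sync() calls in the patch:
	 * wait for any in-flight work before tearing down the structure.
	 */
	cancel_delayed_work_sync(&t->connect_worker);
}
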
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index c60172f88a0d..abbd3cdc259a 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -108,6 +108,25 @@ rpcrdma_destroy_wq(void)
 	}
 }
 
+/**
+ * rpcrdma_disconnect_worker - Force a disconnect
+ * @work: endpoint to be disconnected
+ *
+ * Provider callbacks can possibly run in an IRQ context. This function
+ * is invoked in a worker thread to guarantee that disconnect wake-up
+ * calls are always done in process context.
+ */
+static void
+rpcrdma_disconnect_worker(struct work_struct *work)
+{
+	struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
+					     rep_disconnect_worker.work);
+	struct rpcrdma_xprt *r_xprt =
+		container_of(ep, struct rpcrdma_xprt, rx_ep);
+
+	xprt_force_disconnect(&r_xprt->rx_xprt);
+}
+
 static void
 rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
@@ -121,7 +140,7 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 
 	if (ep->rep_connected == 1) {
 		ep->rep_connected = -EIO;
-		rpcrdma_conn_func(ep);
+		schedule_delayed_work(&ep->rep_disconnect_worker, 0);
 		wake_up_all(&ep->rep_connect_wait);
 	}
 }
@@ -271,13 +290,14 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		++xprt->connect_cookie;
 		ep->rep_connected = 1;
 		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
-		goto connected;
+		wake_up_all(&ep->rep_connect_wait);
+		break;
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		ep->rep_connected = -ENOTCONN;
-		goto connected;
+		goto disconnected;
 	case RDMA_CM_EVENT_UNREACHABLE:
 		ep->rep_connected = -ENETUNREACH;
-		goto connected;
+		goto disconnected;
 	case RDMA_CM_EVENT_REJECTED:
 		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
 			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
@@ -285,12 +305,12 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 		ep->rep_connected = -ECONNREFUSED;
 		if (event->status == IB_CM_REJ_STALE_CONN)
 			ep->rep_connected = -EAGAIN;
-		goto connected;
+		goto disconnected;
 	case RDMA_CM_EVENT_DISCONNECTED:
 		++xprt->connect_cookie;
 		ep->rep_connected = -ECONNABORTED;
-connected:
-		rpcrdma_conn_func(ep);
+disconnected:
+		xprt_force_disconnect(xprt);
 		wake_up_all(&ep->rep_connect_wait);
 		break;
 	default:
@@ -550,7 +570,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
 	init_waitqueue_head(&ep->rep_connect_wait);
-	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
+	INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
+			  rpcrdma_disconnect_worker);
 
 	sendcq = ib_alloc_cq(ia->ri_device, NULL,
 			     ep->rep_attr.cap.max_send_wr + 1,
@@ -623,7 +644,7 @@ out1:
 void
 rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 {
-	cancel_delayed_work_sync(&ep->rep_connect_worker);
+	cancel_delayed_work_sync(&ep->rep_disconnect_worker);
 
 	if (ia->ri_id && ia->ri_id->qp) {
 		rpcrdma_ep_disconnect(ep, ia);
@@ -736,6 +757,7 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 {
 	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
 						   rx_ia);
+	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
 	int rc;
 
 retry:
@@ -762,6 +784,8 @@ retry:
 	}
 
 	ep->rep_connected = 0;
+	xprt_clear_connected(xprt);
+
 	rpcrdma_post_recvs(r_xprt, true);
 
 	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
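
Note (not part of the patch): the new rpcrdma_disconnect_worker exists because, as its kernel-doc says, provider upcalls can run in IRQ context, where the disconnect wake-up must not be performed directly; the upcall therefore only schedules a work item, and xprt_force_disconnect() runs later in process context. A minimal sketch of that hand-off follows; the demo_* names are hypothetical and the worker body merely stands in for the real xprt_force_disconnect() call.

/*
 * Illustrative sketch only -- not part of this patch. Defers work
 * from a context that must not sleep (such as an IRQ-time provider
 * upcall) to process context, the pattern behind
 * rpcrdma_disconnect_worker. All demo_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_endpoint {
	struct delayed_work	disconnect_worker;
	int			connected;
};

/* Runs in process context, so context-sensitive APIs are safe here. */
static void demo_disconnect_worker(struct work_struct *work)
{
	struct demo_endpoint *ep =
		container_of(work, struct demo_endpoint,
			     disconnect_worker.work);

	/* In the real code this is where xprt_force_disconnect()
	 * wakes up waiting RPC tasks.
	 */
	ep->connected = 0;
}

/* May be invoked from IRQ context by the device driver. */
static void demo_error_upcall(struct demo_endpoint *ep)
{
	/* No sleeping here: just hand the rest off to the worker. */
	schedule_delayed_work(&ep->disconnect_worker, 0);
}

static void demo_endpoint_init(struct demo_endpoint *ep)
{
	ep->connected = 1;
	INIT_DELAYED_WORK(&ep->disconnect_worker, demo_disconnect_worker);
}
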
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index eae21668e692..a13ccb643ce0 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -101,7 +101,7 @@ struct rpcrdma_ep {
 	wait_queue_head_t	rep_connect_wait;
 	struct rpcrdma_connect_private	rep_cm_private;
 	struct rdma_conn_param	rep_remote_cma;
-	struct delayed_work	rep_connect_worker;
+	struct delayed_work	rep_disconnect_worker;
 };
 
 /* Pre-allocate extra Work Requests for handling backward receives
@@ -556,7 +556,6 @@ int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
 			  struct rpcrdma_create_data_internal *);
 void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
-void rpcrdma_conn_func(struct rpcrdma_ep *ep);
 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
@@ -654,7 +653,6 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
 extern unsigned int xprt_rdma_max_inline_read;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
-void rpcrdma_connect_worker(struct work_struct *work);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);