Diffstat (limited to 'net/sunrpc')

 net/sunrpc/svcsock.c                     | 35
 net/sunrpc/xprt.c                        |  6
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  |  2
 net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 15
 net/sunrpc/xprtrdma/svc_rdma_transport.c | 13
 net/sunrpc/xprtrdma/verbs.c              |  3
 net/sunrpc/xprtsock.c                    | 26
 7 files changed, 70 insertions(+), 30 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4e6d406264a0..004a2f9dc432 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -379,6 +379,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
+	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
 	release_sock(sock->sk);
 #endif
 }
@@ -831,6 +832,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+		/* sndbuf needs to have room for one request
+		 * per thread, otherwise we can stall even when the
+		 * network isn't a bottleneck.
+		 *
+		 * We count all threads rather than threads in a
+		 * particular pool, which provides an upper bound
+		 * on the number of threads which will access the socket.
+		 *
+		 * rcvbuf just needs to be able to hold a few requests.
+		 * Normally they will be removed from the queue
+		 * as soon as a complete request arrives.
+		 */
+		svc_sock_setbufsize(svsk->sk_sock,
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);
+
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
@@ -1078,6 +1096,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
 	tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+	/* initial setting must have enough space to
+	 * receive and respond to one request.
+	 * svc_tcp_recvfrom will re-adjust if necessary
+	 */
+	svc_sock_setbufsize(svsk->sk_sock,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	if (sk->sk_state != TCP_ESTABLISHED)
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1147,14 +1174,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
 		svc_udp_init(svsk, serv);
-	else {
-		/* initialise setting must have enough space to
-		 * receive and respond to one request.
-		 */
-		svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-					4 * serv->sv_max_mesg);
+	else
 		svc_tcp_init(svsk, serv);
-	}
 
 	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
 			svsk, svsk->sk_sk);
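
The sizing rule above gives the send buffer room for one full reply per server thread plus a little slack, while the receive buffer only needs to hold a few complete requests, since they are pulled off the queue as soon as each one arrives. A minimal user-space sketch of the arithmetic (nrthreads and max_mesg are hypothetical stand-ins for serv->sv_nrthreads and serv->sv_max_mesg; svc_sock_setbufsize() then doubles each value and pins it with SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK):

#include <stdio.h>

/* Hypothetical stand-ins for serv->sv_nrthreads and serv->sv_max_mesg. */
static unsigned int sndbuf_needed(unsigned int nrthreads, unsigned int max_mesg)
{
	/* one in-flight reply per thread, plus three messages of slack */
	return (nrthreads + 3) * max_mesg;
}

static unsigned int rcvbuf_needed(unsigned int max_mesg)
{
	/* a few whole requests; they drain as soon as each is complete */
	return 3 * max_mesg;
}

int main(void)
{
	/* e.g. 8 nfsd threads with a 1 MB maximum message */
	printf("sndbuf=%u rcvbuf=%u\n",
	       sndbuf_needed(8, 1 << 20), rcvbuf_needed(1 << 20));
	return 0;
}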
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a0bfe53f1621..06ca058572f2 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -672,10 +672,8 @@ xprt_init_autodisconnect(unsigned long data)
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		goto out_abort;
 	spin_unlock(&xprt->transport_lock);
-	if (xprt_connecting(xprt))
-		xprt_release_write(xprt, NULL);
-	else
-		queue_work(rpciod_workqueue, &xprt->task_cleanup);
+	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+	queue_work(rpciod_workqueue, &xprt->task_cleanup);
 	return;
 out_abort:
 	spin_unlock(&xprt->transport_lock);
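
The timer no longer special-cases a connect in progress; it always records the intent and defers the actual teardown to rpciod. The XPRT_CONNECTION_CLOSE bit set here is consumed by the new xs_tcp_close() added to xprtsock.c below. A condensed sketch of the two halves of that handshake, using the patch's own identifiers (both bodies are simplifications, not the full kernel paths):

/* Timer side (xprt_init_autodisconnect): never tear the socket down
 * in timer context; mark the transport and queue the cleanup worker. */
set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
queue_work(rpciod_workqueue, &xprt->task_cleanup);

/* Worker side (xs_tcp_close): a set bit means "really close",
 * otherwise do an orderly shutdown. */
if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
	xs_close(xprt);
else
	xs_tcp_shutdown(xprt);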
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 629a28764da9..42a6f9f20285 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
 		frmr->page_list->page_list[page_no] =
 			ib_dma_map_single(xprt->sc_cm_id->device,
 					  page_address(rqstp->rq_arg.pages[page_no]),
-					  PAGE_SIZE, DMA_TO_DEVICE);
+					  PAGE_SIZE, DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
 			goto fatal_err;
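
The direction flag matters here because these pages are the sink of an RDMA Read: the adapter writes the pulled data into them, so the mapping must be DMA_FROM_DEVICE. DMA_TO_DEVICE only guarantees that CPU writes become visible to the device, and on non-coherent platforms the CPU could then read stale data. A minimal sketch of the rule with the generic DMA API (map_read_sink() is a hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Memory the device will WRITE (a receive buffer, an RDMA Read sink)
 * is mapped device-to-CPU; DMA_TO_DEVICE is only for buffers the
 * device reads from. */
static dma_addr_t map_read_sink(struct device *dev, struct page *page)
{
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
}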
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 6c26a675435a..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					  PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -183,6 +184,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 
  fatal_err:
 	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
+	vec->frmr = NULL;
 	svc_rdma_put_frmr(xprt, frmr);
 	return -EIO;
 }
@@ -516,6 +518,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
516 "svcrdma: could not post a receive buffer, err=%d." 518 "svcrdma: could not post a receive buffer, err=%d."
517 "Closing transport %p.\n", ret, rdma); 519 "Closing transport %p.\n", ret, rdma);
518 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 520 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
521 svc_rdma_put_frmr(rdma, vec->frmr);
519 svc_rdma_put_context(ctxt, 0); 522 svc_rdma_put_context(ctxt, 0);
520 return -ENOTCONN; 523 return -ENOTCONN;
521 } 524 }
@@ -530,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
@@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	return 0;
 
  err:
+	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_frmr(rdma, vec->frmr);
 	svc_rdma_put_context(ctxt, 1);
 	return -EIO;
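
Two things change in send_reply(): the header SGE's lkey and length are computed before the mapping, which lets the DMA map cover only the encoded reply header instead of a whole page, and ib_dma_map_page(page, 0, ...) becomes the equivalent ib_dma_map_single(page_address(page), ...). A sketch of the resulting order (prepare_hdr_sge() is hypothetical; hdr_len stands in for svc_rdma_xdr_get_reply_hdr_len()):

/* Hypothetical condensation of the reordered header setup. */
static int prepare_hdr_sge(struct svcxprt_rdma *rdma, struct ib_sge *sge,
			   struct page *page, u32 hdr_len)
{
	sge->lkey = rdma->sc_dma_lkey;
	sge->length = hdr_len;		/* known before mapping ...     */
	sge->addr = ib_dma_map_single(rdma->sc_cm_id->device,
				      page_address(page),
				      sge->length,	/* ... so map only this much */
				      DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, sge->addr))
		return -EIO;
	return 0;
}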
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3d810e7df3fb..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				       page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	svc_xprt_get(&xprt->sc_xprt);
 	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
 	if (ret) {
-		svc_xprt_put(&xprt->sc_xprt);
+		svc_rdma_unmap_dma(ctxt);
 		svc_rdma_put_context(ctxt, 1);
+		svc_xprt_put(&xprt->sc_xprt);
 	}
 	return ret;
 
@@ -1314,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1342,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
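
Both error paths in this file (like the send_reply() one above) now unwind in reverse order of acquisition: the DMA mappings go first, then the context and its pages, and the transport reference taken for the posted work request is dropped last. A self-contained sketch of that ordering rule, with malloc/free as hypothetical stand-ins for the three resources:

#include <stdlib.h>

static void *dma_map(void)  { return malloc(16); } /* ~ ib_dma_map_*   */
static void *get_ctxt(void) { return malloc(16); } /* ~ svc_rdma ctxt  */
static void *get_ref(void)  { return malloc(16); } /* ~ svc_xprt_get() */

static int post_recv_sketch(int post_failed)
{
	void *map = dma_map();
	void *ctxt = get_ctxt();
	void *ref = get_ref();

	if (post_failed) {
		free(map);	/* 1: svc_rdma_unmap_dma()   */
		free(ctxt);	/* 2: svc_rdma_put_context() */
		free(ref);	/* 3: svc_xprt_put()         */
		return -1;
	}
	/* on success, ownership passes to the completion handler */
	return 0;
}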
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3b21e0cc5e69..465aafc2007f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1495,7 +1495,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.access_flags = (writing ?
-				IB_ACCESS_REMOTE_WRITE : IB_ACCESS_REMOTE_READ);
+				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+				IB_ACCESS_REMOTE_READ);
 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
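
The fix reflects an IB verbs rule: a memory region that grants REMOTE_WRITE must also have LOCAL_WRITE set, so registering the sink of a server RDMA Write with remote-write access alone is rejected by the HCA. A user-space sketch of the same flag selection with libibverbs (pd, buf, and len are hypothetical):

#include <infiniband/verbs.h>

static struct ibv_mr *reg_chunk(struct ibv_pd *pd, void *buf, size_t len,
				int writing)
{
	/* local write must accompany remote write per the IB spec */
	int access = writing ?
		(IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE) :
		IBV_ACCESS_REMOTE_READ;

	return ibv_reg_mr(pd, buf, len, access);
}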
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d40ff50887aa..e18596146013 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -807,6 +807,9 @@ static void xs_reset_transport(struct sock_xprt *transport)
  *
  * This is used when all requests are complete; ie, no DRC state remains
  * on the server we want to save.
+ *
+ * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
+ * xs_reset_transport() zeroing the socket from underneath a writer.
  */
 static void xs_close(struct rpc_xprt *xprt)
 {
@@ -824,6 +827,14 @@ static void xs_close(struct rpc_xprt *xprt)
 	xprt_disconnect_done(xprt);
 }
 
+static void xs_tcp_close(struct rpc_xprt *xprt)
+{
+	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
+		xs_close(xprt);
+	else
+		xs_tcp_shutdown(xprt);
+}
+
 /**
  * xs_destroy - prepare to shutdown a transport
  * @xprt: doomed transport
@@ -1772,6 +1783,15 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
 			xprt, -status, xprt_connected(xprt),
 			sock->sk->sk_state);
 	switch (status) {
+	default:
+		printk("%s: connect returned unhandled error %d\n",
+			__func__, status);
+	case -EADDRNOTAVAIL:
+		/* We're probably in TIME_WAIT. Get rid of existing socket,
+		 * and retry
+		 */
+		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+		xprt_force_disconnect(xprt);
 	case -ECONNREFUSED:
 	case -ECONNRESET:
 	case -ENETUNREACH:
@@ -1782,10 +1802,6 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
 		xprt_clear_connecting(xprt);
 		return;
 	}
-	/* get rid of existing socket, and retry */
-	xs_tcp_shutdown(xprt);
-	printk("%s: connect returned unhandled error %d\n",
-		__func__, status);
 out_eagain:
 	status = -EAGAIN;
 out:
@@ -1994,7 +2010,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.buf_free		= rpc_free,
 	.send_request		= xs_tcp_send_request,
 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
-	.close			= xs_tcp_shutdown,
+	.close			= xs_tcp_close,
 	.destroy		= xs_destroy,
 	.print_stats		= xs_tcp_print_stats,
 };
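
Taken together, the xprtsock.c changes route every TCP transport close through xs_tcp_close(), which picks a hard teardown when XPRT_CONNECTION_CLOSE was set (by the autodisconnect timer, or by the -EADDRNOTAVAIL connect failure above) and an orderly shutdown otherwise. A plain BSD-socket analogue of that choice (fd and force are hypothetical):

#include <sys/socket.h>
#include <unistd.h>

static void close_transport(int fd, int force)
{
	if (force)
		close(fd);		/* like xs_close(): discard the socket */
	else
		shutdown(fd, SHUT_WR);	/* like xs_tcp_shutdown(): send a FIN,
					 * let in-flight replies drain */
}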