author		Linus Torvalds <torvalds@linux-foundation.org>	2009-05-29 11:49:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-05-29 11:49:09 -0400
commit		c8bce3d3bdedc7d187fa222a3b36d149bd940d0e (patch)
tree		47f335f6faaca36eed1dceb64dc0366ef68ec875
parent		5f789cd8bae9b6315b7bf93e24ec1ac85f5a13b9 (diff)
parent		98779be861a05c4cb75bed916df72ec0cba8b53d (diff)
Merge branch 'for-2.6.30' of git://linux-nfs.org/~bfields/linux
* 'for-2.6.30' of git://linux-nfs.org/~bfields/linux:
  svcrdma: dma unmap the correct length for the RPCRDMA header page.
  nfsd: Revert "svcrpc: take advantage of tcp autotuning"
  nfsd: fix hung up of nfs client while sync write data to nfs server
-rw-r--r--	fs/nfsd/vfs.c	6
-rw-r--r--	net/sunrpc/svcsock.c	35
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_sendto.c	12
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	10
4 files changed, 42 insertions, 21 deletions
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6c68ffd6b4bb..b660435978d2 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1015,6 +1015,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
 	set_fs(oldfs);
 	if (host_err >= 0) {
+		*cnt = host_err;
 		nfsdstats.io_write += host_err;
 		fsnotify_modify(file->f_path.dentry);
 	}
@@ -1060,10 +1061,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	}
 
 	dprintk("nfsd: write complete host_err=%d\n", host_err);
-	if (host_err >= 0) {
+	if (host_err >= 0)
 		err = 0;
-		*cnt = host_err;
-	} else
+	else
 		err = nfserrno(host_err);
 out:
 	return err;
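
Note: the two fs/nfsd/vfs.c hunks above move the "*cnt = host_err" assignment up into the
success branch that immediately follows vfs_writev(). This matters for stable (sync) writes:
the elided code between the two hunks can reuse host_err for the result of committing the
data, and with the old placement *cnt could then end up reporting 0 bytes written, hanging
the client. A minimal sketch of the resulting flow, condensed from the hunks (the
"stable/sync" comment describes the elided code as an assumption, it is not quoted from it):

	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
	set_fs(oldfs);
	if (host_err >= 0) {
		*cnt = host_err;		/* record bytes written right away */
		nfsdstats.io_write += host_err;
		fsnotify_modify(file->f_path.dentry);
	}
	/* ... stable/sync handling may overwrite host_err here ... */
	if (host_err >= 0)
		err = 0;			/* *cnt already holds the byte count */
	else
		err = nfserrno(host_err);
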
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af3198814c15..9d504234af4a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
 	lock_sock(sock->sk);
 	sock->sk->sk_sndbuf = snd * 2;
 	sock->sk->sk_rcvbuf = rcv * 2;
+	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
 	release_sock(sock->sk);
 #endif
 }
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
 		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+		/* sndbuf needs to have room for one request
+		 * per thread, otherwise we can stall even when the
+		 * network isn't a bottleneck.
+		 *
+		 * We count all threads rather than threads in a
+		 * particular pool, which provides an upper bound
+		 * on the number of threads which will access the socket.
+		 *
+		 * rcvbuf just needs to be able to hold a few requests.
+		 * Normally they will be removed from the queue
+		 * as soon a a complete request arrives.
+		 */
+		svc_sock_setbufsize(svsk->sk_sock,
+				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+				    3 * serv->sv_max_mesg);
+
 	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
 	/* Receive data. If we haven't got the record length yet, get
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
 	tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+	/* initialise setting must have enough space to
+	 * receive and respond to one request.
+	 * svc_tcp_recvfrom will re-adjust if necessary
+	 */
+	svc_sock_setbufsize(svsk->sk_sock,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+			    3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	if (sk->sk_state != TCP_ESTABLISHED)
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
 		svc_udp_init(svsk, serv);
-	else {
-		/* initialise setting must have enough space to
-		 * receive and respond to one request.
-		 */
-		svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-					4 * serv->sv_max_mesg);
+	else
 		svc_tcp_init(svsk, serv);
-	}
 
 	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
 			svsk, svsk->sk_sk);
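
Note: the net/sunrpc/svcsock.c hunks revert "svcrpc: take advantage of tcp autotuning".
Explicit buffer sizing comes back: svc_sock_setbufsize() again locks the sizes with
SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK so kernel autotuning cannot change them, svc_tcp_init()
sets an initial size, and svc_tcp_recvfrom() re-sizes the send buffer whenever XPT_CHNGBUF
is set so it tracks the number of nfsd threads. A sketch of the restored sizing, using the
names from the hunks (the thread count and message size below are illustrative only):

	/* with serv->sv_nrthreads = 8 and serv->sv_max_mesg = 1 MB: */
	svc_sock_setbufsize(svsk->sk_sock,
			    (serv->sv_nrthreads + 3) * serv->sv_max_mesg,	/* sndbuf: 11 MB */
			    3 * serv->sv_max_mesg);				/* rcvbuf:  3 MB */
	/* per the first hunk, svc_sock_setbufsize() stores twice these values in
	 * sk_sndbuf/sk_rcvbuf and sets the SNDBUF/RCVBUF lock flags. */
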
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8b510c5e8777..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
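
Note: in send_reply() above, sge[0].length is now filled in from
svc_rdma_xdr_get_reply_hdr_len() before the header page is DMA-mapped, and the mapping uses
that length instead of PAGE_SIZE. Since the completion path unmaps this SGE using the
recorded length (per the "dma unmap the correct length for the RPCRDMA header page" subject
in this merge), map and unmap now agree. A condensed sketch of the new ordering, taken from
the hunk, with the unmap shown only to illustrate the pairing (the unmap site itself is not
part of this diff):

	ctxt->sge[0].lkey   = rdma->sc_dma_lkey;
	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].addr   = ib_dma_map_single(rdma->sc_cm_id->device,
						page_address(page),
						ctxt->sge[0].length, DMA_TO_DEVICE);
	/* ... later, the same sge[0].length is used to unmap this SGE ... */
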
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4b0c2fa15e0b..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
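
Note: the remaining svcrdma hunks switch the header and error pages from ib_dma_map_page()
on (page, 0) to ib_dma_map_single() on page_address(page), and the error path's cleanup
changes to ib_dma_unmap_single() accordingly, since a DMA mapping must be released with the
matching unmap call, address and length. A minimal pairing sketch using the same calls as
the hunks (the bare device/page variables stand in for the struct fields used above):

	u64 pa = ib_dma_map_single(device, page_address(page),
				   PAGE_SIZE, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device, pa))
		goto err;
	/* ... on failure to post, or once the receive completes ... */
	ib_dma_unmap_single(device, pa, PAGE_SIZE, DMA_FROM_DEVICE);
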