From e053d1ab62c8ef0eff3dd4c95448cad3c6d2fbf4 Mon Sep 17 00:00:00 2001
From: Olaf Kirch
Date: Wed, 22 Jun 2005 17:16:24 +0000
Subject: [PATCH] RPC: Lazy RPC receive buffer allocation

Signed-off-by: Olaf Kirch
Signed-off-by: Andreas Gruenbacher
Signed-off-by: Trond Myklebust
---
 net/sunrpc/xprt.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

(limited to 'net/sunrpc/xprt.c')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index c74a6bb94074..a180ed4952d6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -725,7 +725,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		goto no_checksum;
 
 	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits);
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+		return -1;
 	if (desc.offset != skb->len) {
 		unsigned int csum2;
 		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
@@ -737,7 +738,8 @@ csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 		return -1;
 	return 0;
 no_checksum:
-	xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits);
+	if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+		return -1;
 	if (desc.count)
 		return -1;
 	return 0;
@@ -907,6 +909,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	struct rpc_rqst *req;
 	struct xdr_buf *rcvbuf;
 	size_t len;
+	int r;
 
 	/* Find and lock the request corresponding to this xid */
 	spin_lock(&xprt->sock_lock);
@@ -927,16 +930,30 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		len = xprt->tcp_reclen - xprt->tcp_offset;
 		memcpy(&my_desc, desc, sizeof(my_desc));
 		my_desc.count = len;
-		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  &my_desc, tcp_copy_data);
 		desc->count -= len;
 		desc->offset += len;
 	} else
-		xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
+		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  desc, tcp_copy_data);
 	xprt->tcp_copied += len;
 	xprt->tcp_offset += len;
 
+	if (r < 0) {
+		/* Error when copying to the receive buffer,
+		 * usually because we weren't able to allocate
+		 * additional buffer pages. All we can do now
+		 * is turn off XPRT_COPY_DATA, so the request
+		 * will not receive any additional updates,
+		 * and time out.
+		 * Any remaining data from this record will
+		 * be discarded.
+		 */
+		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		goto out;
+	}
+
 	if (xprt->tcp_copied == req->rq_private_buf.buflen)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
@@ -949,6 +966,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 				req->rq_task->tk_pid);
 		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
 	}
+out:
 	spin_unlock(&xprt->sock_lock);
 	tcp_check_recm(xprt);
 }
--
cgit v1.2.2
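Note: the core of this patch is an error-propagation pattern. Because receive pages are now allocated lazily, xdr_partial_copy_from_skb() can fail partway through, so every caller must check its result, and tcp_read_request() reacts to failure by clearing XPRT_COPY_DATA and letting the request time out rather than completing it with a partially filled buffer. The following is a minimal user-space sketch of that pattern, not kernel code: reader_t, partial_copy(), and COPY_DATA are hypothetical stand-ins for skb_reader_t, xdr_partial_copy_from_skb(), and XPRT_COPY_DATA.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define COPY_DATA 0x01			/* stand-in for XPRT_COPY_DATA */

typedef struct {
	const char *data;		/* remaining input */
	size_t count;			/* bytes still available */
} reader_t;

/* Returns bytes copied, or -1 when the receive buffer can't be grown
 * (a failed malloc here models a failed page allocation). */
static ssize_t partial_copy(reader_t *desc, size_t want, char **out)
{
	size_t len = want < desc->count ? want : desc->count;

	*out = malloc(len);		/* lazy buffer allocation */
	if (*out == NULL)
		return -1;
	memcpy(*out, desc->data, len);
	desc->data += len;
	desc->count -= len;
	return (ssize_t)len;
}

int main(void)
{
	reader_t desc = { "hello world", 11 };
	unsigned long flags = COPY_DATA;
	char *buf;

	if (partial_copy(&desc, 8, &buf) < 0) {
		/* As in the patch: stop expecting more data and let the
		 * request time out; the rest of the record is discarded. */
		flags &= ~COPY_DATA;
		return 1;
	}
	printf("copied 8 bytes, %zu remain, flags=%#lx\n", desc.count, flags);
	free(buf);
	return 0;
}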
From 7e06b53d796a3740307b54aa2799077f8a0c84e7 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Wed, 22 Jun 2005 17:16:24 +0000
Subject: [PATCH] RPC: fix accounting bug in the case of a truncated RPC message

Signed-off-by: Trond Myklebust
---
 net/sunrpc/xprt.c | 35 +++++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)

(limited to 'net/sunrpc/xprt.c')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a180ed4952d6..ef941e7de8bf 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -823,10 +823,15 @@ tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
 {
 	if (len > desc->count)
 		len = desc->count;
-	if (skb_copy_bits(desc->skb, desc->offset, p, len))
+	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
+		dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
+				len, desc->count);
 		return 0;
+	}
 	desc->offset += len;
 	desc->count -= len;
+	dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
+			len, desc->count);
 	return len;
 }
 
@@ -865,6 +870,8 @@ tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
 static void
 tcp_check_recm(struct rpc_xprt *xprt)
 {
+	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
+			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
 	if (xprt->tcp_offset == xprt->tcp_reclen) {
 		xprt->tcp_flags |= XPRT_COPY_RECM;
 		xprt->tcp_offset = 0;
@@ -909,7 +916,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	struct rpc_rqst *req;
 	struct xdr_buf *rcvbuf;
 	size_t len;
-	int r;
+	ssize_t r;
 
 	/* Find and lock the request corresponding to this xid */
 	spin_lock(&xprt->sock_lock);
@@ -932,15 +939,17 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		my_desc.count = len;
 		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  &my_desc, tcp_copy_data);
-		desc->count -= len;
-		desc->offset += len;
+		desc->count -= r;
+		desc->offset += r;
 	} else
 		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
 					  desc, tcp_copy_data);
 
-	xprt->tcp_copied += len;
-	xprt->tcp_offset += len;
-	if (r < 0) {
+	if (r > 0) {
+		xprt->tcp_copied += r;
+		xprt->tcp_offset += r;
+	}
+	if (r != len) {
 		/* Error when copying to the receive buffer,
 		 * usually because we weren't able to allocate
 		 * additional buffer pages. All we can do now
 		 * is turn off XPRT_COPY_DATA, so the request
 		 * will not receive any additional updates,
 		 * and time out.
 		 * Any remaining data from this record will
 		 * be discarded.
@@ -951,9 +960,18 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		 */
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
+		dprintk("RPC: XID %08x truncated request\n",
+				ntohl(xprt->tcp_xid));
+		dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
 		goto out;
 	}
 
+	dprintk("RPC: XID %08x read %u bytes\n",
+			ntohl(xprt->tcp_xid), r);
+	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
+			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+
 	if (xprt->tcp_copied == req->rq_private_buf.buflen)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
@@ -961,12 +979,12 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	}
 
+out:
 	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
 		dprintk("RPC: %4d received reply complete\n",
 				req->rq_task->tk_pid);
 		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
 	}
-out:
 	spin_unlock(&xprt->sock_lock);
 	tcp_check_recm(xprt);
 }
@@ -985,6 +1003,7 @@ tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
 	desc->count -= len;
 	desc->offset += len;
 	xprt->tcp_offset += len;
+	dprintk("RPC: discarded %u bytes\n", len);
 	tcp_check_recm(xprt);
 }
--
cgit v1.2.2
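Note: the accounting rule this patch enforces is simple to state: advance tcp_copied and tcp_offset by the number of bytes actually copied (r), never by the number requested (len), and treat any short copy (r != len) as a truncated message. A small illustrative sketch of that rule follows; struct conn, account(), and the field names are hypothetical stand-ins for the xprt bookkeeping, not the kernel API.

#include <stdio.h>
#include <sys/types.h>

/* Hypothetical stand-ins for the xprt bookkeeping fields. */
struct conn {
	unsigned long copied;	/* like xprt->tcp_copied */
	unsigned int offset;	/* like xprt->tcp_offset */
	int truncated;
};

/* requested: what we asked the copy helper for (len);
 * actual: what it reported back (r), negative on error. */
static void account(struct conn *c, size_t requested, ssize_t actual)
{
	if (actual > 0) {	/* only count bytes that really landed */
		c->copied += actual;
		c->offset += actual;
	}
	if (actual != (ssize_t)requested)
		c->truncated = 1;	/* short or failed copy */
}

int main(void)
{
	struct conn c = { 0, 0, 0 };

	account(&c, 1024, 512);	/* e.g. page allocation failed mid-copy */
	printf("copied=%lu offset=%u truncated=%d\n",
			c.copied, c.offset, c.truncated);
	return 0;
}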
From 0f9dc2b16884bb5957d010ed8e9114e771a05916 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Wed, 22 Jun 2005 17:16:28 +0000
Subject: [PATCH] RPC: Clean up socket autodisconnect

Cancel autodisconnect requests inside xprt_transmit() in order to avoid
races.

Use more efficient del_singleshot_timer_sync()

Signed-off-by: Trond Myklebust
---
 net/sunrpc/xprt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'net/sunrpc/xprt.c')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ef941e7de8bf..a74a1289113e 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1240,6 +1240,8 @@ xprt_transmit(struct rpc_task *task)
 			list_add_tail(&req->rq_list, &xprt->recv);
 			spin_unlock_bh(&xprt->sock_lock);
 			xprt_reset_majortimeo(req);
+			/* Turn off autodisconnect */
+			del_singleshot_timer_sync(&xprt->timer);
 		}
 	} else if (!req->rq_bytes_sent)
 		return;
@@ -1370,8 +1372,6 @@ xprt_reserve(struct rpc_task *task)
 		spin_lock(&xprt->xprt_lock);
 		do_xprt_reserve(task);
 		spin_unlock(&xprt->xprt_lock);
-		if (task->tk_rqstp)
-			del_timer_sync(&xprt->timer);
 	}
 }
--
cgit v1.2.2

From 20e5ac828dfd23b9080159c62a34f32d2dcd92fc Mon Sep 17 00:00:00 2001
From: Chuck Lever
Date: Wed, 22 Jun 2005 17:16:28 +0000
Subject: [PATCH] RPC: TCP reconnects are too slow

When the network layer reports a connection close, the RPC task waiting
to reconnect should be notified so it can retry immediately instead of
waiting for the normal connection establishment timeout. This reverts a
change made in 2.6.6 as part of adding client support for RPC over TCP
socket idle timeouts.

Test-plan:
Destructive testing with NFS over TCP mounts.

Version: Fri, 29 Apr 2005 15:31:46 -0400

Signed-off-by: Chuck Lever
Signed-off-by: Trond Myklebust
---
 net/sunrpc/xprt.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'net/sunrpc/xprt.c')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a74a1289113e..2b8789cf8db1 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1101,8 +1101,7 @@ tcp_state_change(struct sock *sk)
 	case TCP_SYN_RECV:
 		break;
 	default:
-		if (xprt_test_and_clear_connected(xprt))
-			rpc_wake_up_status(&xprt->pending, -ENOTCONN);
+		xprt_disconnect(xprt);
 		break;
 	}
  out:
--
cgit v1.2.2
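Note: the behavioral point of the reconnect patch is that a waiter should be woken by the state change itself rather than left to sleep out a timer. The sketch below models that with POSIX threads in user space; it is only an analogy for what xprt_disconnect() achieves inside tcp_state_change(), and none of these names are kernel APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_changed = PTHREAD_COND_INITIALIZER;
static int connected = 1;

/* Runs in the "network" context, like tcp_state_change(): on close,
 * mark the transport disconnected and wake waiters immediately. */
static void on_close(void)
{
	pthread_mutex_lock(&lock);
	connected = 0;				/* like xprt_disconnect() */
	pthread_cond_broadcast(&state_changed);	/* wake waiters now */
	pthread_mutex_unlock(&lock);
}

/* An RPC task waiting for the transport state to change; no
 * reconnect timeout is needed because the close notifies it. */
static void *waiter(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (connected)
		pthread_cond_wait(&state_changed, &lock);
	pthread_mutex_unlock(&lock);
	printf("notified of close, reconnecting immediately\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	on_close();
	pthread_join(t, NULL);
	return 0;
}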
From ae3884621bf5b4caff7785b9a417f262202965b2 Mon Sep 17 00:00:00 2001
From: Chuck Lever
Date: Wed, 22 Jun 2005 17:16:28 +0000
Subject: [PATCH] RPC: kick off socket connect operations faster

Make the socket transport kick the event queue to start socket connects
immediately. This should improve responsiveness of applications that are
sensitive to slow mount operations (like automounters).

We are now also careful to cancel the connect worker before destroying
the xprt. This eliminates a race where xprt_destroy can finish before
the connect worker is even allowed to run.

Test-plan:
Destructive testing (unplugging the network temporarily). Connectathon
with UDP and TCP. Hard-code impossibly small connect timeout.

Version: Fri, 29 Apr 2005 15:32:01 -0400

Signed-off-by: Chuck Lever
Signed-off-by: Trond Myklebust
---
 net/sunrpc/xprt.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'net/sunrpc/xprt.c')

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2b8789cf8db1..eca92405948f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -569,8 +569,11 @@ void xprt_connect(struct rpc_task *task)
 		if (xprt->sock != NULL)
 			schedule_delayed_work(&xprt->sock_connect,
 					RPC_REESTABLISH_TIMEOUT);
-		else
+		else {
 			schedule_work(&xprt->sock_connect);
+			if (!RPC_IS_ASYNC(task))
+				flush_scheduled_work();
+		}
 	}
 	return;
  out_write:
@@ -1685,6 +1688,10 @@ xprt_shutdown(struct rpc_xprt *xprt)
 	rpc_wake_up(&xprt->backlog);
 	wake_up(&xprt->cong_wait);
 	del_timer_sync(&xprt->timer);
+
+	/* synchronously wait for connect worker to finish */
+	cancel_delayed_work(&xprt->sock_connect);
+	flush_scheduled_work();
 }
 
 /*
--
cgit v1.2.2
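Note: the xprt_shutdown() hunk is the classic worker-teardown ordering: cancel any pending connect work, wait for a possibly running instance to finish, and only then free the transport. A user-space analogy follows, where a plain pthread_join() plays the role of cancel_delayed_work() plus flush_scheduled_work(); struct fake_xprt and both functions are hypothetical names for illustration only.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical transport with a background connect worker. */
struct fake_xprt {
	pthread_t worker;
	int worker_started;
};

static void *connect_worker(void *arg)
{
	struct fake_xprt *x = arg;
	(void)x;	/* would establish the socket connection here */
	return NULL;
}

/* Teardown must not free the structure while the worker can still
 * touch it; joining here is the analogue of cancel_delayed_work()
 * followed by flush_scheduled_work() in xprt_shutdown(). */
static void fake_xprt_destroy(struct fake_xprt *x)
{
	if (x->worker_started)
		pthread_join(x->worker, NULL);
	free(x);
}

int main(void)
{
	struct fake_xprt *x = calloc(1, sizeof(*x));

	if (x == NULL)
		return 1;
	x->worker_started = (pthread_create(&x->worker, NULL,
					    connect_worker, x) == 0);
	fake_xprt_destroy(x);	/* safe: worker has finished first */
	return 0;
}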