author     Chuck Lever <cel@citi.umich.edu>              2005-08-11 16:25:47 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2005-09-23 12:38:26 -0400
commit     55aa4f58aa43dc9a51fb80010630d94b96053a2e
tree       7b3d79bdc15c76c25d8a53437b9e4173ebc68f24 /net/sunrpc/xprtsock.c
parent     44fbac2288dfed6f1963ac00bf922c3bcd779cd1
[PATCH] RPC: client-side transport switch cleanup
Clean-up: change some comments to reflect the realities of the new RPC
transport switch mechanism.  Get rid of unused xprt_receive() prototype.
Also, organize function prototypes in xprt.h by usage and scope.

Test-plan:
Compile kernel with CONFIG_NFS enabled.

Version: Thu, 11 Aug 2005 16:07:21 -0400

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc/xprtsock.c')
-rw-r--r--   net/sunrpc/xprtsock.c   12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 182da2edf61c..7f0b9f7f167b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -11,6 +11,8 @@
  * Rewrite of larges part of the code in order to stabilize TCP stuff.
  * Fix behaviour when socket buffer is full.
  * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  */
 
 #include <linux/types.h>
@@ -363,7 +365,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 {
         dprintk("RPC: xs_destroy xprt %p\n", xprt);
 
-        cancel_delayed_work(&xprt->sock_connect);
+        cancel_delayed_work(&xprt->connect_worker);
         flush_scheduled_work();
 
         xprt_disconnect(xprt);
@@ -938,11 +940,11 @@ static void xs_connect(struct rpc_task *task)
         if (!xprt_test_and_set_connecting(xprt)) {
                 if (xprt->sock != NULL) {
                         dprintk("RPC: xs_connect delayed xprt %p\n", xprt);
-                        schedule_delayed_work(&xprt->sock_connect,
+                        schedule_delayed_work(&xprt->connect_worker,
                                         RPC_REESTABLISH_TIMEOUT);
                 } else {
                         dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-                        schedule_work(&xprt->sock_connect);
+                        schedule_work(&xprt->connect_worker);
                         /* flush_scheduled_work can sleep... */
                         if (!RPC_IS_ASYNC(task))
                                 flush_scheduled_work();
@@ -989,7 +991,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
         /* XXX: header size can vary due to auth type, IPv6, etc. */
         xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-        INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);
+        INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt);
 
         xprt->ops = &xs_ops;
 
@@ -1028,7 +1030,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
         xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
         xprt->max_payload = (1U << 31) - 1;
 
-        INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);
+        INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt);
 
         xprt->ops = &xs_ops;
 
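
For context, the rename above (sock_connect -> connect_worker) touches the pre-2.6.20
workqueue interface, in which a work item is initialized with the three-argument
INIT_WORK(work, handler, data) and a single work_struct also serves as the target of
schedule_delayed_work().  The following is a minimal sketch of that pattern as the
patched code uses it; the my_xprt struct, the my_* helper names and the
RPC_REESTABLISH_TIMEOUT value are illustrative stand-ins, not the actual definitions
from include/linux/sunrpc/xprt.h.

#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/param.h>

#define RPC_REESTABLISH_TIMEOUT (15 * HZ)        /* illustrative value only */

struct my_xprt {                                 /* stand-in for struct rpc_xprt */
        struct socket *sock;
        struct work_struct connect_worker;       /* formerly "sock_connect" */
};

/* Old-style work handler: receives the data pointer passed to INIT_WORK() */
static void my_connect_worker(void *data)
{
        struct my_xprt *xprt = data;
        /* ... (re)establish the transport socket for xprt ... */
}

static void my_xprt_setup(struct my_xprt *xprt)
{
        /* Three-argument form: work item, handler, handler argument */
        INIT_WORK(&xprt->connect_worker, my_connect_worker, xprt);
}

static void my_xprt_connect(struct my_xprt *xprt, int is_async)
{
        if (xprt->sock != NULL)
                /* a socket already existed: back off before reconnecting */
                schedule_delayed_work(&xprt->connect_worker,
                                RPC_REESTABLISH_TIMEOUT);
        else
                /* first connect: run the worker as soon as possible */
                schedule_work(&xprt->connect_worker);

        if (!is_async)
                flush_scheduled_work();          /* can sleep */
}

static void my_xprt_destroy(struct my_xprt *xprt)
{
        /* make sure no connect work is pending or still running */
        cancel_delayed_work(&xprt->connect_worker);
        flush_scheduled_work();
}

The 2.6.20 workqueue rework later split delayed work into its own struct delayed_work
with a two-argument INIT_WORK(), so the three-argument form shown here only matches
kernels of this vintage.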