author		Chuck Lever <cel@citi.umich.edu>	2005-08-11 16:25:47 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2005-09-23 12:38:26 -0400
commit		55aa4f58aa43dc9a51fb80010630d94b96053a2e (patch)
tree		7b3d79bdc15c76c25d8a53437b9e4173ebc68f24 /net
parent		44fbac2288dfed6f1963ac00bf922c3bcd779cd1 (diff)
[PATCH] RPC: client-side transport switch cleanup
Clean-up: change some comments to reflect the realities of the new RPC
transport switch mechanism.  Get rid of unused xprt_receive() prototype.
Also, organize function prototypes in xprt.h by usage and scope.

Test-plan:
Compile kernel with CONFIG_NFS enabled.

Version: Thu, 11 Aug 2005 16:07:21 -0400

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
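The point of the transport switch is that generic RPC code calls through a
per-transport ops table (the xprt->ops = &xs_ops assignment visible in the
diff below) instead of reaching into socket code directly. The following is
a minimal stand-alone sketch of that pattern only; every demo_* name is
invented for illustration and is not the rpc_xprt_ops definition from
include/linux/sunrpc/xprt.h.

/*
 * Hedged sketch of the transport-switch idea: generic code dispatches
 * through function pointers supplied by a concrete transport.
 */
#include <stdio.h>

struct demo_xprt;

struct demo_xprt_ops {
	void (*connect)(struct demo_xprt *xprt);
	void (*destroy)(struct demo_xprt *xprt);
};

struct demo_xprt {
	const char *name;
	const struct demo_xprt_ops *ops;	/* like xprt->ops = &xs_ops */
};

static void demo_udp_connect(struct demo_xprt *xprt)
{
	printf("%s: connect\n", xprt->name);
}

static void demo_udp_destroy(struct demo_xprt *xprt)
{
	printf("%s: destroy\n", xprt->name);
}

static const struct demo_xprt_ops demo_udp_ops = {
	.connect = demo_udp_connect,
	.destroy = demo_udp_destroy,
};

int main(void)
{
	struct demo_xprt xprt = { .name = "udp", .ops = &demo_udp_ops };

	/* Generic code never touches the socket implementation directly. */
	xprt.ops->connect(&xprt);
	xprt.ops->destroy(&xprt);
	return 0;
}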
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/clnt.c	2
-rw-r--r--	net/sunrpc/xprt.c	26
-rw-r--r--	net/sunrpc/xprtsock.c	12
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 0d1b010a4a01..4677959d2834 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1,5 +1,5 @@
 /*
- * linux/net/sunrpc/rpcclnt.c
+ * linux/net/sunrpc/clnt.c
  *
  * This file contains the high-level RPC interface.
  * It is modeled as a finite state machine to support both synchronous
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2f9cd468b953..247fa1ec870c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -10,12 +10,12 @@
  * one is available. Otherwise, it sleeps on the backlog queue
  * (xprt_reserve).
  * - Next, the caller puts together the RPC message, stuffs it into
- *   the request struct, and calls xprt_call().
- * - xprt_call transmits the message and installs the caller on the
- *   socket's wait list. At the same time, it installs a timer that
+ *   the request struct, and calls xprt_transmit().
+ * - xprt_transmit sends the message and installs the caller on the
+ *   transport's wait list. At the same time, it installs a timer that
  *   is run after the packet's timeout has expired.
  * - When a packet arrives, the data_ready handler walks the list of
- *   pending requests for that socket. If a matching XID is found, the
+ *   pending requests for that transport. If a matching XID is found, the
  *   caller is woken up, and the timer removed.
  * - When no reply arrives within the timeout interval, the timer is
  *   fired by the kernel and runs xprt_timer(). It either adjusts the
@@ -32,6 +32,8 @@
  * tasks that rely on callbacks.
  *
  * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
+ *
+ * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
  */
 
 #include <linux/module.h>
@@ -52,8 +54,6 @@
 # define RPCDBG_FACILITY	RPCDBG_XPRT
 #endif
 
-#define XPRT_MAX_BACKOFF	(8)
-
 /*
  * Local functions
  */
@@ -65,9 +65,9 @@ static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 static int xprt_clear_backlog(struct rpc_xprt *xprt);
 
 /*
- * Serialize write access to sockets, in order to prevent different
+ * Serialize write access to transports, in order to prevent different
  * requests from interfering with each other.
- * Also prevents TCP socket connects from colliding with writes.
+ * Also prevents transport connects from colliding with writes.
  */
 static int
 __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
@@ -91,7 +91,7 @@ __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	clear_bit(XPRT_LOCKED, &xprt->state);
 	smp_mb__after_clear_bit();
 out_sleep:
-	dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
+	dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
 	task->tk_timeout = 0;
 	task->tk_status = -EAGAIN;
 	if (req && req->rq_ntrans)
@@ -144,7 +144,7 @@ out_unlock:
 }
 
 /*
- * Releases the socket for use by other requests.
+ * Releases the transport for use by other requests.
  */
 static void
 __xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
@@ -294,8 +294,7 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
 	return status;
 }
 
-static void
-xprt_socket_autoclose(void *args)
+static void xprt_autoclose(void *args)
 {
 	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
 
@@ -329,7 +328,6 @@ xprt_init_autodisconnect(unsigned long data)
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		goto out_abort;
 	spin_unlock(&xprt->transport_lock);
-	/* Let keventd close the socket */
 	if (xprt_connecting(xprt))
 		xprt_release_write(xprt, NULL);
 	else
@@ -770,7 +768,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
-	INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt);
+	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
 	init_timer(&xprt->timer);
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
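The xprt_socket_autoclose() to xprt_autoclose() rename above keeps the
era's three-argument INIT_WORK(work, func, data) convention seen in the
hunk just before this note: the deferred handler receives an opaque
void * and casts it back to its object, exactly as xprt_autoclose() does
with its rpc_xprt. Below is a minimal user-space sketch of that callback
shape only; the demo_* names and run_deferred() helper are invented for
illustration and are not kernel APIs.

/*
 * Hedged sketch of a deferred handler that recovers its object from an
 * opaque pointer, mirroring the old INIT_WORK(work, func, data) style.
 */
#include <stdio.h>

struct demo_xprt {
	const char *name;
};

static void demo_autoclose(void *args)
{
	struct demo_xprt *xprt = args;	/* recover the object, as xprt_autoclose() does */

	printf("closing %s\n", xprt->name);
}

/* Stand-in for the workqueue that would run the deferred handler. */
static void run_deferred(void (*fn)(void *), void *data)
{
	fn(data);
}

int main(void)
{
	struct demo_xprt xprt = { .name = "demo" };

	run_deferred(demo_autoclose, &xprt);
	return 0;
}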
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 182da2edf61c..7f0b9f7f167b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -11,6 +11,8 @@
  *  Rewrite of larges part of the code in order to stabilize TCP stuff.
  *  Fix behaviour when socket buffer is full.
  *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  */
 
 #include <linux/types.h>
@@ -363,7 +365,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
 {
 	dprintk("RPC: xs_destroy xprt %p\n", xprt);
 
-	cancel_delayed_work(&xprt->sock_connect);
+	cancel_delayed_work(&xprt->connect_worker);
 	flush_scheduled_work();
 
 	xprt_disconnect(xprt);
@@ -938,11 +940,11 @@ static void xs_connect(struct rpc_task *task)
 	if (!xprt_test_and_set_connecting(xprt)) {
 		if (xprt->sock != NULL) {
 			dprintk("RPC: xs_connect delayed xprt %p\n", xprt);
-			schedule_delayed_work(&xprt->sock_connect,
+			schedule_delayed_work(&xprt->connect_worker,
 					RPC_REESTABLISH_TIMEOUT);
 		} else {
 			dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
-			schedule_work(&xprt->sock_connect);
+			schedule_work(&xprt->connect_worker);
 			/* flush_scheduled_work can sleep... */
 			if (!RPC_IS_ASYNC(task))
 				flush_scheduled_work();
@@ -989,7 +991,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);
+	INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt);
 
 	xprt->ops = &xs_ops;
 
@@ -1028,7 +1030,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
 	xprt->max_payload = (1U << 31) - 1;
 
-	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);
+	INIT_WORK(&xprt->connect_worker, xs_connect_worker, xprt);
 
 	xprt->ops = &xs_ops;
 
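The sock_connect to connect_worker rename in xs_connect() leaves the
scheduling policy unchanged: when a socket already exists the reconnect is
deferred by RPC_REESTABLISH_TIMEOUT via schedule_delayed_work(), while a
brand-new transport schedules the connect worker immediately. Below is a
hedged stand-alone sketch of that decision only; the demo_* names, helpers,
and timeout value are invented for illustration, not kernel interfaces.

/*
 * Hedged sketch: delayed reconnect for an existing socket, immediate
 * connect otherwise, mirroring the xs_connect() branch above.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_REESTABLISH_TIMEOUT 5u	/* stand-in for RPC_REESTABLISH_TIMEOUT */

/* Illustrative stand-ins for schedule_work()/schedule_delayed_work(). */
static void demo_schedule(const char *worker)
{
	printf("run %s now\n", worker);
}

static void demo_schedule_delayed(const char *worker, unsigned int ticks)
{
	printf("run %s after %u ticks\n", worker, ticks);
}

static void demo_connect(bool have_socket)
{
	if (have_socket)
		demo_schedule_delayed("connect_worker", DEMO_REESTABLISH_TIMEOUT);
	else
		demo_schedule("connect_worker");
}

int main(void)
{
	demo_connect(true);	/* reconnect: back off first */
	demo_connect(false);	/* first connect: run immediately */
	return 0;
}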