diff options
author | Ricardo Labiaga <Ricardo.Labiaga@netapp.com> | 2009-04-01 09:23:03 -0400 |
---|---|---|
committer | Benny Halevy <bhalevy@panasas.com> | 2009-06-17 17:11:24 -0400 |
commit | 55ae1aabfb108106dd095de2578ceef1c755a8b8 (patch) | |
tree | 7ccb91f5aac0a3646f7d4e8fbe913955aa300f95 /net/sunrpc/xprt.c | |
parent | 44b98efdd0a205bdca2cb63493350d06ff6804b1 (diff) |
nfs41: Add backchannel processing support to RPC state machine
Adds rpc_run_bc_task() which is called by the NFS callback service to
process backchannel requests. It performs similar work to rpc_run_task()
though "schedules" the backchannel task to be executed starting at the
call_transmit state in the RPC state machine.
It also introduces some miscellaneous updates to the argument validation,
call_transmit, and transport cleanup functions to take into account
that there are now forechannel and backchannel tasks.
Backchannel requests do not carry an RPC message structure, since the
payload has already been XDR encoded using the existing NFSv4 callback
mechanism.
Introduce a new transmit state for the client to use to reply to backchannel
requests. This new state simply reserves the transport and issues the
reply. In case of a connection-related error, it disconnects the transport and
drops the reply. It requires the forechannel to re-establish the connection
and the server to retransmit the request, as stated in NFSv4.1 section
2.9.2 "Client and Server Transport Behavior".
Note: There is no need to loop attempting to reserve the transport. If EAGAIN
is returned by xprt_prepare_transmit(), return with tk_status == 0,
setting tk_action to call_bc_transmit. rpc_execute() will invoke it again
after the task is taken off the sleep queue.
[nfs41: rpc_run_bc_task() need not be exported outside RPC module]
[nfs41: New call_bc_transmit RPC state]
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[nfs41: Backchannel: No need to loop in call_bc_transmit()]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[rpc_count_iostats incorrectly exits early]
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[Convert rpc_reply_expected() to inline function]
[Remove unnecessary BUG_ON()]
[Rename variable]
Signed-off-by: Ricardo Labiaga <Ricardo.Labiaga@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Diffstat (limited to 'net/sunrpc/xprt.c')
-rw-r--r-- | net/sunrpc/xprt.c | 38 |
1 files changed, 32 insertions, 6 deletions
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 52739f82df1e..0eea2bfe111b 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -12,8 +12,9 @@ | |||
12 | * - Next, the caller puts together the RPC message, stuffs it into | 12 | * - Next, the caller puts together the RPC message, stuffs it into |
13 | * the request struct, and calls xprt_transmit(). | 13 | * the request struct, and calls xprt_transmit(). |
14 | * - xprt_transmit sends the message and installs the caller on the | 14 | * - xprt_transmit sends the message and installs the caller on the |
15 | * transport's wait list. At the same time, it installs a timer that | 15 | * transport's wait list. At the same time, if a reply is expected, |
16 | * is run after the packet's timeout has expired. | 16 | * it installs a timer that is run after the packet's timeout has |
17 | * expired. | ||
17 | * - When a packet arrives, the data_ready handler walks the list of | 18 | * - When a packet arrives, the data_ready handler walks the list of |
18 | * pending requests for that transport. If a matching XID is found, the | 19 | * pending requests for that transport. If a matching XID is found, the |
19 | * caller is woken up, and the timer removed. | 20 | * caller is woken up, and the timer removed. |
@@ -46,6 +47,8 @@ | |||
46 | #include <linux/sunrpc/clnt.h> | 47 | #include <linux/sunrpc/clnt.h> |
47 | #include <linux/sunrpc/metrics.h> | 48 | #include <linux/sunrpc/metrics.h> |
48 | 49 | ||
50 | #include "sunrpc.h" | ||
51 | |||
49 | /* | 52 | /* |
50 | * Local variables | 53 | * Local variables |
51 | */ | 54 | */ |
@@ -873,7 +876,10 @@ void xprt_transmit(struct rpc_task *task) | |||
873 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); | 876 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); |
874 | 877 | ||
875 | if (!req->rq_received) { | 878 | if (!req->rq_received) { |
876 | if (list_empty(&req->rq_list)) { | 879 | if (list_empty(&req->rq_list) && rpc_reply_expected(task)) { |
880 | /* | ||
881 | * Add to the list only if we're expecting a reply | ||
882 | */ | ||
877 | spin_lock_bh(&xprt->transport_lock); | 883 | spin_lock_bh(&xprt->transport_lock); |
878 | /* Update the softirq receive buffer */ | 884 | /* Update the softirq receive buffer */ |
879 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, | 885 | memcpy(&req->rq_private_buf, &req->rq_rcv_buf, |
@@ -908,8 +914,13 @@ void xprt_transmit(struct rpc_task *task) | |||
908 | /* Don't race with disconnect */ | 914 | /* Don't race with disconnect */ |
909 | if (!xprt_connected(xprt)) | 915 | if (!xprt_connected(xprt)) |
910 | task->tk_status = -ENOTCONN; | 916 | task->tk_status = -ENOTCONN; |
911 | else if (!req->rq_received) | 917 | else if (!req->rq_received && rpc_reply_expected(task)) { |
918 | /* | ||
919 | * Sleep on the pending queue since | ||
920 | * we're expecting a reply. | ||
921 | */ | ||
912 | rpc_sleep_on(&xprt->pending, task, xprt_timer); | 922 | rpc_sleep_on(&xprt->pending, task, xprt_timer); |
923 | } | ||
913 | spin_unlock_bh(&xprt->transport_lock); | 924 | spin_unlock_bh(&xprt->transport_lock); |
914 | } | 925 | } |
915 | 926 | ||
@@ -982,11 +993,17 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) | |||
982 | */ | 993 | */ |
983 | void xprt_release(struct rpc_task *task) | 994 | void xprt_release(struct rpc_task *task) |
984 | { | 995 | { |
985 | struct rpc_xprt *xprt = task->tk_xprt; | 996 | struct rpc_xprt *xprt; |
986 | struct rpc_rqst *req; | 997 | struct rpc_rqst *req; |
998 | int is_bc_request; | ||
987 | 999 | ||
988 | if (!(req = task->tk_rqstp)) | 1000 | if (!(req = task->tk_rqstp)) |
989 | return; | 1001 | return; |
1002 | |||
1003 | /* Preallocated backchannel request? */ | ||
1004 | is_bc_request = bc_prealloc(req); | ||
1005 | |||
1006 | xprt = req->rq_xprt; | ||
990 | rpc_count_iostats(task); | 1007 | rpc_count_iostats(task); |
991 | spin_lock_bh(&xprt->transport_lock); | 1008 | spin_lock_bh(&xprt->transport_lock); |
992 | xprt->ops->release_xprt(xprt, task); | 1009 | xprt->ops->release_xprt(xprt, task); |
@@ -999,10 +1016,19 @@ void xprt_release(struct rpc_task *task) | |||
999 | mod_timer(&xprt->timer, | 1016 | mod_timer(&xprt->timer, |
1000 | xprt->last_used + xprt->idle_timeout); | 1017 | xprt->last_used + xprt->idle_timeout); |
1001 | spin_unlock_bh(&xprt->transport_lock); | 1018 | spin_unlock_bh(&xprt->transport_lock); |
1002 | xprt->ops->buf_free(req->rq_buffer); | 1019 | if (!bc_prealloc(req)) |
1020 | xprt->ops->buf_free(req->rq_buffer); | ||
1003 | task->tk_rqstp = NULL; | 1021 | task->tk_rqstp = NULL; |
1004 | if (req->rq_release_snd_buf) | 1022 | if (req->rq_release_snd_buf) |
1005 | req->rq_release_snd_buf(req); | 1023 | req->rq_release_snd_buf(req); |
1024 | |||
1025 | /* | ||
1026 | * Early exit if this is a backchannel preallocated request. | ||
1027 | * There is no need to have it added to the RPC slot list. | ||
1028 | */ | ||
1029 | if (is_bc_request) | ||
1030 | return; | ||
1031 | |||
1006 | memset(req, 0, sizeof(*req)); /* mark unused */ | 1032 | memset(req, 0, sizeof(*req)); /* mark unused */ |
1007 | 1033 | ||
1008 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); | 1034 | dprintk("RPC: %5u release request %p\n", task->tk_pid, req); |