Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/Makefile               1
-rw-r--r--  net/sunrpc/backchannel_rqst.c   278
-rw-r--r--  net/sunrpc/bc_svc.c              81
-rw-r--r--  net/sunrpc/clnt.c               143
-rw-r--r--  net/sunrpc/sched.c                2
-rw-r--r--  net/sunrpc/stats.c                8
-rw-r--r--  net/sunrpc/sunrpc.h              37
-rw-r--r--  net/sunrpc/svc.c                134
-rw-r--r--  net/sunrpc/svcsock.c             39
-rw-r--r--  net/sunrpc/xprt.c                60
-rw-r--r--  net/sunrpc/xprtsock.c           216
11 files changed, 924 insertions, 75 deletions
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 5369aa369b35..db73fd2a3f0e 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,5 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
 	    rpcb_clnt.o timer.o xdr.o \
 	    sunrpc_syms.o cache.o rpc_pipe.o \
 	    svc_xprt.o
+sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
 sunrpc-$(CONFIG_PROC_FS) += stats.o
 sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 000000000000..5a7d342e3087
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,278 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc.  All Rights Reserved.
+(c) 2009 NetApp.  All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/sunrpc/xprt.h>
+
+#ifdef RPC_DEBUG
+#define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Helper routines that track the number of preallocation elements
+ * on the transport.
+ */
+static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
+{
+        return xprt->bc_alloc_count > 0;
+}
+
+static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+        xprt->bc_alloc_count += n;
+}
+
+static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+        return xprt->bc_alloc_count -= n;
+}
+
+/*
+ * Free the preallocated rpc_rqst structure and the memory
+ * buffers hanging off of it.
+ */
+static void xprt_free_allocation(struct rpc_rqst *req)
+{
+        struct xdr_buf *xbufp;
+
+        dprintk("RPC: free allocations for req= %p\n", req);
+        BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+        xbufp = &req->rq_private_buf;
+        free_page((unsigned long)xbufp->head[0].iov_base);
+        xbufp = &req->rq_snd_buf;
+        free_page((unsigned long)xbufp->head[0].iov_base);
+        list_del(&req->rq_bc_pa_list);
+        kfree(req);
+}
+
+/*
+ * Preallocate up to min_reqs structures and related buffers for use
+ * by the backchannel.  This function can be called multiple times
+ * when creating new sessions that use the same rpc_xprt.  The
+ * preallocated buffers are added to the pool of resources used by
+ * the rpc_xprt.  Any one of these resources may be used by an
+ * incoming callback request.  It's up to the higher levels in the
+ * stack to enforce that the maximum number of session slots is not
+ * being exceeded.
+ *
+ * Some callback arguments can be large.  For example, a pNFS server
+ * using multiple deviceids.  The list can be unbounded, but the client
+ * has the ability to tell the server the maximum size of the callback
+ * requests.  Each deviceID is 16 bytes, so allocate one page
+ * for the arguments to have enough room to receive a number of these
+ * deviceIDs.  The NFS client indicates to the pNFS server that its
+ * callback requests can be up to 4096 bytes in size.
+ */
+int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
+{
+        struct page *page_rcv = NULL, *page_snd = NULL;
+        struct xdr_buf *xbufp = NULL;
+        struct rpc_rqst *req, *tmp;
+        struct list_head tmp_list;
+        int i;
+
+        dprintk("RPC: setup backchannel transport\n");
+
+        /*
+         * We use a temporary list to keep track of the preallocated
+         * buffers.  Once we're done building the list we splice it
+         * into the backchannel preallocation list off of the rpc_xprt
+         * struct.  This helps minimize the amount of time the list
+         * lock is held on the rpc_xprt struct.  It also makes cleanup
+         * easier in case of memory allocation errors.
+         */
+        INIT_LIST_HEAD(&tmp_list);
+        for (i = 0; i < min_reqs; i++) {
+                /* Pre-allocate one backchannel rpc_rqst */
+                req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+                if (req == NULL) {
+                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
+                        goto out_free;
+                }
+
+                /* Add the allocated buffer to the tmp list */
+                dprintk("RPC: adding req= %p\n", req);
+                list_add(&req->rq_bc_pa_list, &tmp_list);
+
+                req->rq_xprt = xprt;
+                INIT_LIST_HEAD(&req->rq_list);
+                INIT_LIST_HEAD(&req->rq_bc_list);
+
+                /* Preallocate one XDR receive buffer */
+                page_rcv = alloc_page(GFP_KERNEL);
+                if (page_rcv == NULL) {
+                        printk(KERN_ERR "Failed to create bc receive xbuf\n");
+                        goto out_free;
+                }
+                xbufp = &req->rq_rcv_buf;
+                xbufp->head[0].iov_base = page_address(page_rcv);
+                xbufp->head[0].iov_len = PAGE_SIZE;
+                xbufp->tail[0].iov_base = NULL;
+                xbufp->tail[0].iov_len = 0;
+                xbufp->page_len = 0;
+                xbufp->len = PAGE_SIZE;
+                xbufp->buflen = PAGE_SIZE;
+
+                /* Preallocate one XDR send buffer */
+                page_snd = alloc_page(GFP_KERNEL);
+                if (page_snd == NULL) {
+                        printk(KERN_ERR "Failed to create bc snd xbuf\n");
+                        goto out_free;
+                }
+
+                xbufp = &req->rq_snd_buf;
+                xbufp->head[0].iov_base = page_address(page_snd);
+                xbufp->head[0].iov_len = 0;
+                xbufp->tail[0].iov_base = NULL;
+                xbufp->tail[0].iov_len = 0;
+                xbufp->page_len = 0;
+                xbufp->len = 0;
+                xbufp->buflen = PAGE_SIZE;
+        }
+
+        /*
+         * Add the temporary list to the backchannel preallocation list
+         */
+        spin_lock_bh(&xprt->bc_pa_lock);
+        list_splice(&tmp_list, &xprt->bc_pa_list);
+        xprt_inc_alloc_count(xprt, min_reqs);
+        spin_unlock_bh(&xprt->bc_pa_lock);
+
+        dprintk("RPC: setup backchannel transport done\n");
+        return 0;
+
+out_free:
+        /*
+         * Memory allocation failed, free the temporary list
+         */
+        list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+                xprt_free_allocation(req);
+
+        dprintk("RPC: setup backchannel transport failed\n");
+        return -1;
+}
+EXPORT_SYMBOL(xprt_setup_backchannel);
+
+/*
+ * Destroys the backchannel preallocated structures.
+ * Since these structures may have been allocated by multiple calls
+ * to xprt_setup_backchannel, we only destroy up to the maximum number
+ * of reqs specified by the caller.
+ * @xprt:     the transport holding the preallocated structures
+ * @max_reqs: the maximum number of preallocated structures to destroy
+ */
+void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
+{
+        struct rpc_rqst *req = NULL, *tmp = NULL;
+
+        dprintk("RPC: destroy backchannel transport\n");
+
+        BUG_ON(max_reqs == 0);
+        spin_lock_bh(&xprt->bc_pa_lock);
+        xprt_dec_alloc_count(xprt, max_reqs);
+        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+                dprintk("RPC: req=%p\n", req);
+                xprt_free_allocation(req);
+                if (--max_reqs == 0)
+                        break;
+        }
+        spin_unlock_bh(&xprt->bc_pa_lock);
+
+        dprintk("RPC: backchannel list empty= %s\n",
+                list_empty(&xprt->bc_pa_list) ? "true" : "false");
+}
+EXPORT_SYMBOL(xprt_destroy_backchannel);
+
+/*
+ * One or more rpc_rqst structures have been preallocated during the
+ * backchannel setup.  Buffer space for the send and private XDR buffers
+ * has been preallocated as well.  Use xprt_alloc_bc_request to allocate
+ * one of these preallocated structures; use xprt_free_bc_request to
+ * return it.
+ *
+ * Return an available rpc_rqst, otherwise NULL if none are available.
+ */
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+{
+        struct rpc_rqst *req;
+
+        dprintk("RPC: allocate a backchannel request\n");
+        spin_lock_bh(&xprt->bc_pa_lock);
+        if (!list_empty(&xprt->bc_pa_list)) {
+                req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+                                rq_bc_pa_list);
+                list_del(&req->rq_bc_pa_list);
+        } else {
+                req = NULL;
+        }
+        spin_unlock_bh(&xprt->bc_pa_lock);
+
+        if (req != NULL) {
+                set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+                req->rq_reply_bytes_recvd = 0;
+                req->rq_bytes_sent = 0;
+                memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+                        sizeof(req->rq_private_buf));
+        }
+        dprintk("RPC: backchannel req=%p\n", req);
+        return req;
+}
+
+/*
+ * Return the preallocated rpc_rqst structure and XDR buffers
+ * associated with this rpc_task.
+ */
+void xprt_free_bc_request(struct rpc_rqst *req)
+{
+        struct rpc_xprt *xprt = req->rq_xprt;
+
+        dprintk("RPC: free backchannel req=%p\n", req);
+
+        smp_mb__before_clear_bit();
+        BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+        smp_mb__after_clear_bit();
+
+        if (!xprt_need_to_requeue(xprt)) {
+                /*
+                 * The last remaining session was destroyed while this
+                 * entry was in use.  Free the entry and don't attempt
+                 * to add back to the list because there is no need to
+                 * have any more preallocated entries.
+                 */
+                dprintk("RPC: Last session removed req=%p\n", req);
+                xprt_free_allocation(req);
+                return;
+        }
+
+        /*
+         * Return it to the list of preallocations so that it
+         * may be reused by a new callback request.
+         */
+        spin_lock_bh(&xprt->bc_pa_lock);
+        list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+        spin_unlock_bh(&xprt->bc_pa_lock);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
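
The preallocation pool above is driven entirely by the entry points it exports. What follows is a minimal, illustrative sketch of the pool's life cycle, not part of the patch: the function name example_session_setup() and the slots parameter are hypothetical, and error handling beyond the setup call is elided.

static int example_session_setup(struct rpc_xprt *xprt, unsigned int slots)
{
        struct rpc_rqst *req;
        int err;

        /* Grow the pool once per session that shares this transport */
        err = xprt_setup_backchannel(xprt, slots);
        if (err < 0)
                return err;

        /* In the receive path, an incoming callback borrows a slot... */
        req = xprt_alloc_bc_request(xprt);
        if (req != NULL)
                xprt_free_bc_request(req);  /* ...and returns it when done */

        /* At session teardown, shrink the pool again */
        xprt_destroy_backchannel(xprt, slots);
        return 0;
}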
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
new file mode 100644
index 000000000000..13f214f53120
--- /dev/null
+++ b/net/sunrpc/bc_svc.c
@@ -0,0 +1,81 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc.  All Rights Reserved.
+(c) 2009 NetApp.  All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * The NFSv4.1 callback service helper routines.
+ * They implement the transport level processing required to send the
+ * reply over an existing open connection previously established by the client.
+ */
+
+#if defined(CONFIG_NFS_V4_1)
+
+#include <linux/module.h>
+
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/bc_xprt.h>
+
+#define RPCDBG_FACILITY RPCDBG_SVCDSP
+
+void bc_release_request(struct rpc_task *task)
+{
+        struct rpc_rqst *req = task->tk_rqstp;
+
+        dprintk("RPC: bc_release_request: task= %p\n", task);
+
+        /*
+         * Release this request only if it's a backchannel
+         * preallocated request
+         */
+        if (!bc_prealloc(req))
+                return;
+        xprt_free_bc_request(req);
+}
+
+/* Empty callback ops */
+static const struct rpc_call_ops nfs41_callback_ops = {
+};
+
+
+/*
+ * Send the callback reply
+ */
+int bc_send(struct rpc_rqst *req)
+{
+        struct rpc_task *task;
+        int ret;
+
+        dprintk("RPC: bc_send req= %p\n", req);
+        task = rpc_run_bc_task(req, &nfs41_callback_ops);
+        if (IS_ERR(task))
+                ret = PTR_ERR(task);
+        else {
+                BUG_ON(atomic_read(&task->tk_count) != 1);
+                ret = task->tk_status;
+                rpc_put_task(task);
+        }
+        dprintk("RPC: bc_send ret= %d\n", ret);
+        return ret;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
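
bc_send() assumes its caller has already XDR-encoded the reply into req->rq_snd_buf and that req came from the transport's preallocated pool. A hedged sketch of the calling convention follows; the helper example_send_cb_reply() is hypothetical.

static int example_send_cb_reply(struct rpc_rqst *req)
{
        int ret;

        /*
         * The reply must already be encoded into req->rq_snd_buf;
         * rpc_run_bc_task() computes the buffer length from its iovecs.
         * bc_send() consumes req whether or not the send succeeds.
         */
        ret = bc_send(req);
        if (ret < 0)
                printk(KERN_WARNING
                        "example: backchannel reply dropped: %d\n", ret);
        return ret;
}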
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab094441f..5bc2f45bddf0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,7 +36,9 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/bc_xprt.h>
 
+#include "sunrpc.h"
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY RPCDBG_CALL
@@ -63,6 +65,9 @@ static void call_decode(struct rpc_task *task);
 static void call_bind(struct rpc_task *task);
 static void call_bind_status(struct rpc_task *task);
 static void call_transmit(struct rpc_task *task);
+#if defined(CONFIG_NFS_V4_1)
+static void call_bc_transmit(struct rpc_task *task);
+#endif /* CONFIG_NFS_V4_1 */
 static void call_status(struct rpc_task *task);
 static void call_transmit_status(struct rpc_task *task);
 static void call_refresh(struct rpc_task *task);
@@ -613,6 +618,50 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
 }
 EXPORT_SYMBOL_GPL(rpc_call_async);
 
+#if defined(CONFIG_NFS_V4_1)
+/**
+ * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
+ * rpc_execute against it
+ * @req: RPC request
+ * @tk_ops: RPC call ops
+ */
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+                                const struct rpc_call_ops *tk_ops)
+{
+        struct rpc_task *task;
+        struct xdr_buf *xbufp = &req->rq_snd_buf;
+        struct rpc_task_setup task_setup_data = {
+                .callback_ops = tk_ops,
+        };
+
+        dprintk("RPC: rpc_run_bc_task req= %p\n", req);
+        /*
+         * Create an rpc_task to send the data
+         */
+        task = rpc_new_task(&task_setup_data);
+        if (!task) {
+                xprt_free_bc_request(req);
+                goto out;
+        }
+        task->tk_rqstp = req;
+
+        /*
+         * Set up the xdr_buf length.
+         * This also indicates that the buffer is XDR encoded already.
+         */
+        xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
+                        xbufp->tail[0].iov_len;
+
+        task->tk_action = call_bc_transmit;
+        atomic_inc(&task->tk_count);
+        BUG_ON(atomic_read(&task->tk_count) != 2);
+        rpc_execute(task);
+
+out:
+        dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
+        return task;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 void
 rpc_call_start(struct rpc_task *task)
 {
@@ -695,6 +744,19 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
 EXPORT_SYMBOL_GPL(rpc_force_rebind);
 
 /*
+ * Restart an (async) RPC call from the call_prepare state.
+ * Usually called from within the exit handler.
+ */
+void
+rpc_restart_call_prepare(struct rpc_task *task)
+{
+        if (RPC_ASSASSINATED(task))
+                return;
+        task->tk_action = rpc_prepare_task;
+}
+EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
+
+/*
  * Restart an (async) RPC call. Usually called from within the
  * exit handler.
  */
@@ -1085,7 +1147,7 @@ call_transmit(struct rpc_task *task)
 	 * in order to allow access to the socket to other RPC requests.
 	 */
 	call_transmit_status(task);
-	if (task->tk_msg.rpc_proc->p_decode != NULL)
+	if (rpc_reply_expected(task))
 		return;
 	task->tk_action = rpc_exit_task;
 	rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
@@ -1120,6 +1182,72 @@ call_transmit_status(struct rpc_task *task)
 	}
 }
 
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * 5b.	Send the backchannel RPC reply.  On error, drop the reply.  In
+ *	addition, disconnect on connectivity errors.
+ */
+static void
+call_bc_transmit(struct rpc_task *task)
+{
+        struct rpc_rqst *req = task->tk_rqstp;
+
+        BUG_ON(task->tk_status != 0);
+        task->tk_status = xprt_prepare_transmit(task);
+        if (task->tk_status == -EAGAIN) {
+                /*
+                 * Could not reserve the transport. Try again after the
+                 * transport is released.
+                 */
+                task->tk_status = 0;
+                task->tk_action = call_bc_transmit;
+                return;
+        }
+
+        task->tk_action = rpc_exit_task;
+        if (task->tk_status < 0) {
+                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+                        "error: %d\n", task->tk_status);
+                return;
+        }
+
+        xprt_transmit(task);
+        xprt_end_transmit(task);
+        dprint_status(task);
+        switch (task->tk_status) {
+        case 0:
+                /* Success */
+                break;
+        case -EHOSTDOWN:
+        case -EHOSTUNREACH:
+        case -ENETUNREACH:
+        case -ETIMEDOUT:
+                /*
+                 * Problem reaching the server.  Disconnect and let the
+                 * forechannel reestablish the connection.  The server will
+                 * have to retransmit the backchannel request and we'll
+                 * reprocess it.  Since these ops are idempotent, there's no
+                 * need to cache our reply at this time.
+                 */
+                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+                        "error: %d\n", task->tk_status);
+                xprt_conditional_disconnect(task->tk_xprt,
+                        req->rq_connect_cookie);
+                break;
+        default:
+                /*
+                 * We were unable to reply and will have to drop the
+                 * request.  The server should reconnect and retransmit.
+                 */
+                BUG_ON(task->tk_status == -EAGAIN);
+                printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+                        "error: %d\n", task->tk_status);
+                break;
+        }
+        rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 /*
  * 6.	Sort out the RPC call status
  */
@@ -1130,8 +1258,8 @@ call_status(struct rpc_task *task)
 	struct rpc_rqst	*req = task->tk_rqstp;
 	int		status;
 
-	if (req->rq_received > 0 && !req->rq_bytes_sent)
-		task->tk_status = req->rq_received;
+	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
+		task->tk_status = req->rq_reply_bytes_recvd;
 
 	dprint_status(task);
 
@@ -1248,7 +1376,7 @@ call_decode(struct rpc_task *task)
 
 	/*
 	 * Ensure that we see all writes made by xprt_complete_rqst()
-	 * before it changed req->rq_received.
+	 * before it changed req->rq_reply_bytes_recvd.
 	 */
 	smp_rmb();
 	req->rq_rcv_buf.len = req->rq_private_buf.len;
@@ -1289,7 +1417,7 @@ out_retry:
 	task->tk_status = 0;
 	/* Note: rpc_verify_header() may have freed the RPC slot */
 	if (task->tk_rqstp == req) {
-		req->rq_received = req->rq_rcv_buf.len = 0;
+		req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
 		if (task->tk_client->cl_discrtry)
 			xprt_conditional_disconnect(task->tk_xprt,
 						    req->rq_connect_cookie);
@@ -1377,13 +1505,14 @@ rpc_verify_header(struct rpc_task *task)
 	}
 	if ((len -= 3) < 0)
 		goto out_overflow;
-	p += 1; /* skip XID */
 
+	p += 1; /* skip XID */
 	if ((n = ntohl(*p++)) != RPC_REPLY) {
 		dprintk("RPC: %5u %s: not an RPC reply: %x\n",
 			task->tk_pid, __func__, n);
 		goto out_garbage;
 	}
+
 	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
 		if (--len < 0)
 			goto out_overflow;
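
rpc_restart_call_prepare() above is meant to be called from a task's exit handler. An illustrative rpc_call_done callback that re-drives the state machine on -EAGAIN follows; the function name and the -EAGAIN policy are hypothetical, only the pattern is the point.

static void example_call_done(struct rpc_task *task, void *calldata)
{
        if (task->tk_status == -EAGAIN) {
                /* run the task again, starting from rpc_prepare_task */
                rpc_restart_call_prepare(task);
                return;
        }
        /* normal completion handling would follow here */
}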
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff50a0546865..1102ce1251f7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(rpc_delay);
 /*
  * Helper to call task->tk_ops->rpc_call_prepare
  */
-static void rpc_prepare_task(struct rpc_task *task)
+void rpc_prepare_task(struct rpc_task *task)
 {
 	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1ef6e46d9da2..1b4e6791ecf3 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -141,12 +141,14 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
 void rpc_count_iostats(struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
-	struct rpc_iostats *stats = task->tk_client->cl_metrics;
+	struct rpc_iostats *stats;
 	struct rpc_iostats *op_metrics;
 	long rtt, execute, queue;
 
-	if (!stats || !req)
+	if (!task->tk_client || !task->tk_client->cl_metrics || !req)
 		return;
+
+	stats = task->tk_client->cl_metrics;
 	op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
 
 	op_metrics->om_ops++;
@@ -154,7 +156,7 @@ void rpc_count_iostats(struct rpc_task *task)
 	op_metrics->om_timeouts += task->tk_timeouts;
 
 	op_metrics->om_bytes_sent += task->tk_bytes_sent;
-	op_metrics->om_bytes_recv += req->rq_received;
+	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
 
 	queue = (long)req->rq_xtime - task->tk_start;
 	if (queue < 0)
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
new file mode 100644
index 000000000000..5d9dd742264b
--- /dev/null
+++ b/net/sunrpc/sunrpc.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+(c) 2008 NetApp.  All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions and macros used internally by RPC
+ */
+
+#ifndef _NET_SUNRPC_SUNRPC_H
+#define _NET_SUNRPC_SUNRPC_H
+
+static inline int rpc_reply_expected(struct rpc_task *task)
+{
+        return (task->tk_msg.rpc_proc != NULL) &&
+                (task->tk_msg.rpc_proc->p_decode != NULL);
+}
+
+#endif /* _NET_SUNRPC_SUNRPC_H */
+
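
rpc_reply_expected() is what lets one transmit path serve both directions: a backchannel reply task created by rpc_run_bc_task() carries no rpc_proc at all, and a one-way procedure has a NULL p_decode, so in either case no reply will ever arrive. A sketch of the resulting policy, illustrative only and with a hypothetical helper name:

static void example_transmit_policy(struct rpc_task *task)
{
        if (rpc_reply_expected(task)) {
                /* forechannel call: queue on xprt->recv, sleep on
                 * xprt->pending and arm the reply timer */
                return;
        }
        /* backchannel reply or one-way call: the task completes once
         * the bytes are on the wire; no incoming data will wake it */
}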
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5ed8931dfe98..952f206ff307 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -25,6 +25,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/bc_xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCDSP
 
@@ -486,6 +487,10 @@ svc_destroy(struct svc_serv *serv)
 	if (svc_serv_is_pooled(serv))
 		svc_pool_map_put();
 
+#if defined(CONFIG_NFS_V4_1)
+	svc_sock_destroy(serv->bc_xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
 	svc_unregister(serv);
 	kfree(serv->sv_pools);
 	kfree(serv);
@@ -970,20 +975,18 @@ svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 }
 
 /*
- * Process the RPC request.
+ * Common routine for processing the RPC request.
  */
-int
-svc_process(struct svc_rqst *rqstp)
+static int
+svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 {
 	struct svc_program	*progp;
 	struct svc_version	*versp = NULL;	/* compiler food */
 	struct svc_procedure	*procp = NULL;
-	struct kvec		*argv = &rqstp->rq_arg.head[0];
-	struct kvec		*resv = &rqstp->rq_res.head[0];
 	struct svc_serv		*serv = rqstp->rq_server;
 	kxdrproc_t		xdr;
 	__be32			*statp;
-	u32			dir, prog, vers, proc;
+	u32			prog, vers, proc;
 	__be32			auth_stat, rpc_stat;
 	int			auth_res;
 	__be32			*reply_statp;
@@ -993,19 +996,6 @@ svc_process(struct svc_rqst *rqstp)
 	if (argv->iov_len < 6*4)
 		goto err_short_len;
 
-	/* setup response xdr_buf.
-	 * Initially it has just one page
-	 */
-	rqstp->rq_resused = 1;
-	resv->iov_base = page_address(rqstp->rq_respages[0]);
-	resv->iov_len = 0;
-	rqstp->rq_res.pages = rqstp->rq_respages + 1;
-	rqstp->rq_res.len = 0;
-	rqstp->rq_res.page_base = 0;
-	rqstp->rq_res.page_len = 0;
-	rqstp->rq_res.buflen = PAGE_SIZE;
-	rqstp->rq_res.tail[0].iov_base = NULL;
-	rqstp->rq_res.tail[0].iov_len = 0;
 	/* Will be turned off only in gss privacy case: */
 	rqstp->rq_splice_ok = 1;
 	/* Will be turned off only when NFSv4 Sessions are used */
@@ -1014,17 +1004,13 @@ svc_process(struct svc_rqst *rqstp)
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
 
-	rqstp->rq_xid = svc_getu32(argv);
 	svc_putu32(resv, rqstp->rq_xid);
 
-	dir  = svc_getnl(argv);
 	vers = svc_getnl(argv);
 
 	/* First words of reply: */
 	svc_putnl(resv, 1);		/* REPLY */
 
-	if (dir != 0)		/* direction != CALL */
-		goto err_bad_dir;
 	if (vers != 2)		/* RPC version number */
 		goto err_bad_rpc;
 
@@ -1147,7 +1133,7 @@ svc_process(struct svc_rqst *rqstp)
 sendit:
 	if (svc_authorise(rqstp))
 		goto dropit;
-	return svc_send(rqstp);
+	return 1;		/* Caller can now send it */
 
 dropit:
 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
@@ -1161,12 +1147,6 @@ err_short_len:
 
 	goto dropit;			/* drop request */
 
-err_bad_dir:
-	svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
-
-	serv->sv_stats->rpcbadfmt++;
-	goto dropit;			/* drop request */
-
 err_bad_rpc:
 	serv->sv_stats->rpcbadfmt++;
 	svc_putnl(resv, 1);	/* REJECT */
@@ -1220,6 +1200,100 @@ err_bad:
 EXPORT_SYMBOL_GPL(svc_process);
 
 /*
+ * Process the RPC request.
+ */
+int
+svc_process(struct svc_rqst *rqstp)
+{
+        struct kvec             *argv = &rqstp->rq_arg.head[0];
+        struct kvec             *resv = &rqstp->rq_res.head[0];
+        struct svc_serv         *serv = rqstp->rq_server;
+        u32                     dir;
+        int                     error;
+
+        /*
+         * Setup response xdr_buf.
+         * Initially it has just one page
+         */
+        rqstp->rq_resused = 1;
+        resv->iov_base = page_address(rqstp->rq_respages[0]);
+        resv->iov_len = 0;
+        rqstp->rq_res.pages = rqstp->rq_respages + 1;
+        rqstp->rq_res.len = 0;
+        rqstp->rq_res.page_base = 0;
+        rqstp->rq_res.page_len = 0;
+        rqstp->rq_res.buflen = PAGE_SIZE;
+        rqstp->rq_res.tail[0].iov_base = NULL;
+        rqstp->rq_res.tail[0].iov_len = 0;
+
+        rqstp->rq_xid = svc_getu32(argv);
+
+        dir  = svc_getnl(argv);
+        if (dir != 0) {
+                /* direction != CALL */
+                svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+                serv->sv_stats->rpcbadfmt++;
+                svc_drop(rqstp);
+                return 0;
+        }
+
+        error = svc_process_common(rqstp, argv, resv);
+        if (error <= 0)
+                return error;
+
+        return svc_send(rqstp);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Process a backchannel RPC request that arrived over an existing
+ * outbound connection
+ */
+int
+bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
+               struct svc_rqst *rqstp)
+{
+        struct kvec     *argv = &rqstp->rq_arg.head[0];
+        struct kvec     *resv = &rqstp->rq_res.head[0];
+        int             error;
+
+        /* Build the svc_rqst used by the common processing routine */
+        rqstp->rq_xprt = serv->bc_xprt;
+        rqstp->rq_xid = req->rq_xid;
+        rqstp->rq_prot = req->rq_xprt->prot;
+        rqstp->rq_server = serv;
+
+        rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
+        memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
+        memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
+        memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+        /* reset result send buffer "put" position */
+        resv->iov_len = 0;
+
+        if (rqstp->rq_prot != IPPROTO_TCP) {
+                printk(KERN_ERR "No support for Non-TCP transports!\n");
+                BUG();
+        }
+
+        /*
+         * Skip the next two words because they've already been
+         * processed in the transport
+         */
+        svc_getu32(argv);       /* XID */
+        svc_getnl(argv);        /* CALLDIR */
+
+        error = svc_process_common(rqstp, argv, resv);
+        if (error <= 0)
+                return error;
+
+        memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+        return bc_send(req);
+}
+EXPORT_SYMBOL(bc_svc_process);
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
  * Return (transport-specific) limit on the rpc payload.
  */
 u32 svc_max_payload(const struct svc_rqst *rqstp)
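
bc_svc_process() expects a dedicated service thread to feed it the requests that xs_tcp_read_callback() queues on sv_cb_list. The following is a hedged sketch of the shape of that consumer loop, illustrative only: a single consumer is assumed, and thread setup, decode failures, and shutdown are elided.

static int example_callback_svc(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        struct rpc_rqst *req;

        for (;;) {
                /* sv_cb_waitq is signalled by xs_tcp_read_callback() */
                wait_event_interruptible(serv->sv_cb_waitq,
                                !list_empty(&serv->sv_cb_list));

                spin_lock_bh(&serv->sv_cb_lock);
                req = list_first_entry(&serv->sv_cb_list,
                                struct rpc_rqst, rq_bc_list);
                list_del(&req->rq_bc_list);
                spin_unlock_bh(&serv->sv_cb_lock);

                /* dispatch the call and send the reply back over the
                 * existing forechannel connection */
                bc_svc_process(serv, req, rqstp);
        }
        return 0;
}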
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4a..a2a03e500533 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1327,3 +1327,42 @@ static void svc_sock_free(struct svc_xprt *xprt)
 	sock_release(svsk->sk_sock);
 	kfree(svsk);
 }
+
+/*
+ * Create a svc_xprt.
+ *
+ * For internal use only (e.g. nfsv4.1 backchannel).
+ * Callers should typically use the xpo_create() method.
+ */
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+{
+        struct svc_sock *svsk;
+        struct svc_xprt *xprt = NULL;
+
+        dprintk("svc: %s\n", __func__);
+        svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
+        if (!svsk)
+                goto out;
+
+        xprt = &svsk->sk_xprt;
+        if (prot == IPPROTO_TCP)
+                svc_xprt_init(&svc_tcp_class, xprt, serv);
+        else if (prot == IPPROTO_UDP)
+                svc_xprt_init(&svc_udp_class, xprt, serv);
+        else
+                BUG();
+out:
+        dprintk("svc: %s return %p\n", __func__, xprt);
+        return xprt;
+}
+EXPORT_SYMBOL_GPL(svc_sock_create);
+
+/*
+ * Destroy a svc_sock.
+ */
+void svc_sock_destroy(struct svc_xprt *xprt)
+{
+        if (xprt)
+                kfree(container_of(xprt, struct svc_sock, sk_xprt));
+}
+EXPORT_SYMBOL_GPL(svc_sock_destroy);
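
svc_sock_create() exists so that the client side can give svc_destroy() something to release: the NFSv4.1 backchannel has no listening socket of its own, only this placeholder transport. A minimal, hypothetical wiring-up, matching the TCP-only restriction in bc_svc_process():

static int example_setup_bc_xprt(struct svc_serv *serv)
{
        serv->bc_xprt = svc_sock_create(serv, IPPROTO_TCP);
        if (!serv->bc_xprt)
                return -ENOMEM;
        return 0;               /* svc_destroy() frees it later */
}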
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 06ca058572f2..f412a852bc73 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -12,8 +12,9 @@
  * - Next, the caller puts together the RPC message, stuffs it into
  *   the request struct, and calls xprt_transmit().
  * - xprt_transmit sends the message and installs the caller on the
- *   transport's wait list. At the same time, it installs a timer that
- *   is run after the packet's timeout has expired.
+ *   transport's wait list. At the same time, if a reply is expected,
+ *   it installs a timer that is run after the packet's timeout has
+ *   expired.
  * - When a packet arrives, the data_ready handler walks the list of
  *   pending requests for that transport. If a matching XID is found, the
  *   caller is woken up, and the timer removed.
@@ -46,6 +47,8 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/metrics.h>
 
+#include "sunrpc.h"
+
 /*
  * Local variables
  */
@@ -192,8 +195,8 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
  */
 int xprt_reserve_xprt(struct rpc_task *task)
 {
-	struct rpc_xprt	*xprt = task->tk_xprt;
 	struct rpc_rqst *req = task->tk_rqstp;
+	struct rpc_xprt	*xprt = req->rq_xprt;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
@@ -803,9 +806,10 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
 
 	list_del_init(&req->rq_list);
 	req->rq_private_buf.len = copied;
-	/* Ensure all writes are done before we update req->rq_received */
+	/* Ensure all writes are done before we update */
+	/* req->rq_reply_bytes_recvd */
 	smp_wmb();
-	req->rq_received = copied;
+	req->rq_reply_bytes_recvd = copied;
 	rpc_wake_up_queued_task(&xprt->pending, task);
 }
 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -820,7 +824,7 @@ static void xprt_timer(struct rpc_task *task)
 	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
 
 	spin_lock_bh(&xprt->transport_lock);
-	if (!req->rq_received) {
+	if (!req->rq_reply_bytes_recvd) {
 		if (xprt->ops->timer)
 			xprt->ops->timer(task);
 	} else
@@ -842,8 +846,8 @@ int xprt_prepare_transmit(struct rpc_task *task)
 	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
 
 	spin_lock_bh(&xprt->transport_lock);
-	if (req->rq_received && !req->rq_bytes_sent) {
-		err = req->rq_received;
+	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
+		err = req->rq_reply_bytes_recvd;
 		goto out_unlock;
 	}
 	if (!xprt->ops->reserve_xprt(task))
@@ -855,7 +859,7 @@ out_unlock:
 
 void xprt_end_transmit(struct rpc_task *task)
 {
-	xprt_release_write(task->tk_xprt, task);
+	xprt_release_write(task->tk_rqstp->rq_xprt, task);
 }
 
 /**
@@ -872,8 +876,11 @@ void xprt_transmit(struct rpc_task *task)
 
 	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
 
-	if (!req->rq_received) {
-		if (list_empty(&req->rq_list)) {
+	if (!req->rq_reply_bytes_recvd) {
+		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
+			/*
+			 * Add to the list only if we're expecting a reply
+			 */
 			spin_lock_bh(&xprt->transport_lock);
 			/* Update the softirq receive buffer */
 			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
@@ -908,8 +915,13 @@ void xprt_transmit(struct rpc_task *task)
 	/* Don't race with disconnect */
 	if (!xprt_connected(xprt))
 		task->tk_status = -ENOTCONN;
-	else if (!req->rq_received)
+	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+		/*
+		 * Sleep on the pending queue since
+		 * we're expecting a reply.
+		 */
 		rpc_sleep_on(&xprt->pending, task, xprt_timer);
+	}
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
@@ -982,11 +994,17 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
  */
 void xprt_release(struct rpc_task *task)
 {
-	struct rpc_xprt	*xprt = task->tk_xprt;
+	struct rpc_xprt	*xprt;
 	struct rpc_rqst	*req;
+	int is_bc_request;
 
 	if (!(req = task->tk_rqstp))
 		return;
+
+	/* Preallocated backchannel request? */
+	is_bc_request = bc_prealloc(req);
+
+	xprt = req->rq_xprt;
 	rpc_count_iostats(task);
 	spin_lock_bh(&xprt->transport_lock);
 	xprt->ops->release_xprt(xprt, task);
@@ -999,10 +1017,19 @@ void xprt_release(struct rpc_task *task)
 		mod_timer(&xprt->timer,
 				xprt->last_used + xprt->idle_timeout);
 	spin_unlock_bh(&xprt->transport_lock);
-	xprt->ops->buf_free(req->rq_buffer);
+	if (!bc_prealloc(req))
+		xprt->ops->buf_free(req->rq_buffer);
 	task->tk_rqstp = NULL;
 	if (req->rq_release_snd_buf)
 		req->rq_release_snd_buf(req);
+
+	/*
+	 * Early exit if this is a backchannel preallocated request.
+	 * There is no need to have it added to the RPC slot list.
+	 */
+	if (is_bc_request)
+		return;
+
 	memset(req, 0, sizeof(*req));	/* mark unused */
 
 	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
@@ -1049,6 +1076,11 @@ found:
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
+#if defined(CONFIG_NFS_V4_1)
+	spin_lock_init(&xprt->bc_pa_lock);
+	INIT_LIST_HEAD(&xprt->bc_pa_list);
+#endif /* CONFIG_NFS_V4_1 */
+
 	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
 	setup_timer(&xprt->timer, xprt_init_autodisconnect,
 			(unsigned long)xprt);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6c2d61586551..9111d11c09fd 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -34,6 +34,9 @@
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xprtsock.h>
 #include <linux/file.h>
+#ifdef CONFIG_NFS_V4_1
+#include <linux/sunrpc/bc_xprt.h>
+#endif
 
 #include <net/sock.h>
 #include <net/checksum.h>
@@ -270,6 +273,13 @@ struct sock_xprt {
 #define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
 #define TCP_RCV_COPY_XID	(1UL << 2)
 #define TCP_RCV_COPY_DATA	(1UL << 3)
+#define TCP_RCV_READ_CALLDIR	(1UL << 4)
+#define TCP_RCV_COPY_CALLDIR	(1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY		(1UL << 6)
 
 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 {
@@ -956,7 +966,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
 		transport->tcp_offset = 0;
 
 	/* Sanity check of the record length */
-	if (unlikely(transport->tcp_reclen < 4)) {
+	if (unlikely(transport->tcp_reclen < 8)) {
 		dprintk("RPC: invalid TCP record fragment length\n");
 		xprt_force_disconnect(xprt);
 		return;
@@ -991,33 +1001,77 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r
 	if (used != len)
 		return;
 	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
-	transport->tcp_flags |= TCP_RCV_COPY_DATA;
+	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
 	transport->tcp_copied = 4;
-	dprintk("RPC: reading reply for XID %08x\n",
+	dprintk("RPC: reading %s XID %08x\n",
+			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
+							: "request with",
 			ntohl(transport->tcp_xid));
 	xs_tcp_check_fraghdr(transport);
 }
 
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+				       struct xdr_skb_reader *desc)
 {
-	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
-	struct rpc_rqst *req;
+	size_t len, used;
+	u32 offset;
+	__be32 calldir;
+
+	/*
+	 * We want transport->tcp_offset to be 8 at the end of this routine
+	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
+	 * When this function is called for the first time,
+	 * transport->tcp_offset is 4 (after having already read the xid).
+	 */
+	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
+	len = sizeof(calldir) - offset;
+	dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
+	used = xdr_skb_read_bits(desc, &calldir, len);
+	transport->tcp_offset += used;
+	if (used != len)
+		return;
+	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
+	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+	transport->tcp_flags |= TCP_RCV_COPY_DATA;
+	/*
+	 * We don't yet have the XDR buffer, so we will write the calldir
+	 * out after we get the buffer from the 'struct rpc_rqst'
+	 */
+	if (ntohl(calldir) == RPC_REPLY)
+		transport->tcp_flags |= TCP_RPC_REPLY;
+	else
+		transport->tcp_flags &= ~TCP_RPC_REPLY;
+	dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
+			(transport->tcp_flags & TCP_RPC_REPLY) ?
+				"reply for" : "request with", calldir);
+	xs_tcp_check_fraghdr(transport);
+}
+
+static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
+				      struct xdr_skb_reader *desc,
+				      struct rpc_rqst *req)
+{
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
 	struct xdr_buf *rcvbuf;
 	size_t len;
 	ssize_t r;
 
-	/* Find and lock the request corresponding to this xid */
-	spin_lock(&xprt->transport_lock);
-	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
-	if (!req) {
-		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
-		dprintk("RPC: XID %08x request not found!\n",
-				ntohl(transport->tcp_xid));
-		spin_unlock(&xprt->transport_lock);
-		return;
+	rcvbuf = &req->rq_private_buf;
+
+	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
+		/*
+		 * Save the RPC direction in the XDR buffer
+		 */
+		__be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
+					htonl(RPC_REPLY) : 0;
+
+		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
+			&calldir, sizeof(calldir));
+		transport->tcp_copied += sizeof(calldir);
+		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
 	}
 
-	rcvbuf = &req->rq_private_buf;
 	len = desc->count;
 	if (len > transport->tcp_reclen - transport->tcp_offset) {
 		struct xdr_skb_reader my_desc;
@@ -1054,7 +1108,7 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
1054 "tcp_offset = %u, tcp_reclen = %u\n", 1108 "tcp_offset = %u, tcp_reclen = %u\n",
1055 xprt, transport->tcp_copied, 1109 xprt, transport->tcp_copied,
1056 transport->tcp_offset, transport->tcp_reclen); 1110 transport->tcp_offset, transport->tcp_reclen);
1057 goto out; 1111 return;
1058 } 1112 }
1059 1113
1060 dprintk("RPC: XID %08x read %Zd bytes\n", 1114 dprintk("RPC: XID %08x read %Zd bytes\n",
@@ -1070,11 +1124,125 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
 		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
 	}
 
-out:
+	return;
+}
+
+/*
+ * Finds the request corresponding to the RPC xid and invokes the common
+ * tcp read code to read the data.
+ */
+static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
+				    struct xdr_skb_reader *desc)
+{
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+	struct rpc_rqst *req;
+
+	dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
+
+	/* Find and lock the request corresponding to this xid */
+	spin_lock(&xprt->transport_lock);
+	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
+	if (!req) {
+		dprintk("RPC: XID %08x request not found!\n",
+			ntohl(transport->tcp_xid));
+		spin_unlock(&xprt->transport_lock);
+		return -1;
+	}
+
+	xs_tcp_read_common(xprt, desc, req);
+
 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
+
 	spin_unlock(&xprt->transport_lock);
-	xs_tcp_check_fraghdr(transport);
+	return 0;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Obtains an rpc_rqst previously allocated and invokes the common
+ * tcp read code to read the data.  The result is placed in the callback
+ * queue.
+ * If we're unable to obtain the rpc_rqst we schedule the closing of the
+ * connection and return -1.
+ */
+static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+				       struct xdr_skb_reader *desc)
+{
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+	struct rpc_rqst *req;
+
+	req = xprt_alloc_bc_request(xprt);
+	if (req == NULL) {
+		printk(KERN_WARNING "Callback slot table overflowed\n");
+		xprt_force_disconnect(xprt);
+		return -1;
+	}
+
+	req->rq_xid = transport->tcp_xid;
+	dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
+	xs_tcp_read_common(xprt, desc, req);
+
+	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
+		struct svc_serv *bc_serv = xprt->bc_serv;
+
+		/*
+		 * Add callback request to callback list.  The callback
+		 * service sleeps on the sv_cb_waitq waiting for new
+		 * requests.  Wake it up after enqueuing the
+		 * request.
+		 */
+		dprintk("RPC: add callback request to list\n");
+		spin_lock(&bc_serv->sv_cb_lock);
+		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+		spin_unlock(&bc_serv->sv_cb_lock);
+		wake_up(&bc_serv->sv_cb_waitq);
+	}
+
+	req->rq_private_buf.len = transport->tcp_copied;
+
+	return 0;
+}
+
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+					struct xdr_skb_reader *desc)
+{
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+
+	return (transport->tcp_flags & TCP_RPC_REPLY) ?
+		xs_tcp_read_reply(xprt, desc) :
+		xs_tcp_read_callback(xprt, desc);
+}
+#else
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+					struct xdr_skb_reader *desc)
+{
+	return xs_tcp_read_reply(xprt, desc);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Read data off the transport.  This can be either an RPC_CALL or an
+ * RPC_REPLY.  Relay the processing to helper functions.
+ */
+static void xs_tcp_read_data(struct rpc_xprt *xprt,
+			     struct xdr_skb_reader *desc)
+{
+	struct sock_xprt *transport =
+				container_of(xprt, struct sock_xprt, xprt);
+
+	if (_xs_tcp_read_data(xprt, desc) == 0)
+		xs_tcp_check_fraghdr(transport);
+	else {
+		/*
+		 * The transport_lock protects the request handling.
+		 * There's no need to hold it to update the tcp_flags.
+		 */
+		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+	}
 }
 
 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
@@ -1114,9 +1282,14 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
 			xs_tcp_read_xid(transport, &desc);
 			continue;
 		}
+		/* Read in the call/reply flag */
+		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
+			xs_tcp_read_calldir(transport, &desc);
+			continue;
+		}
 		/* Read in the request data */
 		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
-			xs_tcp_read_request(xprt, &desc);
+			xs_tcp_read_data(xprt, &desc);
 			continue;
 		}
 		/* Skip over any trailing bytes on short reads */
@@ -2010,6 +2183,9 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 	.buf_free		= rpc_free,
 	.send_request		= xs_tcp_send_request,
 	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
+#if defined(CONFIG_NFS_V4_1)
+	.release_request	= bc_release_request,
+#endif /* CONFIG_NFS_V4_1 */
 	.close			= xs_tcp_close,
 	.destroy		= xs_destroy,
 	.print_stats		= xs_tcp_print_stats,
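
For reference, the receive state machine above walks the following on-the-wire prefix. The struct is illustrative only (the code reads these fields incrementally, not through such a struct): the 4-byte record marker is not counted in tcp_reclen, which is why records shorter than 8 bytes (XID plus call direction) are now rejected and why tcp_offset must reach 8 before body copying starts.

struct example_rpc_tcp_prefix {
        __be32  marker;         /* last-fragment flag (top bit) plus
                                 * 31-bit fragment length (= tcp_reclen) */
        __be32  xid;            /* matched against pending requests, or
                                 * copied into a preallocated bc rqst */
        __be32  calldir;        /* RPC_CALL (0) or RPC_REPLY (1) */
};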