author	Trond Myklebust <trond.myklebust@primarydata.com>	2015-12-07 15:52:23 -0500
committer	Trond Myklebust <trond.myklebust@primarydata.com>	2015-12-07 16:04:59 -0500
commit	756b9b37cfb2e3dc76b2e43a8c097402ac736e07 (patch)
tree	14503068e419723d06b4c56ec6544a907ae3f85a /net
parent	527e9316f8ec44bd53d90fb9f611fa7ffff52bb9 (diff)
SUNRPC: Fix callback channel
The NFSv4.1 callback channel is currently broken: the receive message
keeps shrinking because the backchannel receive buffer size never gets
reset. The easiest solution is, instead of changing the receive buffer,
to adjust the copied request.

Fixes: 38b7631fbe42 ("nfs4: limit callback decoding to received bytes")
Cc: Benjamin Coddington <bcodding@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/backchannel_rqst.c    8
-rw-r--r--   net/sunrpc/svc.c                12
2 files changed, 12 insertions, 8 deletions
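
The fix boils down to clamping the XDR buffer that describes the copied
callback request to the number of bytes actually received, while leaving
the persistent backchannel receive buffer (req->rq_rcv_buf) untouched.
Below is a minimal user-space sketch of that clamping logic, written
against a simplified stand-in for the kernel's struct xdr_buf (the real
definition in include/linux/sunrpc/xdr.h has more members); the names
xdr_buf_sketch and adjust_to_copied are illustrative only, not kernel
symbols.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for struct xdr_buf: a head area, a page area,
 * and a total length (the tail is omitted for brevity). */
struct xdr_buf_sketch {
        size_t head_len;   /* bytes available in the head iovec */
        size_t page_len;   /* bytes available in the page area */
        size_t len;        /* total length reported to the caller */
};

/* Clamp the buffer to 'copied' received bytes: shrink the head if the
 * data fits there, otherwise shrink the page area, and never report
 * more than head + pages. */
static void adjust_to_copied(struct xdr_buf_sketch *buf, size_t copied)
{
        buf->len = copied;
        if (buf->len <= buf->head_len) {
                buf->head_len = buf->len;
                buf->page_len = 0;
        } else if (buf->len <= buf->head_len + buf->page_len) {
                buf->page_len = buf->len - buf->head_len;
        } else {
                buf->len = buf->head_len + buf->page_len;
        }
}

int main(void)
{
        struct xdr_buf_sketch buf = { .head_len = 4096, .page_len = 8192, .len = 0 };

        adjust_to_copied(&buf, 1500);   /* received bytes fit in the head */
        printf("head=%zu pages=%zu len=%zu\n", buf.head_len, buf.page_len, buf.len);
        return 0;
}

Because bc_svc_process() now applies this adjustment to the per-request
copy in rqstp->rq_arg rather than to req->rq_rcv_buf, the shared receive
buffer keeps its full size for subsequent callbacks.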
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 95f82d8d4888..229956bf8457 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -353,20 +353,12 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
 	struct rpc_xprt *xprt = req->rq_xprt;
 	struct svc_serv *bc_serv = xprt->bc_serv;
-	struct xdr_buf *rq_rcv_buf = &req->rq_rcv_buf;
 
 	spin_lock(&xprt->bc_pa_lock);
 	list_del(&req->rq_bc_pa_list);
 	xprt_dec_alloc_count(xprt, 1);
 	spin_unlock(&xprt->bc_pa_lock);
 
-	if (copied <= rq_rcv_buf->head[0].iov_len) {
-		rq_rcv_buf->head[0].iov_len = copied;
-		rq_rcv_buf->page_len = 0;
-	} else {
-		rq_rcv_buf->page_len = copied - rq_rcv_buf->head[0].iov_len;
-	}
-
 	req->rq_private_buf.len = copied;
 	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7fccf9675df8..cc9852897395 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1363,7 +1363,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+	/* Adjust the argument buffer length */
 	rqstp->rq_arg.len = req->rq_private_buf.len;
+	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
+		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
+		rqstp->rq_arg.page_len = 0;
+	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
+			rqstp->rq_arg.page_len)
+		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
+			rqstp->rq_arg.head[0].iov_len;
+	else
+		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
+			rqstp->rq_arg.page_len;
 
 	/* reset result send buffer "put" position */
 	resv->iov_len = 0;