author	Trond Myklebust <trond.myklebust@primarydata.com>	2017-08-13 10:03:59 -0400
committer	Trond Myklebust <trond.myklebust@primarydata.com>	2017-08-16 15:10:15 -0400
commit	729749bb8da186e68d97d1b0439f0b1e0059c41d (patch)
tree	c1d450aa081919168535f06755247f022a09a517 /net/sunrpc/xprtsock.c
parent	2ce209c42c01ca976ad680fea52a8e8b9a53643b (diff)
SUNRPC: Don't hold the transport lock across socket copy operations
Instead add a mechanism to ensure that the request doesn't disappear
from underneath us while copying from the socket. We do this by
preventing xprt_release() from freeing the XDR buffers until the flag
RPC_TASK_MSG_RECV has been cleared from the request.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
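The xprt_pin_rqst()/xprt_unpin_rqst() helpers used in the hunks below live in
net/sunrpc/xprt.c and are added by a companion patch, so they do not appear in
this diff. As a rough sketch of the mechanism the message describes, assuming a
second wake-up bit (called RPC_TASK_MSG_RECV_WAIT here) alongside the
RPC_TASK_MSG_RECV flag named above:

	/* Sketch only -- not part of this diff. RPC_TASK_MSG_RECV_WAIT is an
	 * assumed helper bit; the real helpers are in net/sunrpc/xprt.c.
	 */

	/* Caller holds xprt->transport_lock. Mark the request so that
	 * xprt_release() will not free its XDR buffers while the socket
	 * copy is in progress.
	 */
	void xprt_pin_rqst(struct rpc_rqst *req)
	{
		set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate);
	}

	/* Caller holds xprt->transport_lock. Drop the pin and wake anyone
	 * sleeping in xprt_release() waiting for the copy to finish.
	 */
	void xprt_unpin_rqst(struct rpc_rqst *req)
	{
		struct rpc_task *task = req->rq_task;

		clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate);
		if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate))
			wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV);
	}

	/* Called from xprt_release() under the transport lock: block until
	 * the receive path has unpinned the request before freeing its
	 * rq_private_buf.
	 */
	static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
	{
		struct rpc_task *task = req->rq_task;

		if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
			spin_unlock_bh(&req->rq_xprt->transport_lock);
			set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
			wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
					TASK_UNINTERRUPTIBLE);
			clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
			spin_lock_bh(&req->rq_xprt->transport_lock);
		}
	}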
Diffstat (limited to 'net/sunrpc/xprtsock.c')
-rw-r--r--	net/sunrpc/xprtsock.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 4f154d388748..04dbc7027712 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -973,6 +973,8 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
+	xprt_pin_rqst(rovr);
+	spin_unlock_bh(&xprt->transport_lock);
 	task = rovr->rq_task;
 
 	copied = rovr->rq_private_buf.buflen;
@@ -981,11 +983,14 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 
 	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 		dprintk("RPC:       sk_buff copy failed\n");
-		goto out_unlock;
+		spin_lock_bh(&xprt->transport_lock);
+		goto out_unpin;
 	}
 
+	spin_lock_bh(&xprt->transport_lock);
 	xprt_complete_rqst(task, copied);
-
+out_unpin:
+	xprt_unpin_rqst(rovr);
 out_unlock:
 	spin_unlock_bh(&xprt->transport_lock);
 }
@@ -1054,6 +1059,8 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	rovr = xprt_lookup_rqst(xprt, *xp);
 	if (!rovr)
 		goto out_unlock;
+	xprt_pin_rqst(rovr);
+	spin_unlock_bh(&xprt->transport_lock);
 	task = rovr->rq_task;
 
 	if ((copied = rovr->rq_private_buf.buflen) > repsize)
@@ -1062,14 +1069,17 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
-		goto out_unlock;
+		spin_lock_bh(&xprt->transport_lock);
+		goto out_unpin;
 	}
 
 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 
+	spin_lock_bh(&xprt->transport_lock);
 	xprt_adjust_cwnd(xprt, task, copied);
 	xprt_complete_rqst(task, copied);
-
+out_unpin:
+	xprt_unpin_rqst(rovr);
 out_unlock:
 	spin_unlock_bh(&xprt->transport_lock);
 }
@@ -1351,12 +1361,15 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
 		spin_unlock_bh(&xprt->transport_lock);
 		return -1;
 	}
+	xprt_pin_rqst(req);
+	spin_unlock_bh(&xprt->transport_lock);
 
 	xs_tcp_read_common(xprt, desc, req);
 
+	spin_lock_bh(&xprt->transport_lock);
 	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
 		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
-
+	xprt_unpin_rqst(req);
 	spin_unlock_bh(&xprt->transport_lock);
 	return 0;
 }
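After this patch all three receive paths (AF_LOCAL, UDP, TCP) follow the same
shape. A condensed sketch of that pattern, with a hypothetical
copy_from_socket() standing in for the transport-specific copy step:

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_unlock;
	xprt_pin_rqst(req);			/* buffers stay alive without the lock */
	spin_unlock_bh(&xprt->transport_lock);	/* don't hold the lock across the copy */

	err = copy_from_socket(req);		/* hypothetical: skb/iovec copy */

	spin_lock_bh(&xprt->transport_lock);	/* xprt_complete_rqst() runs under it */
	if (!err)
		xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);			/* now xprt_release() may free buffers */
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);

The point of the change is that the pin, rather than the transport lock, is
what keeps rq_private_buf valid during the copy, so the lock no longer has to
be held across a potentially slow socket read.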