author	Chuck Lever <chuck.lever@oracle.com>	2015-12-16 17:23:20 -0500
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-12-18 15:34:33 -0500
commit	26ae9d1c5af1b1d669ca1c28fc02bbca3d778d45 (patch)
tree	d3bc75de097cbb8e9de3325a6e2bd80230c744a2 /net
parent	68791649a725ac58c88b472ea6187853e67b3415 (diff)
xprtrdma: Revert commit e7104a2a9606 ('xprtrdma: Cap req_cqinit').
The root of the problem was that sends (especially unsignalled
FASTREG and LOCAL_INV Work Requests) were not properly flow-controlled,
which allowed a send queue overrun.

Now that the RPC/RDMA reply handler waits for invalidation to complete,
the send queue is properly flow-controlled. Thus this limit is no
longer necessary.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
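For context, a minimal sketch (not taken from this patch) of how rep_cqinit and the INIT_CQCOUNT()/DECR_CQCOUNT() macros typically gate send-completion signalling when posting a send Work Request. The function name and exact structure below are illustrative assumptions; rep_cqinit, the CQCOUNT macros, and IB_SEND_SIGNALED come from the code this patch touches.

/*
 * Illustrative sketch only: decide whether a send WR must be signalled.
 * The real logic lives in rpcrdma_ep_post() in net/sunrpc/xprtrdma/verbs.c
 * and may differ in detail.
 */
static void rpcrdma_set_send_flags_sketch(struct rpcrdma_ep *ep,
					  struct ib_send_wr *send_wr)
{
	if (DECR_CQCOUNT(ep) > 0) {
		/* Headroom remains: post this WR unsignalled. */
		send_wr->send_flags = 0;
	} else {
		/* Counter exhausted: reset it and request a signalled
		 * send so completed WRs can be reaped from the send queue.
		 */
		INIT_CQCOUNT(ep);
		send_wr->send_flags = IB_SEND_SIGNALED;
	}
}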
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	6
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	6
2 files changed, 2 insertions(+), 10 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 003630733ef3..732c71ce5dca 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -616,10 +616,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 
 	/* set trigger for requesting send completion */
 	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-	if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
-		ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
-	else if (ep->rep_cqinit <= 2)
-		ep->rep_cqinit = 0;
+	if (ep->rep_cqinit <= 2)
+		ep->rep_cqinit = 0;	/* always signal? */
 	INIT_CQCOUNT(ep);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index ddae4909982b..728101ddc44b 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -88,12 +88,6 @@ struct rpcrdma_ep {
 	struct delayed_work	rep_connect_worker;
 };
 
-/*
- * Force a signaled SEND Work Request every so often,
- * in case the provider needs to do some housekeeping.
- */
-#define RPCRDMA_MAX_UNSIGNALED_SENDS	(32)
-
 #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
 #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
 