author    Chuck Lever <chuck.lever@oracle.com>    2018-05-07 15:27:27 -0400
committer J. Bruce Fields <bfields@redhat.com>   2018-05-11 15:48:57 -0400
commit    2c577bfea85e421bfa91df16ccf5156361aa8d4b
tree      1ad1b19b5a5cb909f3078165641cabf2172d2362
parent    ecf85b2384ea5f7cb0577bf6143bc46d9ecfe4d3
svcrdma: Remove sc_rq_depth
Clean up: No need to retain sc_rq_depth in struct svcxprt_rdma; it is
used only in svc_rdma_accept(), where it can be a local variable.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
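
To make the cleanup concrete: after this patch the receive-queue depth
is computed, clamped, and consumed entirely within svc_rdma_accept(),
so nothing needs to live in struct svcxprt_rdma. Below is a minimal
user-space sketch of that arithmetic, not kernel code; the numeric
values are illustrative stand-ins for the svcrdma module parameters
and for dev->attrs.max_qp_wr.

/* Sketch of the post-patch receive-depth calculation in
 * svc_rdma_accept(). All values are illustrative stand-ins.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_requests = 64;    /* stand-in for svcrdma_max_requests */
	unsigned int max_bc_requests = 2;  /* stand-in for svcrdma_max_bc_requests */
	unsigned int max_qp_wr = 32;       /* stand-in for dev->attrs.max_qp_wr */
	unsigned int rq_depth;             /* now a local, not a struct field */

	/* Receive depth covers forward plus backchannel requests. */
	rq_depth = max_requests + max_bc_requests;
	if (rq_depth > max_qp_wr) {
		/* Clamp to the device limit, reserving two receive
		 * WRs for the backchannel, as the patch does. */
		rq_depth = max_qp_wr;
		max_requests = rq_depth - 2;
		max_bc_requests = 2;
	}
	printf("rq_depth=%u max_requests=%u max_bc_requests=%u\n",
	       rq_depth, max_requests, max_bc_requests);
	return 0;
}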
-rw-r--r--  include/linux/sunrpc/svc_rdma.h          |  1 -
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 17 ++++++++---------
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 37f759d65348..3cb66319a814 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -101,7 +101,6 @@ struct svcxprt_rdma {

 	atomic_t	sc_sq_avail;	/* SQEs ready to be consumed */
 	unsigned int	sc_sq_depth;	/* Depth of SQ */
-	unsigned int	sc_rq_depth;	/* Depth of RQ */
 	__be32		sc_fc_credits;	/* Forward credits */
 	u32		sc_max_requests;	/* Max requests */
 	u32		sc_max_bc_requests;/* Backward credits */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 05544f2f50d4..ef32c46a234c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -588,9 +588,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct rdma_conn_param conn_param;
 	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
+	unsigned int ctxts, rq_depth;
 	struct ib_device *dev;
 	struct sockaddr *sap;
-	unsigned int ctxts;
 	int ret = 0;

 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
@@ -621,19 +621,18 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
 	newxprt->sc_max_requests = svcrdma_max_requests;
 	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
-	newxprt->sc_rq_depth = newxprt->sc_max_requests +
-			       newxprt->sc_max_bc_requests;
-	if (newxprt->sc_rq_depth > dev->attrs.max_qp_wr) {
+	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
+	if (rq_depth > dev->attrs.max_qp_wr) {
 		pr_warn("svcrdma: reducing receive depth to %d\n",
 			dev->attrs.max_qp_wr);
-		newxprt->sc_rq_depth = dev->attrs.max_qp_wr;
-		newxprt->sc_max_requests = newxprt->sc_rq_depth - 2;
+		rq_depth = dev->attrs.max_qp_wr;
+		newxprt->sc_max_requests = rq_depth - 2;
 		newxprt->sc_max_bc_requests = 2;
 	}
 	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
 	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
 	ctxts *= newxprt->sc_max_requests;
-	newxprt->sc_sq_depth = newxprt->sc_rq_depth + ctxts;
+	newxprt->sc_sq_depth = rq_depth + ctxts;
 	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
 		pr_warn("svcrdma: reducing send depth to %d\n",
 			dev->attrs.max_qp_wr);
@@ -655,7 +654,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
-	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
+	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
 					0, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq)) {
 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
@@ -668,7 +667,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.port_num = newxprt->sc_port_num;
 	qp_attr.cap.max_rdma_ctxs = ctxts;
 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
-	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
+	qp_attr.cap.max_recv_wr = rq_depth;
 	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
 	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
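
The local rq_depth also feeds the send-queue sizing in the second hunk:
sc_sq_depth = rq_depth + ctxts, where ctxts is the RDMA R/W context
estimate. A companion sketch of that step follows; mr_factor is a
made-up stand-in for rdma_rw_mr_factor(), and since the hunk above ends
before the body of the send-side clamp, the final clamp here is an
assumption that simply mirrors the receive-side path.

/* Sketch of the send-queue sizing that consumes the local rq_depth.
 * mr_factor is a hypothetical stand-in for rdma_rw_mr_factor().
 */
#include <stdio.h>

int main(void)
{
	unsigned int rq_depth = 32;      /* result of the receive-side clamp */
	unsigned int max_requests = 30;
	unsigned int max_qp_wr = 4096;   /* stand-in for dev->attrs.max_qp_wr */
	unsigned int mr_factor = 8;      /* stand-in for rdma_rw_mr_factor() */
	unsigned int ctxts, sq_depth;

	/* One R/W context per MR, scaled by the number of
	 * in-flight requests, as in the patched code. */
	ctxts = mr_factor * max_requests;
	sq_depth = rq_depth + ctxts;
	if (sq_depth > max_qp_wr)
		sq_depth = max_qp_wr;  /* ASSUMPTION: mirrors the receive clamp */

	/* qp_attr.cap.max_send_wr is sq_depth minus the R/W contexts. */
	printf("ctxts=%u sq_depth=%u max_send_wr=%u\n",
	       ctxts, sq_depth, sq_depth - ctxts);
	return 0;
}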