diff options
author | Chuck Lever <chuck.lever@oracle.com> | 2017-08-28 15:06:22 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2017-09-05 15:15:31 -0400 |
commit | 26fb2254dd33b02a522fac42745693f5969b7d4b (patch) | |
tree | d1d9fd4b08b7b87465d9bebebd013b960739318b | |
parent | 0062818298662d0d05061949d12880146b5ebd65 (diff) |
svcrdma: Estimate Send Queue depth properly
The rdma_rw API adjusts max_send_wr upwards during the
rdma_create_qp() call. If the ULP actually wants to take advantage
of these extra resources, it must increase the size of its send
completion queue (created before rdma_create_qp is called) and
increase its send queue accounting limit.
Use the new rdma_rw_mr_factor API to figure out the correct value
to use for the Send Queue and Send Completion Queue depths.
And ensure that the chosen Send Queue depth for a newly created
transport does not overrun the QP WR limit of the underlying device.
Lastly, there's no longer a need to carry the Send Queue depth in
struct svcxprt_rdma, since the value is used only in the
svc_rdma_accept() path.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_transport.c | 17 |
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index cdb04f8a0c25..5caf8e722a11 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/workqueue.h> | 51 | #include <linux/workqueue.h> |
52 | #include <rdma/ib_verbs.h> | 52 | #include <rdma/ib_verbs.h> |
53 | #include <rdma/rdma_cm.h> | 53 | #include <rdma/rdma_cm.h> |
54 | #include <rdma/rw.h> | ||
54 | #include <linux/sunrpc/svc_rdma.h> | 55 | #include <linux/sunrpc/svc_rdma.h> |
55 | #include <linux/export.h> | 56 | #include <linux/export.h> |
56 | #include "xprt_rdma.h" | 57 | #include "xprt_rdma.h" |
@@ -713,7 +714,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
713 | struct ib_qp_init_attr qp_attr; | 714 | struct ib_qp_init_attr qp_attr; |
714 | struct ib_device *dev; | 715 | struct ib_device *dev; |
715 | struct sockaddr *sap; | 716 | struct sockaddr *sap; |
716 | unsigned int i; | 717 | unsigned int i, ctxts; |
717 | int ret = 0; | 718 | int ret = 0; |
718 | 719 | ||
719 | listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); | 720 | listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); |
@@ -754,7 +755,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
754 | newxprt->sc_max_bc_requests = 2; | 755 | newxprt->sc_max_bc_requests = 2; |
755 | } | 756 | } |
756 | newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests); | 757 | newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests); |
757 | newxprt->sc_sq_depth = newxprt->sc_rq_depth; | 758 | ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES); |
759 | ctxts *= newxprt->sc_max_requests; | ||
760 | newxprt->sc_sq_depth = newxprt->sc_rq_depth + ctxts; | ||
761 | if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) { | ||
762 | pr_warn("svcrdma: reducing send depth to %d\n", | ||
763 | dev->attrs.max_qp_wr); | ||
764 | newxprt->sc_sq_depth = dev->attrs.max_qp_wr; | ||
765 | } | ||
758 | atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth); | 766 | atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth); |
759 | 767 | ||
760 | if (!svc_rdma_prealloc_ctxts(newxprt)) | 768 | if (!svc_rdma_prealloc_ctxts(newxprt)) |
@@ -789,8 +797,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
789 | qp_attr.event_handler = qp_event_handler; | 797 | qp_attr.event_handler = qp_event_handler; |
790 | qp_attr.qp_context = &newxprt->sc_xprt; | 798 | qp_attr.qp_context = &newxprt->sc_xprt; |
791 | qp_attr.port_num = newxprt->sc_port_num; | 799 | qp_attr.port_num = newxprt->sc_port_num; |
792 | qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests; | 800 | qp_attr.cap.max_rdma_ctxs = ctxts; |
793 | qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; | 801 | qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts; |
794 | qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth; | 802 | qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth; |
795 | qp_attr.cap.max_send_sge = newxprt->sc_max_sge; | 803 | qp_attr.cap.max_send_sge = newxprt->sc_max_sge; |
796 | qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; | 804 | qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; |
@@ -858,6 +866,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
858 | dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap)); | 866 | dprintk(" remote address : %pIS:%u\n", sap, rpc_get_port(sap)); |
859 | dprintk(" max_sge : %d\n", newxprt->sc_max_sge); | 867 | dprintk(" max_sge : %d\n", newxprt->sc_max_sge); |
860 | dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth); | 868 | dprintk(" sq_depth : %d\n", newxprt->sc_sq_depth); |
869 | dprintk(" rdma_rw_ctxs : %d\n", ctxts); | ||
861 | dprintk(" max_requests : %d\n", newxprt->sc_max_requests); | 870 | dprintk(" max_requests : %d\n", newxprt->sc_max_requests); |
862 | dprintk(" ord : %d\n", newxprt->sc_ord); | 871 | dprintk(" ord : %d\n", newxprt->sc_ord); |
863 | 872 | ||