Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  | 1 +
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 3 +++
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 5 +++++
3 files changed, 9 insertions, 0 deletions
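
This patch threads a DMA-map accounting counter through the server-side RPC/RDMA transport: each ib_dma_map_single()/ib_dma_map_page() call in the hunks below gains an atomic_inc() of sc_dma_used, svc_rdma_unmap_dma() gains the matching atomic_dec(), and __svc_rdma_free() gains a WARN_ON() that fires if any mapping is still outstanding when the transport is destroyed. As a standalone illustration of that pattern, here is a minimal userspace model, assuming C11 atomics in place of the kernel's atomic_t; dma_map(), dma_unmap(), xprt_free() and struct model_xprt are hypothetical stand-ins, not kernel APIs.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct model_xprt {
	atomic_int sc_dma_used;		/* outstanding DMA mappings */
};

/* Map: bump the counter, hand back a fake bus address. */
static uint64_t dma_map(struct model_xprt *xprt, void *buf)
{
	atomic_fetch_add(&xprt->sc_dma_used, 1);
	return (uint64_t)(uintptr_t)buf;
}

/* Unmap: the paired decrement. */
static void dma_unmap(struct model_xprt *xprt, uint64_t addr)
{
	(void)addr;
	atomic_fetch_sub(&xprt->sc_dma_used, 1);
}

/* Teardown: mirrors WARN_ON(atomic_read(&rdma->sc_dma_used) != 0). */
static void xprt_free(struct model_xprt *xprt)
{
	int used = atomic_load(&xprt->sc_dma_used);

	if (used != 0)
		fprintf(stderr, "warning: %d DMA mapping(s) leaked\n", used);
}

int main(void)
{
	struct model_xprt xprt;
	char buf[64];
	uint64_t addr;

	atomic_init(&xprt.sc_dma_used, 0);

	addr = dma_map(&xprt, buf);
	dma_unmap(&xprt, addr);
	xprt_free(&xprt);		/* balanced: silent */

	dma_map(&xprt, buf);
	xprt_free(&xprt);		/* unbalanced: warns about the leak */
	return 0;
}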
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index d25971b42a74..b4b17f44cb29 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -222,6 +222,7 @@ static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 	ctxt->count = count;
 	ctxt->direction = DMA_FROM_DEVICE;
 	for (i = 0; i < count; i++) {
+		atomic_inc(&xprt->sc_dma_used);
 		ctxt->sge[i].addr =
 			ib_dma_map_single(xprt->sc_cm_id->device,
 					  vec[i].iov_base, vec[i].iov_len,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index bdc11a30e937..a19b22b452a3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -163,6 +163,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		sge_bytes = min((size_t)bc,
 				(size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
 		sge[sge_no].length = sge_bytes;
+		atomic_inc(&xprt->sc_dma_used);
 		sge[sge_no].addr =
 			ib_dma_map_single(xprt->sc_cm_id->device,
 					  (void *)
@@ -385,6 +386,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	ctxt->count = 1;
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	atomic_inc(&rdma->sc_dma_used);
 	ctxt->sge[0].addr =
 		ib_dma_map_page(rdma->sc_cm_id->device,
 				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
@@ -396,6 +398,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
 		byte_count -= sge_bytes;
+		atomic_inc(&rdma->sc_dma_used);
 		ctxt->sge[sge_no].addr =
 			ib_dma_map_single(rdma->sc_cm_id->device,
 					  vec->sge[sge_no].iov_base,
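
In both send_write() and send_reply() the increment lands just before the ib_dma_map_single()/ib_dma_map_page() call, so a mapping that fails would leave sc_dma_used permanently elevated unless the error path decrements it. One defensive alternative is to count only mappings that succeed; the sketch below illustrates that ordering under the patch's own types. svc_rdma_map_single() is a hypothetical helper name, not part of this patch.

/* Hypothetical helper: only count mappings that actually succeed, so a
 * failed ib_dma_map_single() cannot skew sc_dma_used.  Illustration
 * only; not what this patch does. */
static u64 svc_rdma_map_single(struct svcxprt_rdma *xprt, void *va,
			       size_t len, enum dma_data_direction dir)
{
	u64 addr = ib_dma_map_single(xprt->sc_cm_id->device, va, len, dir);

	if (!ib_dma_mapping_error(xprt->sc_cm_id->device, addr))
		atomic_inc(&xprt->sc_dma_used);
	return addr;
}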
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 7e8ee66458ea..6fddd588c031 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -155,6 +155,7 @@ static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 	struct svcxprt_rdma *xprt = ctxt->xprt;
 	int i;
 	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+		atomic_dec(&xprt->sc_dma_used);
 		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				    ctxt->sge[i].addr,
 				    ctxt->sge[i].length,
@@ -519,6 +520,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	cma_xprt->sc_max_requests = svcrdma_max_requests;
 	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
 	atomic_set(&cma_xprt->sc_sq_count, 0);
+	atomic_set(&cma_xprt->sc_ctxt_used, 0);
 
 	if (!listener) {
 		int reqs = cma_xprt->sc_max_requests;
@@ -569,6 +571,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
+		atomic_inc(&xprt->sc_dma_used);
 		pa = ib_dma_map_page(xprt->sc_cm_id->device,
 				     page, 0, PAGE_SIZE,
 				     DMA_FROM_DEVICE);
@@ -1049,6 +1052,7 @@ static void __svc_rdma_free(struct work_struct *work)
 
 	/* Warn if we leaked a resource or under-referenced */
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
+	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);
 
 	/* Destroy the QP if present (not a listener) */
 	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -1169,6 +1173,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
+	atomic_inc(&xprt->sc_dma_used);
 	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
 				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	sge.lkey = xprt->sc_phys_mr->lkey;
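
Taken together, the svc_rdma_transport.c hunks close the accounting loop: svc_rdma_post_recv() and svc_rdma_send_error() count their ib_dma_map_page() calls just as the send and receive paths do, svc_rdma_unmap_dma() is the single point where the count comes back down, and the WARN_ON() in __svc_rdma_free() turns any imbalance into a visible leak report at connection teardown. Note that the decrement loop in svc_rdma_unmap_dma() stops at the first SGE with a zero length, so the scheme relies on mapped SGEs being populated contiguously from index 0.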