author		Tom Tucker <tom@opengridcomputing.com>	2008-05-28 13:08:48 -0400
committer	Tom Tucker <tom@opengridcomputing.com>	2008-07-02 16:01:55 -0400
commit		e6ab9143719ff76f0b95f0866c4d0f6c743ad2e0 (patch)
tree		42e95b916e9fbe814d1d61557af478f557f9d750 /net/sunrpc/xprtrdma
parent		f820c57ebf5493d4602cc00577c8b0fadd27a7b8 (diff)
svcrdma: Move the DMA unmap logic to the CQ handler
Separate DMA unmap from context destruction and perform DMA unmapping
in the SQ/RQ CQ reap functions. This is necessary to support
software-based RDMA implementations that actually copy the data in
their ib_dma_unmap callback functions, and architectures that don't
have cache-coherent I/O busses.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
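For illustration only, not part of this patch: a minimal sketch of the provider behavior the message alludes to. In a software RDMA device that bounces DMA through a private buffer, received bytes may only reach the caller's buffer when the unmap callback runs, so the server must unmap before it reads the data. The function below and its bounce-buffer helpers (soft_find_bounce(), soft_free_bounce(), struct soft_bounce) are hypothetical; only the callback signature follows the kernel's dma_unmap_single convention of that era.

/*
 * Hypothetical software-provider sketch, not svcrdma code: the
 * unmap callback copies received bytes back from a private bounce
 * buffer into the caller's buffer.
 */
static void soft_unmap_single(struct ib_device *dev, u64 addr,
			      size_t size, enum dma_data_direction dir)
{
	struct soft_bounce *b = soft_find_bounce(dev, addr);

	/* Received data becomes visible to the CPU only here; reading
	 * the buffer before unmap would see stale bytes. */
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(b->caller_buf, b->bounce_buf, size);
	soft_free_bounce(b);
}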
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index fc86338bcbb2..7e8ee66458ea 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -150,6 +150,18 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 	return ctxt;
 }
 
+static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
+{
+	struct svcxprt_rdma *xprt = ctxt->xprt;
+	int i;
+	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
+				    ctxt->sge[i].addr,
+				    ctxt->sge[i].length,
+				    ctxt->direction);
+	}
+}
+
 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 {
 	struct svcxprt_rdma *xprt;
@@ -161,12 +173,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	for (i = 0; i < ctxt->count; i++)
 		put_page(ctxt->pages[i]);
 
-	for (i = 0; i < ctxt->count; i++)
-		ib_dma_unmap_single(xprt->sc_cm_id->device,
-				    ctxt->sge[i].addr,
-				    ctxt->sge[i].length,
-				    ctxt->direction);
-
 	spin_lock_bh(&xprt->sc_ctxt_lock);
 	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
 	spin_unlock_bh(&xprt->sc_ctxt_lock);
@@ -328,6 +334,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
 		ctxt->wc_status = wc.status;
 		ctxt->byte_len = wc.byte_len;
+		svc_rdma_unmap_dma(ctxt);
 		if (wc.status != IB_WC_SUCCESS) {
 			/* Close the transport */
 			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
@@ -377,6 +384,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
 		xprt = ctxt->xprt;
 
+		svc_rdma_unmap_dma(ctxt);
 		if (wc.status != IB_WC_SUCCESS)
 			/* Close the transport */
 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
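
Net effect, as a simplified sketch of the receive-side ordering the patch establishes (parse_and_process() is a placeholder for the real svcrdma receive handling, which this page does not show):

	/* Simplified reap loop; error handling and stats omitted. */
	while (ib_poll_cq(xprt->sc_rq_cq, 1, &wc) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		svc_rdma_unmap_dma(ctxt);  /* sync/copy-back happens first ... */
		parse_and_process(ctxt);   /* ... so the data is safe to read  */
	}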