path: root/net/rds/ib_recv.c
author		Andy Grover <andy.grover@oracle.com>	2010-05-25 14:20:09 -0400
committer	Andy Grover <andy.grover@oracle.com>	2010-09-08 21:15:22 -0400
commit		fc24f78085e8771670af42f2b8929b16a0c98a22 (patch)
tree		6396f72983d9674b57292ca3c068e7b14c5c31be /net/rds/ib_recv.c
parent		3427e854e1a0e76be8b3d75fc0fa878f59b43693 (diff)
RDS/IB: Remove ib_recv_unmap_page()
All it does is call unmap_sg(), so just call that directly. The comment above
unmap_page also may be incorrect, so we shouldn't hold on to it, either.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
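Condensed from the hunks below, the change amounts to dropping a one-call wrapper and open-coding it at both call sites (bodies trimmed here for illustration):

/* Removed wrapper: its only real work was the unmap call. */
static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
				   struct rds_ib_recv_work *recv)
{
	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
			DMA_FROM_DEVICE);
}

/* Replacement at each former call site: unmap the one-entry sg directly. */
ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);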
Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r--	net/rds/ib_recv.c	22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5c7e6acf7c60..48add108ddee 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -51,24 +51,6 @@ static void rds_ib_frag_free(struct rds_page_frag *frag)
 	kmem_cache_free(rds_ib_frag_slab, frag);
 }
 
-/*
- * We map a page at a time. Its fragments are posted in order. This
- * is called in fragment order as the fragments get send completion events.
- * Only the last frag in the page performs the unmapping.
- *
- * It's OK for ring cleanup to call this in whatever order it likes because
- * DMA is not in flight and so we can unmap while other ring entries still
- * hold page references in their frags.
- */
-static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
-				   struct rds_ib_recv_work *recv)
-{
-	struct rds_page_frag *frag = recv->r_frag;
-
-	rdsdebug("recv %p frag %p page %p\n", recv, frag, sg_page(&frag->f_sg));
-	ib_dma_unmap_sg(ic->i_cm_id->device, &frag->f_sg, 1, DMA_FROM_DEVICE);
-}
-
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
 {
 	struct rds_ib_recv_work *recv;
@@ -105,7 +87,7 @@ static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
 		recv->r_ibinc = NULL;
 	}
 	if (recv->r_frag) {
-		rds_ib_recv_unmap_page(ic, recv);
+		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 		rds_ib_frag_free(recv->r_frag);
 		recv->r_frag = NULL;
 	}
@@ -768,7 +750,7 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
 
 		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
 
-		rds_ib_recv_unmap_page(ic, recv);
+		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
 
 		/*
 		 * Also process recvs in connecting state because it is possible
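The direct calls above use the IB DMA mapping API on a one-entry scatterlist. As a rough illustration of how the unmap pairs with a map on the receive side (not part of this patch; the real mapping happens in the RDS recv refill path, and the helper names here are hypothetical):

#include <rdma/ib_verbs.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical helper: map one page fragment for a receive. */
static int example_map_one_frag(struct ib_device *dev, struct scatterlist *sg,
				struct page *page, unsigned int len)
{
	sg_init_table(sg, 1);
	sg_set_page(sg, page, len, 0);

	/* ib_dma_map_sg() returns the number of mapped entries; 0 is failure. */
	if (ib_dma_map_sg(dev, sg, 1, DMA_FROM_DEVICE) != 1)
		return -ENOMEM;
	return 0;
}

/* Hypothetical helper: undo the mapping after the completion event,
 * with the same device, sg, nents and direction as the map call. */
static void example_unmap_one_frag(struct ib_device *dev, struct scatterlist *sg)
{
	ib_dma_unmap_sg(dev, sg, 1, DMA_FROM_DEVICE);
}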