author     Zhu Yanjun <yanjun.zhu@oracle.com>      2017-03-13 01:43:48 -0400
committer  David S. Miller <davem@davemloft.net>   2017-03-14 02:20:05 -0400
commit     569f41d187ac2c9a9809b9f4e27109fc558053eb (patch)
tree       4c56592d57196cf92e3fe224b050dd491c9cfa9a /net/rds
parent     edd08f96db4f06beb09acdd71e00b07a1f6c13ca (diff)
rds: ib: unmap the scatter/gather list when error
When an error occurs, the scatter/gather list that has already been mapped to DMA addresses should be unmapped before returning.

Cc: Joe Jin <joe.jin@oracle.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
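For context, below is a minimal sketch (not part of this patch) of the error-handling pattern the change enforces in rds_ib_map_fmr(): once ib_dma_map_sg() has succeeded, every early error return must release the mapping with ib_dma_unmap_sg(). The function my_map_region() and its alignment check are illustrative placeholders only, not code from net/rds.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int my_map_region(struct ib_device *dev, struct scatterlist *sg,
			 unsigned int nents)
{
	int sg_dma_len;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len))
		return -EBUSY;	/* nothing mapped yet, nothing to undo */

	/* Hypothetical check, modelled on the patch: reject unaligned DMA. */
	if (sg_dma_address(sg) & ~PAGE_MASK) {
		/* Error after a successful map: unmap before returning. */
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	return 0;
}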
Diffstat (limited to 'net/rds')
-rw-r--r--    net/rds/ib_fmr.c    26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index c936b0d47693..86ef907067bb 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -112,29 +112,39 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
 		if (dma_addr & ~PAGE_MASK) {
-			if (i > 0)
+			if (i > 0) {
+				ib_dma_unmap_sg(dev, sg, nents,
+						DMA_BIDIRECTIONAL);
 				return -EINVAL;
-			else
+			} else {
 				++page_cnt;
+			}
 		}
 		if ((dma_addr + dma_len) & ~PAGE_MASK) {
-			if (i < sg_dma_len - 1)
+			if (i < sg_dma_len - 1) {
+				ib_dma_unmap_sg(dev, sg, nents,
+						DMA_BIDIRECTIONAL);
 				return -EINVAL;
-			else
+			} else {
 				++page_cnt;
+			}
 		}
 
 		len += dma_len;
 	}
 
 	page_cnt += len >> PAGE_SHIFT;
-	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
+	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		return -EINVAL;
+	}
 
 	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
 				 rdsibdev_to_node(rds_ibdev));
-	if (!dma_pages)
+	if (!dma_pages) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		return -ENOMEM;
+	}
 
 	page_cnt = 0;
 	for (i = 0; i < sg_dma_len; ++i) {
@@ -147,8 +157,10 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
 	}
 
 	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
-	if (ret)
+	if (ret) {
+		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
 		goto out;
+	}
 
 	/* Success - we successfully remapped the MR, so we can
 	 * safely tear down the old mapping.