diff options
author | Vipul Pandya <vipul@chelsio.com> | 2013-03-14 01:09:02 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-03-14 11:35:59 -0400 |
commit | 0e5eca791c8d8dd7622a58785947f1cab92e595c (patch) | |
tree | 2f7651ca1646b1d50d6b9d27333ce94bd966f165 /drivers/infiniband | |
parent | 42b6a949903d28f59c95f4c71080aa8b41e3d1d1 (diff) |
RDMA/cxgb4: Map pbl buffers for dma if using DSGL.
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/cxgb4/mem.c | 29 |
1 file changed, 23 insertions, 6 deletions
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 33db9ee307dc..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -51,7 +51,7 @@ module_param(inline_threshold, int, 0644); | |||
51 | MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)"); | 51 | MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)"); |
52 | 52 | ||
53 | static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, | 53 | static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, |
54 | u32 len, void *data, int wait) | 54 | u32 len, dma_addr_t data, int wait) |
55 | { | 55 | { |
56 | struct sk_buff *skb; | 56 | struct sk_buff *skb; |
57 | struct ulp_mem_io *req; | 57 | struct ulp_mem_io *req; |
@@ -88,7 +88,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, | |||
88 | sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) | | 88 | sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) | |
89 | ULPTX_NSGE(1)); | 89 | ULPTX_NSGE(1)); |
90 | sgl->len0 = cpu_to_be32(len); | 90 | sgl->len0 = cpu_to_be32(len); |
91 | sgl->addr0 = cpu_to_be64(virt_to_phys(data)); | 91 | sgl->addr0 = cpu_to_be64(data); |
92 | 92 | ||
93 | ret = c4iw_ofld_send(rdev, skb); | 93 | ret = c4iw_ofld_send(rdev, skb); |
94 | if (ret) | 94 | if (ret) |
@@ -178,6 +178,13 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) | |||
178 | u32 remain = len; | 178 | u32 remain = len; |
179 | u32 dmalen; | 179 | u32 dmalen; |
180 | int ret = 0; | 180 | int ret = 0; |
181 | dma_addr_t daddr; | ||
182 | dma_addr_t save; | ||
183 | |||
184 | daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE); | ||
185 | if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr)) | ||
186 | return -1; | ||
187 | save = daddr; | ||
181 | 188 | ||
182 | while (remain > inline_threshold) { | 189 | while (remain > inline_threshold) { |
183 | if (remain < T4_ULPTX_MAX_DMA) { | 190 | if (remain < T4_ULPTX_MAX_DMA) { |
@@ -188,16 +195,18 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) | |||
188 | } else | 195 | } else |
189 | dmalen = T4_ULPTX_MAX_DMA; | 196 | dmalen = T4_ULPTX_MAX_DMA; |
190 | remain -= dmalen; | 197 | remain -= dmalen; |
191 | ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, data, | 198 | ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr, |
192 | !remain); | 199 | !remain); |
193 | if (ret) | 200 | if (ret) |
194 | goto out; | 201 | goto out; |
195 | addr += dmalen >> 5; | 202 | addr += dmalen >> 5; |
196 | data += dmalen; | 203 | data += dmalen; |
204 | daddr += dmalen; | ||
197 | } | 205 | } |
198 | if (remain) | 206 | if (remain) |
199 | ret = _c4iw_write_mem_inline(rdev, addr, remain, data); | 207 | ret = _c4iw_write_mem_inline(rdev, addr, remain, data); |
200 | out: | 208 | out: |
209 | dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE); | ||
201 | return ret; | 210 | return ret; |
202 | } | 211 | } |
203 | 212 | ||
@@ -209,9 +218,17 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
209 | void *data) | 218 | void *data) |
210 | { | 219 | { |
211 | if (is_t5(rdev->lldi.adapter_type) && use_dsgl) { | 220 | if (is_t5(rdev->lldi.adapter_type) && use_dsgl) { |
212 | if (len > inline_threshold) | 221 | if (len > inline_threshold) { |
213 | return _c4iw_write_mem_dma(rdev, addr, len, data); | 222 | if (_c4iw_write_mem_dma(rdev, addr, len, data)) { |
214 | else | 223 | printk_ratelimited(KERN_WARNING |
224 | "%s: dma map" | ||
225 | " failure (non fatal)\n", | ||
226 | pci_name(rdev->lldi.pdev)); | ||
227 | return _c4iw_write_mem_inline(rdev, addr, len, | ||
228 | data); | ||
229 | } else | ||
230 | return 0; | ||
231 | } else | ||
215 | return _c4iw_write_mem_inline(rdev, addr, len, data); | 232 | return _c4iw_write_mem_inline(rdev, addr, len, data); |
216 | } else | 233 | } else |
217 | return _c4iw_write_mem_inline(rdev, addr, len, data); | 234 | return _c4iw_write_mem_inline(rdev, addr, len, data); |