author	Mike Marciniszyn <mike.marciniszyn@intel.com>	2014-03-28 15:04:43 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-04-01 14:16:31 -0400
commit	49c5c27e05c915f0da4c0e756da313cf09ae2c55 (patch)
tree	0a000d6a5c89c42990b9cf4a3fb02fc6bdead8fe /drivers/infiniband
parent	446bf432a9b084d9f3471eca309cc53fa434ccc7 (diff)
IB/ipath: Remove ib_sg_dma_address() and ib_sg_dma_len() overloads
The removal of these methods is compensated for by code changes to .map_sg to ensure that the vanilla sg_dma_address() and sg_dma_len() will do the same thing as the equivalent former ib_sg_dma_address() and ib_sg_dma_len() calls into the drivers.

This patch also required converting struct ipath_dma_mapping_ops to a C99 designated initializer.

Suggested-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
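Why filling the scatterlist fields in .map_sg is sufficient: the vanilla accessors are thin macros over struct scatterlist. The lines below are a paraphrase of include/linux/scatterlist.h (the exact form may vary by kernel version), shown only to make the equivalence the commit message relies on explicit.

/*
 * Paraphrase of the generic scatterlist DMA accessors (see
 * include/linux/scatterlist.h; exact form may differ by kernel version).
 * Once ipath_map_sg() stores sg->dma_address (and sg->dma_length when
 * CONFIG_NEED_SG_DMA_LENGTH is set), these return the same values the
 * removed ipath_sg_dma_address()/ipath_sg_dma_len() overloads computed.
 */
#define sg_dma_address(sg)	((sg)->dma_address)

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif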
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_dma.c	43
1 file changed, 15 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 644c2c74e054..123a8c053539 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -115,6 +115,10 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
 			ret = 0;
 			break;
 		}
+		sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg->dma_length = sg->length;
+#endif
 	}
 	return ret;
 }
@@ -126,21 +130,6 @@ static void ipath_unmap_sg(struct ib_device *dev,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
-	u64 addr = (u64) page_address(sg_page(sg));
-
-	if (addr)
-		addr += sg->offset;
-	return addr;
-}
-
-static unsigned int ipath_sg_dma_len(struct ib_device *dev,
-				     struct scatterlist *sg)
-{
-	return sg->length;
-}
-
 static void ipath_sync_single_for_cpu(struct ib_device *dev,
 				      u64 addr,
 				      size_t size,
@@ -176,17 +165,15 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
 }
 
 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-	ipath_mapping_error,
-	ipath_dma_map_single,
-	ipath_dma_unmap_single,
-	ipath_dma_map_page,
-	ipath_dma_unmap_page,
-	ipath_map_sg,
-	ipath_unmap_sg,
-	ipath_sg_dma_address,
-	ipath_sg_dma_len,
-	ipath_sync_single_for_cpu,
-	ipath_sync_single_for_device,
-	ipath_dma_alloc_coherent,
-	ipath_dma_free_coherent
+	.mapping_error = ipath_mapping_error,
+	.map_single = ipath_dma_map_single,
+	.unmap_single = ipath_dma_unmap_single,
+	.map_page = ipath_dma_map_page,
+	.unmap_page = ipath_dma_unmap_page,
+	.map_sg = ipath_map_sg,
+	.unmap_sg = ipath_unmap_sg,
+	.sync_single_for_cpu = ipath_sync_single_for_cpu,
+	.sync_single_for_device = ipath_sync_single_for_device,
+	.alloc_coherent = ipath_dma_alloc_coherent,
+	.free_coherent = ipath_dma_free_coherent
 };
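As a caller-visible illustration of the equivalence the commit message asserts, a minimal hedged sketch follows. The helper name post_mapped_sgl() and the "post a work request" step are hypothetical and not part of this patch; only the pattern of mapping through the ib_dma_* entry points and then reading the plain scatterlist accessors is the point.

/*
 * Hypothetical ULP-side helper (illustration only; the function name and
 * the "post a work request" step are assumptions, not part of this patch).
 * Because ipath_map_sg() now stores the bus address and length in the
 * scatterlist, the vanilla accessors below return the same values the
 * removed ipath_sg_dma_address()/ipath_sg_dma_len() overloads computed.
 */
#include <rdma/ib_verbs.h>

static int post_mapped_sgl(struct ib_device *ibdev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int i, nmapped;

	nmapped = ib_dma_map_sg(ibdev, sgl, nents, DMA_TO_DEVICE);
	if (!nmapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, nmapped, i) {
		u64 dma_addr = sg_dma_address(sg);	/* formerly ib_sg_dma_address() */
		u32 len = sg_dma_len(sg);		/* formerly ib_sg_dma_len() */

		/* build and post a work request with dma_addr/len here */
		(void)dma_addr;
		(void)len;
	}
	return 0;
}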