diff options
author | Mike Marciniszyn <mike.marciniszyn@intel.com> | 2014-03-28 13:26:42 -0400 |
---|---|---|
committer | Roland Dreier <roland@purestorage.com> | 2014-04-01 14:16:31 -0400 |
commit | 446bf432a9b084d9f3471eca309cc53fa434ccc7 (patch) | |
tree | 55d470e51d5f5c2852dc0afc97589ad2a5acc447 | |
parent | cfbf8d4857c26a8a307fb7cd258074c9dcd8c691 (diff) |
IB/qib: Remove ib_sg_dma_address() and ib_sg_dma_len() overloads
Remove the overloads for .dma_len and .dma_address.
The removal of these methods is compensated for by code changes to
.map_sg to ensure that the vanilla sg_dma_address() and sg_dma_len()
will do the same thing as the equivalent former ib_sg_dma_address()
and ib_sg_dma_len() calls into the drivers.
Suggested-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Tested-by: Vinod Kumar <vinod.kumar@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r-- | drivers/infiniband/hw/qib/qib_dma.c | 21 |
1 file changed, 4 insertions, 17 deletions
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c index 2920bb39a65b..59fe092b4b0f 100644 --- a/drivers/infiniband/hw/qib/qib_dma.c +++ b/drivers/infiniband/hw/qib/qib_dma.c | |||
@@ -108,6 +108,10 @@ static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, | |||
108 | ret = 0; | 108 | ret = 0; |
109 | break; | 109 | break; |
110 | } | 110 | } |
111 | sg->dma_address = addr + sg->offset; | ||
112 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | ||
113 | sg->dma_length = sg->length; | ||
114 | #endif | ||
111 | } | 115 | } |
112 | return ret; | 116 | return ret; |
113 | } | 117 | } |
@@ -119,21 +123,6 @@ static void qib_unmap_sg(struct ib_device *dev, | |||
119 | BUG_ON(!valid_dma_direction(direction)); | 123 | BUG_ON(!valid_dma_direction(direction)); |
120 | } | 124 | } |
121 | 125 | ||
122 | static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) | ||
123 | { | ||
124 | u64 addr = (u64) page_address(sg_page(sg)); | ||
125 | |||
126 | if (addr) | ||
127 | addr += sg->offset; | ||
128 | return addr; | ||
129 | } | ||
130 | |||
131 | static unsigned int qib_sg_dma_len(struct ib_device *dev, | ||
132 | struct scatterlist *sg) | ||
133 | { | ||
134 | return sg->length; | ||
135 | } | ||
136 | |||
137 | static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, | 126 | static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, |
138 | size_t size, enum dma_data_direction dir) | 127 | size_t size, enum dma_data_direction dir) |
139 | { | 128 | { |
@@ -173,8 +162,6 @@ struct ib_dma_mapping_ops qib_dma_mapping_ops = { | |||
173 | .unmap_page = qib_dma_unmap_page, | 162 | .unmap_page = qib_dma_unmap_page, |
174 | .map_sg = qib_map_sg, | 163 | .map_sg = qib_map_sg, |
175 | .unmap_sg = qib_unmap_sg, | 164 | .unmap_sg = qib_unmap_sg, |
176 | .dma_address = qib_sg_dma_address, | ||
177 | .dma_len = qib_sg_dma_len, | ||
178 | .sync_single_for_cpu = qib_sync_single_for_cpu, | 165 | .sync_single_for_cpu = qib_sync_single_for_cpu, |
179 | .sync_single_for_device = qib_sync_single_for_device, | 166 | .sync_single_for_device = qib_sync_single_for_device, |
180 | .alloc_coherent = qib_dma_alloc_coherent, | 167 | .alloc_coherent = qib_dma_alloc_coherent, |