author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-03 17:28:32 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-03 18:27:49 -0400
commit     23bc9873ba60ee661d8e9f3a6b22fc3bcc4b7015 (patch)
tree       74419df7fa856a7b261ea1914810b21c281e4fa0 /arch/arm/common
parent     71695dd8b9eacfcda1b548a5b1780d34213ad654 (diff)
ARM: dmabounce: separate out decision to bounce
Move the decision to perform DMA bouncing out of map_single() into its
own stand-alone function.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
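For context on the mask arithmetic that the new needs_bounce() helper centralizes (see the hunk below): a buffer must be bounced when any byte of [dma_addr, dma_addr + size - 1] lies above the device's DMA mask. The following is a minimal stand-alone sketch of that test outside the kernel; the function name outside_dma_mask() and the sample addresses are illustrative only and are not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative user-space version of the mask test used by the patch:
 * OR-ing the first and last byte addresses of the buffer and masking off
 * the addressable bits leaves a non-zero value exactly when some part of
 * the buffer falls outside the device's DMA mask.
 */
static bool outside_dma_mask(uint64_t dma_addr, size_t size, uint64_t mask)
{
	return ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* hypothetical 32-bit-capable device */

	/* Buffer entirely below 4 GiB: no bounce needed (prints 0). */
	printf("%d\n", outside_dma_mask(0x80000000ULL, 0x1000, mask));

	/* Buffer crossing the 4 GiB boundary: must bounce (prints 1). */
	printf("%d\n", outside_dma_mask(0xfffff000ULL, 0x2000, mask));

	return 0;
}

With a 32-bit mask the first call reports no bounce because the whole buffer sits below 4 GiB, while the second crosses the boundary and would have to be copied into a safe buffer.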
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/dmabounce.c  46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 3e0fa154858..643e1d66067 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -219,36 +219,46 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	dma_addr = virt_to_dma(dev, ptr);
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;
 
 	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
+		unsigned long limit, mask = *dev->dma_mask;
 
 		limit = (mask + 1) & ~mask;
 		if (limit && size > limit) {
 			dev_err(dev, "DMA mapping too big (requested %#x "
 				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
+			return -E2BIG;
 		}
 
-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
 	}
 
-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
+	return dma_needs_bounce(dev, dma_addr, size) ? 1 : 0;
+}
+
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	dma_addr_t dma_addr;
+	int ret;
+
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );
+
+	dma_addr = virt_to_dma(dev, ptr);
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret > 0) {
 		struct safe_buffer *buf;
 
 		buf = alloc_safe_buffer(device_info, ptr, size, dir);