Diffstat (limited to 'arch/arm/common/dmabounce.c')
-rw-r--r--  arch/arm/common/dmabounce.c  193
1 file changed, 83 insertions, 110 deletions
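
The change below drops the global dma_needs_bounce() hook and the __dma_map_single()/__dma_unmap_single() entry points; the bounce decision now lives in __dma_map_page(), driven by a per-device callback that platform code passes to dmabounce_register_dev(). As a rough sketch of a caller under the new signature (the platform name, pool sizes, and the 64MB window below are illustrative assumptions, not taken from this diff):

        /* Hypothetical platform glue; "foo" and the SZ_64M window are assumptions. */
        static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
        {
                /* Bounce any buffer that extends beyond the device's 64MB DMA window. */
                return (addr + size) > SZ_64M;
        }

        static int foo_add_device(struct device *dev)
        {
                /* Small/large pool sizes as before; the callback is the new fourth argument. */
                return dmabounce_register_dev(dev, 512, 4096, foo_needs_bounce);
        }
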
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 841df7d21c2f..595ecd290ebf 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
         struct dmabounce_pool large;
 
         rwlock_t lock;
+
+        int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
         if (!dev || !dev->archdata.dmabounce)
                 return NULL;
         if (dma_mapping_error(dev, dma_addr)) {
-                if (dev)
-                        dev_err(dev, "Trying to %s invalid mapping\n", where);
-                else
-                        pr_err("unknown device: Trying to %s invalid mapping\n", where);
+                dev_err(dev, "Trying to %s invalid mapping\n", where);
                 return NULL;
         }
         return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-                enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-        dma_addr_t dma_addr;
-        int needs_bounce = 0;
-
-        if (device_info)
-                DO_STATS ( device_info->map_op_count++ );
-
-        dma_addr = virt_to_dma(dev, ptr);
+        if (!dev || !dev->archdata.dmabounce)
+                return 0;
 
         if (dev->dma_mask) {
-                unsigned long mask = *dev->dma_mask;
-                unsigned long limit;
+                unsigned long limit, mask = *dev->dma_mask;
 
                 limit = (mask + 1) & ~mask;
                 if (limit && size > limit) {
                         dev_err(dev, "DMA mapping too big (requested %#x "
                                 "mask %#Lx)\n", size, *dev->dma_mask);
-                        return ~0;
+                        return -E2BIG;
                 }
 
-                /*
-                 * Figure out if we need to bounce from the DMA mask.
-                 */
-                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+                /* Figure out if we need to bounce from the DMA mask. */
+                if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+                        return 1;
         }
 
-        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-                struct safe_buffer *buf;
+        return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
 
-                buf = alloc_safe_buffer(device_info, ptr, size, dir);
-                if (buf == 0) {
-                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-                               __func__, ptr);
-                        return ~0;
-                }
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+                enum dma_data_direction dir)
+{
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+        struct safe_buffer *buf;
 
-                dev_dbg(dev,
-                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-                        buf->safe, buf->safe_dma_addr);
+        if (device_info)
+                DO_STATS ( device_info->map_op_count++ );
 
-                if ((dir == DMA_TO_DEVICE) ||
-                    (dir == DMA_BIDIRECTIONAL)) {
-                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-                                __func__, ptr, buf->safe, size);
-                        memcpy(buf->safe, ptr, size);
-                }
-                ptr = buf->safe;
+        buf = alloc_safe_buffer(device_info, ptr, size, dir);
+        if (buf == NULL) {
+                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+                       __func__, ptr);
+                return ~0;
+        }
 
-                dma_addr = buf->safe_dma_addr;
-        } else {
-                /*
-                 * We don't need to sync the DMA buffer since
-                 * it was allocated via the coherent allocators.
-                 */
-                __dma_single_cpu_to_dev(ptr, size, dir);
+        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+                buf->safe, buf->safe_dma_addr);
+
+        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+                dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+                        __func__, ptr, buf->safe, size);
+                memcpy(buf->safe, ptr, size);
         }
 
-        return dma_addr;
+        return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
                 size_t size, enum dma_data_direction dir)
 {
-        struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-        if (buf) {
-                BUG_ON(buf->size != size);
-                BUG_ON(buf->direction != dir);
+        BUG_ON(buf->size != size);
+        BUG_ON(buf->direction != dir);
 
-                dev_dbg(dev,
-                        "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-                        __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-                        buf->safe, buf->safe_dma_addr);
+        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+                buf->safe, buf->safe_dma_addr);
 
-                DO_STATS(dev->archdata.dmabounce->bounce_count++);
+        DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-                        void *ptr = buf->ptr;
+        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+                void *ptr = buf->ptr;
 
-                        dev_dbg(dev,
-                                "%s: copy back safe %p to unsafe %p size %d\n",
-                                __func__, buf->safe, ptr, size);
-                        memcpy(ptr, buf->safe, size);
+                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+                        __func__, buf->safe, ptr, size);
+                memcpy(ptr, buf->safe, size);
 
-                        /*
-                         * Since we may have written to a page cache page,
-                         * we need to ensure that the data will be coherent
-                         * with user mappings.
-                         */
-                        __cpuc_flush_dcache_area(ptr, size);
-                }
-                free_safe_buffer(dev->archdata.dmabounce, buf);
-        } else {
-                __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+                /*
+                 * Since we may have written to a page cache page,
+                 * we need to ensure that the data will be coherent
+                 * with user mappings.
+                 */
+                __cpuc_flush_dcache_area(ptr, size);
         }
+        free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-                enum dma_data_direction dir)
-{
-        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-                __func__, ptr, size, dir);
-
-        BUG_ON(!valid_dma_direction(dir));
-
-        return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer. (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction dir)
-{
-        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-                __func__, (void *) dma_addr, size, dir);
-
-        unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+        dma_addr_t dma_addr;
+        int ret;
+
         dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                 __func__, page, offset, size, dir);
 
-        BUG_ON(!valid_dma_direction(dir));
+        dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+        ret = needs_bounce(dev, dma_addr, size);
+        if (ret < 0)
+                return ~0;
+
+        if (ret == 0) {
+                __dma_page_cpu_to_dev(page, offset, size, dir);
+                return dma_addr;
+        }
 
         if (PageHighMem(page)) {
-                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-                        "is not supported\n");
+                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
                 return ~0;
         }
 
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
-        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-                __func__, (void *) dma_addr, size, dir);
+        struct safe_buffer *buf;
+
+        dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+                __func__, dma_addr, size, dir);
+
+        buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+        if (!buf) {
+                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+                        dma_addr & ~PAGE_MASK, size, dir);
+                return;
+        }
 
-        unmap_single(dev, dma_addr, size, dir);
+        unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
 
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-                unsigned long large_buffer_size)
+                unsigned long large_buffer_size,
+                int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
         struct dmabounce_device_info *device_info;
         int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
         device_info->dev = dev;
         INIT_LIST_HEAD(&device_info->safe_buffers);
         rwlock_init(&device_info->lock);
+        device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
         device_info->total_allocs = 0;