 arch/arm/common/dmabounce.c        |  33 +-
 arch/arm/include/asm/dma-mapping.h | 115 +-
 arch/arm/mm/dma-mapping.c          |  92 +-
 3 files changed, 115 insertions(+), 125 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index aecc6c3f908f..35c72bcf3d0b 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -435,6 +435,7 @@ int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
+	struct scatterlist *s;
 	int i;
 
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -442,14 +443,13 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
-		struct page *page = sg_page(sg);
-		unsigned int offset = sg->offset;
-		unsigned int length = sg->length;
+	for_each_sg(sg, s, nents, i) {
+		struct page *page = sg_page(s);
+		unsigned int offset = s->offset;
+		unsigned int length = s->length;
 		void *ptr = page_address(page) + offset;
 
-		sg->dma_address =
-			map_single(dev, ptr, length, dir);
+		s->dma_address = map_single(dev, ptr, length, dir);
 	}
 
 	return nents;
@@ -459,6 +459,7 @@ void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
+	struct scatterlist *s;
 	int i;
 
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -466,9 +467,9 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	for_each_sg(sg, s, nents, i) {
+		dma_addr_t dma_addr = s->dma_address;
+		unsigned int length = s->length;
 
 		unmap_single(dev, dma_addr, length, dir);
 	}
@@ -502,6 +503,7 @@ void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
+	struct scatterlist *s;
 	int i;
 
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -509,9 +511,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	for_each_sg(sg, s, nents, i) {
+		dma_addr_t dma_addr = s->dma_address;
+		unsigned int length = s->length;
 
 		sync_single(dev, dma_addr, length, dir);
 	}
@@ -521,6 +523,7 @@ void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir)
 {
+	struct scatterlist *s;
 	int i;
 
 	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -528,9 +531,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 	BUG_ON(dir == DMA_NONE);
 
-	for (i = 0; i < nents; i++, sg++) {
-		dma_addr_t dma_addr = sg->dma_address;
-		unsigned int length = sg->length;
+	for_each_sg(sg, s, nents, i) {
+		dma_addr_t dma_addr = s->dma_address;
+		unsigned int length = s->length;
 
 		sync_single(dev, dma_addr, length, dir);
 	}
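
The change above is more than cosmetic: once scatterlist chaining was introduced, a long scatterlist may be split across several chunks linked by chain entries, so consecutive entries are no longer guaranteed to be adjacent in memory and the open-coded "sg++" walk can step past the end of a chunk. for_each_sg() advances with sg_next(), which follows the chain links. A minimal illustrative sketch of the idiom (not part of this commit):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static void walk_sg(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *s;
		int i;

		/*
		 * Unsafe with chained lists:
		 *	for (i = 0; i < nents; i++, sgl++) ...
		 *
		 * Safe: for_each_sg() uses sg_next() to hop across
		 * chain entries to the next chunk of the list.
		 */
		for_each_sg(sgl, s, nents, i)
			pr_debug("seg %d: offset=%u len=%u\n",
				 i, s->offset, s->length);
	}
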
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7b95d2058395..eff954852c2b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -282,75 +282,6 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 }
 
 /**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above dma_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt;
-
-		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
-		virt = sg_virt(sg);
-
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-
-	return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-
-	/* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-
-/**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @handle: DMA address of buffer
@@ -405,50 +336,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }
 
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
+/*
+ * The scatter list versions of the above methods.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt = sg_virt(sg);
-		if (!arch_is_coherent())
-			dma_cache_maint(virt, sg->length, dir);
-	}
-}
-#else
+extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
 extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif
+
 
 #ifdef CONFIG_DMABOUNCE
 /*
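
With the inline variants removed, callers link against the single out-of-line implementation whether or not CONFIG_DMABOUNCE is set. For reference, a typical driver-side use of the interface looks roughly like the sketch below (hypothetical code, not from this commit; hw_queue_segment() is a made-up hook):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical hardware hook, for illustration only. */
	extern void hw_queue_segment(dma_addr_t addr, unsigned int len);

	static int my_setup_xfer(struct device *dev, struct scatterlist *sgl,
				 int nents)
	{
		struct scatterlist *s;
		int i, count;

		/* Pass ownership of the buffers to the device. */
		count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (count == 0)
			return -ENOMEM;

		/* Program the addresses the mapping produced. */
		for_each_sg(sgl, s, count, i)
			hw_queue_segment(sg_dma_address(s), sg_dma_len(s));

		/* ... once the transfer completes, give ownership back: */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);

		return 0;
	}

Note that dma_unmap_sg() takes the original nents, not the count returned by dma_map_sg().
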
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9f312248d5af..7bf3e6fdfb57 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -512,3 +512,95 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 	}
 }
 EXPORT_SYMBOL(dma_cache_maint);
+
+#ifndef CONFIG_DMABOUNCE
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = page_to_dma(dev, sg_page(s)) + s->offset;
+
+		if (!arch_is_coherent())
+			dma_cache_maint(sg_virt(s), s->length, dir);
+	}
+
+	return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (!arch_is_coherent())
+			dma_cache_maint(sg_virt(s), s->length, dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+/**
+ * dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (!arch_is_coherent())
+			dma_cache_maint(sg_virt(s), s->length, dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+#endif
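
The sync pair added at the end exists for drivers that keep one mapping alive across several transfers: ownership of the buffers has to be passed back and forth explicitly. A short sketch of that pattern (hypothetical, not from this commit):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static void my_process_rx(struct device *dev, struct scatterlist *sgl,
				  int nents)
	{
		/* Device wrote into the buffers; reclaim them for the CPU. */
		dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

		/* ... CPU may now read the received data safely ... */

		/* Hand the same mapping back for the next receive. */
		dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
	}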