path: root/lib
author     Becky Bruce <beckyb@kernel.crashing.org>  2009-04-08 10:09:15 -0400
committer  Ingo Molnar <mingo@elte.hu>               2009-04-08 10:18:35 -0400
commit     ceb5ac3264686e75e6951de6a18d4baa9bdecb92 (patch)
tree       7e4f70b16dbb548ec8a428a701430da3acd896ec /lib
parent     577c9c456f0e1371cbade38eaf91ae8e8a308555 (diff)
swiotlb: comment corrections
Impact: cleanup

swiotlb_map/unmap_single are now swiotlb_map/unmap_page; trivially change all
the comments to reference the new names.

Also, there were some comments that should have been referring to just plain
old map_single, not swiotlb_map_single; fix those as well.

Also change a use of the word "pointer", when what is referred to is actually
a dma/physical address.

Signed-off-by: Becky Bruce <beckyb@kernel.crashing.org>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Cc: jeremy@goop.org
Cc: ian.campbell@citrix.com
LKML-Reference: <1239199761-22886-2-git-send-email-galak@kernel.crashing.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--  lib/swiotlb.c  24
1 file changed, 11 insertions, 13 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..170cf56af6a9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -560,7 +560,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 				   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
-		 * Fall back on swiotlb_map_single().
 		 */
 		free_pages((unsigned long) ret, order);
 		ret = NULL;
@@ -568,9 +567,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (!ret) {
 		/*
 		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on
-		 * swiotlb_map_single(), which will grab memory from
-		 * the lowest available address range.
+		 * to GFP_DMA memory; fall back on map_single(), which
+		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
@@ -634,7 +632,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * physical address to use is returned.
  *
  * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 			    unsigned long offset, size_t size,
@@ -648,7 +646,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 
 	BUG_ON(dir == DMA_NONE);
 	/*
-	 * If the pointer passed in happens to be in the device's DMA window,
+	 * If the address happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
@@ -679,7 +677,7 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
 /*
  * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call. All
+ * match what was provided for in a previous swiotlb_map_page call. All
  * other usages are undefined.
  *
  * After this call, reads by the cpu to the buffer are guaranteed to see
@@ -703,7 +701,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
  *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
  * using the cpu, yet do not wish to teardown the dma mapping, you must
  * call this function before doing so. At the next point you give the dma
  * address back to the card, you must first perform a
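
Note: the comments in the hunks above describe the ownership hand-off behind
swiotlb_map_page()/swiotlb_unmap_page() and the sync_single helpers. Drivers
normally reach these routines through the generic DMA API wrappers
(dma_map_page(), dma_sync_single_for_cpu()/dma_sync_single_for_device(),
dma_unmap_page()), which dispatch to the swiotlb implementations on
swiotlb-backed configurations. A minimal sketch of that cycle follows; it is
not part of this patch, and dev, pg and len are hypothetical placeholders for
a real driver's state.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: dev, pg and len stand in for a real driver's state. */
static int swiotlb_example_rx(struct device *dev, struct page *pg, size_t len)
{
	dma_addr_t dma;

	/* Hand the buffer to the device; the device owns it from here on. */
	dma = dma_map_page(dev, pg, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the transfer and wait for the device to finish ... */

	/* Reclaim the buffer for the cpu before reading it. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... interrogate the received data ... */

	/* Give the buffer back to the card before reusing the mapping. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

	/* Tear the mapping down once the device is done with it. */
	dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
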
@@ -777,7 +775,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
  * interface. Here the scatter gather list elements are each tagged with the
  * appropriate dma address and length. They are obtained via
  * sg_dma_{address,length}(SG).
@@ -788,7 +786,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  * The routine returns the number of addr/length pairs actually
  * used, at most nents.
  *
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
  * same here.
  */
 int
@@ -836,7 +834,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
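
The scatter-gather comments in the last three hunks make the same ownership
point for the sg variants: after mapping, each scatterlist element carries its
dma address and length, retrievable via sg_dma_address()/sg_dma_len(). A
hedged sketch using the generic dma_map_sg()/dma_unmap_sg() wrappers, again
not part of this patch; dev, sgl and nents are hypothetical placeholders.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Sketch only: dev, sgl and nents stand in for a real driver's scatterlist. */
static int swiotlb_example_tx_sg(struct device *dev, struct scatterlist *sgl,
				 int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* May return fewer entries than nents if segments were coalesced. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* Each element is now tagged with its dma address and length. */
	for_each_sg(sgl, sg, mapped, i) {
		/* ... program sg_dma_address(sg) and sg_dma_len(sg) into the card ... */
	}

	/* ... run the transfer ... */

	/* Unmap with the original nents, not the mapped count. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}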