author		Dan Williams <dan.j.williams@intel.com>		2013-10-18 13:35:22 -0400
committer	Dan Williams <dan.j.williams@intel.com>		2013-11-13 19:25:06 -0500
commit		56ea27fd61f546117a35236113be72c8aaec382d (patch)
tree		14c7a110d4b7c8ccd8f0e70a5d5551e417d7e5bf
parent		d1cab34c039584ebe76b04d2f2109e0d87d344e1 (diff)
dmaengine: consolidate memcpy apis
Copying from page to page (dma_async_memcpy_pg_to_pg) is the superset;
make the other two APIs use it in preparation for providing a
common dma unmap implementation. The common implementation just wants
to assume all buffers are mapped with dma_map_page().
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
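
[Editor's note: to make the consolidation concrete — any linearly-mapped kernel buffer can be described as its backing page plus an intra-page offset, which is what lets the buf variants collapse into one-line wrappers around dma_async_memcpy_pg_to_pg(). A minimal sketch of that decomposition follows; the helper below is illustrative only and not part of this patch (the real wrappers appear in the diff):

	#include <linux/mm.h>		/* virt_to_page(), PAGE_MASK */
	#include <linux/dmaengine.h>

	/* Illustrative helper, not part of this patch: express a virtual
	 * address as the (page, offset) pair that the page-based API
	 * takes.  Only valid for directly-mapped (lowmem) kernel
	 * addresses, per the "must be mappable to a bus address" rule in
	 * the kernel-doc below. */
	static dma_cookie_t memcpy_virt(struct dma_chan *chan, void *dest,
					void *src, size_t len)
	{
		return dma_async_memcpy_pg_to_pg(chan,
				virt_to_page(dest),
				(unsigned long) dest & ~PAGE_MASK,
				virt_to_page(src),
				(unsigned long) src & ~PAGE_MASK, len);
	}
]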
-rw-r--r--	drivers/dma/dmaengine.c	137
1 file changed, 45 insertions(+), 92 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..bbc89df6bc56 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -902,20 +902,23 @@ void dma_async_device_unregister(struct dma_device *device)
 EXPORT_SYMBOL(dma_async_device_unregister);
 
 /**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
  * @len: length
  *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
  */
 dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			void *src, size_t len)
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+	size_t len)
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
@@ -923,16 +926,15 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK |
-		DMA_COMPL_SRC_UNMAP_SINGLE |
-		DMA_COMPL_DEST_UNMAP_SINGLE;
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	flags = DMA_CTRL_ACK;
 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
 	}
 
@@ -946,6 +948,29 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
 	return cookie;
 }
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+			void *src, size_t len)
+{
+	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+					 (unsigned long) dest & ~PAGE_MASK,
+					 virt_to_page(src),
+					 (unsigned long) src & ~PAGE_MASK, len);
+}
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
 /**
@@ -963,86 +988,14 @@ EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
  */
 dma_cookie_t
 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 			unsigned int offset, void *kdata, size_t len)
 {
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
-	}
-
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
+	return dma_async_memcpy_pg_to_pg(chan, page, offset,
+					 virt_to_page(kdata),
+					 (unsigned long) kdata & ~PAGE_MASK, len);
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-	size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-	if (!tx) {
-		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
-	}
-
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 				  struct dma_chan *chan)
 {
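
[Editor's note: for context, a hypothetical caller of the consolidated API might look like the sketch below. This is not from the patch; chan, dest_page, and src_page are assumed to be set up elsewhere (e.g. via dma_find_channel(DMA_MEMCPY) and alloc_page()), and DMA_SUCCESS is the spelling of the completion status in kernels of this era:

	#include <linux/mm.h>
	#include <linux/dmaengine.h>

	/* Offload one page-sized copy and spin-wait for completion. */
	static int copy_one_page(struct dma_chan *chan,
				 struct page *dest_page,
				 struct page *src_page)
	{
		dma_cookie_t cookie;

		cookie = dma_async_memcpy_pg_to_pg(chan, dest_page, 0,
						   src_page, 0, PAGE_SIZE);
		if (dma_submit_error(cookie))
			return -ENOMEM;

		dma_async_issue_pending(chan);	/* kick the hardware */
		if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
			return -EIO;
		return 0;
	}
]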