author	Vinod Koul <vinod.koul@intel.com>	2013-11-16 01:24:17 -0500
committer	Vinod Koul <vinod.koul@intel.com>	2013-11-16 01:32:36 -0500
commit	df12a3178d340319b1955be6b973a4eb84aff754 (patch)
tree	2b9c68f8a6c299d1e5a4026c60117b5c00d46008 /drivers/dma/dmaengine.c
parent	2f986ec6fa57a5dcf77f19f5f0d44b1f680a100f (diff)
parent	82a1402eaee5dab1f3ab2d5aa4c316451374c5af (diff)
Merge commit 'dmaengine-3.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine changes from Dan:

1/ Bartlomiej and Dan finalized a rework of the dma address unmap
   implementation.

2/ In the course of testing 1/ a collection of enhancements to dmatest
   fell out. Notably basic performance statistics, and fixed / enhanced
   test control through new module parameters 'run', 'wait', 'noverify',
   and 'verbose'. Thanks to Andriy and Linus for their review.

3/ Testing the raid related corner cases of 1/ triggered bugs in the
   recently added 16-source operation support in the ioatdma driver.

4/ Some minor fixes / cleanups to mv_xor and ioatdma.

Conflicts:
	drivers/dma/dmatest.c

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
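For orientation on 1/: the rework replaces the old DMA_COMPL_{SRC,DEST}_UNMAP_SINGLE flag bookkeeping with a refcounted struct dmaengine_unmap_data allocated from size-binned mempools. Below is a minimal sketch of the resulting client pattern, modeled on the dma_async_memcpy_pg_to_pg() conversion in this diff; the wrapper do_one_page_copy() and its parameters are illustrative only, not part of the patch.

/*
 * Illustrative sketch only: do_one_page_copy() is a hypothetical helper
 * mirroring the dma_async_memcpy_pg_to_pg() conversion below.
 */
static dma_cookie_t do_one_page_copy(struct dma_chan *chan,
				     struct page *dst, struct page *src,
				     size_t len)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* one TO_DEVICE source + one FROM_DEVICE destination => nr = 2 */
	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;	/* addr[0] is unmapped DMA_TO_DEVICE */
	unmap->from_cnt = 1;	/* addr[1] is unmapped DMA_FROM_DEVICE */
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_CTRL_ACK);
	if (!tx) {
		/* drops the only reference: both pages are unmapped here */
		dmaengine_unmap_put(unmap);
		return -ENOMEM;
	}

	/* the descriptor takes its own reference via dma_set_unmap() */
	dma_set_unmap(tx, unmap);
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);	/* drop the caller's reference */

	return cookie;
}

With this scheme the unmap happens exactly once, when the last reference is dropped on descriptor completion, instead of being driven by per-descriptor DMA_COMPL_* flags.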
Diffstat (limited to 'drivers/dma/dmaengine.c')
-rw-r--r--	drivers/dma/dmaengine.c	262
1 file changed, 178 insertions(+), 84 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 81d876528c70..ea806bdc12ef 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,6 +65,7 @@
 #include <linux/acpi.h>
 #include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
+#include <linux/mempool.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDR(dma_idr);
@@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			    void *src, size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
+struct dmaengine_unmap_pool {
+	struct kmem_cache *cache;
+	const char *name;
+	mempool_t *pool;
+	size_t size;
+};
 
-	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK |
-		DMA_COMPL_SRC_UNMAP_SINGLE |
-		DMA_COMPL_DEST_UNMAP_SINGLE;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
+static struct dmaengine_unmap_pool unmap_pool[] = {
+	__UNMAP_POOL(2),
+	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	__UNMAP_POOL(16),
+	__UNMAP_POOL(128),
+	__UNMAP_POOL(256),
+	#endif
+};
 
-	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
+static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+{
+	int order = get_count_order(nr);
+
+	switch (order) {
+	case 0 ... 1:
+		return &unmap_pool[0];
+	case 2 ... 4:
+		return &unmap_pool[1];
+	case 5 ... 7:
+		return &unmap_pool[2];
+	case 8:
+		return &unmap_pool[3];
+	default:
+		BUG();
+		return NULL;
 	}
+}
 
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
+static void dmaengine_unmap(struct kref *kref)
+{
+	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
+	struct device *dev = unmap->dev;
+	int cnt, i;
+
+	cnt = unmap->to_cnt;
+	for (i = 0; i < cnt; i++)
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_TO_DEVICE);
+	cnt += unmap->from_cnt;
+	for (; i < cnt; i++)
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_FROM_DEVICE);
+	cnt += unmap->bidi_cnt;
+	for (; i < cnt; i++) {
+		if (unmap->addr[i] == 0)
+			continue;
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_BIDIRECTIONAL);
+	}
+	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
+}
 
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+	if (unmap)
+		kref_put(&unmap->kref, dmaengine_unmap);
+}
+EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
 
-	return cookie;
+static void dmaengine_destroy_unmap_pool(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+		struct dmaengine_unmap_pool *p = &unmap_pool[i];
+
+		if (p->pool)
+			mempool_destroy(p->pool);
+		p->pool = NULL;
+		if (p->cache)
+			kmem_cache_destroy(p->cache);
+		p->cache = NULL;
+	}
 }
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-			   unsigned int offset, void *kdata, size_t len)
+static int __init dmaengine_init_unmap_pool(void)
 {
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
+	int i;
 
-	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+		struct dmaengine_unmap_pool *p = &unmap_pool[i];
+		size_t size;
 
-	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
+		size = sizeof(struct dmaengine_unmap_data) +
+		       sizeof(dma_addr_t) * p->size;
+
+		p->cache = kmem_cache_create(p->name, size, 0,
+					     SLAB_HWCACHE_ALIGN, NULL);
+		if (!p->cache)
+			break;
+		p->pool = mempool_create_slab_pool(1, p->cache);
+		if (!p->pool)
+			break;
 	}
 
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
+	if (i == ARRAY_SIZE(unmap_pool))
+		return 0;
 
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
+	dmaengine_destroy_unmap_pool();
+	return -ENOMEM;
+}
 
-	return cookie;
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+	struct dmaengine_unmap_data *unmap;
+
+	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
+	if (!unmap)
+		return NULL;
+
+	memset(unmap, 0, sizeof(*unmap));
+	kref_init(&unmap->kref);
+	unmap->dev = dev;
+
+	return unmap;
 }
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
 /**
  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
@@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	if (!unmap)
+		return -ENOMEM;
+
+	unmap->to_cnt = 1;
+	unmap->from_cnt = 1;
+	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
+				      DMA_TO_DEVICE);
+	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				      DMA_FROM_DEVICE);
+	unmap->len = len;
 	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
+					 len, flags);
 
 	if (!tx) {
-		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+		dmaengine_unmap_put(unmap);
 		return -ENOMEM;
 	}
 
-	tx->callback = NULL;
+	dma_set_unmap(tx, unmap);
 	cookie = tx->tx_submit(tx);
+	dmaengine_unmap_put(unmap);
 
 	preempt_disable();
 	__this_cpu_add(chan->local->bytes_transferred, len);
@@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 }
 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
 
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+			    void *src, size_t len)
+{
+	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+					 (unsigned long) dest & ~PAGE_MASK,
+					 virt_to_page(src),
+					 (unsigned long) src & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+			   unsigned int offset, void *kdata, size_t len)
+{
+	return dma_async_memcpy_pg_to_pg(chan, page, offset,
+					 virt_to_page(kdata),
+					 (unsigned long) kdata & ~PAGE_MASK, len);
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 				  struct dma_chan *chan)
 {
@@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies)
 
 static int __init dma_bus_init(void)
 {
+	int err = dmaengine_init_unmap_pool();
+
+	if (err)
+		return err;
 	return class_register(&dma_devclass);
 }
 arch_initcall(dma_bus_init);
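A note on the pool binning above: __get_unmap_pool() keys off get_count_order(nr), i.e. the order of nr rounded up to the next power of two, so a request is served by the smallest pool whose entry count covers it. A few worked examples, assuming the CONFIG_ASYNC_TX_DMA pools are compiled in:

/*
 * Worked examples of __get_unmap_pool() selection (illustrative):
 *   nr = 2   -> get_count_order(2)   = 1 -> unmap_pool[0], "dmaengine-unmap-2"
 *   nr = 9   -> get_count_order(9)   = 4 -> unmap_pool[1], "dmaengine-unmap-16"
 *   nr = 100 -> get_count_order(100) = 7 -> unmap_pool[2], "dmaengine-unmap-128"
 *   nr = 200 -> get_count_order(200) = 8 -> unmap_pool[3], "dmaengine-unmap-256"
 * Anything above 256 addresses hits the BUG() in the default case; without
 * CONFIG_ASYNC_TX_DMA only the 2-entry pool exists.
 */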