path: root/drivers/dma
author     Dan Williams <dan.j.williams@intel.com>  2013-10-18 13:35:24 -0400
committer  Dan Williams <dan.j.williams@intel.com>  2013-11-13 19:25:06 -0500
commit     45c463ae924c62af4aa64ded1ca831f334a1db65 (patch)
tree       1087e0630ec29998f15a7fc2a7dbec2a3ff35e80 /drivers/dma
parent     d38a8c622a1b382336c3e152c6caf4e11d1f1b2a (diff)
dmaengine: reference counted unmap data
Hang a common 'unmap' object off of dma descriptors for the purpose of
providing a unified unmapping interface. The lifetime of a mapping may
span multiple descriptors, so these unmap objects are reference counted
by each related descriptor.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
[bzolnier: fix IS_ENABLED() check]
[bzolnier: fix release ordering in dmaengine_destroy_unmap_pool()]
[bzolnier: fix check for success in dmaengine_init_unmap_pool()]
[bzolnier: use mempool_free() instead of kmem_cache_free()]
[bzolnier: add missing unmap->len initializations]
[bzolnier: add __init tag to dmaengine_init_unmap_pool()]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
[djbw: move DMAENGINE=n support to this patch for async_tx]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
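For orientation, the intended call pattern, taken from the
dma_async_memcpy_pg_to_pg() conversion in the diff below, looks roughly
like this. Two caveats on the sketch: dmaengine_get_unmap_data() is
static to dmaengine.c in this patch, so only code in that file can reach
it yet, and dma_set_unmap() is assumed (it is not part of this diff) to
take an extra kref on the descriptor's behalf:

	struct dmaengine_unmap_data *unmap;

	/* two slots: addr[0] mapped to-device, addr[1] mapped from-device */
	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);  /* kref = 1 */
	if (!unmap)
		return -ENOMEM;
	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
				      DMA_FROM_DEVICE);
	unmap->len = len;

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, flags);
	if (!tx) {
		dmaengine_unmap_put(unmap);  /* kref 1 -> 0: unmaps both pages */
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);    /* descriptor takes its own reference */
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);  /* drop the submitter's reference; the
				      * descriptor's reference keeps the
				      * mapping alive until completion */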
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmaengine.c | 156
1 file changed, 147 insertions(+), 9 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bbc89df6bc56..e721a1caff7f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,6 +65,7 @@
 #include <linux/acpi.h>
 #include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
+#include <linux/mempool.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDR(dma_idr);
@@ -901,6 +902,129 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
+struct dmaengine_unmap_pool {
+	struct kmem_cache *cache;
+	const char *name;
+	mempool_t *pool;
+	size_t size;
+};
+
+#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
+static struct dmaengine_unmap_pool unmap_pool[] = {
+	__UNMAP_POOL(2),
+	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	__UNMAP_POOL(16),
+	__UNMAP_POOL(128),
+	__UNMAP_POOL(256),
+	#endif
+};
+
+static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+{
+	int order = get_count_order(nr);
+
+	switch (order) {
+	case 0 ... 1:
+		return &unmap_pool[0];
+	case 2 ... 4:
+		return &unmap_pool[1];
+	case 5 ... 7:
+		return &unmap_pool[2];
+	case 8:
+		return &unmap_pool[3];
+	default:
+		BUG();
+		return NULL;
+	}
+}
+
+static void dmaengine_unmap(struct kref *kref)
+{
+	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
+	struct device *dev = unmap->dev;
+	int cnt, i;
+
+	cnt = unmap->to_cnt;
+	for (i = 0; i < cnt; i++)
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_TO_DEVICE);
+	cnt += unmap->from_cnt;
+	for (; i < cnt; i++)
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_FROM_DEVICE);
+	cnt += unmap->bidi_cnt;
+	for (; i < cnt; i++)
+		dma_unmap_page(dev, unmap->addr[i], unmap->len,
+			       DMA_BIDIRECTIONAL);
+	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
+}
+
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+	if (unmap)
+		kref_put(&unmap->kref, dmaengine_unmap);
+}
+EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
+
+static void dmaengine_destroy_unmap_pool(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+		struct dmaengine_unmap_pool *p = &unmap_pool[i];
+
+		if (p->pool)
+			mempool_destroy(p->pool);
+		p->pool = NULL;
+		if (p->cache)
+			kmem_cache_destroy(p->cache);
+		p->cache = NULL;
+	}
+}
+
+static int __init dmaengine_init_unmap_pool(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
+		struct dmaengine_unmap_pool *p = &unmap_pool[i];
+		size_t size;
+
+		size = sizeof(struct dmaengine_unmap_data) +
+		       sizeof(dma_addr_t) * p->size;
+
+		p->cache = kmem_cache_create(p->name, size, 0,
+					     SLAB_HWCACHE_ALIGN, NULL);
+		if (!p->cache)
+			break;
+		p->pool = mempool_create_slab_pool(1, p->cache);
+		if (!p->pool)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(unmap_pool))
+		return 0;
+
+	dmaengine_destroy_unmap_pool();
+	return -ENOMEM;
+}
+
+static struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+	struct dmaengine_unmap_data *unmap;
+
+	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
+	if (!unmap)
+		return NULL;
+
+	memset(unmap, 0, sizeof(*unmap));
+	kref_init(&unmap->kref);
+	unmap->dev = dev;
+
+	return unmap;
+}
+
 /**
  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
@@ -922,24 +1046,34 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	if (!unmap)
+		return -ENOMEM;
+
+	unmap->to_cnt = 1;
+	unmap->from_cnt = 1;
+	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
+				      DMA_TO_DEVICE);
+	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				      DMA_FROM_DEVICE);
+	unmap->len = len;
+	flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_SRC_UNMAP |
+		DMA_COMPL_SKIP_DEST_UNMAP;
+	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
+					 len, flags);
 
 	if (!tx) {
-		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+		dmaengine_unmap_put(unmap);
 		return -ENOMEM;
 	}
 
-	tx->callback = NULL;
+	dma_set_unmap(tx, unmap);
 	cookie = tx->tx_submit(tx);
+	dmaengine_unmap_put(unmap);
 
 	preempt_disable();
 	__this_cpu_add(chan->local->bytes_transferred, len);
@@ -1069,6 +1203,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
 
 static int __init dma_bus_init(void)
 {
+	int err = dmaengine_init_unmap_pool();
+
+	if (err)
+		return err;
 	return class_register(&dma_devclass);
 }
 arch_initcall(dma_bus_init);
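A note on pool sizing: __get_unmap_pool() buckets a request for nr
addresses by get_count_order(nr), so with CONFIG_ASYNC_TX_DMA enabled,
1-2 addresses come from the "dmaengine-unmap-2" pool, 3-16 from
"dmaengine-unmap-16", 17-128 from "dmaengine-unmap-128", and 129-256
from "dmaengine-unmap-256"; anything larger hits BUG(). A quick
userspace sanity check of those boundaries, using our own stand-in for
the kernel's get_count_order() (not kernel code):

	#include <stdio.h>

	/* Stand-in for the kernel's get_count_order(): the order after
	 * rounding n up to a power of two, i.e. ceil(log2(n)) for n >= 1. */
	static int get_count_order(unsigned int n)
	{
		int order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		/* Values straddling the switch boundaries in __get_unmap_pool():
		 * order 0..1 -> pool[0], 2..4 -> pool[1], 5..7 -> pool[2],
		 * 8 -> pool[3]. */
		unsigned int nr[] = { 1, 2, 3, 16, 17, 128, 129, 256 };
		unsigned int i;

		for (i = 0; i < sizeof(nr) / sizeof(nr[0]); i++)
			printf("nr=%3u -> order=%d\n", nr[i], get_count_order(nr[i]));
		return 0;
	}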