author	Lior Amsalem <alior@marvell.com>	2014-08-27 09:52:55 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2014-09-23 10:47:01 -0400
commit	22843545b20007ae33bc3774043303e0b44e3d65 (patch)
tree	9137a6256f925f8fcce6cb7bec5aafd60208025a /drivers/dma
parent	37380b980e2db2e0dfdb920140c75f3cf2e98a27 (diff)
dma: mv_xor: Add support for DMA_INTERRUPT
The driver is capable of supporting DMA_INTERRUPT by issuing a dummy
128-byte transfer. This helps remove a poll in the async_tx stack,
replacing it with a completion interrupt.

Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
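For illustration only (not part of the commit): a minimal sketch of how a generic dmaengine client might exercise this capability once a channel advertises DMA_INTERRUPT. The helper name wait_via_interrupt is hypothetical; the dmaengine calls themselves (device_prep_dma_interrupt, dmaengine_submit, dma_async_issue_pending) are the standard API this patch plugs into.

#include <linux/dmaengine.h>
#include <linux/completion.h>

static void xfer_done(void *arg)
{
	complete(arg);	/* runs from the channel's completion tasklet */
}

/* Hypothetical helper: queue an interrupt descriptor and sleep on it. */
static int wait_via_interrupt(struct dma_chan *chan)
{
	struct dma_async_tx_descriptor *tx;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = xfer_done;
	tx->callback_param = &done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&done);	/* wakes when the dummy transfer completes */
	return 0;
}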
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/mv_xor.c	34
-rw-r--r--	drivers/dma/mv_xor.h	11
2 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 744a0077e5aa..769d35c3a82b 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -538,6 +538,24 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+	dma_addr_t src, dest;
+	size_t len;
+
+	src = mv_chan->dummy_src_addr;
+	dest = mv_chan->dummy_dst_addr;
+	len = MV_XOR_MIN_BYTE_COUNT;
+
+	/*
+	 * We implement the DMA_INTERRUPT operation as a minimum sized
+	 * XOR operation with a single dummy source address.
+	 */
+	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
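The async_tx poll this replaces sits behind async_trigger_callback(): with a DMA_INTERRUPT-capable channel, that helper can hand the completion callback to the hardware instead of spinning on descriptor status. A hedged sketch of that consumer side (assuming a prior async_xor()/async_memcpy() descriptor in tx; raid_chain_done and wait_for_chain are hypothetical names):

#include <linux/async_tx.h>
#include <linux/completion.h>

static void raid_chain_done(void *ref)
{
	complete(ref);	/* fires once everything queued before it has finished */
}

static void wait_for_chain(struct dma_async_tx_descriptor *tx)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct async_submit_ctl submit;

	/* Chain a pure-callback operation behind the existing descriptor. */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, raid_chain_done, &done, NULL);
	async_trigger_callback(&submit);
	wait_for_completion(&done);
}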
@@ -881,6 +899,10 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 
 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+	dma_unmap_single(dev, mv_chan->dummy_src_addr,
+			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
+			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
 
 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
 				 device_node) {
@@ -910,6 +932,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
 	dma_dev = &mv_chan->dmadev;
 
+	/*
+	 * These source and destination dummy buffers are used to implement
+	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
+	 * Hence, we only need to map the buffers at initialization-time.
+	 */
+	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
+		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
+		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+
 	/* allocate coherent memory for hardware descriptors
 	 * note: writecombine gives slightly better performance, but
 	 * requires that we explicitly flush the writes
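One hedged aside, not part of the commit: the hunk above assumes dma_map_single() succeeds. A defensive variant would test each mapping with dma_mapping_error() before use; the error return below assumes the enclosing function reports failures via ERR_PTR(), as its other error paths do.

	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
		return ERR_PTR(-ENOMEM);	/* mapping failed, bail out */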
@@ -934,6 +966,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
+	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index de400365bc49..78edc7e44569 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -28,6 +28,9 @@
 #define MV_XOR_THRESHOLD		1
 #define MV_XOR_MAX_CHANNELS		2
 
+#define MV_XOR_MIN_BYTE_COUNT		SZ_128
+#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)
+
 /* Values for the XOR_CONFIG register */
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
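The new constants use the SZ_* helpers from <linux/sizes.h>; for reference, those expand as below, so MV_XOR_MIN_BYTE_COUNT stays 128 bytes and MV_XOR_MAX_BYTE_COUNT stays 16 MiB - 1, exactly matching the open-coded values this patch removes at the bottom of the header.

#define SZ_128	0x00000080	/* from include/linux/sizes.h */
#define SZ_16M	0x01000000	/* (SZ_16M - 1) == (16 * 1024 * 1024) - 1 */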
@@ -116,6 +119,9 @@ struct mv_xor_chan {
 	struct list_head	all_slots;
 	int			slots_allocated;
 	struct tasklet_struct	irq_tasklet;
+	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
+	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
+	dma_addr_t		dummy_src_addr, dummy_dst_addr;
 };
 
 /**
@@ -184,9 +190,4 @@ struct mv_xor_desc {
 #define mv_hw_desc_slot_idx(hw_desc, idx)	\
 	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
 
-#define MV_XOR_MIN_BYTE_COUNT (128)
-#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
-#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
-
-
 #endif