Diffstat (limited to 'drivers/dma/ep93xx_dma.c')
 drivers/dma/ep93xx_dma.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 59e7a965772b..e6f133b78dc2 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -28,6 +28,8 @@
 
 #include <mach/dma.h>
 
+#include "dmaengine.h"
+
 /* M2P registers */
 #define M2P_CONTROL			0x0000
 #define M2P_CONTROL_STALLINT		BIT(0)
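Note: the "dmaengine.h" added here is the private helper header under drivers/dma/, not the public <linux/dmaengine.h>. It supplies the dma_cookie_* helpers this patch switches to, which operate on generic per-channel state along these lines (a sketch, with field comments paraphrased):

/* Sketch of the generic state the helpers manage; completed_cookie in
 * struct dma_chan (include/linux/dmaengine.h) takes over the role of
 * the driver-private ep93xx_dma_chan::last_completed removed below. */
struct dma_chan {
	/* ... */
	dma_cookie_t	cookie;			/* last cookie assigned */
	dma_cookie_t	completed_cookie;	/* last cookie completed */
	/* ... */
};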
@@ -122,7 +124,6 @@ struct ep93xx_dma_desc {
  * @lock: lock protecting the fields following
  * @flags: flags for the channel
  * @buffer: which buffer to use next (0/1)
- * @last_completed: last completed cookie value
  * @active: flattened chain of descriptors currently being processed
  * @queue: pending descriptors which are handled next
  * @free_list: list of free descriptors which can be used
@@ -157,7 +158,6 @@ struct ep93xx_dma_chan {
 #define EP93XX_DMA_IS_CYCLIC		0
 
 	int			buffer;
-	dma_cookie_t		last_completed;
 	struct list_head	active;
 	struct list_head	queue;
 	struct list_head	free_list;
@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
 	desc = ep93xx_dma_get_active(edmac);
 	if (desc) {
 		if (desc->complete) {
-			edmac->last_completed = desc->txd.cookie;
+			dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
 		callback = desc->txd.callback;
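The open-coded last_completed assignment becomes dma_cookie_complete(). A paraphrased sketch of what that helper does (the authoritative inline lives in drivers/dma/dmaengine.h):

/* Paraphrased sketch: record @tx as the channel's most recently
 * completed transaction and invalidate the descriptor's own cookie. */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}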
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&edmac->lock, flags);
-
-	cookie = edmac->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
+	cookie = dma_cookie_assign(tx);
 
 	desc = container_of(tx, struct ep93xx_dma_desc, txd);
 
-	edmac->chan.cookie = cookie;
-	desc->txd.cookie = cookie;
-
 	/*
 	 * If nothing is currently processed, we push this descriptor
 	 * directly to the hardware. Otherwise we put the descriptor
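dma_cookie_assign() folds the manual increment-and-wrap (if (++cookie < 0) cookie = 1;) into one call. A paraphrased sketch:

/* Paraphrased sketch: advance the channel's cookie counter, wrapping
 * past the reserved non-positive values, and stamp it on @tx. The
 * caller is expected to hold the channel lock, as this driver does. */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie = chan->cookie + 1;

	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}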
@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
 		goto fail_clk_disable;
 
 	spin_lock_irq(&edmac->lock);
-	edmac->last_completed = 1;
-	edmac->chan.cookie = 1;
+	dma_cookie_init(&edmac->chan);
 	ret = edmac->edma->hw_setup(edmac);
 	spin_unlock_irq(&edmac->lock);
 
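Likewise, the two "= 1" initializations collapse into dma_cookie_init(), which resets both generic counters. A paraphrased sketch:

/* Paraphrased sketch: start both counters at the smallest valid
 * cookie (DMA_MIN_COOKIE), matching the old open-coded init to 1. */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}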
@@ -983,13 +975,14 @@ fail:
  * @sg_len: number of entries in @sgl
  * @dir: direction of the DMA transfer
  * @flags: flags for the descriptor
+ * @context: operation context (ignored)
  *
  * Returns a valid DMA descriptor or %NULL in case of failure.
  */
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			 unsigned int sg_len, enum dma_transfer_direction dir,
-			 unsigned long flags)
+			 unsigned long flags, void *context)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *first;
@@ -1056,6 +1049,7 @@ fail:
  * @buf_len: length of the buffer (in bytes)
  * @period_len: length of a single period
  * @dir: direction of the operation
+ * @context: operation context (ignored)
  *
  * Prepares a descriptor for cyclic DMA operation. This means that once the
  * descriptor is submitted, we will be submitting in a @period_len sized
@@ -1068,7 +1062,7 @@ fail:
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 			   size_t buf_len, size_t period_len,
-			   enum dma_transfer_direction dir)
+			   enum dma_transfer_direction dir, void *context)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *first;
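Both prep callbacks gain a void *context argument to match the updated dmaengine callback signatures; this driver has no use for it and ignores it. Clients do not pass the argument directly; they go through wrappers in <linux/dmaengine.h> along these lines (a sketch, assuming the wrapper shape of this kernel generation):

static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	/* The wrapper supplies a NULL context on behalf of ordinary
	 * slave clients; only callers needing to hand extra,
	 * client-specific data to the driver pass a real pointer. */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}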
@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
 					    struct dma_tx_state *state)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
-	dma_cookie_t last_used, last_completed;
 	enum dma_status ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&edmac->lock, flags);
-	last_used = chan->cookie;
-	last_completed = edmac->last_completed;
+	ret = dma_cookie_status(chan, cookie, state);
 	spin_unlock_irqrestore(&edmac->lock, flags);
 
-	ret = dma_async_is_complete(cookie, last_completed, last_used);
-	dma_set_tx_state(state, last_completed, last_used, 0);
-
 	return ret;
 }
 
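Finally, tx_status drops the driver-local snapshot in favour of dma_cookie_status(), which reads both counters from struct dma_chan and fills in @state itself. A paraphrased sketch:

/* Paraphrased sketch: snapshot the last issued and last completed
 * cookies, report them through @state, and classify @cookie via the
 * existing dma_async_is_complete() helper. */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used = chan->cookie;
	dma_cookie_t complete = chan->completed_cookie;

	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}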