path: root/drivers/dma/edma.c
author		Joel Fernandes <joelf@ti.com>	2013-09-03 11:02:46 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2013-09-04 09:08:46 -0400
commit		534070622d2c7fbc5cc929aa93541ccd0ae52ab1 (patch)
tree		30bd34589b97c189ae86a5a4b0f12f11b1e5379d /drivers/dma/edma.c
parent		6fbe24da828ff344372c38441156caefc4a51b3e (diff)
dma: edma: Write out and handle MAX_NR_SG at a given time
Process SG elements in batches of MAX_NR_SG if the scatterlist is longer than MAX_NR_SG. Due to this, at any given time only that many slots will be used in the given channel, no matter how long the scatterlist is. We keep track of how much has been written in order to process the next batch of elements in the scatterlist and to detect completion.

For such intermediate transfer completions (one batch of MAX_NR_SG), make use of the pause and resume functions instead of start and stop while such an intermediate transfer is in progress or has completed, as we do not want to clear any pending events.

Signed-off-by: Joel Fernandes <joelf@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
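As a rough illustration of the bookkeeping described above, here is a minimal stand-alone sketch (hypothetical names and demo values only, not the driver code): it shows how a 'processed' counter advances in steps of at most MAX_NR_SG until every pset has been submitted, which is the arithmetic edma_execute() performs per batch.

/*
 * Stand-alone sketch of the batching arithmetic; process_batch() and the
 * demo values are illustrative only.
 */
#include <stdio.h>

#define MAX_NR_SG 10	/* demo value; the driver defines its own limit */

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

/* Stand-in for one edma_execute() pass: submit up to MAX_NR_SG psets. */
static int process_batch(int pset_nr, int processed)
{
	int left = pset_nr - processed;		/* psets not yet submitted */
	int nslots = min_int(MAX_NR_SG, left);	/* slots used this pass */

	printf("submit psets %d..%d (%d slots)\n",
	       processed, processed + nslots - 1, nslots);

	return processed + nslots;
}

int main(void)
{
	int pset_nr = 23;	/* e.g. a 23-element scatterlist */
	int processed = 0;

	/* Each intermediate completion triggers the next batch. */
	while (processed < pset_nr)
		processed = process_batch(pset_nr, processed);

	printf("transfer complete after %d psets\n", processed);
	return 0;
}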
Diffstat (limited to 'drivers/dma/edma.c')
-rw-r--r--	drivers/dma/edma.c	77
1 file changed, 51 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index f9075129d27c..4c1c258a5b54 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -56,6 +56,7 @@ struct edma_desc {
 	struct list_head node;
 	int absync;
 	int pset_nr;
+	int processed;
 	struct edmacc_param pset[0];
 };
 
@@ -104,22 +105,34 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
 /* Dispatch a queued descriptor to the controller (caller holds lock) */
 static void edma_execute(struct edma_chan *echan)
 {
-	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
+	struct virt_dma_desc *vdesc;
 	struct edma_desc *edesc;
-	int i;
+	struct device *dev = echan->vchan.chan.device->dev;
+	int i, j, left, nslots;
 
-	if (!vdesc) {
-		echan->edesc = NULL;
-		return;
+	/* If either we processed all psets or we're still not started */
+	if (!echan->edesc ||
+	    echan->edesc->pset_nr == echan->edesc->processed) {
+		/* Get next vdesc */
+		vdesc = vchan_next_desc(&echan->vchan);
+		if (!vdesc) {
+			echan->edesc = NULL;
+			return;
+		}
+		list_del(&vdesc->node);
+		echan->edesc = to_edma_desc(&vdesc->tx);
 	}
 
-	list_del(&vdesc->node);
+	edesc = echan->edesc;
 
-	echan->edesc = edesc = to_edma_desc(&vdesc->tx);
+	/* Find out how many left */
+	left = edesc->pset_nr - edesc->processed;
+	nslots = min(MAX_NR_SG, left);
 
 	/* Write descriptor PaRAM set(s) */
-	for (i = 0; i < edesc->pset_nr; i++) {
-		edma_write_slot(echan->slot[i], &edesc->pset[i]);
+	for (i = 0; i < nslots; i++) {
+		j = i + edesc->processed;
+		edma_write_slot(echan->slot[i], &edesc->pset[j]);
 		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
@@ -132,24 +145,31 @@ static void edma_execute(struct edma_chan *echan)
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
-			i, echan->ch_num, echan->slot[i],
-			edesc->pset[i].opt,
-			edesc->pset[i].src,
-			edesc->pset[i].dst,
-			edesc->pset[i].a_b_cnt,
-			edesc->pset[i].ccnt,
-			edesc->pset[i].src_dst_bidx,
-			edesc->pset[i].src_dst_cidx,
-			edesc->pset[i].link_bcntrld);
+			j, echan->ch_num, echan->slot[i],
+			edesc->pset[j].opt,
+			edesc->pset[j].src,
+			edesc->pset[j].dst,
+			edesc->pset[j].a_b_cnt,
+			edesc->pset[j].ccnt,
+			edesc->pset[j].src_dst_bidx,
+			edesc->pset[j].src_dst_cidx,
+			edesc->pset[j].link_bcntrld);
 		/* Link to the previous slot if not the last set */
-		if (i != (edesc->pset_nr - 1))
+		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
 		/* Final pset links to the dummy pset */
		else
			edma_link(echan->slot[i], echan->ecc->dummy_slot);
	}
 
-	edma_start(echan->ch_num);
+	edesc->processed += nslots;
+
+	edma_resume(echan->ch_num);
+
+	if (edesc->processed <= MAX_NR_SG) {
+		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
+		edma_start(echan->ch_num);
+	}
 }
 
 static int edma_terminate_all(struct edma_chan *echan)
@@ -368,19 +388,24 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
 	struct edma_desc *edesc;
 	unsigned long flags;
 
-	/* Stop the channel */
-	edma_stop(echan->ch_num);
+	/* Pause the channel */
+	edma_pause(echan->ch_num);
 
 	switch (ch_status) {
	case DMA_COMPLETE:
-		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);
-
 		spin_lock_irqsave(&echan->vchan.lock, flags);
 
 		edesc = echan->edesc;
 		if (edesc) {
+			if (edesc->processed == edesc->pset_nr) {
+				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
+				edma_stop(echan->ch_num);
+				vchan_cookie_complete(&edesc->vdesc);
+			} else {
+				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
+			}
+
 			edma_execute(echan);
-			vchan_cookie_complete(&edesc->vdesc);
 		}
 
 		spin_unlock_irqrestore(&echan->vchan.lock, flags);