author	Ira Snyder <iws@ovro.caltech.edu>	2010-01-06 08:34:06 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2010-02-02 16:51:42 -0500
commit	9c3a50b7d7ec45da34e73cac66cde12dd6092dd8
tree	a16b2dd972ba8ebdd9e6796ad8f0027513316f49	/drivers/dma
parent	a1c03319018061304be28d131073ac13a5cb86fb
fsldma: major cleanups and fixes
Fix locking. Use two queues in the driver, one for pending transactions, and
one for transactions which are actually running on the hardware. Call
dma_run_dependencies() on descriptor cleanup so that the async_tx API works
correctly.

There are a number of places throughout the code where lists of descriptors
are freed in a loop. Create functions to handle this, and use them instead
of open-coding the loop each time.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
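The two-queue design described above is easier to see in isolation. Below is a
minimal, self-contained userspace sketch, with simplified types and names that
are not the driver's own API: descriptors are submitted onto a pending list,
spliced onto a running list when the transfer is (notionally) started, and
freed by a single list-freeing helper. In the driver itself the same movement
is done with list_splice_tail_init() on struct list_head under chan->desc_lock,
as the hunks below show.

/* two-queue sketch: pending -> running -> freed (simplified, userspace) */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	int cookie;
	struct desc *next;
};

struct chan {
	struct desc *pending;	/* submitted, not yet on hardware */
	struct desc *running;	/* currently on hardware */
	int next_cookie;
};

/* append a newly submitted descriptor to the tail of the pending queue */
static void submit(struct chan *c, struct desc *d)
{
	struct desc **p = &c->pending;

	d->cookie = ++c->next_cookie;
	d->next = NULL;
	while (*p)
		p = &(*p)->next;
	*p = d;
}

/* move every pending descriptor onto the running queue ("start hardware") */
static void issue_pending(struct chan *c)
{
	struct desc **p = &c->running;

	if (!c->pending)
		return;		/* nothing queued, nothing to do */

	while (*p)
		p = &(*p)->next;
	*p = c->pending;
	c->pending = NULL;
}

/* free everything on one queue; mirrors the "free a whole list" helper */
static void free_desc_list(struct desc **list)
{
	while (*list) {
		struct desc *d = *list;

		*list = d->next;
		printf("completing cookie %d\n", d->cookie);
		free(d);
	}
}

int main(void)
{
	struct chan c = { 0 };
	int i;

	for (i = 0; i < 3; i++)
		submit(&c, calloc(1, sizeof(struct desc)));

	issue_pending(&c);		/* pending -> running */
	free_desc_list(&c.running);	/* cleanup after the "interrupt" */
	free_desc_list(&c.pending);	/* nothing left here */
	return 0;
}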
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/fsldma.c	386
-rw-r--r--	drivers/dma/fsldma.h	3
2 files changed, 207 insertions, 182 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7b5f88cb495b..19011c20390b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -61,7 +61,6 @@ static void dma_init(struct fsldma_chan *chan)
 			| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
-
 }
 
 static void set_sr(struct fsldma_chan *chan, u32 val)
@@ -120,11 +119,6 @@ static dma_addr_t get_cdar(struct fsldma_chan *chan)
 	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr)
-{
-	DMA_OUT(chan, &chan->regs->ndar, addr, 64);
-}
-
 static dma_addr_t get_ndar(struct fsldma_chan *chan)
 {
 	return DMA_IN(chan, &chan->regs->ndar, 64);
@@ -178,11 +172,12 @@ static void dma_halt(struct fsldma_chan *chan)
 
 	for (i = 0; i < 100; i++) {
 		if (dma_is_idle(chan))
-			break;
+			return;
+
 		udelay(10);
 	}
 
-	if (i >= 100 && !dma_is_idle(chan))
+	if (!dma_is_idle(chan))
 		dev_err(chan->dev, "DMA halt timeout!\n");
 }
 
@@ -199,27 +194,6 @@ static void set_ld_eol(struct fsldma_chan *chan,
 			| snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsldma_chan *chan,
-			struct fsl_desc_sw *new_desc)
-{
-	struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev);
-
-	if (list_empty(&chan->ld_queue))
-		return;
-
-	/* Link to the new descriptor physical address and
-	 * Enable End-of-segment interrupt for
-	 * the last link descriptor.
-	 * (the previous node's next link descriptor)
-	 *
-	 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
-	 */
-	queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan,
-		new_desc->async_tx.phys | FSL_DMA_EOSIE |
-			(((chan->feature & FSL_DMA_IP_MASK)
-				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
 /**
  * fsl_chan_set_src_loop_size - Set source address hold transfer size
  * @chan : Freescale DMA channel
@@ -343,6 +317,31 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
 	chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
+static void append_ld_queue(struct fsldma_chan *chan,
+			    struct fsl_desc_sw *desc)
+{
+	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
+
+	if (list_empty(&chan->ld_pending))
+		goto out_splice;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 *
+	 * This will un-set the EOL bit of the existing transaction, and the
+	 * last link in this transaction will become the EOL descriptor.
+	 */
+	set_desc_next(chan, &tail->hw, desc->async_tx.phys);
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+out_splice:
+	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
+}
+
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
@@ -351,9 +350,12 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	dma_cookie_t cookie;
 
-	/* cookie increment and adding to ld_queue must be atomic */
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
+	/*
+	 * assign cookies to all of the software descriptors
+	 * that make up this transaction
+	 */
 	cookie = chan->common.cookie;
 	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
@@ -364,8 +366,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	chan->common.cookie = cookie;
+
+	/* put this transaction onto the tail of the pending queue */
 	append_ld_queue(chan, desc);
-	list_splice_init(&desc->tx_list, chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
@@ -381,20 +384,22 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 					struct fsldma_chan *chan)
 {
+	struct fsl_desc_sw *desc;
 	dma_addr_t pdesc;
-	struct fsl_desc_sw *desc_sw;
-
-	desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
-	if (desc_sw) {
-		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
-		INIT_LIST_HEAD(&desc_sw->tx_list);
-		dma_async_tx_descriptor_init(&desc_sw->async_tx,
-			&chan->common);
-		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
-		desc_sw->async_tx.phys = pdesc;
+
+	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+	if (!desc) {
+		dev_dbg(chan->dev, "out of memory for link desc\n");
+		return NULL;
 	}
 
-	return desc_sw;
+	memset(desc, 0, sizeof(*desc));
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = fsl_dma_tx_submit;
+	desc->async_tx.phys = pdesc;
+
+	return desc;
 }
 
 
@@ -414,45 +419,69 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 	if (chan->desc_pool)
 		return 1;
 
-	/* We need the descriptor to be aligned to 32bytes
+	/*
+	 * We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
 	 */
 	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-			chan->dev, sizeof(struct fsl_desc_sw),
-			32, 0);
+					  chan->dev,
+					  sizeof(struct fsl_desc_sw),
+					  __alignof__(struct fsl_desc_sw), 0);
 	if (!chan->desc_pool) {
-		dev_err(chan->dev, "No memory for channel %d "
-			"descriptor dma pool.\n", chan->id);
-		return 0;
+		dev_err(chan->dev, "unable to allocate channel %d "
+				   "descriptor pool\n", chan->id);
+		return -ENOMEM;
 	}
 
+	/* there is at least one descriptor free to be allocated */
 	return 1;
 }
 
 /**
+ * fsldma_free_desc_list - Free all descriptors in a queue
+ * @chan: Freescae DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->desc_lock
+ */
+static void fsldma_free_desc_list(struct fsldma_chan *chan,
+				  struct list_head *list)
+{
+	struct fsl_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
+					  struct list_head *list)
+{
+	struct fsl_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+/**
  * fsl_dma_free_chan_resources - Free all resources of the channel.
  * @chan : Freescale DMA channel
  */
 static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	struct fsl_desc_sw *desc, *_desc;
 	unsigned long flags;
 
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 	spin_lock_irqsave(&chan->desc_lock, flags);
-	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
-#ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev,
-			"LD %p will be released.\n", desc);
-#endif
-		list_del(&desc->node);
-		/* free link descriptor */
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
-	}
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
-	dma_pool_destroy(chan->desc_pool);
 
+	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
 }
 
@@ -491,7 +520,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsldma_chan *chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
-	struct list_head *list;
 	size_t copy;
 
 	if (!dchan)
@@ -550,12 +578,7 @@ fail:
 	if (!first)
 		return NULL;
 
-	list = &first->tx_list;
-	list_for_each_entry_safe_reverse(new, prev, list, node) {
-		list_del(&new->node);
-		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
-	}
-
+	fsldma_free_desc_list_reverse(chan, &first->tx_list);
 	return NULL;
 }
 
@@ -578,7 +601,6 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	struct fsldma_chan *chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
 	struct fsl_dma_slave *slave;
-	struct list_head *tx_list;
 	size_t copy;
 
 	int i;
@@ -748,19 +770,13 @@ fail:
 	 *
 	 * We're re-using variables for the loop, oh well
 	 */
-	tx_list = &first->tx_list;
-	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
-		list_del_init(&new->node);
-		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
-	}
-
+	fsldma_free_desc_list_reverse(chan, &first->tx_list);
 	return NULL;
 }
 
 static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan;
-	struct fsl_desc_sw *desc, *tmp;
 	unsigned long flags;
 
 	if (!dchan)
@@ -774,10 +790,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/* Remove and free all of the descriptors in the LD queue */
-	list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) {
-		list_del(&desc->node);
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
-	}
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
@@ -785,31 +799,48 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 /**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
  * @chan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
  */
 static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
 {
-	struct fsl_desc_sw *cur_desc, *desc;
-	dma_addr_t ld_phy;
-
-	ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK;
+	struct fsl_desc_sw *desc;
+	unsigned long flags;
+	dma_cookie_t cookie;
 
-	if (ld_phy) {
-		cur_desc = NULL;
-		list_for_each_entry(desc, &chan->ld_queue, node)
-			if (desc->async_tx.phys == ld_phy) {
-				cur_desc = desc;
-				break;
-			}
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
-		if (cur_desc && cur_desc->async_tx.cookie) {
-			if (dma_is_idle(chan))
-				chan->completed_cookie =
-					cur_desc->async_tx.cookie;
-			else
-				chan->completed_cookie =
-					cur_desc->async_tx.cookie - 1;
-		}
+	if (list_empty(&chan->ld_running)) {
+		dev_dbg(chan->dev, "no running descriptors\n");
+		goto out_unlock;
 	}
+
+	/* Get the last descriptor, update the cookie to that */
+	desc = to_fsl_desc(chan->ld_running.prev);
+	if (dma_is_idle(chan))
+		cookie = desc->async_tx.cookie;
+	else
+		cookie = desc->async_tx.cookie - 1;
+
+	chan->completed_cookie = cookie;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @chan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
+					  struct fsl_desc_sw *desc)
+{
+	return dma_async_is_complete(desc->async_tx.cookie,
+				     chan->completed_cookie,
+				     chan->common.cookie);
 }
 
 /**
@@ -817,8 +848,6 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
  * @chan : Freescale DMA channel
  *
  * This function clean up the ld_queue of DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
  */
 static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 {
@@ -827,80 +856,95 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	dev_dbg(chan->dev, "chan completed_cookie = %d\n",
-		chan->completed_cookie);
-	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
+	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
 		dma_async_tx_callback callback;
 		void *callback_param;
 
-		if (dma_async_is_complete(desc->async_tx.cookie,
-			chan->completed_cookie, chan->common.cookie)
-				== DMA_IN_PROGRESS)
+		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
 			break;
 
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-
-		/* Remove from ld_queue list */
+		/* Remove from the list of running transactions */
 		list_del(&desc->node);
 
-		dev_dbg(chan->dev, "link descriptor %p will be recycle.\n",
-			desc);
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
-
 		/* Run the link descriptor callback function */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
 		if (callback) {
 			spin_unlock_irqrestore(&chan->desc_lock, flags);
-			dev_dbg(chan->dev, "link descriptor %p callback\n",
-				desc);
+			dev_dbg(chan->dev, "LD %p callback\n", desc);
 			callback(callback_param);
 			spin_lock_irqsave(&chan->desc_lock, flags);
 		}
+
+		/* Run any dependencies, then free the descriptor */
+		dma_run_dependencies(&desc->async_tx);
+		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 	}
+
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
-	struct list_head *ld_node;
-	dma_addr_t next_dst_addr;
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	if (!dma_is_idle(chan))
+	/*
+	 * If the list of pending descriptors is empty, then we
+	 * don't need to do any work at all
+	 */
+	if (list_empty(&chan->ld_pending)) {
+		dev_dbg(chan->dev, "no pending LDs\n");
 		goto out_unlock;
+	}
 
+	/*
+	 * The DMA controller is not idle, which means the interrupt
+	 * handler will start any queued transactions when it runs
+	 * at the end of the current transaction
+	 */
+	if (!dma_is_idle(chan)) {
+		dev_dbg(chan->dev, "DMA controller still busy\n");
+		goto out_unlock;
+	}
+
+	/*
+	 * TODO:
+	 * make sure the dma_halt() function really un-wedges the
+	 * controller as much as possible
+	 */
 	dma_halt(chan);
 
-	/* If there are some link descriptors
-	 * not transfered in queue. We need to start it.
+	/*
+	 * If there are some link descriptors which have not been
+	 * transferred, we need to start the controller
 	 */
 
-	/* Find the first un-transfer desciptor */
-	for (ld_node = chan->ld_queue.next;
-		(ld_node != &chan->ld_queue)
-			&& (dma_async_is_complete(
-				to_fsl_desc(ld_node)->async_tx.cookie,
-				chan->completed_cookie,
-				chan->common.cookie) == DMA_SUCCESS);
-		ld_node = ld_node->next);
-
-	if (ld_node != &chan->ld_queue) {
-		/* Get the ld start address from ld_queue */
-		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n",
-			(unsigned long long)next_dst_addr);
-		set_cdar(chan, next_dst_addr);
-		dma_start(chan);
-	} else {
-		set_cdar(chan, 0);
-		set_ndar(chan, 0);
-	}
+	/*
+	 * Move all elements from the queue of pending transactions
+	 * onto the list of running transactions
+	 */
+	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+	/*
+	 * Program the descriptor's address into the DMA controller,
+	 * then start the DMA transaction
+	 */
+	set_cdar(chan, desc->async_tx.phys);
+	dma_start(chan);
 
 out_unlock:
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
@@ -913,30 +957,6 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-
-#ifdef FSL_DMA_LD_DEBUG
-	struct fsl_desc_sw *ld;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-	if (list_empty(&chan->ld_queue)) {
-		spin_unlock_irqrestore(&chan->desc_lock, flags);
-		return;
-	}
-
-	dev_dbg(chan->dev, "--memcpy issue--\n");
-	list_for_each_entry(ld, &chan->ld_queue, node) {
-		int i;
-		dev_dbg(chan->dev, "Ch %d, LD %08x\n",
-			chan->id, ld->async_tx.phys);
-		for (i = 0; i < 8; i++)
-			dev_dbg(chan->dev, "LD offset %d: %08x\n",
-				i, *(((u32 *)&ld->hw) + i));
-	}
-	dev_dbg(chan->dev, "----------------\n");
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-#endif
-
 	fsl_chan_xfer_ld_queue(chan);
 }
 
@@ -978,10 +998,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	int xfer_ld_q = 0;
 	u32 stat;
 
+	/* save and clear the status register */
 	stat = get_sr(chan);
-	dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n",
-		chan->id, stat);
-	set_sr(chan, stat);		/* Clear the event register */
+	set_sr(chan, stat);
+	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
 
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
@@ -990,12 +1010,13 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (stat & FSL_DMA_SR_TE)
 		dev_err(chan->dev, "Transfer Error!\n");
 
-	/* Programming Error
+	/*
+	 * Programming Error
 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
 	 * triger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(chan->dev, "event: Programming Error INT\n");
+		dev_dbg(chan->dev, "irq: Programming Error INT\n");
 		if (get_bcr(chan) == 0) {
 			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
 			 * Now, update the completed cookie, and continue the
@@ -1007,34 +1028,37 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 		stat &= ~FSL_DMA_SR_PE;
 	}
 
-	/* If the link descriptor segment transfer finishes,
+	/*
+	 * If the link descriptor segment transfer finishes,
 	 * we will recycle the used descriptor.
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
+		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
 			(unsigned long long)get_cdar(chan),
 			(unsigned long long)get_ndar(chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
 
-	/* For MPC8349, EOCDI event need to update cookie
+	/*
+	 * For MPC8349, EOCDI event need to update cookie
 	 * and start the next transfer if it exist.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(chan->dev, "event: End-of-Chain link INT\n");
+		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
 		update_cookie = 1;
 		xfer_ld_q = 1;
 	}
 
-	/* If it current transfer is the end-of-transfer,
+	/*
+	 * If it current transfer is the end-of-transfer,
 	 * we should clear the Channel Start bit for
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(chan->dev, "event: End-of-link INT\n");
+		dev_dbg(chan->dev, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
 		xfer_ld_q = 1;
 	}
@@ -1044,10 +1068,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (xfer_ld_q)
 		fsl_chan_xfer_ld_queue(chan);
 	if (stat)
-		dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n",
-			stat);
+		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
 
-	dev_dbg(chan->dev, "event: Exit\n");
+	dev_dbg(chan->dev, "irq: Exit\n");
 	tasklet_schedule(&chan->tasklet);
 	return IRQ_HANDLED;
 }
@@ -1235,7 +1258,8 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	}
 
 	spin_lock_init(&chan->desc_lock);
-	INIT_LIST_HEAD(&chan->ld_queue);
+	INIT_LIST_HEAD(&chan->ld_pending);
+	INIT_LIST_HEAD(&chan->ld_running);
 
 	chan->common.device = &fdev->common;
 
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ea3b19c8708c..cb4d6ff51597 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -131,7 +131,8 @@ struct fsldma_chan {
 	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
-	struct list_head ld_queue;	/* Link descriptors queue */
+	struct list_head ld_pending;	/* Link descriptors queue */
+	struct list_head ld_running;	/* Link descriptors queue */
 	struct dma_chan common;		/* DMA common channel */
 	struct dma_pool *desc_pool;	/* Descriptors pool */
 	struct device *dev;		/* Channel device */