-rw-r--r--  drivers/dma/fsldma.c | 69
-rw-r--r--  drivers/dma/fsldma.h |  1
2 files changed, 36 insertions(+), 34 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2e1af4555b0f..e535cd13f7cc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,7 +37,12 @@
 
 #include "fsldma.h"
 
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+#define chan_dbg(chan, fmt, arg...) \
+	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...) \
+	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
+
+static const char msg_ld_oom[] = "No free memory for link descriptor";
 
 /*
  * Register Helpers
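The two helper macros above simply prefix every message with the channel name. As a sketch of how a call site expands, assuming chan->name holds "chan0" (the name is assigned later, in fsl_dma_chan_probe()):

	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
	/* expands, via string-literal concatenation, to: */
	dev_dbg(chan->dev, "%s: " "irq: stat = 0x%x\n", chan->name, stat);
	/* and logs the usual dev_dbg prefix followed by "chan0: irq: stat = 0x..." */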
@@ -207,7 +212,7 @@ static void dma_halt(struct fsldma_chan *chan)
 	}
 
 	if (!dma_is_idle(chan))
-		dev_err(chan->dev, "DMA halt timeout!\n");
+		chan_err(chan, "DMA halt timeout!\n");
 }
 
 /**
@@ -405,7 +410,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 
 	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (!desc) {
-		dev_dbg(chan->dev, "out of memory for link desc\n");
+		chan_dbg(chan, "out of memory for link descriptor\n");
 		return NULL;
 	}
 
@@ -439,13 +444,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 * We need the descriptor to be aligned to 32bytes
 	 * for meeting FSL DMA specification requirement.
 	 */
-	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-					  chan->dev,
+	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
 					  sizeof(struct fsl_desc_sw),
 					  __alignof__(struct fsl_desc_sw), 0);
 	if (!chan->desc_pool) {
-		dev_err(chan->dev, "unable to allocate channel %d "
-			"descriptor pool\n", chan->id);
+		chan_err(chan, "unable to allocate descriptor pool\n");
 		return -ENOMEM;
 	}
 
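With this change, dma_pool_create() receives the channel name as its first argument, so each channel's descriptor pool shows up under a distinct name in pool statistics. A minimal sketch of the dma_pool lifecycle this code relies on, with error handling abridged:

	struct dma_pool *pool;
	dma_addr_t phys;
	void *desc;

	/* one pool per channel, named after the channel */
	pool = dma_pool_create(chan->name, chan->dev,
			       sizeof(struct fsl_desc_sw),
			       __alignof__(struct fsl_desc_sw), 0);

	/* fsl_dma_alloc_descriptor() draws descriptors from it */
	desc = dma_pool_alloc(pool, GFP_ATOMIC, &phys);

	dma_pool_free(pool, desc, phys);	/* on descriptor teardown */
	dma_pool_destroy(pool);			/* on channel teardown */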
@@ -491,7 +494,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
 	unsigned long flags;
 
-	dev_dbg(chan->dev, "Free all channel resources.\n");
+	chan_dbg(chan, "free all channel resources\n");
 	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsldma_free_desc_list(chan, &chan->ld_pending);
 	fsldma_free_desc_list(chan, &chan->ld_running);
@@ -514,7 +517,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 
 	new = fsl_dma_alloc_descriptor(chan);
 	if (!new) {
-		dev_err(chan->dev, msg_ld_oom);
+		chan_err(chan, "%s\n", msg_ld_oom);
 		return NULL;
 	}
 
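Note how the out-of-memory call sites change shape: msg_ld_oom is now passed as data to a "%s\n" format instead of serving as the format string itself, and the trailing newline moves out of the constant accordingly. A generic illustration of the pattern, not specific to this driver:

	static const char msg[] = "No free memory for link descriptor";

	/* fragile: msg itself is parsed as a format string */
	dev_err(dev, msg);

	/* robust: msg is plain data, the newline lives in the format */
	dev_err(dev, "%s\n", msg);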
@@ -551,11 +554,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		/* Allocate the link descriptor from DMA pool */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
+			chan_err(chan, "%s\n", msg_ld_oom);
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+		chan_dbg(chan, "new link desc alloc %p\n", new);
 #endif
 
 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
@@ -639,11 +642,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
 		/* allocate and populate the descriptor */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
+			chan_err(chan, "%s\n", msg_ld_oom);
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+		chan_dbg(chan, "new link desc alloc %p\n", new);
 #endif
 
 		set_desc_cnt(chan, &new->hw, len);
@@ -815,7 +818,7 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	if (list_empty(&chan->ld_running)) {
-		dev_dbg(chan->dev, "no running descriptors\n");
+		chan_dbg(chan, "no running descriptors\n");
 		goto out_unlock;
 	}
 
@@ -863,7 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+	chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
 	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
 		dma_async_tx_callback callback;
 		void *callback_param;
@@ -879,7 +882,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 		callback_param = desc->async_tx.callback_param;
 		if (callback) {
 			spin_unlock_irqrestore(&chan->desc_lock, flags);
-			dev_dbg(chan->dev, "LD %p callback\n", desc);
+			chan_dbg(chan, "LD %p callback\n", desc);
 			callback(callback_param);
 			spin_lock_irqsave(&chan->desc_lock, flags);
 		}
@@ -913,7 +916,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 * don't need to do any work at all
 	 */
 	if (list_empty(&chan->ld_pending)) {
-		dev_dbg(chan->dev, "no pending LDs\n");
+		chan_dbg(chan, "no pending LDs\n");
 		goto out_unlock;
 	}
 
@@ -923,7 +926,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 * at the end of the current transaction
 	 */
 	if (!dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
+		chan_dbg(chan, "DMA controller still busy\n");
 		goto out_unlock;
 	}
 
@@ -1003,14 +1006,14 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	/* save and clear the status register */
 	stat = get_sr(chan);
 	set_sr(chan, stat);
-	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
 
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
 		return IRQ_NONE;
 
 	if (stat & FSL_DMA_SR_TE)
-		dev_err(chan->dev, "Transfer Error!\n");
+		chan_err(chan, "Transfer Error!\n");
 
 	/*
 	 * Programming Error
@@ -1018,7 +1021,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * triger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(chan->dev, "irq: Programming Error INT\n");
+		chan_dbg(chan, "irq: Programming Error INT\n");
 		if (get_bcr(chan) == 0) {
 			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
 			 * Now, update the completed cookie, and continue the
@@ -1035,8 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * we will recycle the used descriptor.
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
-		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
+		chan_dbg(chan, "irq: End-of-segments INT\n");
+		chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
 			(unsigned long long)get_cdar(chan),
 			(unsigned long long)get_ndar(chan));
 		stat &= ~FSL_DMA_SR_EOSI;
@@ -1048,7 +1051,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * and start the next transfer if it exist.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+		chan_dbg(chan, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
 		update_cookie = 1;
 		xfer_ld_q = 1;
@@ -1060,7 +1063,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(chan->dev, "irq: End-of-link INT\n");
+		chan_dbg(chan, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
 		xfer_ld_q = 1;
 	}
@@ -1070,9 +1073,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	if (xfer_ld_q)
 		fsl_chan_xfer_ld_queue(chan);
 	if (stat)
-		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+		chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);
 
-	dev_dbg(chan->dev, "irq: Exit\n");
+	chan_dbg(chan, "irq: Exit\n");
 	tasklet_schedule(&chan->tasklet);
 	return IRQ_HANDLED;
 }
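The handler hunks above all follow the same read/ack/decode pattern: snapshot the status register, write the bits back to acknowledge them, clear each recognized event from the local copy, and flag anything left over as unhandled. A condensed sketch of that control flow, with all but one event elided:

	stat = get_sr(chan);
	set_sr(chan, stat);			/* ack: write the bits back */

	if (stat & FSL_DMA_SR_EOCDI) {		/* end-of-chain interrupt */
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;	/* mark this event handled */
		xfer_ld_q = 1;
	}

	if (stat)				/* any bit still set was not decoded */
		chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);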
@@ -1128,7 +1131,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
 		chan = fdev->chan[i];
 		if (chan && chan->irq != NO_IRQ) {
-			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+			chan_dbg(chan, "free per-channel IRQ\n");
 			free_irq(chan->irq, chan);
 		}
 	}
@@ -1155,19 +1158,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
 			continue;
 
 		if (chan->irq == NO_IRQ) {
-			dev_err(fdev->dev, "no interrupts property defined for "
-				"DMA channel %d. Please fix your "
-				"device tree\n", chan->id);
+			chan_err(chan, "interrupts property missing in device tree\n");
 			ret = -ENODEV;
 			goto out_unwind;
 		}
 
-		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+		chan_dbg(chan, "request per-channel IRQ\n");
 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
 				  "fsldma-chan", chan);
 		if (ret) {
-			dev_err(fdev->dev, "unable to request IRQ for DMA "
-				"channel %d\n", chan->id);
+			chan_err(chan, "unable to request per-channel IRQ\n");
 			goto out_unwind;
 		}
 	}
@@ -1242,6 +1242,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 
 	fdev->chan[chan->id] = chan;
 	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
 
 	/* Initialize the channel */
 	dma_init(chan);
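The new name is bounded by the char name[8] field added to struct fsldma_chan in fsldma.h below: "chan%d" plus the terminating NUL fits for any id up to 999, and snprintf() truncates safely rather than overflowing if the id were ever larger. For example:

	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
	/* chan->id == 3   ->  "chan3"   (6 bytes including NUL)  */
	/* chan->id == 999 ->  "chan999" (8 bytes, exactly fits)  */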
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403c0fbe..113e7134010b 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -135,6 +135,7 @@ struct fsldma_device {
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
 struct fsldma_chan {
+	char name[8];			/* Channel name */
 	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */