author		Ira Snyder <iws@ovro.caltech.edu>	2011-03-03 02:55:00 -0500
committer	Dan Williams <dan.j.williams@intel.com>	2011-03-11 20:52:37 -0500
commit		dc8d4091575ba81e886ebcdfd1e559c981f82f86 (patch)
tree		701c55d8e54d8f4c376a0af4ce10356875c86164 /drivers/dma/fsldma.c
parent		9c4d1e7bdeb1ed4dc0c3341d40662a6fbc5f2dc2 (diff)
fsldma: reduce locking during descriptor cleanup
This merges the fsl_chan_ld_cleanup() function into the dma_do_tasklet()
function to reduce locking overhead.

In the best case, we will be able to keep the DMA controller busy while
we are freeing used descriptors. In all cases, the spinlock is grabbed
two times fewer than before on each transaction.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--	drivers/dma/fsldma.c	| 108
1 file changed, 46 insertions(+), 62 deletions(-)
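The locking pattern the patch consolidates into dma_do_tasklet() is the
common "splice under lock, clean up unlocked" idiom: completed descriptors
are detached from chan->ld_running onto a stack-local list in O(1) while
the spinlock is held, and the (potentially re-entrant) callbacks then run
with the lock dropped. Below is a minimal self-contained sketch of that
idiom; the my_chan/my_desc types are hypothetical stand-ins, not the
driver's own structures.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_desc {
	struct list_head node;
	void (*callback)(void *param);
	void *param;
};

struct my_chan {
	spinlock_t lock;
	struct list_head running;	/* descriptors the hardware has finished */
};

static void my_chan_cleanup(struct my_chan *chan)
{
	struct my_desc *desc, *_desc;
	LIST_HEAD(cleanup);
	unsigned long flags;

	/* detach the whole list with one O(1) splice while locked */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&chan->running, &cleanup);
	spin_unlock_irqrestore(&chan->lock, flags);

	/*
	 * the private list needs no locking, so callbacks can
	 * safely submit new transactions without deadlocking
	 */
	list_for_each_entry_safe(desc, _desc, &cleanup, node) {
		list_del(&desc->node);
		if (desc->callback)
			desc->callback(desc->param);
		kfree(desc);
	}
}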
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 526579df6033..d300de456c90 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -882,65 +882,15 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 }
 
 /**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function is run after the queue of running descriptors has been
- * executed by the DMA engine. It will run any callbacks, and then free
- * the descriptors.
- *
- * HARDWARE STATE: idle
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc, *_desc;
-	LIST_HEAD(ld_cleanup);
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	/* update the cookie if we have some descriptors to cleanup */
-	if (!list_empty(&chan->ld_running)) {
-		dma_cookie_t cookie;
-
-		desc = to_fsl_desc(chan->ld_running.prev);
-		cookie = desc->async_tx.cookie;
-
-		chan->completed_cookie = cookie;
-		chan_dbg(chan, "completed cookie=%d\n", cookie);
-	}
-
-	/*
-	 * move the descriptors to a temporary list so we can drop the lock
-	 * during the entire cleanup operation
-	 */
-	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
-
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-	/* Run the callback for each descriptor, in order */
-	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
-		/* Remove from the list of transactions */
-		list_del(&desc->node);
-
-		/* Run all cleanup for this descriptor */
-		fsldma_cleanup_descriptor(chan, desc);
-	}
-}
-
-/**
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
  * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -948,7 +898,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 */
 	if (list_empty(&chan->ld_pending)) {
 		chan_dbg(chan, "no pending LDs\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -958,7 +908,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 */
 	if (!chan->idle) {
 		chan_dbg(chan, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -996,9 +946,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 
 	dma_start(chan);
 	chan->idle = false;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1008,7 +955,11 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1109,20 +1060,53 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
+	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
 	unsigned long flags;
 
 	chan_dbg(chan, "tasklet entry\n");
 
-	/* run all callbacks, free all used descriptors */
-	fsl_chan_ld_cleanup(chan);
-
-	/* the channel is now idle */
 	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	/* the hardware is now idle and ready for more */
 	chan->idle = true;
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-	/* start any pending transactions automatically */
+	/*
+	 * Start any pending transactions automatically
+	 *
+	 * In the ideal case, we keep the DMA controller busy while we go
+	 * ahead and free the descriptors below.
+	 */
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
+	}
+
 	chan_dbg(chan, "tasklet exit\n");
 }
 
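Note the convention change in the first hunk: fsl_chan_xfer_ld_queue() no
longer takes chan->desc_lock itself; the added "LOCKING: must hold
chan->desc_lock" comment shifts that duty to the callers
(fsl_dma_memcpy_issue_pending() and dma_do_tasklet() above). A
caller-holds-lock contract like this can also be made self-checking. The
following sketch is a hypothetical hardening, not part of the patch, and
reuses the made-up my_chan type from the sketch before the diff.

#include <linux/spinlock.h>

/* hypothetical: verify the "caller must hold the lock" contract */
static void xfer_ld_queue_locked(struct my_chan *chan)
{
	/* BUG()s on SMP builds if the caller forgot to take the lock */
	assert_spin_locked(&chan->lock);

	/* ... start the hardware on the pending list, as in the diff ... */
}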