author     Nicolas Ferre <nicolas.ferre@atmel.com>    2011-04-30 10:57:46 -0400
committer  Vinod Koul <vinod.koul@intel.com>          2011-05-02 06:12:09 -0400
commit     53830cc75974a199b6b654c062ff8c54c58caa0b (patch)
tree       2aec11425a76ffe996297c25e71f3dc0eb6ef662 /drivers/dma
parent     9b3aa589eaa1366200062ce1f9cc7ddca8d1d578 (diff)
dmaengine: at_hdmac: add cyclic DMA operation support
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
 drivers/dma/at_hdmac.c      | 231 ++++++++++++++++++++++++++++++++++++++-----
 drivers/dma/at_hdmac_regs.h |  14 ++++++--
 2 files changed, 229 insertions(+), 16 deletions(-)
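
For context before the diff: a slave peripheral driver consumes this through the dmaengine slave API of this era, via the new device_prep_dma_cyclic hook. A minimal, hypothetical client-side sketch (the foo_* names and the DMA_TO_DEVICE choice are illustrative, not part of this patch):

#include <linux/dmaengine.h>

/* hypothetical per-period callback; runs from the channel tasklet
 * once per elapsed period (see atc_handle_cyclic below) */
static void foo_period_done(void *param)
{
	/* refill or consume the period that just completed */
}

static int foo_start_ring(struct dma_chan *chan, dma_addr_t buf_phys,
			  size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;

	/* one descriptor per period, chained into a hardware ring */
	txd = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -EBUSY;

	txd->callback = foo_period_done;
	txd->callback_param = chan;
	dmaengine_submit(txd);	/* runs until DMA_TERMINATE_ALL */
	return 0;
}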
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 13050e646f8e..ed9d92429de8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
 }
 
 /**
+ * atc_desc_chain - build chain adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+				struct at_desc *desc)
+{
+	if (!(*first)) {
+		*first = desc;
+	} else {
+		/* inform the HW lli about chaining */
+		(*prev)->lli.dscr = desc->txd.phys;
+		/* insert the link descriptor to the LD ring */
+		list_add_tail(&desc->desc_node,
+				&(*first)->tx_list);
+	}
+	*prev = desc;
+}
+
+/**
  * atc_assign_cookie - compute and assign new cookie
  * @atchan: channel we work on
  * @desc: descriptor to asign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
-	dma_async_tx_callback callback;
-	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
 	atchan->completed_cookie = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;
 
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 		}
 	}
 
-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
-		callback(param);
+	/* for cyclic transfers,
+	 * no need to replay callback function while stopping */
+	if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+		dma_async_tx_callback	callback = txd->callback;
+		void			*param = txd->callback_param;
+
+		/*
+		 * The API requires that no submissions are done from a
+		 * callback, so we don't need to drop the lock here
+		 */
+		if (callback)
+			callback(param);
+	}
 
 	dma_run_dependencies(txd);
 }
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	atc_chain_complete(atchan, bad_desc);
 }
 
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+	struct at_desc			*first = atc_first_active(atchan);
+	struct dma_async_tx_descriptor	*txd = &first->txd;
+	dma_async_tx_callback		callback = txd->callback;
+	void				*param = txd->callback_param;
+
+	dev_vdbg(chan2dev(&atchan->chan_common),
+			"new cyclic period llp 0x%08x\n",
+			channel_readl(atchan, DSCR));
+
+	if (callback)
+		callback(param);
+}
 
 /*-- IRQ & Tasklet ---------------------------------------------------*/
 
@@ -434,8 +480,10 @@ static void atc_tasklet(unsigned long data)
 	}
 
 	spin_lock(&atchan->lock);
-	if (test_and_clear_bit(0, &atchan->error_status))
+	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
 		atc_handle_error(atchan);
+	else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		atc_handle_cyclic(atchan);
 	else
 		atc_advance_work(atchan);
 
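
Note the asymmetry in the dispatch above: ATC_IS_ERROR is a one-shot event consumed with test_and_clear_bit(), while ATC_IS_CYCLIC is a mode flag that is only tested here and cleared later by terminate/free. A condensed sketch of the resulting tasklet flow, with descriptive comments added:

spin_lock(&atchan->lock);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
	atc_handle_error(atchan);	/* AHB error: abort the bad chain */
else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
	atc_handle_cyclic(atchan);	/* period elapsed: user callback only,
					 * descriptors stay on the ring */
else
	atc_advance_work(atchan);	/* normal: retire finished descriptors */
spin_unlock(&atchan->lock);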
@@ -469,7 +517,7 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 					/* Disable channel on AHB error */
 					dma_writel(atdma, CHDR, atchan->mask);
 					/* Give information to tasklet */
-					set_bit(0, &atchan->error_status);
+					set_bit(ATC_IS_ERROR, &atchan->status);
 				}
 				tasklet_schedule(&atchan->tasklet);
 				ret = IRQ_HANDLED;
@@ -759,6 +807,148 @@ err_desc_get:
 	return NULL;
 }
 
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	if (period_len > (ATC_BTSIZE_MAX << reg_width))
+		goto err_out;
+	if (unlikely(period_len & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+		goto err_out;
+	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+		unsigned int period_index, dma_addr_t buf_addr,
+		size_t period_len, enum dma_data_direction direction)
+{
+	u32		ctrla;
+	unsigned int	reg_width = atslave->reg_width;
+
+	/* prepare common CTRLA value */
+	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
+		| ATC_DST_WIDTH(reg_width)
+		| ATC_SRC_WIDTH(reg_width)
+		| period_len >> reg_width;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		desc->lli.saddr = buf_addr + (period_len * period_index);
+		desc->lli.daddr = atslave->tx_reg;
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DEFAULT_CTRLB
+				| ATC_DST_ADDR_MODE_FIXED
+				| ATC_SRC_ADDR_MODE_INCR
+				| ATC_FC_MEM2PER;
+		break;
+
+	case DMA_FROM_DEVICE:
+		desc->lli.saddr = atslave->rx_reg;
+		desc->lli.daddr = buf_addr + (period_len * period_index);
+		desc->lli.ctrla = ctrla;
+		desc->lli.ctrlb = ATC_DEFAULT_CTRLB
+				| ATC_DST_ADDR_MODE_INCR
+				| ATC_SRC_ADDR_MODE_FIXED
+				| ATC_FC_PER2MEM;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma_slave	*atslave = chan->private;
+	struct at_desc		*first = NULL;
+	struct at_desc		*prev = NULL;
+	unsigned long		was_cyclic;
+	unsigned int		periods = buf_len / period_len;
+	unsigned int		i;
+
+	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			buf_addr,
+			periods, buf_len, period_len);
+
+	if (unlikely(!atslave || !buf_len || !period_len)) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+		return NULL;
+	}
+
+	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+	if (was_cyclic) {
+		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+		return NULL;
+	}
+
+	/* Check for too big/unaligned periods and unaligned DMA buffer */
+	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+			period_len, direction))
+		goto err_out;
+
+	/* build cyclic linked list */
+	for (i = 0; i < periods; i++) {
+		struct at_desc	*desc;
+
+		desc = atc_desc_get(atchan);
+		if (!desc)
+			goto err_desc_get;
+
+		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+				period_len, direction))
+			goto err_desc_get;
+
+		atc_desc_chain(&first, &prev, desc);
+	}
+
+	/* let's make a cyclic list */
+	prev->lli.dscr = first->txd.phys;
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->len = buf_len;
+
+	return &first->txd;
+
+err_desc_get:
+	dev_err(chan2dev(chan), "not enough descriptors available\n");
+	atc_desc_put(atchan, first);
+err_out:
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+	return NULL;
+}
+
+
 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		       unsigned long arg)
 {
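
To make the validity checks in atc_dma_cyclic_check_values concrete: reg_width is log2 of the transfer width in bytes, so a period must fit in the BTSIZE field once scaled, and both the period length and the buffer address must be width-aligned. A worked sketch, assuming ATC_BTSIZE_MAX is the 16-bit BTSIZE limit (0xffff) as in at_hdmac_regs.h:

/* worked example (assumption: ATC_BTSIZE_MAX == 0xffff); a 16-bit
 * peripheral gives reg_width == 1, i.e. 2 bytes per transfer */
static bool foo_cyclic_params_ok(void)
{
	unsigned int reg_width = 1;			/* log2(2 bytes) */
	dma_addr_t buf = 0x20001000;			/* 2-byte aligned */
	size_t buf_len = 8192, period_len = 1024;
	size_t max_period = (size_t)0xffff << reg_width; /* 131070 bytes */

	return buf_len / period_len == 8 &&		/* ring of 8 LLIs */
	       period_len <= max_period &&		/* fits in BTSIZE */
	       !(period_len & ((1 << reg_width) - 1)) &&
	       !(buf & ((1 << reg_width) - 1));
}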
@@ -793,6 +983,9 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
 
+	/* if channel dedicated to cyclic operations, free it */
+	clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
 	spin_unlock_bh(&atchan->lock);
 
 	return 0;
@@ -853,6 +1046,10 @@ static void atc_issue_pending(struct dma_chan *chan)
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
+	/* Not needed for cyclic transfers */
+	if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+		return;
+
 	spin_lock_bh(&atchan->lock);
 	if (!atc_chan_is_enabled(atchan)) {
 		atc_advance_work(atchan);
@@ -959,6 +1156,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 	}
 	list_splice_init(&atchan->free_list, &list);
 	atchan->descs_allocated = 0;
+	atchan->status = 0;
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
@@ -1092,10 +1290,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+	if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+	    dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_control = atc_control;
-	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
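
The line `prev->lli.dscr = first->txd.phys;` in atc_prep_dma_cyclic is what turns the chain built by atc_desc_chain() into a ring. A sketch of the resulting LLI topology:

/*
 * LLI ring after atc_prep_dma_cyclic() (sketch):
 *
 *   desc[0] --> desc[1] --> ... --> desc[periods-1]
 *      ^                                  |
 *      +--------- lli.dscr ---------------+
 *
 * The controller reloads the next LLI from each descriptor's DSCR
 * field, so it cycles through the periods indefinitely; only the
 * DMA_TERMINATE_ALL path in atc_control() (which also clears
 * ATC_IS_CYCLIC) stops it.
 */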
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 8303306ea825..c79a9e07f7be 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -181,12 +181,22 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
 /*-- Channels --------------------------------------------------------*/
 
 /**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+	ATC_IS_ERROR = 0,
+	ATC_IS_CYCLIC = 24,
+};
+
+/**
  * struct at_dma_chan - internal representation of an Atmel HDMAC channel
  * @chan_common: common dmaengine channel object members
  * @device: parent device
 * @ch_regs: memory mapped register base
 * @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
 *	to tasklet (use atomic operations)
 * @tasklet: bottom half to finish transaction work
 * @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +211,7 @@ struct at_dma_chan {
 	struct at_dma		*device;
 	void __iomem		*ch_regs;
 	u8			mask;
-	unsigned long		error_status;
+	unsigned long		status;
 	struct tasklet_struct	tasklet;
 
 	spinlock_t		lock;
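
One detail worth calling out in the new enum: keeping ATC_IS_ERROR at bit 0 preserves the behavior of the old set_bit(0, &atchan->error_status) call sites, and a single status word lets prep_dma_cyclic claim the channel atomically. The reservation pattern used above, annotated:

/* claim the channel for cyclic use and detect a double claim in one
 * atomic step (as done in atc_prep_dma_cyclic) */
if (test_and_set_bit(ATC_IS_CYCLIC, &atchan->status))
	return NULL;		/* channel already runs a cyclic transfer */

/* ...and every path that gives the channel back (error, terminate,
 * free_chan_resources) drops the claim: */
clear_bit(ATC_IS_CYCLIC, &atchan->status);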