Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c | 288
1 files changed, 265 insertions, 23 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ef87a8984145..296f9e747fac 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -34,6 +34,7 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
+#include <asm/fsldma.h>
 #include "fsldma.h"
 
 static void dma_init(struct fsl_dma_chan *fsl_chan)
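The new <asm/fsldma.h> include carries the platform-side types that fsl_dma_prep_slave_sg() reads from chan->private further down. A minimal sketch of what this commit expects the header to provide, reconstructed purely from the field accesses visible in this diff (the exact layout and any extra members in the real header are assumptions):

    /* Sketch reconstructed from the accesses below, not quoted from
     * arch/powerpc/include/asm/fsldma.h; treat the layout as assumed. */
    struct fsl_dma_hw_addr {
        struct list_head entry;     /* linked into fsl_dma_slave.addresses */
        dma_addr_t address;         /* physical device-side address */
        size_t length;              /* bytes available at this address */
    };

    struct fsl_dma_slave {
        struct list_head addresses;  /* list of struct fsl_dma_hw_addr */
        unsigned int request_count;  /* bytes per DREQ# assertion, max 1024 */
        unsigned int src_loop_size;  /* constant source-address loop */
        unsigned int dst_loop_size;  /* constant destination-address loop */
        bool external_start;         /* externally started transfers */
        bool external_pause;         /* externally paused transfers */
    };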
@@ -280,28 +281,40 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 }
 
 /**
- * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * fsl_chan_set_request_count - Set DMA Request Count for external control
  * @fsl_chan : Freescale DMA channel
- * @size : Pause control size, 0 for disable external pause control.
- *        The maximum is 1024.
+ * @size : Number of bytes to transfer in a single request
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA request count is how many bytes are allowed to transfer before
+ * pausing the channel, after which a new assertion of DREQ# resumes channel
+ * operation.
  *
- * The Freescale DMA channel can be controlled by the external
- * signal DREQ#. The pause control size is how many bytes are allowed
- * to transfer before pausing the channel, after which a new assertion
- * of DREQ# resumes channel operation.
+ * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
 {
-	if (size > 1024)
-		return;
+	BUG_ON(size > 1024);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+			| ((__ilog2(size) << 24) & 0x0f000000),
+		32);
+}
 
-	if (size) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| ((__ilog2(size) << 24) & 0x0f000000),
-			32);
+/**
+ * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * @fsl_chan : Freescale DMA channel
+ * @enable : 0 is disabled, 1 is enabled.
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA Request Count feature should be used in addition to this feature
+ * to set the number of bytes to transfer before pausing the channel.
+ */
+static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+{
+	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
-	} else
+	else
 		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 }
 
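Worth noting: the request count is not programmed as a raw byte count. fsl_chan_set_request_count() stores log2(size) in bits 27:24 of the mode register, which is all the __ilog2() arithmetic above does. A small sketch of that encoding, lifted from the code above (the helper name is made up):

    /* Hypothetical helper mirroring fsl_chan_set_request_count(): the
     * request count is encoded as __ilog2(size) in MR bits 27:24. The
     * value is ORed on top of whatever the register held before,
     * exactly as the driver does. */
    static inline u32 fsl_dma_encode_request_count(u32 mr, int size)
    {
        return mr | ((__ilog2(size) << 24) & 0x0f000000);
    }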
@@ -326,7 +339,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
-	struct fsl_desc_sw *desc;
+	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+	struct fsl_desc_sw *child;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -334,7 +348,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	list_for_each_entry(desc, &tx->tx_list, node) {
+	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
 		if (cookie < 0)
 			cookie = 1;
@@ -343,8 +357,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	fsl_chan->common.cookie = cookie;
-	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
-	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
+	append_ld_queue(fsl_chan, desc);
+	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
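The three hunks above move fsl_dma_tx_submit() off the generic tx->tx_list and onto the driver-private desc->tx_list added to struct fsl_desc_sw. Condensed, the submit path now looks like this (the cookie store inside the loop sits outside the visible diff context and is assumed):

    /* Condensed sketch of fsl_dma_tx_submit() after this change */
    cookie = fsl_chan->common.cookie;
    list_for_each_entry(child, &desc->tx_list, node) {
        cookie++;
        if (cookie < 0)                 /* dma_cookie_t wrapped around */
            cookie = 1;
        child->async_tx.cookie = cookie;  /* assumed, hidden by context */
    }
    fsl_chan->common.cookie = cookie;   /* last cookie handed out */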
@@ -366,6 +380,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (desc_sw) {
 		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+		INIT_LIST_HEAD(&desc_sw->tx_list);
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 				&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
@@ -455,7 +470,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
-	list_add_tail(&new->node, &new->async_tx.tx_list);
+	list_add_tail(&new->node, &new->tx_list);
 
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
@@ -513,7 +528,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		dma_dest += copy;
 
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->async_tx.tx_list);
+		list_add_tail(&new->node, &first->tx_list);
 	} while (len);
 
 	new->async_tx.flags = flags; /* client is in control of this ack */
@@ -528,7 +543,7 @@ fail:
 	if (!first)
 		return NULL;
 
-	list = &first->async_tx.tx_list;
+	list = &first->tx_list;
 	list_for_each_entry_safe_reverse(new, prev, list, node) {
 		list_del(&new->node);
 		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
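This fail path is the template for the identical unwind in the new fsl_dma_prep_slave_sg() below: every descriptor allocated so far, including first itself, lives on first->tx_list, so a deletion-safe reverse walk can return them all to the DMA pool:

    /* Shared unwind pattern used by both prep routines */
    list_for_each_entry_safe_reverse(new, prev, &first->tx_list, node) {
        list_del(&new->node);
        dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
    }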
@@ -538,6 +553,229 @@ fail:
 }
 
 /**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct fsl_dma_chan *fsl_chan;
+	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+	struct fsl_dma_slave *slave;
+	struct list_head *tx_list;
+	size_t copy;
+
+	int i;
+	struct scatterlist *sg;
+	size_t sg_used;
+	size_t hw_used;
+	struct fsl_dma_hw_addr *hw;
+	dma_addr_t dma_dst, dma_src;
+
+	if (!chan)
+		return NULL;
+
+	if (!chan->private)
+		return NULL;
+
+	fsl_chan = to_fsl_chan(chan);
+	slave = chan->private;
+
+	if (list_empty(&slave->addresses))
+		return NULL;
+
+	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
+	hw_used = 0;
+
+	/*
+	 * Build the hardware transaction to copy from the scatterlist to
+	 * the hardware, or from the hardware to the scatterlist
+	 *
+	 * If you are copying from the hardware to the scatterlist and it
+	 * takes two hardware entries to fill an entire page, then both
+	 * hardware entries will be coalesced into the same page
+	 *
+	 * If you are copying from the scatterlist to the hardware and a
+	 * single page can fill two hardware entries, then the data will
+	 * be read out of the page into the first hardware entry, and so on
+	 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		sg_used = 0;
+
+		/* Loop until the entire scatterlist entry is used */
+		while (sg_used < sg_dma_len(sg)) {
+
+			/*
+			 * If we've used up the current hardware address/length
+			 * pair, we need to load a new one
+			 *
+			 * This is done in a while loop so that descriptors with
+			 * length == 0 will be skipped
+			 */
+			while (hw_used >= hw->length) {
+
+				/*
+				 * If the current hardware entry is the last
+				 * entry in the list, we're finished
+				 */
+				if (list_is_last(&hw->entry, &slave->addresses))
+					goto finished;
+
+				/* Get the next hardware address/length pair */
+				hw = list_entry(hw->entry.next,
+						struct fsl_dma_hw_addr, entry);
+				hw_used = 0;
+			}
+
+			/* Allocate the link descriptor from DMA pool */
+			new = fsl_dma_alloc_descriptor(fsl_chan);
+			if (!new) {
+				dev_err(fsl_chan->dev, "No free memory for "
+						       "link descriptor\n");
+				goto fail;
+			}
+#ifdef FSL_DMA_LD_DEBUG
+			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+#endif
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the DMA controller limit
+			 */
+			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+				     hw->length - hw_used);
+			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
+
+			/*
+			 * DMA_FROM_DEVICE
+			 * from the hardware to the scatterlist
+			 *
+			 * DMA_TO_DEVICE
+			 * from the scatterlist to the hardware
+			 */
+			if (direction == DMA_FROM_DEVICE) {
+				dma_src = hw->address + hw_used;
+				dma_dst = sg_dma_address(sg) + sg_used;
+			} else {
+				dma_src = sg_dma_address(sg) + sg_used;
+				dma_dst = hw->address + hw_used;
+			}
+
+			/* Fill in the descriptor */
+			set_desc_cnt(fsl_chan, &new->hw, copy);
+			set_desc_src(fsl_chan, &new->hw, dma_src);
+			set_desc_dest(fsl_chan, &new->hw, dma_dst);
+
+			/*
+			 * If this is not the first descriptor, chain the
+			 * current descriptor after the previous descriptor
+			 */
+			if (!first) {
+				first = new;
+			} else {
+				set_desc_next(fsl_chan, &prev->hw,
+					      new->async_tx.phys);
+			}
+
+			new->async_tx.cookie = 0;
+			async_tx_ack(&new->async_tx);
+
+			prev = new;
+			sg_used += copy;
+			hw_used += copy;
+
+			/* Insert the link descriptor into the LD ring */
+			list_add_tail(&new->node, &first->tx_list);
+		}
+	}
+
+finished:
+
+	/* All of the hardware address/length pairs had length == 0 */
+	if (!first || !new)
+		return NULL;
+
+	new->async_tx.flags = flags;
+	new->async_tx.cookie = -EBUSY;
+
+	/* Set End-of-link to the last link descriptor of new list */
+	set_ld_eol(fsl_chan, new);
+
+	/* Enable extra controller features */
+	if (fsl_chan->set_src_loop_size)
+		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+
+	if (fsl_chan->set_dest_loop_size)
+		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+
+	if (fsl_chan->toggle_ext_start)
+		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+
+	if (fsl_chan->toggle_ext_pause)
+		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+
+	if (fsl_chan->set_request_count)
+		fsl_chan->set_request_count(fsl_chan, slave->request_count);
+
+	return &first->async_tx;
+
+fail:
+	/* If first was not set, then we failed to allocate the very first
+	 * descriptor, and we're done */
+	if (!first)
+		return NULL;
+
+	/*
+	 * First is set, so all of the descriptors we allocated have been added
+	 * to first->tx_list, INCLUDING "first" itself. Therefore we
+	 * must traverse the list backwards freeing each descriptor in turn
+	 *
+	 * We're re-using variables for the loop, oh well
+	 */
+	tx_list = &first->tx_list;
+	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
+		list_del_init(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
+}
+
+static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+{
+	struct fsl_dma_chan *fsl_chan;
+	struct fsl_desc_sw *desc, *tmp;
+	unsigned long flags;
+
+	if (!chan)
+		return;
+
+	fsl_chan = to_fsl_chan(chan);
+
+	/* Halt the DMA engine */
+	dma_halt(fsl_chan);
+
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+	/* Remove and free all of the descriptors in the LD queue */
+	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
+		list_del(&desc->node);
+		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+	}
+
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+}
+
+/**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
  * @fsl_chan : Freescale DMA channel
  */
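A DMA_SLAVE client drives this by filling out a struct fsl_dma_slave, hanging it on chan->private, and calling device_prep_slave_sg(). A hypothetical client sketch follows; the device address, sizes, and function name are made up, and only the fsl_dma_slave fields and the prep signature come from the code above:

    /* Hypothetical client; fifo/fifo_len are illustrative, and static
     * storage stands in for a properly managed allocation. */
    static struct dma_async_tx_descriptor *
    example_prep(struct dma_chan *chan, struct scatterlist *sgl,
                 unsigned int sg_len, dma_addr_t fifo, size_t fifo_len)
    {
        static struct fsl_dma_slave slave = {
            .request_count = 32,    /* bytes per DREQ# assertion */
            .external_pause = true,
        };
        static struct fsl_dma_hw_addr hw;

        hw.address = fifo;          /* device-side address */
        hw.length = fifo_len;
        INIT_LIST_HEAD(&slave.addresses);
        list_add_tail(&hw.entry, &slave.addresses);

        chan->private = &slave;     /* consumed by fsl_dma_prep_slave_sg() */
        return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  DMA_TO_DEVICE, 0);
    }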
@@ -883,6 +1121,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
 		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
 	}
 
 	spin_lock_init(&new_fsl_chan->desc_lock);
@@ -962,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
+	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
+	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
 	fdev->common.dev = &dev->dev;
 
 	fdev->irq = irq_of_parse_and_map(dev->node, 0);
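With DMA_SLAVE set in the capability mask, clients can find one of these channels through the standard dmaengine allocator and abort it through the new terminate_all hook. A hypothetical acquisition and teardown sketch (my_filter is made up; the dmaengine calls are the standard ones of this kernel era):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    chan = dma_request_channel(mask, my_filter, NULL);  /* my_filter is hypothetical */
    if (chan) {
        /* ... prepare and submit transactions ... */
        chan->device->device_terminate_all(chan);   /* halts engine, frees ld_queue */
        dma_release_channel(chan);
    }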