Diffstat (limited to 'drivers/dma/fsldma.c')
-rw-r--r--  drivers/dma/fsldma.c | 328
1 file changed, 156 insertions(+), 172 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index cea08bed9cf9..286c3ac6bdcc 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,9 +35,10 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
-#include <asm/fsldma.h>
 #include "fsldma.h"
 
+static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+
 static void dma_init(struct fsldma_chan *chan)
 {
 	/* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 
 	new = fsl_dma_alloc_descriptor(chan);
 	if (!new) {
-		dev_err(chan->dev, "No free memory for link descriptor\n");
+		dev_err(chan->dev, msg_ld_oom);
 		return NULL;
 	}
 
@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		/* Allocate the link descriptor from DMA pool */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev,
-				"No free memory for link descriptor\n");
+			dev_err(chan->dev, msg_ld_oom);
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
@@ -583,223 +583,205 @@ fail:
 	return NULL;
 }
 
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_data_direction direction, unsigned long flags)
+static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
+	struct scatterlist *dst_sg, unsigned int dst_nents,
+	struct scatterlist *src_sg, unsigned int src_nents,
+	unsigned long flags)
 {
-	struct fsldma_chan *chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-	struct fsl_dma_slave *slave;
-	size_t copy;
-
-	int i;
-	struct scatterlist *sg;
-	size_t sg_used;
-	size_t hw_used;
-	struct fsl_dma_hw_addr *hw;
-	dma_addr_t dma_dst, dma_src;
+	struct fsldma_chan *chan = to_fsl_chan(dchan);
+	size_t dst_avail, src_avail;
+	dma_addr_t dst, src;
+	size_t len;
 
-	if (!dchan)
+	/* basic sanity checks */
+	if (dst_nents == 0 || src_nents == 0)
 		return NULL;
 
-	if (!dchan->private)
+	if (dst_sg == NULL || src_sg == NULL)
 		return NULL;
 
-	chan = to_fsl_chan(dchan);
-	slave = dchan->private;
+	/*
+	 * TODO: should we check that both scatterlists have the same
+	 * TODO: number of bytes in total? Is that really an error?
+	 */
 
-	if (list_empty(&slave->addresses))
-		return NULL;
+	/* get prepared for the loop */
+	dst_avail = sg_dma_len(dst_sg);
+	src_avail = sg_dma_len(src_sg);
 
-	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
-	hw_used = 0;
+	/* run until we are out of scatterlist entries */
+	while (true) {
 
-	/*
-	 * Build the hardware transaction to copy from the scatterlist to
-	 * the hardware, or from the hardware to the scatterlist
-	 *
-	 * If you are copying from the hardware to the scatterlist and it
-	 * takes two hardware entries to fill an entire page, then both
-	 * hardware entries will be coalesced into the same page
-	 *
-	 * If you are copying from the scatterlist to the hardware and a
-	 * single page can fill two hardware entries, then the data will
-	 * be read out of the page into the first hardware entry, and so on
-	 */
-	for_each_sg(sgl, sg, sg_len, i) {
-		sg_used = 0;
-
-		/* Loop until the entire scatterlist entry is used */
-		while (sg_used < sg_dma_len(sg)) {
-
-			/*
-			 * If we've used up the current hardware address/length
-			 * pair, we need to load a new one
-			 *
-			 * This is done in a while loop so that descriptors with
-			 * length == 0 will be skipped
-			 */
-			while (hw_used >= hw->length) {
-
-				/*
-				 * If the current hardware entry is the last
-				 * entry in the list, we're finished
-				 */
-				if (list_is_last(&hw->entry, &slave->addresses))
-					goto finished;
-
-				/* Get the next hardware address/length pair */
-				hw = list_entry(hw->entry.next,
-						struct fsl_dma_hw_addr, entry);
-				hw_used = 0;
-			}
-
-			/* Allocate the link descriptor from DMA pool */
-			new = fsl_dma_alloc_descriptor(chan);
-			if (!new) {
-				dev_err(chan->dev, "No free memory for "
-						   "link descriptor\n");
-				goto fail;
-			}
+		/* create the largest transaction possible */
+		len = min_t(size_t, src_avail, dst_avail);
+		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
+		if (len == 0)
+			goto fetch;
+
+		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+		/* allocate and populate the descriptor */
+		new = fsl_dma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, msg_ld_oom);
+			goto fail;
+		}
 #ifdef FSL_DMA_LD_DEBUG
-			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
 
-			/*
-			 * Calculate the maximum number of bytes to transfer,
-			 * making sure it is less than the DMA controller limit
-			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     hw->length - hw_used);
-			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
-
-			/*
-			 * DMA_FROM_DEVICE
-			 * from the hardware to the scatterlist
-			 *
-			 * DMA_TO_DEVICE
-			 * from the scatterlist to the hardware
-			 */
-			if (direction == DMA_FROM_DEVICE) {
-				dma_src = hw->address + hw_used;
-				dma_dst = sg_dma_address(sg) + sg_used;
-			} else {
-				dma_src = sg_dma_address(sg) + sg_used;
-				dma_dst = hw->address + hw_used;
-			}
-
-			/* Fill in the descriptor */
-			set_desc_cnt(chan, &new->hw, copy);
-			set_desc_src(chan, &new->hw, dma_src);
-			set_desc_dst(chan, &new->hw, dma_dst);
-
-			/*
-			 * If this is not the first descriptor, chain the
-			 * current descriptor after the previous descriptor
-			 */
-			if (!first) {
-				first = new;
-			} else {
-				set_desc_next(chan, &prev->hw,
-					      new->async_tx.phys);
-			}
-
-			new->async_tx.cookie = 0;
-			async_tx_ack(&new->async_tx);
-
-			prev = new;
-			sg_used += copy;
-			hw_used += copy;
-
-			/* Insert the link descriptor into the LD ring */
-			list_add_tail(&new->node, &first->tx_list);
-		}
-	}
+		set_desc_cnt(chan, &new->hw, len);
+		set_desc_src(chan, &new->hw, src);
+		set_desc_dst(chan, &new->hw, dst);
 
-finished:
+		if (!first)
+			first = new;
+		else
+			set_desc_next(chan, &prev->hw, new->async_tx.phys);
 
-	/* All of the hardware address/length pairs had length == 0 */
-	if (!first || !new)
-		return NULL;
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+		prev = new;
 
-	new->async_tx.flags = flags;
-	new->async_tx.cookie = -EBUSY;
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
 
-	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(chan, new);
+		/* update metadata */
+		dst_avail -= len;
+		src_avail -= len;
+
+fetch:
+		/* fetch the next dst scatterlist entry */
+		if (dst_avail == 0) {
+
+			/* no more entries: we're done */
+			if (dst_nents == 0)
+				break;
+
+			/* fetch the next entry: if there are no more: done */
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				break;
+
+			dst_nents--;
+			dst_avail = sg_dma_len(dst_sg);
+		}
 
-	/* Enable extra controller features */
-	if (chan->set_src_loop_size)
-		chan->set_src_loop_size(chan, slave->src_loop_size);
+		/* fetch the next src scatterlist entry */
+		if (src_avail == 0) {
 
-	if (chan->set_dst_loop_size)
-		chan->set_dst_loop_size(chan, slave->dst_loop_size);
+			/* no more entries: we're done */
+			if (src_nents == 0)
+				break;
 
-	if (chan->toggle_ext_start)
-		chan->toggle_ext_start(chan, slave->external_start);
+			/* fetch the next entry: if there are no more: done */
+			src_sg = sg_next(src_sg);
+			if (src_sg == NULL)
+				break;
 
-	if (chan->toggle_ext_pause)
-		chan->toggle_ext_pause(chan, slave->external_pause);
+			src_nents--;
+			src_avail = sg_dma_len(src_sg);
+		}
+	}
 
-	if (chan->set_request_count)
-		chan->set_request_count(chan, slave->request_count);
+	new->async_tx.flags = flags; /* client is in control of this ack */
+	new->async_tx.cookie = -EBUSY;
+
+	/* Set End-of-link to the last link descriptor of new list */
+	set_ld_eol(chan, new);
 
 	return &first->async_tx;
 
 fail:
-	/* If first was not set, then we failed to allocate the very first
-	 * descriptor, and we're done */
 	if (!first)
 		return NULL;
 
+	fsldma_free_desc_list_reverse(chan, &first->tx_list);
+	return NULL;
+}
+
+/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
 	/*
-	 * First is set, so all of the descriptors we allocated have been added
-	 * to first->tx_list, INCLUDING "first" itself. Therefore we
-	 * must traverse the list backwards freeing each descriptor in turn
+	 * This operation is not supported on the Freescale DMA controller
 	 *
-	 * We're re-using variables for the loop, oh well
+	 * However, we need to provide the function pointer to allow the
+	 * device_control() method to work.
 	 */
-	fsldma_free_desc_list_reverse(chan, &first->tx_list);
 	return NULL;
 }
 
 static int fsl_dma_device_control(struct dma_chan *dchan,
 				  enum dma_ctrl_cmd cmd, unsigned long arg)
 {
+	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
 	unsigned long flags;
-
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	int size;
 
 	if (!dchan)
 		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
-	/* Halt the DMA engine */
-	dma_halt(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Halt the DMA engine */
+		dma_halt(chan);
 
-	spin_lock_irqsave(&chan->desc_lock, flags);
+		spin_lock_irqsave(&chan->desc_lock, flags);
 
-	/* Remove and free all of the descriptors in the LD queue */
-	fsldma_free_desc_list(chan, &chan->ld_pending);
-	fsldma_free_desc_list(chan, &chan->ld_running);
+		/* Remove and free all of the descriptors in the LD queue */
+		fsldma_free_desc_list(chan, &chan->ld_pending);
+		fsldma_free_desc_list(chan, &chan->ld_running);
 
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+
+		/* make sure the channel supports setting burst size */
+		if (!chan->set_request_count)
+			return -ENXIO;
+
+		/* we set the controller burst size depending on direction */
+		if (config->direction == DMA_TO_DEVICE)
+			size = config->dst_addr_width * config->dst_maxburst;
+		else
+			size = config->src_addr_width * config->src_maxburst;
+
+		chan->set_request_count(chan, size);
+		return 0;
+
+	case FSLDMA_EXTERNAL_START:
+
+		/* make sure the channel supports external start */
+		if (!chan->toggle_ext_start)
+			return -ENXIO;
+
+		chan->toggle_ext_start(chan, arg);
+		return 0;
+
+	default:
+		return -ENXIO;
+	}
 
 	return 0;
 }
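The reworked fsl_dma_device_control() above now accepts the generic DMA_SLAVE_CONFIG command, mapping the requested bus width and burst length onto the controller's request count, alongside the existing DMA_TERMINATE_ALL and the driver-private FSLDMA_EXTERNAL_START. A minimal client-side sketch of the new configuration path follows; it is not part of this patch, the function name and the width/burst values are illustrative only:

#include <linux/dmaengine.h>

/*
 * Hypothetical example: program a 32-byte burst on an fsldma channel
 * through the DMA_SLAVE_CONFIG command added above. With DMA_TO_DEVICE,
 * the driver computes dst_addr_width * dst_maxburst, i.e.
 * 4 bytes * 8 = 32 bytes per hardware request.
 */
static int example_set_burst(struct dma_chan *chan)
{
	struct dma_slave_config config = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* dispatches to fsl_dma_device_control(), case DMA_SLAVE_CONFIG */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&config);
}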
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+	dma_cap_set(DMA_SG, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
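Registering the DMA_SG capability and the device_prep_dma_sg hook makes the new scatterlist-to-scatterlist copy visible to dmaengine clients. A minimal usage sketch, not from this patch: it assumes the channel was acquired with DMA_SG set in its capability mask and that both scatterlists were already mapped with dma_map_sg(); the function name is hypothetical and error handling is elided.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical example: queue a scatterlist-to-scatterlist copy on a
 * channel advertising DMA_SG. On this controller, the prep call below
 * lands in fsl_dma_prep_sg().
 */
static dma_cookie_t example_sg_copy(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
					      src_sg, src_nents,
					      DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* place on the pending queue */
	dma_async_issue_pending(chan);	/* tell the driver to start */
	return cookie;
}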
