author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-20 16:20:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-20 16:20:24 -0500
commit		e6d69a60b77a6ea8d5f9d41765c7571bb8d45531 (patch)
tree		4ea3fe7c49a864da2ce7ffb51a703661826dc15d /drivers/dma/ppc4xx
parent		5a1efc6e68a095917277459091fafba6a6baef17 (diff)
parent		df12a3178d340319b1955be6b973a4eb84aff754 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine changes from Vinod Koul:
"This brings for slave dmaengine:
- Change dma notification flag to DMA_COMPLETE from DMA_SUCCESS as
dmaengine can only transfer and not verify validaty of dma
transfers
- Bunch of fixes across drivers:
- cppi41 driver fixes from Daniel
- 8 channel freescale dma engine support and updated bindings from
Hongbo
- msx-dma fixes and cleanup by Markus
- DMAengine updates from Dan:
- Bartlomiej and Dan finalized a rework of the dma address unmap
implementation.
- In the course of testing 1/ a collection of enhancements to
dmatest fell out. Notably basic performance statistics, and
fixed / enhanced test control through new module parameters
'run', 'wait', 'noverify', and 'verbose'. Thanks to Andriy and
Linus [Walleij] for their review.
- Testing the raid related corner cases of 1/ triggered bugs in
the recently added 16-source operation support in the ioatdma
driver.
- Some minor fixes / cleanups to mv_xor and ioatdma"
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (99 commits)
dma: mv_xor: Fix mis-usage of mmio 'base' and 'high_base' registers
dma: mv_xor: Remove unneeded NULL address check
ioat: fix ioat3_irq_reinit
ioat: kill msix_single_vector support
raid6test: add new corner case for ioatdma driver
ioatdma: clean up sed pool kmem_cache
ioatdma: fix selection of 16 vs 8 source path
ioatdma: fix sed pool selection
ioatdma: Fix bug in selftest after removal of DMA_MEMSET.
dmatest: verbose mode
dmatest: convert to dmaengine_unmap_data
dmatest: add a 'wait' parameter
dmatest: add basic performance metrics
dmatest: add support for skipping verification and random data setup
dmatest: use pseudo random numbers
dmatest: support xor-only, or pq-only channels in tests
dmatest: restore ability to start test at module load and init
dmatest: cleanup redundant "dmatest: " prefixes
dmatest: replace stored results mechanism, with uniform messages
Revert "dmatest: append verify result to results"
...
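The DMA_SUCCESS -> DMA_COMPLETE change described above is a rename of the terminal value in enum dma_status; behaviour is unchanged, so client code only has to test against the new name. A minimal consumer-side sketch of such a poll loop (wait_for_dma() and its error handling are illustrative, not code from this tree):

#include <linux/dmaengine.h>

/* Poll a previously submitted cookie until the engine reports completion. */
static int wait_for_dma(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	dma_async_issue_pending(chan);		/* flush the pending queue */
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (status == DMA_ERROR)
			return -EIO;
		cpu_relax();
	} while (status != DMA_COMPLETE);	/* formerly DMA_SUCCESS */

	return 0;
}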
Diffstat (limited to 'drivers/dma/ppc4xx')
-rw-r--r--	drivers/dma/ppc4xx/adma.c	272
1 file changed, 2 insertions, 270 deletions
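The only file touched under drivers/dma/ppc4xx is adma.c, and the change is one instance of the unmap rework: instead of digging source and destination addresses back out of its hardware descriptors and calling dma_unmap_page() itself, the driver now defers to the core's dma_descriptor_unmap(), which releases whatever dmaengine_unmap_data the client attached to the transaction. A rough sketch of the resulting completion path, with hypothetical foo_* names standing in for the driver specifics:

#include <linux/dmaengine.h>

struct foo_desc {				/* hypothetical driver descriptor slot */
	struct dma_async_tx_descriptor async_tx;
};

static void foo_run_tx_complete_actions(struct foo_desc *desc)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	if (tx->callback)
		tx->callback(tx->callback_param);

	/*
	 * Core-managed unmap replaces the driver's per-descriptor
	 * dma_unmap_page() loops and DMA_COMPL_SKIP_*_UNMAP checks.
	 */
	dma_descriptor_unmap(tx);
}

On the client side, the addresses to release are described by a struct dmaengine_unmap_data obtained from dmaengine_get_unmap_data() and attached with dma_set_unmap(); dmaengine_unmap_put() drops the reference once the descriptor is retired.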
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index e24b5ef486b5..8da48c6b2a38 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
 }
 
 /**
- * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
- */
-static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan, int src_idx)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-		/* May have 0, 1, 2, or 3 sources */
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DFILL128:
-			return 0;
-		case DMA_CDB_OPC_DCHECK128:
-			if (unlikely(src_idx)) {
-				printk(KERN_ERR "%s: try to get %d source for"
-				    " DCHECK128\n", __func__, src_idx);
-				BUG();
-			}
-			return le32_to_cpu(dma_hw_desc->sg1l);
-		case DMA_CDB_OPC_MULTICAST:
-		case DMA_CDB_OPC_MV_SG1_SG2:
-			if (unlikely(src_idx > 2)) {
-				printk(KERN_ERR "%s: try to get %d source from"
-				    " DMA descr\n", __func__, src_idx);
-				BUG();
-			}
-			if (src_idx) {
-				if (le32_to_cpu(dma_hw_desc->sg1u) &
-				    DMA_CUED_XOR_WIN_MSK) {
-					u8 region;
-
-					if (src_idx == 1)
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							desc->unmap_len;
-
-					region = (le32_to_cpu(
-					    dma_hw_desc->sg1u)) >>
-						DMA_CUED_REGION_OFF;
-
-					region &= DMA_CUED_REGION_MSK;
-					switch (region) {
-					case DMA_RXOR123:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len << 1);
-					case DMA_RXOR124:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len * 3);
-					case DMA_RXOR125:
-						return le32_to_cpu(
-						    dma_hw_desc->sg1l) +
-							(desc->unmap_len << 2);
-					default:
-						printk(KERN_ERR
-						    "%s: try to"
-						    " get src3 for region %02x"
-						    "PPC440SPE_DESC_RXOR12?\n",
-						    __func__, region);
-						BUG();
-					}
-				} else {
-					printk(KERN_ERR
-					    "%s: try to get %d"
-					    " source for non-cued descr\n",
-					    __func__, src_idx);
-					BUG();
-				}
-			}
-			return le32_to_cpu(dma_hw_desc->sg1l);
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-			    __func__, dma_hw_desc->opc);
-			BUG();
-		}
-		return le32_to_cpu(dma_hw_desc->sg1l);
-	case PPC440SPE_XOR_ID:
-		/* May have up to 16 sources */
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->ops[src_idx].l;
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_dest_addr - extract the destination address from the
- * descriptor
- */
-static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan, int idx)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-
-		if (likely(!idx))
-			return le32_to_cpu(dma_hw_desc->sg2l);
-		return le32_to_cpu(dma_hw_desc->sg3l);
-	case PPC440SPE_XOR_ID:
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->cbtal;
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_src_num - extract the number of source addresses from
- * the descriptor
- */
-static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan)
-{
-	struct dma_cdb *dma_hw_desc;
-	struct xor_cb *xor_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		dma_hw_desc = desc->hw_desc;
-
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DFILL128:
-			return 0;
-		case DMA_CDB_OPC_DCHECK128:
-			return 1;
-		case DMA_CDB_OPC_MV_SG1_SG2:
-		case DMA_CDB_OPC_MULTICAST:
-			/*
-			 * Only for RXOR operations we have more than
-			 * one source
-			 */
-			if (le32_to_cpu(dma_hw_desc->sg1u) &
-			    DMA_CUED_XOR_WIN_MSK) {
-				/* RXOR op, there are 2 or 3 sources */
-				if (((le32_to_cpu(dma_hw_desc->sg1u) >>
-				    DMA_CUED_REGION_OFF) &
-				      DMA_CUED_REGION_MSK) == DMA_RXOR12) {
-					/* RXOR 1-2 */
-					return 2;
-				} else {
-					/* RXOR 1-2-3/1-2-4/1-2-5 */
-					return 3;
-				}
-			}
-			return 1;
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-			    __func__, dma_hw_desc->opc);
-			BUG();
-		}
-	case PPC440SPE_XOR_ID:
-		/* up to 16 sources */
-		xor_hw_desc = desc->hw_desc;
-		return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-/**
- * ppc440spe_desc_get_dst_num - get the number of destination addresses in
- * this descriptor
- */
-static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
-				struct ppc440spe_adma_chan *chan)
-{
-	struct dma_cdb *dma_hw_desc;
-
-	switch (chan->device->id) {
-	case PPC440SPE_DMA0_ID:
-	case PPC440SPE_DMA1_ID:
-		/* May be 1 or 2 destinations */
-		dma_hw_desc = desc->hw_desc;
-		switch (dma_hw_desc->opc) {
-		case DMA_CDB_OPC_NO_OP:
-		case DMA_CDB_OPC_DCHECK128:
-			return 0;
-		case DMA_CDB_OPC_MV_SG1_SG2:
-		case DMA_CDB_OPC_DFILL128:
-			return 1;
-		case DMA_CDB_OPC_MULTICAST:
-			if (desc->dst_cnt == 2)
-				return 2;
-			else
-				return 1;
-		default:
-			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
-			    __func__, dma_hw_desc->opc);
-			BUG();
-		}
-	case PPC440SPE_XOR_ID:
-		/* Always only 1 destination */
-		return 1;
-	default:
-		BUG();
-	}
-	return 0;
-}
-
-/**
  * ppc440spe_desc_get_link - get the address of the descriptor that
  * follows this one
  */
@@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
 	}
 }
 
-static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
-				 struct ppc440spe_adma_desc_slot *desc)
-{
-	u32 src_cnt, dst_cnt;
-	dma_addr_t addr;
-
-	/*
-	 * get the number of sources & destination
-	 * included in this descriptor and unmap
-	 * them all
-	 */
-	src_cnt = ppc440spe_desc_get_src_num(desc, chan);
-	dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
-
-	/* unmap destinations */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		while (dst_cnt--) {
-			addr = ppc440spe_desc_get_dest_addr(
-				desc, chan, dst_cnt);
-			dma_unmap_page(chan->device->dev,
-					addr, desc->unmap_len,
-					DMA_FROM_DEVICE);
-		}
-	}
-
-	/* unmap sources */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		while (src_cnt--) {
-			addr = ppc440spe_desc_get_src_addr(
-				desc, chan, src_cnt);
-			dma_unmap_page(chan->device->dev,
-					addr, desc->unmap_len,
-					DMA_TO_DEVICE);
-		}
-	}
-}
-
 /**
  * ppc440spe_adma_run_tx_complete_actions - call functions to be called
  * upon completion
@@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 			desc->async_tx.callback(
 				desc->async_tx.callback_param);
 
-		/* unmap dma addresses
-		 * (unmap_single vs unmap_page?)
-		 *
-		 * actually, ppc's dma_unmap_page() functions are empty, so
-		 * the following code is just for the sake of completeness
-		 */
-		if (chan && chan->needs_unmap && desc->group_head &&
-		    desc->unmap_len) {
-			struct ppc440spe_adma_desc_slot *unmap =
-				desc->group_head;
-			/* assume 1 slot per op always */
-			u32 slot_count = unmap->slot_cnt;
-
-			/* Run through the group list and unmap addresses */
-			for (i = 0; i < slot_count; i++) {
-				BUG_ON(!unmap);
-				ppc440spe_adma_unmap(chan, unmap);
-				unmap = unmap->hw_next;
-			}
-		}
+		dma_descriptor_unmap(&desc->async_tx);
 	}
 
 	/* run dependent operations */
@@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 
 	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	ppc440spe_adma_slot_cleanup(ppc440spe_chan);
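The hunk above is the common tx_status shape shared by most dmaengine drivers; only the status name changes here. A generic sketch of that pattern under the new naming (the foo_* identifiers are placeholders, and "dmaengine.h" is the drivers/dma-internal header that provides dma_cookie_status()):

#include <linux/dmaengine.h>
#include "dmaengine.h"			/* drivers/dma private cookie helpers */

static void foo_slot_cleanup(struct dma_chan *chan)
{
	/* driver-specific reaping of finished descriptors elided */
}

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* cookie already retired */
		return ret;

	/* Not done yet: clean up completed slots, then check once more. */
	foo_slot_cleanup(chan);

	return dma_cookie_status(chan, cookie, txstate);
}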