aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/ppc4xx
diff options
context:
space:
mode:
authorVinod Koul <vinod.koul@intel.com>2013-11-16 01:24:17 -0500
committerVinod Koul <vinod.koul@intel.com>2013-11-16 01:32:36 -0500
commitdf12a3178d340319b1955be6b973a4eb84aff754 (patch)
tree2b9c68f8a6c299d1e5a4026c60117b5c00d46008 /drivers/dma/ppc4xx
parent2f986ec6fa57a5dcf77f19f5f0d44b1f680a100f (diff)
parent82a1402eaee5dab1f3ab2d5aa4c316451374c5af (diff)
Merge commit 'dmaengine-3.13-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine changes from Dan: 1/ Bartlomiej and Dan finalized a rework of the dma address unmap implementation. 2/ In the course of testing 1/, a collection of enhancements to dmatest fell out — notably basic performance statistics, and fixed/enhanced test control through new module parameters 'run', 'wait', 'noverify', and 'verbose'. Thanks to Andriy and Linus for their review. 3/ Testing the raid-related corner cases of 1/ triggered bugs in the recently added 16-source operation support in the ioatdma driver. 4/ Some minor fixes/cleanups to mv_xor and ioatdma. Conflicts: drivers/dma/dmatest.c Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/ppc4xx')
-rw-r--r--  drivers/dma/ppc4xx/adma.c  270
1 file changed, 1 insertion(+), 269 deletions(-)
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 60e02ae38b04..3005938ba47a 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -802,218 +802,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
802} 802}
803 803
804/** 804/**
805 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
806 */
807static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
808 struct ppc440spe_adma_chan *chan, int src_idx)
809{
810 struct dma_cdb *dma_hw_desc;
811 struct xor_cb *xor_hw_desc;
812
813 switch (chan->device->id) {
814 case PPC440SPE_DMA0_ID:
815 case PPC440SPE_DMA1_ID:
816 dma_hw_desc = desc->hw_desc;
817 /* May have 0, 1, 2, or 3 sources */
818 switch (dma_hw_desc->opc) {
819 case DMA_CDB_OPC_NO_OP:
820 case DMA_CDB_OPC_DFILL128:
821 return 0;
822 case DMA_CDB_OPC_DCHECK128:
823 if (unlikely(src_idx)) {
824 printk(KERN_ERR "%s: try to get %d source for"
825 " DCHECK128\n", __func__, src_idx);
826 BUG();
827 }
828 return le32_to_cpu(dma_hw_desc->sg1l);
829 case DMA_CDB_OPC_MULTICAST:
830 case DMA_CDB_OPC_MV_SG1_SG2:
831 if (unlikely(src_idx > 2)) {
832 printk(KERN_ERR "%s: try to get %d source from"
833 " DMA descr\n", __func__, src_idx);
834 BUG();
835 }
836 if (src_idx) {
837 if (le32_to_cpu(dma_hw_desc->sg1u) &
838 DMA_CUED_XOR_WIN_MSK) {
839 u8 region;
840
841 if (src_idx == 1)
842 return le32_to_cpu(
843 dma_hw_desc->sg1l) +
844 desc->unmap_len;
845
846 region = (le32_to_cpu(
847 dma_hw_desc->sg1u)) >>
848 DMA_CUED_REGION_OFF;
849
850 region &= DMA_CUED_REGION_MSK;
851 switch (region) {
852 case DMA_RXOR123:
853 return le32_to_cpu(
854 dma_hw_desc->sg1l) +
855 (desc->unmap_len << 1);
856 case DMA_RXOR124:
857 return le32_to_cpu(
858 dma_hw_desc->sg1l) +
859 (desc->unmap_len * 3);
860 case DMA_RXOR125:
861 return le32_to_cpu(
862 dma_hw_desc->sg1l) +
863 (desc->unmap_len << 2);
864 default:
865 printk(KERN_ERR
866 "%s: try to"
867 " get src3 for region %02x"
868 "PPC440SPE_DESC_RXOR12?\n",
869 __func__, region);
870 BUG();
871 }
872 } else {
873 printk(KERN_ERR
874 "%s: try to get %d"
875 " source for non-cued descr\n",
876 __func__, src_idx);
877 BUG();
878 }
879 }
880 return le32_to_cpu(dma_hw_desc->sg1l);
881 default:
882 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
883 __func__, dma_hw_desc->opc);
884 BUG();
885 }
886 return le32_to_cpu(dma_hw_desc->sg1l);
887 case PPC440SPE_XOR_ID:
888 /* May have up to 16 sources */
889 xor_hw_desc = desc->hw_desc;
890 return xor_hw_desc->ops[src_idx].l;
891 }
892 return 0;
893}
894
895/**
896 * ppc440spe_desc_get_dest_addr - extract the destination address from the
897 * descriptor
898 */
899static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
900 struct ppc440spe_adma_chan *chan, int idx)
901{
902 struct dma_cdb *dma_hw_desc;
903 struct xor_cb *xor_hw_desc;
904
905 switch (chan->device->id) {
906 case PPC440SPE_DMA0_ID:
907 case PPC440SPE_DMA1_ID:
908 dma_hw_desc = desc->hw_desc;
909
910 if (likely(!idx))
911 return le32_to_cpu(dma_hw_desc->sg2l);
912 return le32_to_cpu(dma_hw_desc->sg3l);
913 case PPC440SPE_XOR_ID:
914 xor_hw_desc = desc->hw_desc;
915 return xor_hw_desc->cbtal;
916 }
917 return 0;
918}
919
920/**
921 * ppc440spe_desc_get_src_num - extract the number of source addresses from
922 * the descriptor
923 */
924static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
925 struct ppc440spe_adma_chan *chan)
926{
927 struct dma_cdb *dma_hw_desc;
928 struct xor_cb *xor_hw_desc;
929
930 switch (chan->device->id) {
931 case PPC440SPE_DMA0_ID:
932 case PPC440SPE_DMA1_ID:
933 dma_hw_desc = desc->hw_desc;
934
935 switch (dma_hw_desc->opc) {
936 case DMA_CDB_OPC_NO_OP:
937 case DMA_CDB_OPC_DFILL128:
938 return 0;
939 case DMA_CDB_OPC_DCHECK128:
940 return 1;
941 case DMA_CDB_OPC_MV_SG1_SG2:
942 case DMA_CDB_OPC_MULTICAST:
943 /*
944 * Only for RXOR operations we have more than
945 * one source
946 */
947 if (le32_to_cpu(dma_hw_desc->sg1u) &
948 DMA_CUED_XOR_WIN_MSK) {
949 /* RXOR op, there are 2 or 3 sources */
950 if (((le32_to_cpu(dma_hw_desc->sg1u) >>
951 DMA_CUED_REGION_OFF) &
952 DMA_CUED_REGION_MSK) == DMA_RXOR12) {
953 /* RXOR 1-2 */
954 return 2;
955 } else {
956 /* RXOR 1-2-3/1-2-4/1-2-5 */
957 return 3;
958 }
959 }
960 return 1;
961 default:
962 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
963 __func__, dma_hw_desc->opc);
964 BUG();
965 }
966 case PPC440SPE_XOR_ID:
967 /* up to 16 sources */
968 xor_hw_desc = desc->hw_desc;
969 return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
970 default:
971 BUG();
972 }
973 return 0;
974}
975
976/**
977 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
978 * this descriptor
979 */
980static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
981 struct ppc440spe_adma_chan *chan)
982{
983 struct dma_cdb *dma_hw_desc;
984
985 switch (chan->device->id) {
986 case PPC440SPE_DMA0_ID:
987 case PPC440SPE_DMA1_ID:
988 /* May be 1 or 2 destinations */
989 dma_hw_desc = desc->hw_desc;
990 switch (dma_hw_desc->opc) {
991 case DMA_CDB_OPC_NO_OP:
992 case DMA_CDB_OPC_DCHECK128:
993 return 0;
994 case DMA_CDB_OPC_MV_SG1_SG2:
995 case DMA_CDB_OPC_DFILL128:
996 return 1;
997 case DMA_CDB_OPC_MULTICAST:
998 if (desc->dst_cnt == 2)
999 return 2;
1000 else
1001 return 1;
1002 default:
1003 printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
1004 __func__, dma_hw_desc->opc);
1005 BUG();
1006 }
1007 case PPC440SPE_XOR_ID:
1008 /* Always only 1 destination */
1009 return 1;
1010 default:
1011 BUG();
1012 }
1013 return 0;
1014}
1015
1016/**
1017 * ppc440spe_desc_get_link - get the address of the descriptor that 805 * ppc440spe_desc_get_link - get the address of the descriptor that
1018 * follows this one 806 * follows this one
1019 */ 807 */
@@ -1705,43 +1493,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
1705 } 1493 }
1706} 1494}
1707 1495
1708static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
1709 struct ppc440spe_adma_desc_slot *desc)
1710{
1711 u32 src_cnt, dst_cnt;
1712 dma_addr_t addr;
1713
1714 /*
1715 * get the number of sources & destination
1716 * included in this descriptor and unmap
1717 * them all
1718 */
1719 src_cnt = ppc440spe_desc_get_src_num(desc, chan);
1720 dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
1721
1722 /* unmap destinations */
1723 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
1724 while (dst_cnt--) {
1725 addr = ppc440spe_desc_get_dest_addr(
1726 desc, chan, dst_cnt);
1727 dma_unmap_page(chan->device->dev,
1728 addr, desc->unmap_len,
1729 DMA_FROM_DEVICE);
1730 }
1731 }
1732
1733 /* unmap sources */
1734 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
1735 while (src_cnt--) {
1736 addr = ppc440spe_desc_get_src_addr(
1737 desc, chan, src_cnt);
1738 dma_unmap_page(chan->device->dev,
1739 addr, desc->unmap_len,
1740 DMA_TO_DEVICE);
1741 }
1742 }
1743}
1744
1745/** 1496/**
1746 * ppc440spe_adma_run_tx_complete_actions - call functions to be called 1497 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
1747 * upon completion 1498 * upon completion
@@ -1765,26 +1516,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
1765 desc->async_tx.callback( 1516 desc->async_tx.callback(
1766 desc->async_tx.callback_param); 1517 desc->async_tx.callback_param);
1767 1518
1768 /* unmap dma addresses 1519 dma_descriptor_unmap(&desc->async_tx);
1769 * (unmap_single vs unmap_page?)
1770 *
1771 * actually, ppc's dma_unmap_page() functions are empty, so
1772 * the following code is just for the sake of completeness
1773 */
1774 if (chan && chan->needs_unmap && desc->group_head &&
1775 desc->unmap_len) {
1776 struct ppc440spe_adma_desc_slot *unmap =
1777 desc->group_head;
1778 /* assume 1 slot per op always */
1779 u32 slot_count = unmap->slot_cnt;
1780
1781 /* Run through the group list and unmap addresses */
1782 for (i = 0; i < slot_count; i++) {
1783 BUG_ON(!unmap);
1784 ppc440spe_adma_unmap(chan, unmap);
1785 unmap = unmap->hw_next;
1786 }
1787 }
1788 } 1520 }
1789 1521
1790 /* run dependent operations */ 1522 /* run dependent operations */