author | Nicolas Ferre <nicolas.ferre@atmel.com> | 2014-11-19 06:11:30 -0500 |
committer | Nicolas Ferre <nicolas.ferre@atmel.com> | 2014-11-19 06:11:30 -0500 |
commit | 51f46e5bc326972f623cad7ac9dadc3cf8159203 (patch) | |
tree | 7a43ce37027d2cb8ecce2eb43b4768a62ff6418c | |
parent | c080d13c1a09d3afccb594e9b2be28940d4b0ef5 (diff) | |
parent | fef4cbf2ab830fcd695d892927386ad9ccc46339 (diff) |
Merge branch 'topic/at_xdmac' of git://git.infradead.org/users/vkoul/slave-dma into at91-3.19-dt2
-rw-r--r-- | Documentation/devicetree/bindings/dma/atmel-xdma.txt | 54
-rw-r--r-- | MAINTAINERS | 7
-rw-r--r-- | drivers/dma/Kconfig | 7
-rw-r--r-- | drivers/dma/Makefile | 1
-rw-r--r-- | drivers/dma/at_xdmac.c | 1524
-rw-r--r-- | include/dt-bindings/dma/at91.h | 25
6 files changed, 1618 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
new file mode 100644
index 000000000000..0eb2b3207e08
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -0,0 +1,54 @@
1 | * Atmel Extensible Direct Memory Access Controller (XDMAC) | ||
2 | |||
3 | * XDMA Controller | ||
4 | Required properties: | ||
5 | - compatible: Should be "atmel,<chip>-dma". | ||
6 | <chip> compatible description: | ||
7 | - sama5d4: first SoC adding the XDMAC | ||
8 | - reg: Should contain DMA registers location and length. | ||
9 | - interrupts: Should contain DMA interrupt. | ||
10 | - #dma-cells: Must be <1>, used to represent the number of integer cells in | ||
11 | the dmas property of client devices. | ||
12 | - The 1st cell specifies the channel configuration register: | ||
13 | - bit 13: SIF, source interface identifier, used to get the memory | ||
14 | interface identifier, | ||
15 | - bit 14: DIF, destination interface identifier, used to get the peripheral | ||
16 | interface identifier, | ||
17 | - bit 30-24: PERID, peripheral identifier. | ||
18 | |||
19 | Example: | ||
20 | |||
21 | dma1: dma-controller@f0004000 { | ||
22 | compatible = "atmel,sama5d4-dma"; | ||
23 | reg = <0xf0004000 0x200>; | ||
24 | interrupts = <50 4 0>; | ||
25 | #dma-cells = <1>; | ||
26 | }; | ||
27 | |||
28 | |||
29 | * DMA clients | ||
30 | DMA clients connected to the Atmel XDMA controller must use the format | ||
31 | described in the dma.txt file, using a one-cell specifier for each channel ||
32 | (i.e. a phandle plus one configuration cell). The two cells in order are: ||
33 | 1. A phandle pointing to the DMA controller. | ||
34 | 2. Channel configuration register. Configurable fields are: | ||
35 | - bit 13: SIF, source interface identifier, used to get the memory | ||
36 | interface identifier, | ||
37 | - bit 14: DIF, destination interface identifier, used to get the peripheral | ||
38 | interface identifier, | ||
39 | - bit 30-24: PERID, peripheral identifier. | ||
40 | |||
41 | Example: | ||
42 | |||
43 | i2c2: i2c@f8024000 { | ||
44 | compatible = "atmel,at91sam9x5-i2c"; | ||
45 | reg = <0xf8024000 0x4000>; | ||
46 | interrupts = <34 4 6>; | ||
47 | dmas = <&dma1 | ||
48 | (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ||
49 | | AT91_XDMAC_DT_PERID(6))>, | ||
50 | <&dma1 | ||
51 | (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) | ||
52 | | AT91_XDMAC_DT_PERID(7))>; | ||
53 | dma-names = "tx", "rx"; | ||
54 | }; | ||
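The examples above rely on helper macros from include/dt-bindings/dma/at91.h, which this commit adds but which is not shown in this excerpt. The pack macros (AT91_XDMAC_DT_MEM_IF, AT91_XDMAC_DT_PER_IF, AT91_XDMAC_DT_PERID) appear in the i2c example, and the matching GET accessors are used by the driver's xlate callback further down; the *_MASK/*_OFFSET helper names below are an assumption, but the bit layout follows the binding text (bit 13 = SIF, bit 14 = DIF, bits 30-24 = PERID). A minimal sketch:

```c
/* Sketch of include/dt-bindings/dma/at91.h, inferred from the bit layout
 * documented above. The GET_* accessors mirror the pack macros and are
 * what at_xdmac_xlate() uses to decode the single dmas cell. */
#define AT91_XDMAC_DT_MEM_IF_MASK	(0x1)
#define AT91_XDMAC_DT_MEM_IF_OFFSET	(13)
#define AT91_XDMAC_DT_MEM_IF(mem_if)	(((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \
					 << AT91_XDMAC_DT_MEM_IF_OFFSET)
#define AT91_XDMAC_DT_GET_MEM_IF(cfg)	(((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \
					 & AT91_XDMAC_DT_MEM_IF_MASK)

#define AT91_XDMAC_DT_PER_IF_MASK	(0x1)
#define AT91_XDMAC_DT_PER_IF_OFFSET	(14)
#define AT91_XDMAC_DT_PER_IF(per_if)	(((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \
					 << AT91_XDMAC_DT_PER_IF_OFFSET)
#define AT91_XDMAC_DT_GET_PER_IF(cfg)	(((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \
					 & AT91_XDMAC_DT_PER_IF_MASK)

#define AT91_XDMAC_DT_PERID_MASK	(0x7f)
#define AT91_XDMAC_DT_PERID_OFFSET	(24)
#define AT91_XDMAC_DT_PERID(perid)	(((perid) & AT91_XDMAC_DT_PERID_MASK) \
					 << AT91_XDMAC_DT_PERID_OFFSET)
#define AT91_XDMAC_DT_GET_PERID(cfg)	(((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
					 & AT91_XDMAC_DT_PERID_MASK)
```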
diff --git a/MAINTAINERS b/MAINTAINERS
index 3c6427190be2..fd3771cba0c9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1713,6 +1713,13 @@ F: drivers/dma/at_hdmac.c
1713 | F: drivers/dma/at_hdmac_regs.h | 1713 | F: drivers/dma/at_hdmac_regs.h |
1714 | F: include/linux/platform_data/dma-atmel.h | 1714 | F: include/linux/platform_data/dma-atmel.h |
1715 | 1715 | ||
1716 | ATMEL XDMA DRIVER | ||
1717 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | ||
1718 | L: linux-arm-kernel@lists.infradead.org | ||
1719 | L: dmaengine@vger.kernel.org | ||
1720 | S: Supported | ||
1721 | F: drivers/dma/at_xdmac.c | ||
1722 | |||
1716 | ATMEL I2C DRIVER | 1723 | ATMEL I2C DRIVER |
1717 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | 1724 | M: Ludovic Desroches <ludovic.desroches@atmel.com> |
1718 | L: linux-i2c@vger.kernel.org | 1725 | L: linux-i2c@vger.kernel.org |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..8b6fb0f11007 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
107 | help | 107 | help |
108 | Support the Atmel AHB DMA controller. | 108 | Support the Atmel AHB DMA controller. |
109 | 109 | ||
110 | config AT_XDMAC | ||
111 | tristate "Atmel XDMA support" | ||
112 | depends on ARCH_AT91 | ||
113 | select DMA_ENGINE | ||
114 | help | ||
115 | Support the Atmel XDMA controller. | ||
116 | |||
110 | config FSL_DMA | 117 | config FSL_DMA |
111 | tristate "Freescale Elo series DMA support" | 118 | tristate "Freescale Elo series DMA support" |
112 | depends on FSL_SOC | 119 | depends on FSL_SOC |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
16 | obj-$(CONFIG_MV_XOR) += mv_xor.o | 16 | obj-$(CONFIG_MV_XOR) += mv_xor.o |
17 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ | 17 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ |
18 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 18 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
19 | obj-$(CONFIG_AT_XDMAC) += at_xdmac.o | ||
19 | obj-$(CONFIG_MX3_IPU) += ipu/ | 20 | obj-$(CONFIG_MX3_IPU) += ipu/ |
20 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 21 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
21 | obj-$(CONFIG_SH_DMAE_BASE) += sh/ | 22 | obj-$(CONFIG_SH_DMAE_BASE) += sh/ |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..b60d77a22df6
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1524 @@
1 | /* | ||
2 | * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems) | ||
3 | * | ||
4 | * Copyright (C) 2014 Atmel Corporation | ||
5 | * | ||
6 | * Author: Ludovic Desroches <ludovic.desroches@atmel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License version 2 as published by | ||
10 | * the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <asm/barrier.h> | ||
22 | #include <dt-bindings/dma/at91.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/dmaengine.h> | ||
25 | #include <linux/dmapool.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/irq.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/of_dma.h> | ||
31 | #include <linux/of_platform.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/pm.h> | ||
34 | |||
35 | #include "dmaengine.h" | ||
36 | |||
37 | /* Global registers */ | ||
38 | #define AT_XDMAC_GTYPE 0x00 /* Global Type Register */ | ||
39 | #define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */ | ||
40 | #define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */ | ||
41 | #define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */ | ||
42 | #define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */ | ||
43 | #define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */ | ||
44 | #define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */ | ||
45 | #define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */ | ||
46 | #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */ | ||
47 | #define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */ | ||
48 | #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */ | ||
49 | #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */ | ||
50 | #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */ | ||
51 | #define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */ | ||
52 | #define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */ | ||
53 | #define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */ | ||
54 | #define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */ | ||
55 | #define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */ | ||
56 | #define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */ | ||
57 | #define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */ | ||
58 | #define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */ | ||
59 | |||
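The AT_XDMAC_GTYPE field extractors above are how the driver discovers the controller geometry (channel count, FIFO size, request lines) at probe time; the probe function itself lies beyond this excerpt. A minimal illustrative fragment, assuming a mapped register base and <linux/io.h>/<linux/printk.h>:

```c
/* Illustrative fragment (not part of the patch): decoding AT_XDMAC_GTYPE.
 * NB_CH and NB_REQ fields encode "count minus one", hence the +1 folded
 * into the accessors above; FIFO_SZ is a plain byte count. */
static void at_xdmac_report_geometry(void __iomem *regs)
{
	u32 gtype = readl_relaxed(regs + AT_XDMAC_GTYPE);

	pr_info("xdmac: %u channels, %u-byte FIFO, %u peripheral requests\n",
		AT_XDMAC_NB_CH(gtype), AT_XDMAC_FIFO_SZ(gtype),
		AT_XDMAC_NB_REQ(gtype));
}
```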
60 | /* Channel relative registers offsets */ | ||
61 | #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */ | ||
62 | #define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */ | ||
63 | #define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */ | ||
64 | #define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */ | ||
65 | #define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */ | ||
66 | #define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */ | ||
67 | #define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */ | ||
68 | #define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */ | ||
69 | #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */ | ||
70 | #define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */ | ||
71 | #define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */ | ||
72 | #define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */ | ||
73 | #define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */ | ||
74 | #define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */ | ||
75 | #define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */ | ||
76 | #define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */ | ||
77 | #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */ | ||
78 | #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */ | ||
79 | #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */ | ||
80 | #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */ | ||
81 | #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */ | ||
82 | #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */ | ||
83 | #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */ | ||
84 | #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */ | ||
85 | #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */ | ||
86 | #define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */ | ||
87 | #define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */ | ||
88 | #define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */ | ||
89 | #define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */ | ||
90 | #define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */ | ||
91 | #define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */ | ||
92 | #define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */ | ||
93 | #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */ | ||
94 | #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */ | ||
95 | #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */ | ||
96 | #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */ | ||
97 | #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */ | ||
98 | #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */ | ||
99 | #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ | ||
100 | #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ | ||
101 | #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ | ||
102 | #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ | ||
103 | #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ | ||
104 | #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ | ||
105 | #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */ | ||
106 | #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */ | ||
107 | #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */ | ||
108 | #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */ | ||
109 | #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */ | ||
110 | #define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */ | ||
111 | #define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */ | ||
112 | #define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1) | ||
113 | #define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1) | ||
114 | #define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1) | ||
115 | #define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1) | ||
116 | #define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1) | ||
117 | #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */ | ||
118 | #define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4) | ||
119 | #define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4) | ||
120 | #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */ | ||
121 | #define AT_XDMAC_CC_PROT_SEC (0x0 << 5) | ||
122 | #define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5) | ||
123 | #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */ | ||
124 | #define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6) | ||
125 | #define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6) | ||
126 | #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */ | ||
127 | #define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7) | ||
128 | #define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7) | ||
129 | #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */ | ||
130 | #define AT_XDMAC_CC_DWIDTH_OFFSET 11 | ||
131 | #define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET) | ||
132 | #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */ | ||
133 | #define AT_XDMAC_CC_DWIDTH_BYTE 0x0 | ||
134 | #define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1 | ||
135 | #define AT_XDMAC_CC_DWIDTH_WORD 0x2 | ||
136 | #define AT_XDMAC_CC_DWIDTH_DWORD 0x3 | ||
137 | #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */ | ||
138 | #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */ | ||
139 | #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */ | ||
140 | #define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16) | ||
141 | #define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16) | ||
142 | #define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16) | ||
143 | #define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16) | ||
144 | #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */ ||
145 | #define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18) | ||
146 | #define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18) | ||
147 | #define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18) | ||
148 | #define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18) | ||
149 | #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */ | ||
150 | #define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21) | ||
151 | #define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21) | ||
152 | #define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */ | ||
153 | #define AT_XDMAC_CC_RDIP_DONE (0x0 << 22) | ||
154 | #define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22) | ||
155 | #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ | ||
156 | #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) | ||
157 | #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) | ||
158 | #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ ||
159 | #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ | ||
160 | #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ | ||
161 | #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ | ||
162 | |||
163 | #define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */ | ||
164 | |||
165 | /* Microblock control members */ | ||
166 | #define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */ | ||
167 | #define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */ | ||
168 | #define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */ | ||
169 | #define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */ | ||
170 | #define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */ | ||
171 | #define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */ | ||
172 | #define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */ | ||
173 | #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ | ||
174 | |||
175 | #define AT_XDMAC_MAX_CHAN 0x20 | ||
176 | |||
177 | enum atc_status { | ||
178 | AT_XDMAC_CHAN_IS_CYCLIC = 0, | ||
179 | AT_XDMAC_CHAN_IS_PAUSED, | ||
180 | }; | ||
181 | |||
182 | /* ----- Channels ----- */ | ||
183 | struct at_xdmac_chan { | ||
184 | struct dma_chan chan; | ||
185 | void __iomem *ch_regs; | ||
186 | u32 mask; /* Channel Mask */ | ||
187 | u32 cfg[3]; /* Channel Configuration Register */ | ||
188 | #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */ | ||
189 | #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predefined dev to mem channel conf */ ||
190 | #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predefined mem to dev channel conf */ ||
191 | u8 perid; /* Peripheral ID */ | ||
192 | u8 perif; /* Peripheral Interface */ | ||
193 | u8 memif; /* Memory Interface */ | ||
194 | u32 per_src_addr; | ||
195 | u32 per_dst_addr; | ||
196 | u32 save_cim; | ||
197 | u32 save_cnda; | ||
198 | u32 save_cndc; | ||
199 | unsigned long status; | ||
200 | struct tasklet_struct tasklet; | ||
201 | |||
202 | spinlock_t lock; | ||
203 | |||
204 | struct list_head xfers_list; | ||
205 | struct list_head free_descs_list; | ||
206 | }; | ||
207 | |||
208 | |||
209 | /* ----- Controller ----- */ | ||
210 | struct at_xdmac { | ||
211 | struct dma_device dma; | ||
212 | void __iomem *regs; | ||
213 | int irq; | ||
214 | struct clk *clk; | ||
215 | u32 save_gim; | ||
216 | u32 save_gs; | ||
217 | struct dma_pool *at_xdmac_desc_pool; | ||
218 | struct at_xdmac_chan chan[0]; | ||
219 | }; | ||
220 | |||
221 | |||
222 | /* ----- Descriptors ----- */ | ||
223 | |||
224 | /* Linked List Descriptor */ | ||
225 | struct at_xdmac_lld { | ||
226 | dma_addr_t mbr_nda; /* Next Descriptor Member */ | ||
227 | u32 mbr_ubc; /* Microblock Control Member */ | ||
228 | dma_addr_t mbr_sa; /* Source Address Member */ | ||
229 | dma_addr_t mbr_da; /* Destination Address Member */ | ||
230 | u32 mbr_cfg; /* Configuration Register */ | ||
231 | }; | ||
232 | |||
233 | |||
234 | struct at_xdmac_desc { | ||
235 | struct at_xdmac_lld lld; | ||
236 | enum dma_transfer_direction direction; | ||
237 | struct dma_async_tx_descriptor tx_dma_desc; | ||
238 | struct list_head desc_node; | ||
239 | /* Following members are only used by the first descriptor */ | ||
240 | bool active_xfer; | ||
241 | unsigned int xfer_size; | ||
242 | struct list_head descs_list; | ||
243 | struct list_head xfer_node; | ||
244 | }; | ||
245 | |||
246 | static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) | ||
247 | { | ||
248 | return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); | ||
249 | } | ||
250 | |||
251 | #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg)) | ||
252 | #define at_xdmac_write(atxdmac, reg, value) \ | ||
253 | writel_relaxed((value), (atxdmac)->regs + (reg)) | ||
254 | |||
255 | #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg)) | ||
256 | #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg)) | ||
257 | |||
258 | static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan) | ||
259 | { | ||
260 | return container_of(dchan, struct at_xdmac_chan, chan); | ||
261 | } | ||
262 | |||
263 | static struct device *chan2dev(struct dma_chan *chan) | ||
264 | { | ||
265 | return &chan->dev->device; | ||
266 | } | ||
267 | |||
268 | static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev) | ||
269 | { | ||
270 | return container_of(ddev, struct at_xdmac, dma); | ||
271 | } | ||
272 | |||
273 | static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd) | ||
274 | { | ||
275 | return container_of(txd, struct at_xdmac_desc, tx_dma_desc); | ||
276 | } | ||
277 | |||
278 | static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan) | ||
279 | { | ||
280 | return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); | ||
281 | } | ||
282 | |||
283 | static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan) | ||
284 | { | ||
285 | return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); | ||
286 | } | ||
287 | |||
288 | static inline int at_xdmac_csize(u32 maxburst) | ||
289 | { | ||
290 | int csize; | ||
291 | |||
292 | csize = ffs(maxburst) - 1; | ||
293 | if (csize > 4) | ||
294 | csize = -EINVAL; | ||
295 | |||
296 | return csize; | ||
297 | }; | ||
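at_xdmac_csize() maps a dmaengine maxburst value (counted in transfers) to the hardware CSIZE encoding: the chunk-size field stores log2 of the burst, so only bursts up to 16 are representable and anything larger is rejected. A standalone check of that mapping, as an illustration (userspace ffs() from <strings.h>):

```c
/* Standalone illustration of the maxburst -> CSIZE mapping used above:
 * 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4; larger bursts are rejected. */
#include <assert.h>
#include <strings.h>	/* ffs() */

static int csize_of(unsigned int maxburst)
{
	int csize = ffs(maxburst) - 1;
	return csize > 4 ? -1 : csize;
}

int main(void)
{
	assert(csize_of(1) == 0);
	assert(csize_of(16) == 4);
	assert(csize_of(32) == -1);	/* beyond the 16-transfer chunk max */
	return 0;
}
```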
298 | |||
299 | static inline u8 at_xdmac_get_dwidth(u32 cfg) | ||
300 | { | ||
301 | return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET; | ||
302 | }; | ||
303 | |||
304 | static unsigned int init_nr_desc_per_channel = 64; | ||
305 | module_param(init_nr_desc_per_channel, uint, 0644); | ||
306 | MODULE_PARM_DESC(init_nr_desc_per_channel, | ||
307 | "initial descriptors per channel (default: 64)"); | ||
308 | |||
309 | |||
310 | static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan) | ||
311 | { | ||
312 | return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask; | ||
313 | } | ||
314 | |||
315 | static void at_xdmac_off(struct at_xdmac *atxdmac) | ||
316 | { | ||
317 | at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); | ||
318 | |||
319 | /* Wait until all channels are disabled. */ ||
320 | while (at_xdmac_read(atxdmac, AT_XDMAC_GS)) | ||
321 | cpu_relax(); | ||
322 | |||
323 | at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); | ||
324 | } | ||
325 | |||
326 | /* Must be called with the channel lock held. */ ||
327 | static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, | ||
328 | struct at_xdmac_desc *first) | ||
329 | { | ||
330 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | ||
331 | u32 reg; | ||
332 | |||
333 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); | ||
334 | |||
335 | if (at_xdmac_chan_is_enabled(atchan)) | ||
336 | return; | ||
337 | |||
338 | /* Set transfer as active to not try to start it again. */ | ||
339 | first->active_xfer = true; | ||
340 | |||
341 | /* Tell xdmac where to get the first descriptor. */ | ||
342 | reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) | ||
343 | | AT_XDMAC_CNDA_NDAIF(atchan->memif); | ||
344 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg); | ||
345 | |||
346 | /* | ||
347 | * When doing memory to memory transfer we need to use the next | ||
348 | * descriptor view 2 since some fields of the configuration register | ||
349 | * depend on transfer size and src/dest addresses. | ||
350 | */ | ||
351 | if (is_slave_direction(first->direction)) { | ||
352 | reg = AT_XDMAC_CNDC_NDVIEW_NDV1; | ||
353 | if (first->direction == DMA_MEM_TO_DEV) | ||
354 | atchan->cfg[AT_XDMAC_CUR_CFG] = | ||
355 | atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; | ||
356 | else | ||
357 | atchan->cfg[AT_XDMAC_CUR_CFG] = | ||
358 | atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; | ||
359 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, | ||
360 | atchan->cfg[AT_XDMAC_CUR_CFG]); | ||
361 | } else { | ||
362 | /* | ||
363 | * No need to write AT_XDMAC_CC reg, it will be done when the | ||
364 | * descriptor is fetched. ||
365 | */ | ||
366 | reg = AT_XDMAC_CNDC_NDVIEW_NDV2; | ||
367 | } | ||
368 | |||
369 | reg |= AT_XDMAC_CNDC_NDDUP | ||
370 | | AT_XDMAC_CNDC_NDSUP | ||
371 | | AT_XDMAC_CNDC_NDE; | ||
372 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg); | ||
373 | |||
374 | dev_vdbg(chan2dev(&atchan->chan), | ||
375 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | ||
376 | __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), | ||
377 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | ||
378 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | ||
379 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | ||
380 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | ||
381 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | ||
382 | |||
383 | at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff); | ||
384 | reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE; | ||
385 | /* | ||
386 | * There is no end of list when doing cyclic DMA, so we need to get ||
387 | * an interrupt after each period. ||
388 | */ | ||
389 | if (at_xdmac_chan_is_cyclic(atchan)) | ||
390 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, | ||
391 | reg | AT_XDMAC_CIE_BIE); | ||
392 | else | ||
393 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, | ||
394 | reg | AT_XDMAC_CIE_LIE); | ||
395 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); | ||
396 | dev_vdbg(chan2dev(&atchan->chan), | ||
397 | "%s: enable channel (0x%08x)\n", __func__, atchan->mask); | ||
398 | wmb(); | ||
399 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); | ||
400 | |||
401 | dev_vdbg(chan2dev(&atchan->chan), | ||
402 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | ||
403 | __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), | ||
404 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | ||
405 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | ||
406 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | ||
407 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | ||
408 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | ||
409 | |||
410 | } | ||
411 | |||
412 | static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) | ||
413 | { | ||
414 | struct at_xdmac_desc *desc = txd_to_at_desc(tx); | ||
415 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); | ||
416 | dma_cookie_t cookie; | ||
417 | |||
418 | spin_lock_bh(&atchan->lock); | ||
419 | cookie = dma_cookie_assign(tx); | ||
420 | |||
421 | dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", | ||
422 | __func__, atchan, desc); | ||
423 | list_add_tail(&desc->xfer_node, &atchan->xfers_list); | ||
424 | if (list_is_singular(&atchan->xfers_list)) | ||
425 | at_xdmac_start_xfer(atchan, desc); | ||
426 | |||
427 | spin_unlock_bh(&atchan->lock); | ||
428 | return cookie; | ||
429 | } | ||
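at_xdmac_tx_submit() only queues the descriptor on xfers_list; the hardware is kicked immediately only when the list was empty, otherwise the transfer is picked up later by at_xdmac_advance_work(). From a client driver's point of view the usual dmaengine sequence applies. A minimal client-side sketch using only generic dmaengine calls; the function name and buffer are illustrative, not part of the patch:

```c
/* Sketch of a client submission that ends up in at_xdmac_tx_submit(). */
#include <linux/dmaengine.h>

static int example_submit(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);	/* calls at_xdmac_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* calls at_xdmac_issue_pending() */
	return 0;
}
```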
430 | |||
431 | static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, | ||
432 | gfp_t gfp_flags) | ||
433 | { | ||
434 | struct at_xdmac_desc *desc; | ||
435 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); | ||
436 | dma_addr_t phys; | ||
437 | |||
438 | desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); | ||
439 | if (desc) { | ||
440 | memset(desc, 0, sizeof(*desc)); | ||
441 | INIT_LIST_HEAD(&desc->descs_list); | ||
442 | dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); | ||
443 | desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; | ||
444 | desc->tx_dma_desc.phys = phys; | ||
445 | } | ||
446 | |||
447 | return desc; | ||
448 | } | ||
449 | |||
450 | /* Call must be protected by lock. */ | ||
451 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | ||
452 | { | ||
453 | struct at_xdmac_desc *desc; | ||
454 | |||
455 | if (list_empty(&atchan->free_descs_list)) { | ||
456 | desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); | ||
457 | } else { | ||
458 | desc = list_first_entry(&atchan->free_descs_list, | ||
459 | struct at_xdmac_desc, desc_node); | ||
460 | list_del(&desc->desc_node); | ||
461 | desc->active_xfer = false; | ||
462 | } | ||
463 | |||
464 | return desc; | ||
465 | } | ||
466 | |||
467 | static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, | ||
468 | struct of_dma *of_dma) | ||
469 | { | ||
470 | struct at_xdmac *atxdmac = of_dma->of_dma_data; | ||
471 | struct at_xdmac_chan *atchan; | ||
472 | struct dma_chan *chan; | ||
473 | struct device *dev = atxdmac->dma.dev; | ||
474 | |||
475 | if (dma_spec->args_count != 1) { | ||
476 | dev_err(dev, "dma phandler args: bad number of args\n"); | ||
477 | return NULL; | ||
478 | } | ||
479 | |||
480 | chan = dma_get_any_slave_channel(&atxdmac->dma); | ||
481 | if (!chan) { | ||
482 | dev_err(dev, "can't get a dma channel\n"); | ||
483 | return NULL; | ||
484 | } | ||
485 | |||
486 | atchan = to_at_xdmac_chan(chan); | ||
487 | atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); | ||
488 | atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); | ||
489 | atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); | ||
490 | dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n", | ||
491 | atchan->memif, atchan->perif, atchan->perid); | ||
492 | |||
493 | return chan; | ||
494 | } | ||
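at_xdmac_xlate() unpacks the single DT cell that the binding example builds with the AT91_XDMAC_DT_* macros. A standalone arithmetic check that the cell round-trips through the documented bit layout (local macros stand in for the header; values match the i2c2 "rx" entry above):

```c
/* Standalone check: the dmas cell from the binding example decodes back
 * into memif/perif/perid per the documented bit layout. */
#include <assert.h>

#define MEM_IF(x)	(((x) & 0x1) << 13)
#define PER_IF(x)	(((x) & 0x1) << 14)
#define PERID(x)	(((x) & 0x7f) << 24)

int main(void)
{
	unsigned int cfg = MEM_IF(0) | PER_IF(1) | PERID(7); /* i2c2 "rx" */

	assert(((cfg >> 13) & 0x1) == 0);	/* atchan->memif */
	assert(((cfg >> 14) & 0x1) == 1);	/* atchan->perif */
	assert(((cfg >> 24) & 0x7f) == 7);	/* atchan->perid */
	return 0;
}
```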
495 | |||
496 | static int at_xdmac_set_slave_config(struct dma_chan *chan, | ||
497 | struct dma_slave_config *sconfig) | ||
498 | { | ||
499 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
500 | u8 dwidth; | ||
501 | int csize; | ||
502 | |||
503 | atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = | ||
504 | AT91_XDMAC_DT_PERID(atchan->perid) | ||
505 | | AT_XDMAC_CC_DAM_INCREMENTED_AM | ||
506 | | AT_XDMAC_CC_SAM_FIXED_AM | ||
507 | | AT_XDMAC_CC_DIF(atchan->memif) | ||
508 | | AT_XDMAC_CC_SIF(atchan->perif) | ||
509 | | AT_XDMAC_CC_SWREQ_HWR_CONNECTED | ||
510 | | AT_XDMAC_CC_DSYNC_PER2MEM | ||
511 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | ||
512 | | AT_XDMAC_CC_TYPE_PER_TRAN; | ||
513 | csize = at_xdmac_csize(sconfig->src_maxburst); | ||
514 | if (csize < 0) { | ||
515 | dev_err(chan2dev(chan), "invalid src maxburst value\n"); | ||
516 | return -EINVAL; | ||
517 | } | ||
518 | atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); | ||
519 | dwidth = ffs(sconfig->src_addr_width) - 1; | ||
520 | atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); | ||
521 | |||
522 | |||
523 | atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = | ||
524 | AT91_XDMAC_DT_PERID(atchan->perid) | ||
525 | | AT_XDMAC_CC_DAM_FIXED_AM | ||
526 | | AT_XDMAC_CC_SAM_INCREMENTED_AM | ||
527 | | AT_XDMAC_CC_DIF(atchan->perif) | ||
528 | | AT_XDMAC_CC_SIF(atchan->memif) | ||
529 | | AT_XDMAC_CC_SWREQ_HWR_CONNECTED | ||
530 | | AT_XDMAC_CC_DSYNC_MEM2PER | ||
531 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | ||
532 | | AT_XDMAC_CC_TYPE_PER_TRAN; | ||
533 | csize = at_xdmac_csize(sconfig->dst_maxburst); | ||
534 | if (csize < 0) { | ||
535 | dev_err(chan2dev(chan), "invalid src maxburst value\n"); | ||
536 | return -EINVAL; | ||
537 | } | ||
538 | atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); | ||
539 | dwidth = ffs(sconfig->dst_addr_width) - 1; | ||
540 | atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); | ||
541 | |||
542 | /* Src and dst addr are needed to configure the link list descriptor. */ | ||
543 | atchan->per_src_addr = sconfig->src_addr; | ||
544 | atchan->per_dst_addr = sconfig->dst_addr; | ||
545 | |||
546 | dev_dbg(chan2dev(chan), | ||
547 | "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", | ||
548 | __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], | ||
549 | atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], | ||
550 | atchan->per_src_addr, atchan->per_dst_addr); | ||
551 | |||
552 | return 0; | ||
553 | } | ||
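at_xdmac_set_slave_config() consumes a struct dma_slave_config handed in by the client through the DMA_SLAVE_CONFIG control command, precomputing both the dev-to-mem and mem-to-dev channel configurations. A hedged client-side sketch (the FIFO address, widths and bursts are illustrative):

```c
/* Sketch: how a peripheral driver feeds the configuration consumed by
 * at_xdmac_set_slave_config(). With these values: dwidth = ffs(4)-1 = 2
 * (word) and csize = ffs(16)-1 = 4, matching the macros above. */
#include <linux/dmaengine.h>

static int example_config(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo,	/* stored in atchan->per_dst_addr */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};

	return dmaengine_slave_config(chan, &cfg); /* DMA_SLAVE_CONFIG */
}
```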
554 | |||
555 | static struct dma_async_tx_descriptor * | ||
556 | at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
557 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
558 | unsigned long flags, void *context) | ||
559 | { | ||
560 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
561 | struct at_xdmac_desc *first = NULL, *prev = NULL; | ||
562 | struct scatterlist *sg; | ||
563 | int i; | ||
564 | u32 cfg; | ||
565 | unsigned int xfer_size = 0; | ||
566 | |||
567 | if (!sgl) | ||
568 | return NULL; | ||
569 | |||
570 | if (!is_slave_direction(direction)) { | ||
571 | dev_err(chan2dev(chan), "invalid DMA direction\n"); | ||
572 | return NULL; | ||
573 | } | ||
574 | |||
575 | dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n", | ||
576 | __func__, sg_len, | ||
577 | direction == DMA_MEM_TO_DEV ? "to device" : "from device", | ||
578 | flags); | ||
579 | |||
580 | /* Protect the dma_sconfig field, which can be modified by set_slave_config. */ ||
581 | spin_lock_bh(&atchan->lock); | ||
582 | |||
583 | /* Prepare descriptors. */ | ||
584 | for_each_sg(sgl, sg, sg_len, i) { | ||
585 | struct at_xdmac_desc *desc = NULL; | ||
586 | u32 len, mem; | ||
587 | |||
588 | len = sg_dma_len(sg); | ||
589 | mem = sg_dma_address(sg); | ||
590 | if (unlikely(!len)) { | ||
591 | dev_err(chan2dev(chan), "sg data length is zero\n"); | ||
592 | spin_unlock_bh(&atchan->lock); | ||
593 | return NULL; | ||
594 | } | ||
595 | dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", | ||
596 | __func__, i, len, mem); | ||
597 | |||
598 | desc = at_xdmac_get_desc(atchan); | ||
599 | if (!desc) { | ||
600 | dev_err(chan2dev(chan), "can't get descriptor\n"); | ||
601 | if (first) | ||
602 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | ||
603 | spin_unlock_bh(&atchan->lock); | ||
604 | return NULL; | ||
605 | } | ||
606 | |||
607 | /* Linked list descriptor setup. */ | ||
608 | if (direction == DMA_DEV_TO_MEM) { | ||
609 | desc->lld.mbr_sa = atchan->per_src_addr; | ||
610 | desc->lld.mbr_da = mem; | ||
611 | cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; | ||
612 | } else { | ||
613 | desc->lld.mbr_sa = mem; | ||
614 | desc->lld.mbr_da = atchan->per_dst_addr; | ||
615 | cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; | ||
616 | } | ||
617 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ | ||
618 | | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | ||
619 | | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | ||
620 | | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ | ||
621 | | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */ | ||
622 | dev_dbg(chan2dev(chan), | ||
623 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", | ||
624 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); | ||
625 | |||
626 | /* Chain lld. */ | ||
627 | if (prev) { | ||
628 | prev->lld.mbr_nda = desc->tx_dma_desc.phys; | ||
629 | dev_dbg(chan2dev(chan), | ||
630 | "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", | ||
631 | __func__, prev, &prev->lld.mbr_nda); | ||
632 | } | ||
633 | |||
634 | prev = desc; | ||
635 | if (!first) | ||
636 | first = desc; | ||
637 | |||
638 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | ||
639 | __func__, desc, first); | ||
640 | list_add_tail(&desc->desc_node, &first->descs_list); | ||
641 | xfer_size += len; | ||
642 | } | ||
643 | |||
644 | spin_unlock_bh(&atchan->lock); | ||
645 | |||
646 | first->tx_dma_desc.flags = flags; | ||
647 | first->xfer_size = xfer_size; | ||
648 | first->direction = direction; | ||
649 | |||
650 | return &first->tx_dma_desc; | ||
651 | } | ||
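Note that the mbr_ubc microblock length is expressed in data-width units rather than bytes, which is why at_xdmac_prep_slave_sg() divides the segment length by (1 << dwidth). A standalone check of the arithmetic:

```c
/* Standalone check of the microblock-length arithmetic used above:
 * UBLEN counts data-width units, so a 4096-byte segment transferred
 * with word (4-byte) width yields UBLEN = 1024. */
#include <assert.h>

int main(void)
{
	unsigned int dwidth = 2;	/* AT_XDMAC_CC_DWIDTH_WORD */
	unsigned int len = 4096;	/* sg segment length, bytes */
	unsigned int ublen = len / (1u << dwidth);

	assert(ublen == 1024);
	assert(ublen <= 0xFFFFFF);	/* AT_XDMAC_MBR_UBC_UBLEN_MAX */
	return 0;
}
```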
652 | |||
653 | static struct dma_async_tx_descriptor * | ||
654 | at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, | ||
655 | size_t buf_len, size_t period_len, | ||
656 | enum dma_transfer_direction direction, | ||
657 | unsigned long flags) | ||
658 | { | ||
659 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
660 | struct at_xdmac_desc *first = NULL, *prev = NULL; | ||
661 | unsigned int periods = buf_len / period_len; | ||
662 | int i; | ||
663 | u32 cfg; | ||
664 | |||
665 | dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", | ||
666 | __func__, &buf_addr, buf_len, period_len, | ||
667 | direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags); | ||
668 | |||
669 | if (!is_slave_direction(direction)) { | ||
670 | dev_err(chan2dev(chan), "invalid DMA direction\n"); | ||
671 | return NULL; | ||
672 | } | ||
673 | |||
674 | if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { | ||
675 | dev_err(chan2dev(chan), "channel currently used\n"); | ||
676 | return NULL; | ||
677 | } | ||
678 | |||
679 | for (i = 0; i < periods; i++) { | ||
680 | struct at_xdmac_desc *desc = NULL; | ||
681 | |||
682 | spin_lock_bh(&atchan->lock); | ||
683 | desc = at_xdmac_get_desc(atchan); | ||
684 | if (!desc) { | ||
685 | dev_err(chan2dev(chan), "can't get descriptor\n"); | ||
686 | if (first) | ||
687 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | ||
688 | spin_unlock_bh(&atchan->lock); | ||
689 | return NULL; | ||
690 | } | ||
691 | spin_unlock_bh(&atchan->lock); | ||
692 | dev_dbg(chan2dev(chan), | ||
693 | "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", | ||
694 | __func__, desc, &desc->tx_dma_desc.phys); | ||
695 | |||
696 | if (direction == DMA_DEV_TO_MEM) { | ||
697 | desc->lld.mbr_sa = atchan->per_src_addr; | ||
698 | desc->lld.mbr_da = buf_addr + i * period_len; | ||
699 | cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; | ||
700 | } else { | ||
701 | desc->lld.mbr_sa = buf_addr + i * period_len; | ||
702 | desc->lld.mbr_da = atchan->per_dst_addr; | ||
703 | cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; | ||
704 | } | ||
705 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 | ||
706 | | AT_XDMAC_MBR_UBC_NDEN | ||
707 | | AT_XDMAC_MBR_UBC_NSEN | ||
708 | | AT_XDMAC_MBR_UBC_NDE | ||
709 | | period_len >> at_xdmac_get_dwidth(cfg); | ||
710 | |||
711 | dev_dbg(chan2dev(chan), | ||
712 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", | ||
713 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); | ||
714 | |||
715 | /* Chain lld. */ | ||
716 | if (prev) { | ||
717 | prev->lld.mbr_nda = desc->tx_dma_desc.phys; | ||
718 | dev_dbg(chan2dev(chan), | ||
719 | "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", | ||
720 | __func__, prev, &prev->lld.mbr_nda); | ||
721 | } | ||
722 | |||
723 | prev = desc; | ||
724 | if (!first) | ||
725 | first = desc; | ||
726 | |||
727 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | ||
728 | __func__, desc, first); | ||
729 | list_add_tail(&desc->desc_node, &first->descs_list); | ||
730 | } | ||
731 | |||
732 | prev->lld.mbr_nda = first->tx_dma_desc.phys; | ||
733 | dev_dbg(chan2dev(chan), | ||
734 | "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", | ||
735 | __func__, prev, &prev->lld.mbr_nda); | ||
736 | first->tx_dma_desc.flags = flags; | ||
737 | first->xfer_size = buf_len; | ||
738 | first->direction = direction; | ||
739 | |||
740 | return &first->tx_dma_desc; | ||
741 | } | ||
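Cyclic transfers are the continuous audio/capture path: the last lld points back to the first, and since every descriptor sets NDE there is no end of list; the per-block interrupt (BIE) fires the callback once per period. A hedged client sketch using the generic cyclic API (buffer geometry is illustrative):

```c
/* Sketch: preparing a cyclic (e.g. audio ring buffer) transfer that
 * lands in at_xdmac_prep_dma_cyclic(). Sizes are made up. */
#include <linux/dmaengine.h>

static int example_cyclic(struct dma_chan *chan, dma_addr_t buf)
{
	size_t period = 4096, buf_len = 4 * period;	/* 4 periods */
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* txd->callback runs after each period, via at_xdmac_handle_cyclic() */
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
```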
742 | |||
743 | static struct dma_async_tx_descriptor * | ||
744 | at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
745 | size_t len, unsigned long flags) | ||
746 | { | ||
747 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
748 | struct at_xdmac_desc *first = NULL, *prev = NULL; | ||
749 | size_t remaining_size = len, xfer_size = 0, ublen; | ||
750 | dma_addr_t src_addr = src, dst_addr = dest; | ||
751 | u32 dwidth; | ||
752 | /* | ||
753 | * WARNING: We don't know the direction, which means we can't ||
754 | * dynamically set the source and dest interface so we have to use the | ||
755 | * same one. Only interface 0 allows EBI access. Hopefully we can | ||
756 | * access DDR through both ports (at least on SAMA5D4x), so we can use | ||
757 | * the same interface for source and dest, that solves the fact we | ||
758 | * don't know the direction. | ||
759 | */ | ||
760 | u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM | ||
761 | | AT_XDMAC_CC_SAM_INCREMENTED_AM | ||
762 | | AT_XDMAC_CC_DIF(0) | ||
763 | | AT_XDMAC_CC_SIF(0) | ||
764 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | ||
765 | | AT_XDMAC_CC_TYPE_MEM_TRAN; | ||
766 | |||
767 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", | ||
768 | __func__, &src, &dest, len, flags); | ||
769 | |||
770 | if (unlikely(!len)) | ||
771 | return NULL; | ||
772 | |||
773 | /* | ||
774 | * Check address alignment to select the greater data width we can use. | ||
775 | * Some XDMAC implementations don't provide dword transfer, in this | ||
776 | * case selecting dword has the same behavior as selecting word transfers. | ||
777 | */ | ||
778 | if (!((src_addr | dst_addr) & 7)) { | ||
779 | dwidth = AT_XDMAC_CC_DWIDTH_DWORD; | ||
780 | dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__); | ||
781 | } else if (!((src_addr | dst_addr) & 3)) { | ||
782 | dwidth = AT_XDMAC_CC_DWIDTH_WORD; | ||
783 | dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__); | ||
784 | } else if (!((src_addr | dst_addr) & 1)) { | ||
785 | dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD; | ||
786 | dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__); | ||
787 | } else { | ||
788 | dwidth = AT_XDMAC_CC_DWIDTH_BYTE; | ||
789 | dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__); | ||
790 | } | ||
791 | |||
792 | /* Prepare descriptors. */ | ||
793 | while (remaining_size) { | ||
794 | struct at_xdmac_desc *desc = NULL; | ||
795 | |||
796 | dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); | ||
797 | |||
798 | spin_lock_bh(&atchan->lock); | ||
799 | desc = at_xdmac_get_desc(atchan); | ||
800 | spin_unlock_bh(&atchan->lock); | ||
801 | if (!desc) { | ||
802 | dev_err(chan2dev(chan), "can't get descriptor\n"); | ||
803 | if (first) | ||
804 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | ||
805 | return NULL; | ||
806 | } | ||
807 | |||
808 | /* Update src and dest addresses. */ | ||
809 | src_addr += xfer_size; | ||
810 | dst_addr += xfer_size; | ||
811 | |||
812 | if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth) | ||
813 | xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth; | ||
814 | else | ||
815 | xfer_size = remaining_size; | ||
816 | |||
817 | dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size); | ||
818 | |||
819 | /* Check remaining length and change data width if needed. */ | ||
820 | if (!((src_addr | dst_addr | xfer_size) & 7)) { | ||
821 | dwidth = AT_XDMAC_CC_DWIDTH_DWORD; | ||
822 | dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__); | ||
823 | } else if (!((src_addr | dst_addr | xfer_size) & 3)) { | ||
824 | dwidth = AT_XDMAC_CC_DWIDTH_WORD; | ||
825 | dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__); | ||
826 | } else if (!((src_addr | dst_addr | xfer_size) & 1)) { | ||
827 | dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD; | ||
828 | dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__); | ||
829 | } else { ||
830 | dwidth = AT_XDMAC_CC_DWIDTH_BYTE; | ||
831 | dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__); | ||
832 | } | ||
833 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); | ||
834 | |||
835 | ublen = xfer_size >> dwidth; | ||
836 | remaining_size -= xfer_size; | ||
837 | |||
838 | desc->lld.mbr_sa = src_addr; | ||
839 | desc->lld.mbr_da = dst_addr; | ||
840 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 | ||
841 | | AT_XDMAC_MBR_UBC_NDEN | ||
842 | | AT_XDMAC_MBR_UBC_NSEN | ||
843 | | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0) | ||
844 | | ublen; | ||
845 | desc->lld.mbr_cfg = chan_cc; | ||
846 | |||
847 | dev_dbg(chan2dev(chan), | ||
848 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", | ||
849 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); | ||
850 | |||
851 | /* Chain lld. */ | ||
852 | if (prev) { | ||
853 | prev->lld.mbr_nda = desc->tx_dma_desc.phys; | ||
854 | dev_dbg(chan2dev(chan), | ||
855 | "%s: chain lld: prev=0x%p, mbr_nda=0x%08x\n", | ||
856 | __func__, prev, prev->lld.mbr_nda); | ||
857 | } | ||
858 | |||
859 | prev = desc; | ||
860 | if (!first) | ||
861 | first = desc; | ||
862 | |||
863 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | ||
864 | __func__, desc, first); | ||
865 | list_add_tail(&desc->desc_node, &first->descs_list); | ||
866 | } | ||
867 | |||
868 | first->tx_dma_desc.flags = flags; | ||
869 | first->xfer_size = len; | ||
870 | |||
871 | return &first->tx_dma_desc; | ||
872 | } | ||
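Because UBLEN is a 24-bit field counted in data-width units, a single descriptor moves at most AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth bytes; at_xdmac_prep_dma_memcpy() therefore splits larger requests across chained descriptors. A standalone check of the split bound:

```c
/* Standalone check: maximum bytes per memcpy descriptor at each data
 * width, per the 24-bit UBLEN field (AT_XDMAC_MBR_UBC_UBLEN_MAX). */
#include <assert.h>

int main(void)
{
	unsigned long max = 0xFFFFFFUL;		/* UBLEN_MAX, dwidth units */

	assert((max << 0) == 16777215UL);	/* byte width: ~16 MiB */
	assert((max << 3) == 134217720UL);	/* dword width: ~128 MiB */
	/* A 256 MiB dword-aligned copy needs 3 descriptors: two full
	 * microblocks plus a remainder. */
	assert(268435456UL / (max << 3) == 2);
	return 0;
}
```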
873 | |||
874 | static enum dma_status | ||
875 | at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
876 | struct dma_tx_state *txstate) | ||
877 | { | ||
878 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
879 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | ||
880 | struct at_xdmac_desc *desc, *_desc; | ||
881 | struct list_head *descs_list; | ||
882 | enum dma_status ret; | ||
883 | int residue; | ||
884 | u32 cur_nda, mask, value; | ||
885 | u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]); | ||
886 | |||
887 | ret = dma_cookie_status(chan, cookie, txstate); | ||
888 | if (ret == DMA_COMPLETE) | ||
889 | return ret; | ||
890 | |||
891 | if (!txstate) | ||
892 | return ret; | ||
893 | |||
894 | spin_lock_bh(&atchan->lock); | ||
895 | |||
896 | desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); | ||
897 | |||
898 | /* | ||
899 | * If the transfer has not been started yet, there is no need to compute ||
900 | * the residue: it is simply the transfer length. ||
901 | */ | ||
902 | if (!desc->active_xfer) { | ||
903 | dma_set_residue(txstate, desc->xfer_size); | ||
904 | spin_unlock_bh(&atchan->lock); | ||
905 | return ret; | ||
906 | } | ||
907 | |||
908 | residue = desc->xfer_size; | ||
909 | /* | ||
910 | * Flush FIFO: only relevant when the transfer is source peripheral | ||
911 | * synchronized. | ||
912 | */ | ||
913 | mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; | ||
914 | value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; | ||
915 | if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) { | ||
916 | at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); | ||
917 | while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) | ||
918 | cpu_relax(); | ||
919 | } | ||
920 | |||
921 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | ||
922 | /* | ||
923 | * Subtract the size of all microblocks already transferred, including ||
924 | * the current one, then add back the size still left to transfer in ||
925 | * the current microblock. ||
926 | */ | ||
927 | descs_list = &desc->descs_list; | ||
928 | list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { | ||
929 | residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; | ||
930 | if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) | ||
931 | break; | ||
932 | } | ||
933 | residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; | ||
934 | |||
935 | spin_unlock_bh(&atchan->lock); | ||
936 | |||
937 | dma_set_residue(txstate, residue); | ||
938 | |||
939 | dev_dbg(chan2dev(chan), | ||
940 | "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", | ||
941 | __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); | ||
942 | |||
943 | return ret; | ||
944 | } | ||
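The residue loop walks the descriptor chain: starting from the full xfer_size it subtracts each microblock up to and including the lld whose address matches CNDA, then adds back CUBC (the untransferred part of the current microblock), with everything scaled by the data width. A standalone numeric illustration with made-up values:

```c
/* Standalone illustration of the residue arithmetic in
 * at_xdmac_tx_status():
 *   residue = xfer_size - sum(ublen << dwidth, up to current lld)
 *             + (CUBC << dwidth)                                   */
#include <assert.h>

int main(void)
{
	unsigned int dwidth = 2;	/* word transfers */
	int residue = 12288;		/* xfer_size: 3 x 4096-byte llds */

	residue -= 1024 << dwidth;	/* lld 0: fully transferred */
	residue -= 1024 << dwidth;	/* lld 1: current, matches CNDA */
	residue += 600 << dwidth;	/* CUBC: 600 words still pending */

	assert(residue == 4096 + 2400);	/* untouched lld 2 + current rest */
	return 0;
}
```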
945 | |||
946 | /* Call must be protected by lock. */ | ||
947 | static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, | ||
948 | struct at_xdmac_desc *desc) | ||
949 | { | ||
950 | dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | ||
951 | |||
952 | /* | ||
953 | * Remove the transfer from the transfer list then move the transfer | ||
954 | * descriptors into the free descriptors list. | ||
955 | */ | ||
956 | list_del(&desc->xfer_node); | ||
957 | list_splice_init(&desc->descs_list, &atchan->free_descs_list); | ||
958 | } | ||
959 | |||
960 | static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) | ||
961 | { | ||
962 | struct at_xdmac_desc *desc; | ||
963 | |||
964 | spin_lock_bh(&atchan->lock); | ||
965 | |||
966 | /* | ||
967 | * If the channel is enabled, do nothing: advance_work will be ||
968 | * triggered by the interrupt. ||
969 | */ | ||
970 | if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { | ||
971 | desc = list_first_entry(&atchan->xfers_list, | ||
972 | struct at_xdmac_desc, | ||
973 | xfer_node); | ||
974 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | ||
975 | if (!desc->active_xfer) | ||
976 | at_xdmac_start_xfer(atchan, desc); | ||
977 | } | ||
978 | |||
979 | spin_unlock_bh(&atchan->lock); | ||
980 | } | ||
981 | |||
982 | static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) | ||
983 | { | ||
984 | struct at_xdmac_desc *desc; | ||
985 | struct dma_async_tx_descriptor *txd; | ||
986 | |||
987 | desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); | ||
988 | txd = &desc->tx_dma_desc; | ||
989 | |||
990 | if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) | ||
991 | txd->callback(txd->callback_param); | ||
992 | } | ||
993 | |||
994 | static void at_xdmac_tasklet(unsigned long data) | ||
995 | { | ||
996 | struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data; | ||
997 | struct at_xdmac_desc *desc; | ||
998 | u32 error_mask; | ||
999 | |||
1000 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", | ||
1001 | __func__, atchan->status); | ||
1002 | |||
1003 | error_mask = AT_XDMAC_CIS_RBEIS | ||
1004 | | AT_XDMAC_CIS_WBEIS | ||
1005 | | AT_XDMAC_CIS_ROIS; | ||
1006 | |||
1007 | if (at_xdmac_chan_is_cyclic(atchan)) { | ||
1008 | at_xdmac_handle_cyclic(atchan); | ||
1009 | } else if ((atchan->status & AT_XDMAC_CIS_LIS) | ||
1010 | || (atchan->status & error_mask)) { | ||
1011 | struct dma_async_tx_descriptor *txd; | ||
1012 | |||
1013 | if (atchan->status & AT_XDMAC_CIS_RBEIS) | ||
1014 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); | ||
1015 | if (atchan->status & AT_XDMAC_CIS_WBEIS) | ||
1016 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); | ||
1017 | if (atchan->status & AT_XDMAC_CIS_ROIS) | ||
1018 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); | ||
1019 | |||
1020 | spin_lock_bh(&atchan->lock); | ||
1021 | desc = list_first_entry(&atchan->xfers_list, | ||
1022 | struct at_xdmac_desc, | ||
1023 | xfer_node); | ||
1024 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | ||
1025 | BUG_ON(!desc->active_xfer); | ||
1026 | |||
1027 | txd = &desc->tx_dma_desc; | ||
1028 | |||
1029 | at_xdmac_remove_xfer(atchan, desc); | ||
1030 | spin_unlock_bh(&atchan->lock); | ||
1031 | |||
1032 | if (!at_xdmac_chan_is_cyclic(atchan)) { | ||
1033 | dma_cookie_complete(txd); | ||
1034 | if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) | ||
1035 | txd->callback(txd->callback_param); | ||
1036 | } | ||
1037 | |||
1038 | dma_run_dependencies(txd); | ||
1039 | |||
1040 | at_xdmac_advance_work(atchan); | ||
1041 | } | ||
1042 | } | ||
1043 | |||
1044 | static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | ||
1045 | { | ||
1046 | struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id; | ||
1047 | struct at_xdmac_chan *atchan; | ||
1048 | u32 imr, status, pending; | ||
1049 | u32 chan_imr, chan_status; | ||
1050 | int i, ret = IRQ_NONE; | ||
1051 | |||
1052 | do { | ||
1053 | imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM); | ||
1054 | status = at_xdmac_read(atxdmac, AT_XDMAC_GIS); | ||
1055 | pending = status & imr; | ||
1056 | |||
1057 | dev_vdbg(atxdmac->dma.dev, | ||
1058 | "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n", | ||
1059 | __func__, status, imr, pending); | ||
1060 | |||
1061 | if (!pending) | ||
1062 | break; | ||
1063 | |||
1064 | /* We have to find which channel has generated the interrupt. */ | ||
1065 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | ||
1066 | if (!((1 << i) & pending)) | ||
1067 | continue; | ||
1068 | |||
1069 | atchan = &atxdmac->chan[i]; | ||
1070 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); | ||
1071 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); | ||
1072 | atchan->status = chan_status & chan_imr; | ||
1073 | dev_vdbg(atxdmac->dma.dev, | ||
1074 | "%s: chan%d: imr=0x%x, status=0x%x\n", | ||
1075 | __func__, i, chan_imr, chan_status); | ||
1076 | dev_vdbg(chan2dev(&atchan->chan), | ||
1077 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | ||
1078 | __func__, | ||
1079 | at_xdmac_chan_read(atchan, AT_XDMAC_CC), | ||
1080 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | ||
1081 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | ||
1082 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | ||
1083 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | ||
1084 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | ||
1085 | |||
1086 | if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) | ||
1087 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); | ||
1088 | |||
1089 | tasklet_schedule(&atchan->tasklet); | ||
1090 | ret = IRQ_HANDLED; | ||
1091 | } | ||
1092 | |||
1093 | } while (pending); | ||
1094 | |||
1095 | return ret; | ||
1096 | } | ||
1097 | |||
1098 | static void at_xdmac_issue_pending(struct dma_chan *chan) | ||
1099 | { | ||
1100 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1101 | |||
1102 | dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); | ||
1103 | |||
1104 | if (!at_xdmac_chan_is_cyclic(atchan)) | ||
1105 | at_xdmac_advance_work(atchan); | ||
1106 | |||
1107 | return; | ||
1108 | } | ||
1109 | |||
1110 | static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1111 | unsigned long arg) | ||
1112 | { | ||
1113 | struct at_xdmac_desc *desc, *_desc; | ||
1114 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1115 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | ||
1116 | int ret = 0; | ||
1117 | |||
1118 | dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd); | ||
1119 | |||
1120 | spin_lock_bh(&atchan->lock); | ||
1121 | |||
1122 | switch (cmd) { | ||
1123 | case DMA_PAUSE: | ||
1124 | at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); | ||
1125 | set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); | ||
1126 | break; | ||
1127 | |||
1128 | case DMA_RESUME: | ||
1129 | if (!at_xdmac_chan_is_paused(atchan)) | ||
1130 | break; | ||
1131 | |||
1132 | at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); | ||
1133 | clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); | ||
1134 | break; | ||
1135 | |||
1136 | case DMA_TERMINATE_ALL: | ||
1137 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); | ||
1138 | while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) | ||
1139 | cpu_relax(); | ||
1140 | |||
1141 | /* Cancel all pending transfers. */ | ||
1142 | list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) | ||
1143 | at_xdmac_remove_xfer(atchan, desc); | ||
1144 | |||
1145 | clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); | ||
1146 | break; | ||
1147 | |||
1148 | case DMA_SLAVE_CONFIG: | ||
1149 | ret = at_xdmac_set_slave_config(chan, | ||
1150 | (struct dma_slave_config *)arg); | ||
1151 | break; | ||
1152 | |||
1153 | default: | ||
1154 | dev_err(chan2dev(chan), | ||
1155 | "unmanaged or unknown dma control cmd: %d\n", cmd); | ||
1156 | ret = -ENXIO; | ||
1157 | } | ||
1158 | |||
1159 | spin_unlock_bh(&atchan->lock); | ||
1160 | |||
1161 | return ret; | ||
1162 | } | ||
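at_xdmac_control() backs the generic dmaengine control operations of this kernel generation (routed through the device_control hook), so clients use the standard wrappers rather than calling it directly. A minimal sketch:

```c
/* Sketch: the generic wrappers that reach at_xdmac_control(). */
#include <linux/dmaengine.h>

static void example_control(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* DMA_PAUSE: sets GRWS, marks paused */
	dmaengine_resume(chan);		/* DMA_RESUME: sets GRWR, clears flag */
	dmaengine_terminate_all(chan);	/* DMA_TERMINATE_ALL: GD + list flush */
}
```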
1163 | |||
1164 | static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) | ||
1165 | { | ||
1166 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1167 | struct at_xdmac_desc *desc; | ||
1168 | int i; | ||
1169 | |||
1170 | spin_lock_bh(&atchan->lock); | ||
1171 | |||
1172 | if (at_xdmac_chan_is_enabled(atchan)) { | ||
1173 | dev_err(chan2dev(chan), | ||
1174 | "can't allocate channel resources (channel enabled)\n"); | ||
1175 | i = -EIO; | ||
1176 | goto spin_unlock; | ||
1177 | } | ||
1178 | |||
1179 | if (!list_empty(&atchan->free_descs_list)) { | ||
1180 | dev_err(chan2dev(chan), | ||
1181 | "can't allocate channel resources (channel not free from a previous use)\n"); | ||
1182 | i = -EIO; | ||
1183 | goto spin_unlock; | ||
1184 | } | ||
1185 | |||
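	/* The channel lock is held here, hence GFP_ATOMIC for the descriptors. */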
1186 | for (i = 0; i < init_nr_desc_per_channel; i++) { | ||
1187 | desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC); | ||
1188 | if (!desc) { | ||
1189 | dev_warn(chan2dev(chan), | ||
1190 | "only %d descriptors have been allocated\n", i); | ||
1191 | break; | ||
1192 | } | ||
1193 | list_add_tail(&desc->desc_node, &atchan->free_descs_list); | ||
1194 | } | ||
1195 | |||
1196 | dma_cookie_init(chan); | ||
1197 | |||
1198 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | ||
1199 | |||
1200 | spin_unlock: | ||
1201 | spin_unlock_bh(&atchan->lock); | ||
1202 | return i; | ||
1203 | } | ||
1204 | |||
1205 | static void at_xdmac_free_chan_resources(struct dma_chan *chan) | ||
1206 | { | ||
1207 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1208 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); | ||
1209 | struct at_xdmac_desc *desc, *_desc; | ||
1210 | |||
1211 | list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { | ||
1212 | dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc); | ||
1213 | list_del(&desc->desc_node); | ||
1214 | dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); | ||
1215 | } | ||
1216 | |||
1217 | return; | ||
1218 | } | ||
1219 | |||
1220 | #define AT_XDMAC_DMA_BUSWIDTHS\ | ||
1221 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ | ||
1222 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ | ||
1223 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ | ||
1224 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\ | ||
1225 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) | ||
1226 | |||
1227 | static int at_xdmac_device_slave_caps(struct dma_chan *dchan, | ||
1228 | struct dma_slave_caps *caps) | ||
1229 | { | ||
1230 | |||
1231 | caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; | ||
1232 | caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; | ||
1233 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1234 | caps->cmd_pause = true; | ||
1235 | caps->cmd_terminate = true; | ||
1236 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1237 | |||
1238 | return 0; | ||
1239 | } | ||
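
/*
 * A sketch, assuming ordinary dmaengine client code (not part of this
 * driver), of how a slave driver can consume these capabilities:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		...	dmaengine_pause()/dmaengine_resume() are usable ...
 */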
1240 | |||
1241 | #ifdef CONFIG_PM | ||
1242 | static int atmel_xdmac_prepare(struct device *dev) | ||
1243 | { | ||
1244 | struct platform_device *pdev = to_platform_device(dev); | ||
1245 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1246 | struct dma_chan *chan, *_chan; | ||
1247 | |||
1248 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | ||
1249 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1250 | |||
1251 | 		/* Abort suspend with -EAGAIN while a non-cyclic transfer is still in flight. */ | ||
1252 | if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan)) | ||
1253 | return -EAGAIN; | ||
1254 | } | ||
1255 | return 0; | ||
1256 | } | ||
1257 | #else | ||
1258 | # define atmel_xdmac_prepare NULL | ||
1259 | #endif | ||
1260 | |||
1261 | #ifdef CONFIG_PM_SLEEP | ||
1262 | static int atmel_xdmac_suspend(struct device *dev) | ||
1263 | { | ||
1264 | struct platform_device *pdev = to_platform_device(dev); | ||
1265 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1266 | struct dma_chan *chan, *_chan; | ||
1267 | |||
1268 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | ||
1269 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | ||
1270 | |||
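		/*
		 * Cyclic channels must survive suspend: pause them and save the
		 * state (interrupt mask, next descriptor address and control)
		 * needed to restart the ring in resume.
		 */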
1271 | if (at_xdmac_chan_is_cyclic(atchan)) { | ||
1272 | if (!at_xdmac_chan_is_paused(atchan)) | ||
1273 | at_xdmac_control(chan, DMA_PAUSE, 0); | ||
1274 | atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); | ||
1275 | atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); | ||
1276 | atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); | ||
1277 | } | ||
1278 | } | ||
1279 | 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); | ||
	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS); /* channel enable status, restored via AT_XDMAC_GE in resume */
1280 | |||
1281 | at_xdmac_off(atxdmac); | ||
1282 | clk_disable_unprepare(atxdmac->clk); | ||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
1286 | static int atmel_xdmac_resume(struct device *dev) | ||
1287 | { | ||
1288 | struct platform_device *pdev = to_platform_device(dev); | ||
1289 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1290 | struct at_xdmac_chan *atchan; | ||
1291 | struct dma_chan *chan, *_chan; | ||
1292 | int i; | ||
1293 | u32 cfg; | ||
1294 | |||
1295 | clk_prepare_enable(atxdmac->clk); | ||
1296 | |||
1297 | /* Clear pending interrupts. */ | ||
1298 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | ||
1299 | atchan = &atxdmac->chan[i]; | ||
1300 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | ||
1301 | cpu_relax(); | ||
1302 | } | ||
1303 | |||
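	/*
	 * Restore the global interrupt mask and re-enable the channels that
	 * were active when the controller was suspended.
	 */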
1304 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); | ||
1305 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); | ||
1306 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | ||
1307 | atchan = to_at_xdmac_chan(chan); | ||
1308 | cfg = atchan->cfg[AT_XDMAC_CUR_CFG]; | ||
1309 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg); | ||
1310 | if (at_xdmac_chan_is_cyclic(atchan)) { | ||
1311 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); | ||
1312 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); | ||
1313 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); | ||
1314 | wmb(); | ||
1315 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); | ||
1316 | } | ||
1317 | } | ||
1318 | return 0; | ||
1319 | } | ||
1320 | #endif /* CONFIG_PM_SLEEP */ | ||
1321 | |||
1322 | static int at_xdmac_probe(struct platform_device *pdev) | ||
1323 | { | ||
1324 | struct resource *res; | ||
1325 | struct at_xdmac *atxdmac; | ||
1326 | int irq, size, nr_channels, i, ret; | ||
1327 | void __iomem *base; | ||
1328 | u32 reg; | ||
1329 | |||
1330 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1331 | if (!res) | ||
1332 | return -EINVAL; | ||
1333 | |||
1334 | irq = platform_get_irq(pdev, 0); | ||
1335 | if (irq < 0) | ||
1336 | return irq; | ||
1337 | |||
1338 | base = devm_ioremap_resource(&pdev->dev, res); | ||
1339 | if (IS_ERR(base)) | ||
1340 | return PTR_ERR(base); | ||
1341 | |||
1342 | 	/* | ||
1343 | 	 * Read the number of XDMAC channels. The read helper can't be used | ||
1344 | 	 * here since atxdmac is not yet allocated and the channel count is | ||
1345 | 	 * needed to size that allocation. | ||
1346 | 	 */ | ||
1347 | reg = readl_relaxed(base + AT_XDMAC_GTYPE); | ||
1348 | nr_channels = AT_XDMAC_NB_CH(reg); | ||
1349 | if (nr_channels > AT_XDMAC_MAX_CHAN) { | ||
1350 | dev_err(&pdev->dev, "invalid number of channels (%u)\n", | ||
1351 | nr_channels); | ||
1352 | return -EINVAL; | ||
1353 | } | ||
1354 | |||
1355 | size = sizeof(*atxdmac); | ||
1356 | size += nr_channels * sizeof(struct at_xdmac_chan); | ||
1357 | atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | ||
1358 | if (!atxdmac) { | ||
1359 | dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); | ||
1360 | return -ENOMEM; | ||
1361 | } | ||
1362 | |||
1363 | atxdmac->regs = base; | ||
1364 | atxdmac->irq = irq; | ||
1365 | |||
1366 | atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); | ||
1367 | if (IS_ERR(atxdmac->clk)) { | ||
1368 | dev_err(&pdev->dev, "can't get dma_clk\n"); | ||
1369 | return PTR_ERR(atxdmac->clk); | ||
1370 | } | ||
1371 | |||
1372 | 	/* Do not use devres for the IRQ: it must be freed before the tasklets are killed, to prevent races. */ | ||
1373 | ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); | ||
1374 | if (ret) { | ||
1375 | dev_err(&pdev->dev, "can't request irq\n"); | ||
1376 | return ret; | ||
1377 | } | ||
1378 | |||
1379 | ret = clk_prepare_enable(atxdmac->clk); | ||
1380 | if (ret) { | ||
1381 | dev_err(&pdev->dev, "can't prepare or enable clock\n"); | ||
1382 | goto err_free_irq; | ||
1383 | } | ||
1384 | |||
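	/* Descriptors come from a managed dma_pool: 4-byte aligned, no boundary constraint. */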
1385 | atxdmac->at_xdmac_desc_pool = | ||
1386 | dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, | ||
1387 | sizeof(struct at_xdmac_desc), 4, 0); | ||
1388 | if (!atxdmac->at_xdmac_desc_pool) { | ||
1389 | dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); | ||
1390 | ret = -ENOMEM; | ||
1391 | goto err_clk_disable; | ||
1392 | } | ||
1393 | |||
1394 | dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); | ||
1395 | dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); | ||
1396 | dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); | ||
1397 | 	/* | ||
1398 | 	 * Without DMA_PRIVATE the driver is not able to allocate more than | ||
1399 | 	 * one channel: the second allocation fails in private_candidate(). | ||
1400 | 	 */ | ||
1401 | dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); | ||
1402 | atxdmac->dma.dev = &pdev->dev; | ||
1403 | atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; | ||
1404 | atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; | ||
1405 | atxdmac->dma.device_tx_status = at_xdmac_tx_status; | ||
1406 | atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; | ||
1407 | atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; | ||
1408 | atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; | ||
1409 | atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; | ||
1410 | atxdmac->dma.device_control = at_xdmac_control; | ||
1411 | atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; | ||
1412 | |||
1413 | /* Disable all chans and interrupts. */ | ||
1414 | at_xdmac_off(atxdmac); | ||
1415 | |||
1416 | /* Init channels. */ | ||
1417 | INIT_LIST_HEAD(&atxdmac->dma.channels); | ||
1418 | for (i = 0; i < nr_channels; i++) { | ||
1419 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | ||
1420 | |||
1421 | atchan->chan.device = &atxdmac->dma; | ||
1422 | list_add_tail(&atchan->chan.device_node, | ||
1423 | &atxdmac->dma.channels); | ||
1424 | |||
1425 | atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); | ||
1426 | atchan->mask = 1 << i; | ||
1427 | |||
1428 | spin_lock_init(&atchan->lock); | ||
1429 | INIT_LIST_HEAD(&atchan->xfers_list); | ||
1430 | INIT_LIST_HEAD(&atchan->free_descs_list); | ||
1431 | tasklet_init(&atchan->tasklet, at_xdmac_tasklet, | ||
1432 | (unsigned long)atchan); | ||
1433 | |||
1434 | /* Clear pending interrupts. */ | ||
1435 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | ||
1436 | cpu_relax(); | ||
1437 | } | ||
1438 | platform_set_drvdata(pdev, atxdmac); | ||
1439 | |||
1440 | ret = dma_async_device_register(&atxdmac->dma); | ||
1441 | if (ret) { | ||
1442 | 		dev_err(&pdev->dev, "failed to register DMA engine device\n"); | ||
1443 | goto err_clk_disable; | ||
1444 | } | ||
1445 | |||
1446 | ret = of_dma_controller_register(pdev->dev.of_node, | ||
1447 | at_xdmac_xlate, atxdmac); | ||
1448 | if (ret) { | ||
1449 | 		dev_err(&pdev->dev, "could not register OF DMA controller\n"); | ||
1450 | goto err_dma_unregister; | ||
1451 | } | ||
1452 | |||
1453 | dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", | ||
1454 | nr_channels, atxdmac->regs); | ||
1455 | |||
1456 | return 0; | ||
1457 | |||
1458 | err_dma_unregister: | ||
1459 | dma_async_device_unregister(&atxdmac->dma); | ||
1460 | err_clk_disable: | ||
1461 | clk_disable_unprepare(atxdmac->clk); | ||
1462 | err_free_irq: | ||
1463 | 	free_irq(atxdmac->irq, atxdmac); /* dev_id must match the request_irq() cookie */ | ||
1464 | return ret; | ||
1465 | } | ||
1466 | |||
1467 | static int at_xdmac_remove(struct platform_device *pdev) | ||
1468 | { | ||
1469 | 	struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | ||
1470 | int i; | ||
1471 | |||
1472 | at_xdmac_off(atxdmac); | ||
1473 | of_dma_controller_free(pdev->dev.of_node); | ||
1474 | dma_async_device_unregister(&atxdmac->dma); | ||
1475 | clk_disable_unprepare(atxdmac->clk); | ||
1476 | |||
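	/*
	 * Quiesce and release the interrupt before killing the per-channel
	 * tasklets, so the handler cannot re-schedule them.
	 */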
1477 | synchronize_irq(atxdmac->irq); | ||
1478 | |||
1479 | free_irq(atxdmac->irq, atxdmac->dma.dev); | ||
1480 | |||
1481 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | ||
1482 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | ||
1483 | |||
1484 | tasklet_kill(&atchan->tasklet); | ||
1485 | at_xdmac_free_chan_resources(&atchan->chan); | ||
1486 | } | ||
1487 | |||
1488 | return 0; | ||
1489 | } | ||
1490 | |||
1491 | static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = { | ||
1492 | .prepare = atmel_xdmac_prepare, | ||
1493 | SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) | ||
1494 | }; | ||
1495 | |||
1496 | static const struct of_device_id atmel_xdmac_dt_ids[] = { | ||
1497 | { | ||
1498 | .compatible = "atmel,sama5d4-dma", | ||
1499 | }, { | ||
1500 | /* sentinel */ | ||
1501 | } | ||
1502 | }; | ||
1503 | MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); | ||
1504 | |||
1505 | static struct platform_driver at_xdmac_driver = { | ||
1506 | .probe = at_xdmac_probe, | ||
1507 | .remove = at_xdmac_remove, | ||
1508 | .driver = { | ||
1509 | .name = "at_xdmac", | ||
1510 | .owner = THIS_MODULE, | ||
1511 | .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), | ||
1512 | .pm = &atmel_xdmac_dev_pm_ops, | ||
1513 | } | ||
1514 | }; | ||
1515 | |||
1516 | static int __init at_xdmac_init(void) | ||
1517 | { | ||
1518 | return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe); | ||
1519 | } | ||
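
/*
 * Registered at subsys_initcall time so the DMA controller is available
 * before the devices that depend on it are probed.
 */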
1520 | subsys_initcall(at_xdmac_init); | ||
1521 | |||
1522 | MODULE_DESCRIPTION("Atmel Extended DMA Controller driver"); | ||
1523 | MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>"); | ||
1524 | MODULE_LICENSE("GPL"); | ||
diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h index e835037a77b4..ab6cbba45401 100644 --- a/include/dt-bindings/dma/at91.h +++ b/include/dt-bindings/dma/at91.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #ifndef __DT_BINDINGS_AT91_DMA_H__ | ||
10 | #define __DT_BINDINGS_AT91_DMA_H__ | ||
11 | |||
12 | /* ---------- HDMAC ---------- */ | ||
13 | |||
14 | /* | ||
15 |  * Source and/or destination peripheral ID | ||
16 |  */ | ||
@@ -24,4 +26,27 @@ | |||
26 | #define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ | ||
27 | #define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ | ||
28 | |||
29 | |||
30 | /* ---------- XDMAC ---------- */ | ||
31 | #define AT91_XDMAC_DT_MEM_IF_MASK (0x1) | ||
32 | #define AT91_XDMAC_DT_MEM_IF_OFFSET (13) | ||
33 | #define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ | ||
34 | << AT91_XDMAC_DT_MEM_IF_OFFSET) | ||
35 | #define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ | ||
36 | & AT91_XDMAC_DT_MEM_IF_MASK) | ||
37 | |||
38 | #define AT91_XDMAC_DT_PER_IF_MASK (0x1) | ||
39 | #define AT91_XDMAC_DT_PER_IF_OFFSET (14) | ||
40 | #define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ | ||
41 | << AT91_XDMAC_DT_PER_IF_OFFSET) | ||
42 | #define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ | ||
43 | & AT91_XDMAC_DT_PER_IF_MASK) | ||
44 | |||
45 | #define AT91_XDMAC_DT_PERID_MASK (0x7f) | ||
46 | #define AT91_XDMAC_DT_PERID_OFFSET (24) | ||
47 | #define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ | ||
48 | << AT91_XDMAC_DT_PERID_OFFSET) | ||
49 | #define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ | ||
50 | & AT91_XDMAC_DT_PERID_MASK) | ||
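
/*
 * Worked examples for the encode/decode pairs above:
 *	AT91_XDMAC_DT_MEM_IF(1)			-> (1 & 0x1) << 13 == 0x2000
 *	AT91_XDMAC_DT_PERID(6)			-> (6 & 0x7f) << 24 == 0x06000000
 *	AT91_XDMAC_DT_GET_PERID(0x06000000)	-> 6
 */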
51 | |||
52 | #endif /* __DT_BINDINGS_AT91_DMA_H__ */ | ||