author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/dma
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c

Diffstat (limited to 'drivers/dma')
36 files changed, 8088 insertions, 2275 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,15 +46,22 @@ config INTEL_MID_DMAC
 
 	  If unsure, say N.
 
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
+config AMBA_PL08X
+	bool "ARM PrimeCell PL080 or PL081 support"
+	depends on ARM_AMBA && EXPERIMENTAL
+	select DMA_ENGINE
+	help
+	  Platform has a PL08x DMAC device
+	  which can provide DMA engine support
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
@@ -69,12 +76,13 @@ config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA support"
-	depends on AVR32
+	depends on HAVE_CLK
 	select DMA_ENGINE
 	default y if CPU_AT32AP7000
 	help
@@ -93,6 +101,7 @@ config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -100,7 +109,7 @@ config FSL_DMA
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
@@ -109,6 +118,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
 
@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.
 
@@ -189,11 +200,42 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Topcliff PCH DMA support"
+	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
-	  Enable support for the Topcliff PCH DMA engine.
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+	  Output Hub), ML7213 and ML7223.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+	  for MP(Media Phone) use.
+	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on IMX_HAVE_DMA_V1
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
+config MXS_DMA
+	bool "MXS DMA support"
+	depends on SOC_IMX23 || SOC_IMX28
+	select DMA_ENGINE
+	help
+	  Support the MXS DMA engine. This engine including APBH-DMA
+	  and APBX-DMA is integrated into Freescale i.MX23/28 chips.
 
 config DMA_ENGINE
 	bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..836095ab3c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,5 @@
-ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
-	EXTRA_CFLAGS += -DDEBUG
-endif
-ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
-	EXTRA_CFLAGS += -DVERBOSE_DEBUG
-endif
+ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
@@ -21,7 +17,11 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending to start the transaction in below drivers
+	- mpc512x_dma
+	- imx-dma
+	- imx-sdma
+	- mxs-dma.c
+	- dw_dmac
+	- intel_mid_dma
+	- ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
new file mode 100644
index 000000000000..e6d7228b1479
--- /dev/null
+++ b/drivers/dma/amba-pl08x.c
@@ -0,0 +1,2078 @@
1 | /* | ||
2 | * Copyright (c) 2006 ARM Ltd. | ||
3 | * Copyright (c) 2010 ST-Ericsson SA | ||
4 | * | ||
5 | * Author: Peter Pearse <peter.pearse@arm.com> | ||
6 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | * | ||
22 | * The full GNU General Public License is in this distribution in the file | ||
23 | * called COPYING. | ||
24 | * | ||
25 | * Documentation: ARM DDI 0196G == PL080 | ||
26 | * Documentation: ARM DDI 0218E == PL081 | ||
27 | * | ||
28 | * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any | ||
29 | * channel. | ||
30 | * | ||
31 | * The PL080 has 8 channels available for simultaneous use, and the PL081 | ||
32 | * has only two channels. So on these DMA controllers the number of channels | ||
33 | * and the number of incoming DMA signals are two totally different things. | ||
34 | * It is usually not possible to theoretically handle all physical signals, | ||
35 | * so a multiplexing scheme with possible denial of use is necessary. | ||
36 | * | ||
37 | * The PL080 has a dual bus master, PL081 has a single master. | ||
38 | * | ||
39 | * Memory to peripheral transfer may be visualized as | ||
40 | * Get data from memory to DMAC | ||
41 | * Until no data left | ||
42 | * On burst request from peripheral | ||
43 | * Destination burst from DMAC to peripheral | ||
44 | * Clear burst request | ||
45 | * Raise terminal count interrupt | ||
46 | * | ||
47 | * For peripherals with a FIFO: | ||
48 | * Source burst size == half the depth of the peripheral FIFO | ||
49 | * Destination burst size == the depth of the peripheral FIFO | ||
50 | * | ||
51 | * (Bursts are irrelevant for mem to mem transfers - there are no burst | ||
52 | * signals, the DMA controller will simply facilitate its AHB master.) | ||
53 | * | ||
54 | * ASSUMES default (little) endianness for DMA transfers | ||
55 | * | ||
56 | * The PL08x has two flow control settings: | ||
57 | * - DMAC flow control: the transfer size defines the number of transfers | ||
58 | * which occur for the current LLI entry, and the DMAC raises TC at the | ||
59 | * end of every LLI entry. Observed behaviour shows the DMAC listening | ||
60 | * to both the BREQ and SREQ signals (contrary to documented), | ||
61 | * transferring data if either is active. The LBREQ and LSREQ signals | ||
62 | * are ignored. | ||
63 | * | ||
64 | * - Peripheral flow control: the transfer size is ignored (and should be | ||
65 | * zero). The data is transferred from the current LLI entry, until | ||
66 | * after the final transfer signalled by LBREQ or LSREQ. The DMAC | ||
67 | * will then move to the next LLI entry. | ||
68 | * | ||
69 | * Only the former works sanely with scatter lists, so we only implement | ||
70 | * the DMAC flow control method. However, peripherals which use the LBREQ | ||
71 | * and LSREQ signals (eg, MMCI) are unable to use this mode, which through | ||
72 | * these hardware restrictions prevents them from using scatter DMA. | ||
73 | * | ||
74 | * Global TODO: | ||
75 | * - Break out common code from arch/arm/mach-s3c64xx and share | ||
76 | */ | ||
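
A minimal sketch of the burst rule of thumb described in the comment above, assuming a peripheral with a hypothetical 32-word FIFO (the helper name and the standalone test harness are illustrative, not part of this driver):

#include <assert.h>

/* Rule of thumb from the comment: for a peripheral with a FIFO,
 * source burst = half the FIFO depth, destination burst = full depth. */
static void example_burst_for_fifo(unsigned int fifo_words,
				   unsigned int *src_burst,
				   unsigned int *dst_burst)
{
	*src_burst = fifo_words / 2;
	*dst_burst = fifo_words;
}

int main(void)
{
	unsigned int src, dst;

	example_burst_for_fifo(32, &src, &dst);	/* hypothetical 32-word FIFO */
	assert(src == 16 && dst == 32);
	return 0;
}
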
77 | #include <linux/device.h> | ||
78 | #include <linux/init.h> | ||
79 | #include <linux/module.h> | ||
80 | #include <linux/interrupt.h> | ||
81 | #include <linux/slab.h> | ||
82 | #include <linux/delay.h> | ||
83 | #include <linux/dmapool.h> | ||
84 | #include <linux/dmaengine.h> | ||
85 | #include <linux/amba/bus.h> | ||
86 | #include <linux/amba/pl08x.h> | ||
87 | #include <linux/debugfs.h> | ||
88 | #include <linux/seq_file.h> | ||
89 | |||
90 | #include <asm/hardware/pl080.h> | ||
91 | |||
92 | #define DRIVER_NAME "pl08xdmac" | ||
93 | |||
94 | /** | ||
95 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives | ||
96 | * @channels: the number of channels available in this variant | ||
97 | * @dualmaster: whether this version supports dual AHB masters or not. | ||
98 | */ | ||
99 | struct vendor_data { | ||
100 | u8 channels; | ||
101 | bool dualmaster; | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | * PL08X private data structures | ||
106 | * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, | ||
107 | * start & end do not - their bus bit info is in cctl. Also note that these | ||
108 | * are fixed 32-bit quantities. | ||
109 | */ | ||
110 | struct pl08x_lli { | ||
111 | u32 src; | ||
112 | u32 dst; | ||
113 | u32 lli; | ||
114 | u32 cctl; | ||
115 | }; | ||
116 | |||
117 | /** | ||
118 | * struct pl08x_driver_data - the local state holder for the PL08x | ||
119 | * @slave: slave engine for this instance | ||
120 | * @memcpy: memcpy engine for this instance | ||
121 | * @base: virtual memory base (remapped) for the PL08x | ||
122 | * @adev: the corresponding AMBA (PrimeCell) bus entry | ||
123 | * @vd: vendor data for this PL08x variant | ||
124 | * @pd: platform data passed in from the platform/machine | ||
125 | * @phy_chans: array of data for the physical channels | ||
126 | * @pool: a pool for the LLI descriptors | ||
127 | * @pool_ctr: counter of LLIs in the pool | ||
128 | * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches | ||
129 | * @mem_buses: set to indicate memory transfers on AHB2. | ||
130 | * @lock: a spinlock for this struct | ||
131 | */ | ||
132 | struct pl08x_driver_data { | ||
133 | struct dma_device slave; | ||
134 | struct dma_device memcpy; | ||
135 | void __iomem *base; | ||
136 | struct amba_device *adev; | ||
137 | const struct vendor_data *vd; | ||
138 | struct pl08x_platform_data *pd; | ||
139 | struct pl08x_phy_chan *phy_chans; | ||
140 | struct dma_pool *pool; | ||
141 | int pool_ctr; | ||
142 | u8 lli_buses; | ||
143 | u8 mem_buses; | ||
144 | spinlock_t lock; | ||
145 | }; | ||
146 | |||
147 | /* | ||
148 | * PL08X specific defines | ||
149 | */ | ||
150 | |||
151 | /* | ||
152 | * Memory boundaries: the manual for PL08x says that the controller | ||
153 | * cannot read past a 1KiB boundary, so these defines are used to | ||
154 | * create transfer LLIs that do not cross such boundaries. | ||
155 | */ | ||
156 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | ||
157 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | ||
158 | |||
159 | /* Minimum period between work queue runs */ | ||
160 | #define PL08X_WQ_PERIODMIN 20 | ||
161 | |||
162 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | ||
163 | # define PL08X_LLI_TSFR_SIZE 0x2000 | ||
164 | |||
165 | /* Maximum times we call dma_pool_alloc on this pool without freeing */ | ||
166 | #define PL08X_MAX_ALLOCS 0x40 | ||
167 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) | ||
168 | #define PL08X_ALIGN 8 | ||
169 | |||
170 | static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) | ||
171 | { | ||
172 | return container_of(chan, struct pl08x_dma_chan, chan); | ||
173 | } | ||
174 | |||
175 | static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx) | ||
176 | { | ||
177 | return container_of(tx, struct pl08x_txd, tx); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Physical channel handling | ||
182 | */ | ||
183 | |||
184 | /* Whether a certain channel is busy or not */ | ||
185 | static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) | ||
186 | { | ||
187 | unsigned int val; | ||
188 | |||
189 | val = readl(ch->base + PL080_CH_CONFIG); | ||
190 | return val & PL080_CONFIG_ACTIVE; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Set the initial DMA register values i.e. those for the first LLI | ||
195 | * The next LLI pointer and the configuration interrupt bit have | ||
196 | * been set when the LLIs were constructed. Poke them into the hardware | ||
197 | * and start the transfer. | ||
198 | */ | ||
199 | static void pl08x_start_txd(struct pl08x_dma_chan *plchan, | ||
200 | struct pl08x_txd *txd) | ||
201 | { | ||
202 | struct pl08x_driver_data *pl08x = plchan->host; | ||
203 | struct pl08x_phy_chan *phychan = plchan->phychan; | ||
204 | struct pl08x_lli *lli = &txd->llis_va[0]; | ||
205 | u32 val; | ||
206 | |||
207 | plchan->at = txd; | ||
208 | |||
209 | /* Wait for channel inactive */ | ||
210 | while (pl08x_phy_channel_busy(phychan)) | ||
211 | cpu_relax(); | ||
212 | |||
213 | dev_vdbg(&pl08x->adev->dev, | ||
214 | "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " | ||
215 | "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", | ||
216 | phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, | ||
217 | txd->ccfg); | ||
218 | |||
219 | writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); | ||
220 | writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); | ||
221 | writel(lli->lli, phychan->base + PL080_CH_LLI); | ||
222 | writel(lli->cctl, phychan->base + PL080_CH_CONTROL); | ||
223 | writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); | ||
224 | |||
225 | /* Enable the DMA channel */ | ||
226 | /* Do not access config register until channel shows as disabled */ | ||
227 | while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id)) | ||
228 | cpu_relax(); | ||
229 | |||
230 | /* Do not access config register until channel shows as inactive */ | ||
231 | val = readl(phychan->base + PL080_CH_CONFIG); | ||
232 | while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) | ||
233 | val = readl(phychan->base + PL080_CH_CONFIG); | ||
234 | |||
235 | writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); | ||
236 | } | ||
237 | |||
238 | /* | ||
239 | * Pause the channel by setting the HALT bit. | ||
240 | * | ||
241 | * For M->P transfers, pause the DMAC first and then stop the peripheral - | ||
242 | * the FIFO can only drain if the peripheral is still requesting data. | ||
243 | * (note: this can still timeout if the DMAC FIFO never drains of data.) | ||
244 | * | ||
245 | * For P->M transfers, disable the peripheral first to stop it filling | ||
246 | * the DMAC FIFO, and then pause the DMAC. | ||
247 | */ | ||
248 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | ||
249 | { | ||
250 | u32 val; | ||
251 | int timeout; | ||
252 | |||
253 | /* Set the HALT bit and wait for the FIFO to drain */ | ||
254 | val = readl(ch->base + PL080_CH_CONFIG); | ||
255 | val |= PL080_CONFIG_HALT; | ||
256 | writel(val, ch->base + PL080_CH_CONFIG); | ||
257 | |||
258 | /* Wait for channel inactive */ | ||
259 | for (timeout = 1000; timeout; timeout--) { | ||
260 | if (!pl08x_phy_channel_busy(ch)) | ||
261 | break; | ||
262 | udelay(1); | ||
263 | } | ||
264 | if (pl08x_phy_channel_busy(ch)) | ||
265 | pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); | ||
266 | } | ||
267 | |||
268 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | ||
269 | { | ||
270 | u32 val; | ||
271 | |||
272 | /* Clear the HALT bit */ | ||
273 | val = readl(ch->base + PL080_CH_CONFIG); | ||
274 | val &= ~PL080_CONFIG_HALT; | ||
275 | writel(val, ch->base + PL080_CH_CONFIG); | ||
276 | } | ||
277 | |||
278 | |||
279 | /* | ||
280 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and | ||
281 | * clears any pending interrupt status. This should not be used for | ||
282 | * an on-going transfer, but as a method of shutting down a channel | ||
283 | * (eg, when it's no longer used) or terminating a transfer. | ||
284 | */ | ||
285 | static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, | ||
286 | struct pl08x_phy_chan *ch) | ||
287 | { | ||
288 | u32 val = readl(ch->base + PL080_CH_CONFIG); | ||
289 | |||
290 | val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | | ||
291 | PL080_CONFIG_TC_IRQ_MASK); | ||
292 | |||
293 | writel(val, ch->base + PL080_CH_CONFIG); | ||
294 | |||
295 | writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); | ||
296 | writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); | ||
297 | } | ||
298 | |||
299 | static inline u32 get_bytes_in_cctl(u32 cctl) | ||
300 | { | ||
301 | /* The source width defines the number of bytes */ | ||
302 | u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
303 | |||
304 | switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { | ||
305 | case PL080_WIDTH_8BIT: | ||
306 | break; | ||
307 | case PL080_WIDTH_16BIT: | ||
308 | bytes *= 2; | ||
309 | break; | ||
310 | case PL080_WIDTH_32BIT: | ||
311 | bytes *= 4; | ||
312 | break; | ||
313 | } | ||
314 | return bytes; | ||
315 | } | ||
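
A small standalone sketch of the decode above: the transfer-size field counts source-width units, so the byte count is that field scaled by the source width. The mask and shift values below are assumptions standing in for the PL080_* constants, so treat the numbers as illustrative only:

#include <assert.h>
#include <stdint.h>

#define XFER_SIZE_MASK	0xfff	/* stand-in for PL080_CONTROL_TRANSFER_SIZE_MASK */
#define SWIDTH_SHIFT	18	/* stand-in for PL080_CONTROL_SWIDTH_SHIFT */
#define WIDTH_8BIT	0
#define WIDTH_16BIT	1
#define WIDTH_32BIT	2

static uint32_t example_bytes_in_cctl(uint32_t cctl)
{
	uint32_t bytes = cctl & XFER_SIZE_MASK;

	switch ((cctl >> SWIDTH_SHIFT) & 0x7) {
	case WIDTH_16BIT: return bytes * 2;
	case WIDTH_32BIT: return bytes * 4;
	default:	  return bytes;	/* 8-bit units */
	}
}

int main(void)
{
	/* transfer size 0x80 in 32-bit units -> 512 bytes */
	assert(example_bytes_in_cctl(((uint32_t)WIDTH_32BIT << SWIDTH_SHIFT) | 0x80) == 512);
	return 0;
}
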
316 | |||
317 | /* The channel should be paused when calling this */ | ||
318 | static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) | ||
319 | { | ||
320 | struct pl08x_phy_chan *ch; | ||
321 | struct pl08x_txd *txd; | ||
322 | unsigned long flags; | ||
323 | size_t bytes = 0; | ||
324 | |||
325 | spin_lock_irqsave(&plchan->lock, flags); | ||
326 | ch = plchan->phychan; | ||
327 | txd = plchan->at; | ||
328 | |||
329 | /* | ||
330 | * Follow the LLIs to get the number of remaining | ||
331 | * bytes in the currently active transaction. | ||
332 | */ | ||
333 | if (ch && txd) { | ||
334 | u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; | ||
335 | |||
336 | /* First get the remaining bytes in the active transfer */ | ||
337 | bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); | ||
338 | |||
339 | if (clli) { | ||
340 | struct pl08x_lli *llis_va = txd->llis_va; | ||
341 | dma_addr_t llis_bus = txd->llis_bus; | ||
342 | int index; | ||
343 | |||
344 | BUG_ON(clli < llis_bus || clli >= llis_bus + | ||
345 | sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); | ||
346 | |||
347 | /* | ||
348 | * Locate the next LLI - as this is an array, | ||
349 | * it's simple maths to find. | ||
350 | */ | ||
351 | index = (clli - llis_bus) / sizeof(struct pl08x_lli); | ||
352 | |||
353 | for (; index < MAX_NUM_TSFR_LLIS; index++) { | ||
354 | bytes += get_bytes_in_cctl(llis_va[index].cctl); | ||
355 | |||
356 | /* | ||
357 | * A LLI pointer of 0 terminates the LLI list | ||
358 | */ | ||
359 | if (!llis_va[index].lli) | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | } | ||
364 | |||
365 | /* Sum up all queued transactions */ | ||
366 | if (!list_empty(&plchan->pend_list)) { | ||
367 | struct pl08x_txd *txdi; | ||
368 | list_for_each_entry(txdi, &plchan->pend_list, node) { | ||
369 | bytes += txdi->len; | ||
370 | } | ||
371 | } | ||
372 | |||
373 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
374 | |||
375 | return bytes; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * Allocate a physical channel for a virtual channel | ||
380 | * | ||
381 | * Try to locate a physical channel to be used for this transfer. If all | ||
382 | * are taken return NULL and the requester will have to cope by using | ||
383 | * some fallback PIO mode or retrying later. | ||
384 | */ | ||
385 | static struct pl08x_phy_chan * | ||
386 | pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, | ||
387 | struct pl08x_dma_chan *virt_chan) | ||
388 | { | ||
389 | struct pl08x_phy_chan *ch = NULL; | ||
390 | unsigned long flags; | ||
391 | int i; | ||
392 | |||
393 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
394 | ch = &pl08x->phy_chans[i]; | ||
395 | |||
396 | spin_lock_irqsave(&ch->lock, flags); | ||
397 | |||
398 | if (!ch->serving) { | ||
399 | ch->serving = virt_chan; | ||
400 | ch->signal = -1; | ||
401 | spin_unlock_irqrestore(&ch->lock, flags); | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | spin_unlock_irqrestore(&ch->lock, flags); | ||
406 | } | ||
407 | |||
408 | if (i == pl08x->vd->channels) { | ||
409 | /* No physical channel available, cope with it */ | ||
410 | return NULL; | ||
411 | } | ||
412 | |||
413 | return ch; | ||
414 | } | ||
415 | |||
416 | static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | ||
417 | struct pl08x_phy_chan *ch) | ||
418 | { | ||
419 | unsigned long flags; | ||
420 | |||
421 | spin_lock_irqsave(&ch->lock, flags); | ||
422 | |||
423 | /* Stop the channel and clear its interrupts */ | ||
424 | pl08x_terminate_phy_chan(pl08x, ch); | ||
425 | |||
426 | /* Mark it as free */ | ||
427 | ch->serving = NULL; | ||
428 | spin_unlock_irqrestore(&ch->lock, flags); | ||
429 | } | ||
430 | |||
431 | /* | ||
432 | * LLI handling | ||
433 | */ | ||
434 | |||
435 | static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) | ||
436 | { | ||
437 | switch (coded) { | ||
438 | case PL080_WIDTH_8BIT: | ||
439 | return 1; | ||
440 | case PL080_WIDTH_16BIT: | ||
441 | return 2; | ||
442 | case PL080_WIDTH_32BIT: | ||
443 | return 4; | ||
444 | default: | ||
445 | break; | ||
446 | } | ||
447 | BUG(); | ||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, | ||
452 | size_t tsize) | ||
453 | { | ||
454 | u32 retbits = cctl; | ||
455 | |||
456 | /* Remove all src, dst and transfer size bits */ | ||
457 | retbits &= ~PL080_CONTROL_DWIDTH_MASK; | ||
458 | retbits &= ~PL080_CONTROL_SWIDTH_MASK; | ||
459 | retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
460 | |||
461 | /* Then set the bits according to the parameters */ | ||
462 | switch (srcwidth) { | ||
463 | case 1: | ||
464 | retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
465 | break; | ||
466 | case 2: | ||
467 | retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
468 | break; | ||
469 | case 4: | ||
470 | retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
471 | break; | ||
472 | default: | ||
473 | BUG(); | ||
474 | break; | ||
475 | } | ||
476 | |||
477 | switch (dstwidth) { | ||
478 | case 1: | ||
479 | retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
480 | break; | ||
481 | case 2: | ||
482 | retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
483 | break; | ||
484 | case 4: | ||
485 | retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
486 | break; | ||
487 | default: | ||
488 | BUG(); | ||
489 | break; | ||
490 | } | ||
491 | |||
492 | retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; | ||
493 | return retbits; | ||
494 | } | ||
495 | |||
496 | struct pl08x_lli_build_data { | ||
497 | struct pl08x_txd *txd; | ||
498 | struct pl08x_driver_data *pl08x; | ||
499 | struct pl08x_bus_data srcbus; | ||
500 | struct pl08x_bus_data dstbus; | ||
501 | size_t remainder; | ||
502 | }; | ||
503 | |||
504 | /* | ||
505 | * Autoselect a master bus to use for the transfer. This prefers the | ||
506 | * destination bus if both are available; if one bus has a fixed address, | ||
507 | * the other will be chosen. | ||
508 | */ | ||
509 | static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, | ||
510 | struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) | ||
511 | { | ||
512 | if (!(cctl & PL080_CONTROL_DST_INCR)) { | ||
513 | *mbus = &bd->srcbus; | ||
514 | *sbus = &bd->dstbus; | ||
515 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
516 | *mbus = &bd->dstbus; | ||
517 | *sbus = &bd->srcbus; | ||
518 | } else { | ||
519 | if (bd->dstbus.buswidth == 4) { | ||
520 | *mbus = &bd->dstbus; | ||
521 | *sbus = &bd->srcbus; | ||
522 | } else if (bd->srcbus.buswidth == 4) { | ||
523 | *mbus = &bd->srcbus; | ||
524 | *sbus = &bd->dstbus; | ||
525 | } else if (bd->dstbus.buswidth == 2) { | ||
526 | *mbus = &bd->dstbus; | ||
527 | *sbus = &bd->srcbus; | ||
528 | } else if (bd->srcbus.buswidth == 2) { | ||
529 | *mbus = &bd->srcbus; | ||
530 | *sbus = &bd->dstbus; | ||
531 | } else { | ||
532 | /* bd->srcbus.buswidth == 1 */ | ||
533 | *mbus = &bd->dstbus; | ||
534 | *sbus = &bd->srcbus; | ||
535 | } | ||
536 | } | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * Fills in one LLI for a certain transfer descriptor and advance the counter | ||
541 | */ | ||
542 | static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, | ||
543 | int num_llis, int len, u32 cctl) | ||
544 | { | ||
545 | struct pl08x_lli *llis_va = bd->txd->llis_va; | ||
546 | dma_addr_t llis_bus = bd->txd->llis_bus; | ||
547 | |||
548 | BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); | ||
549 | |||
550 | llis_va[num_llis].cctl = cctl; | ||
551 | llis_va[num_llis].src = bd->srcbus.addr; | ||
552 | llis_va[num_llis].dst = bd->dstbus.addr; | ||
553 | llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); | ||
554 | if (bd->pl08x->lli_buses & PL08X_AHB2) | ||
555 | llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; | ||
556 | |||
557 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
558 | bd->srcbus.addr += len; | ||
559 | if (cctl & PL080_CONTROL_DST_INCR) | ||
560 | bd->dstbus.addr += len; | ||
561 | |||
562 | BUG_ON(bd->remainder < len); | ||
563 | |||
564 | bd->remainder -= len; | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * Return number of bytes to fill to boundary, or len. | ||
569 | * This calculation works for any value of addr. | ||
570 | */ | ||
571 | static inline size_t pl08x_pre_boundary(u32 addr, size_t len) | ||
572 | { | ||
573 | size_t boundary_len = PL08X_BOUNDARY_SIZE - | ||
574 | (addr & (PL08X_BOUNDARY_SIZE - 1)); | ||
575 | |||
576 | return min(boundary_len, len); | ||
577 | } | ||
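
A worked example of the boundary arithmetic above, using hypothetical addresses and taking the boundary size as 0x400 (1 KiB) per the PL08X_BOUNDARY_SHIFT define earlier in this file:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BOUNDARY_SIZE 0x400	/* mirrors PL08X_BOUNDARY_SIZE (1 KiB) */

static size_t example_pre_boundary(uint32_t addr, size_t len)
{
	size_t boundary_len = BOUNDARY_SIZE - (addr & (BOUNDARY_SIZE - 1));

	return boundary_len < len ? boundary_len : len;
}

int main(void)
{
	/* 0x10 bytes remain before the 1 KiB boundary at 0x20000400, so the
	 * current LLI is capped at 0x10 and the next one starts on the boundary. */
	assert(example_pre_boundary(0x200003f0, 0x100) == 0x10);
	/* A transfer that ends before the boundary is left untouched. */
	assert(example_pre_boundary(0x20000000, 0x80) == 0x80);
	return 0;
}
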
578 | |||
579 | /* | ||
580 | * This fills in the table of LLIs for the transfer descriptor | ||
581 | * Note that we assume we never have to change the burst sizes | ||
582 | * Return 0 for error | ||
583 | */ | ||
584 | static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | ||
585 | struct pl08x_txd *txd) | ||
586 | { | ||
587 | struct pl08x_bus_data *mbus, *sbus; | ||
588 | struct pl08x_lli_build_data bd; | ||
589 | int num_llis = 0; | ||
590 | u32 cctl; | ||
591 | size_t max_bytes_per_lli; | ||
592 | size_t total_bytes = 0; | ||
593 | struct pl08x_lli *llis_va; | ||
594 | |||
595 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, | ||
596 | &txd->llis_bus); | ||
597 | if (!txd->llis_va) { | ||
598 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); | ||
599 | return 0; | ||
600 | } | ||
601 | |||
602 | pl08x->pool_ctr++; | ||
603 | |||
604 | /* Get the default CCTL */ | ||
605 | cctl = txd->cctl; | ||
606 | |||
607 | bd.txd = txd; | ||
608 | bd.pl08x = pl08x; | ||
609 | bd.srcbus.addr = txd->src_addr; | ||
610 | bd.dstbus.addr = txd->dst_addr; | ||
611 | |||
612 | /* Find maximum width of the source bus */ | ||
613 | bd.srcbus.maxwidth = | ||
614 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> | ||
615 | PL080_CONTROL_SWIDTH_SHIFT); | ||
616 | |||
617 | /* Find maximum width of the destination bus */ | ||
618 | bd.dstbus.maxwidth = | ||
619 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> | ||
620 | PL080_CONTROL_DWIDTH_SHIFT); | ||
621 | |||
622 | /* Set up the bus widths to the maximum */ | ||
623 | bd.srcbus.buswidth = bd.srcbus.maxwidth; | ||
624 | bd.dstbus.buswidth = bd.dstbus.maxwidth; | ||
625 | dev_vdbg(&pl08x->adev->dev, | ||
626 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | ||
627 | __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); | ||
628 | |||
629 | |||
630 | /* | ||
631 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | ||
632 | */ | ||
633 | max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * | ||
634 | PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
635 | dev_vdbg(&pl08x->adev->dev, | ||
636 | "%s max bytes per lli = %zu\n", | ||
637 | __func__, max_bytes_per_lli); | ||
638 | |||
639 | /* We need to count this down to zero */ | ||
640 | bd.remainder = txd->len; | ||
641 | dev_vdbg(&pl08x->adev->dev, | ||
642 | "%s remainder = %zu\n", | ||
643 | __func__, bd.remainder); | ||
644 | |||
645 | /* | ||
646 | * Choose bus to align to | ||
647 | * - prefers destination bus if both available | ||
648 | * - if fixed address on one bus chooses other | ||
649 | */ | ||
650 | pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); | ||
651 | |||
652 | if (txd->len < mbus->buswidth) { | ||
653 | /* Less than a bus width available - send as single bytes */ | ||
654 | while (bd.remainder) { | ||
655 | dev_vdbg(&pl08x->adev->dev, | ||
656 | "%s single byte LLIs for a transfer of " | ||
657 | "less than a bus width (remain 0x%08x)\n", | ||
658 | __func__, bd.remainder); | ||
659 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
660 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
661 | total_bytes++; | ||
662 | } | ||
663 | } else { | ||
664 | /* Make one byte LLIs until master bus is aligned */ | ||
665 | while ((mbus->addr) % (mbus->buswidth)) { | ||
666 | dev_vdbg(&pl08x->adev->dev, | ||
667 | "%s adjustment lli for less than bus width " | ||
668 | "(remain 0x%08x)\n", | ||
669 | __func__, bd.remainder); | ||
670 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
671 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
672 | total_bytes++; | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * Master now aligned | ||
677 | * - if slave is not then we must set its width down | ||
678 | */ | ||
679 | if (sbus->addr % sbus->buswidth) { | ||
680 | dev_dbg(&pl08x->adev->dev, | ||
681 | "%s set down bus width to one byte\n", | ||
682 | __func__); | ||
683 | |||
684 | sbus->buswidth = 1; | ||
685 | } | ||
686 | |||
687 | /* | ||
688 | * Make largest possible LLIs until less than one bus | ||
689 | * width left | ||
690 | */ | ||
691 | while (bd.remainder > (mbus->buswidth - 1)) { | ||
692 | size_t lli_len, target_len, tsize, odd_bytes; | ||
693 | |||
694 | /* | ||
695 | * If enough left try to send max possible, | ||
696 | * otherwise try to send the remainder | ||
697 | */ | ||
698 | target_len = min(bd.remainder, max_bytes_per_lli); | ||
699 | |||
700 | /* | ||
701 | * Set bus lengths for incrementing buses to the | ||
702 | * number of bytes which fill to next memory boundary, | ||
703 | * limiting on the target length calculated above. | ||
704 | */ | ||
705 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
706 | bd.srcbus.fill_bytes = | ||
707 | pl08x_pre_boundary(bd.srcbus.addr, | ||
708 | target_len); | ||
709 | else | ||
710 | bd.srcbus.fill_bytes = target_len; | ||
711 | |||
712 | if (cctl & PL080_CONTROL_DST_INCR) | ||
713 | bd.dstbus.fill_bytes = | ||
714 | pl08x_pre_boundary(bd.dstbus.addr, | ||
715 | target_len); | ||
716 | else | ||
717 | bd.dstbus.fill_bytes = target_len; | ||
718 | |||
719 | /* Find the nearest */ | ||
720 | lli_len = min(bd.srcbus.fill_bytes, | ||
721 | bd.dstbus.fill_bytes); | ||
722 | |||
723 | BUG_ON(lli_len > bd.remainder); | ||
724 | |||
725 | if (lli_len <= 0) { | ||
726 | dev_err(&pl08x->adev->dev, | ||
727 | "%s lli_len is %zu, <= 0\n", | ||
728 | __func__, lli_len); | ||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | if (lli_len == target_len) { | ||
733 | /* | ||
734 | * Can send what we wanted. | ||
735 | * Maintain alignment | ||
736 | */ | ||
737 | lli_len = (lli_len/mbus->buswidth) * | ||
738 | mbus->buswidth; | ||
739 | odd_bytes = 0; | ||
740 | } else { | ||
741 | /* | ||
742 | * So now we know how many bytes to transfer | ||
743 | * to get to the nearest boundary. The next | ||
744 | * LLI will be past the boundary. However, we | ||
745 | * may be working to a boundary on the slave | ||
746 | * bus. We need to ensure the master stays | ||
747 | * aligned, and that we are working in | ||
748 | * multiples of the bus widths. | ||
749 | */ | ||
750 | odd_bytes = lli_len % mbus->buswidth; | ||
751 | lli_len -= odd_bytes; | ||
752 | |||
753 | } | ||
754 | |||
755 | if (lli_len) { | ||
756 | /* | ||
757 | * Check against minimum bus alignment: | ||
758 | * Calculate actual transfer size in relation | ||
759 | * to bus width and get a maximum remainder of | ||
760 | * the smallest bus width - 1 | ||
761 | */ | ||
762 | /* FIXME: use round_down()? */ | ||
763 | tsize = lli_len / min(mbus->buswidth, | ||
764 | sbus->buswidth); | ||
765 | lli_len = tsize * min(mbus->buswidth, | ||
766 | sbus->buswidth); | ||
767 | |||
768 | if (target_len != lli_len) { | ||
769 | dev_vdbg(&pl08x->adev->dev, | ||
770 | "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n", | ||
771 | __func__, target_len, lli_len, txd->len); | ||
772 | } | ||
773 | |||
774 | cctl = pl08x_cctl_bits(cctl, | ||
775 | bd.srcbus.buswidth, | ||
776 | bd.dstbus.buswidth, | ||
777 | tsize); | ||
778 | |||
779 | dev_vdbg(&pl08x->adev->dev, | ||
780 | "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n", | ||
781 | __func__, lli_len, bd.remainder); | ||
782 | pl08x_fill_lli_for_desc(&bd, num_llis++, | ||
783 | lli_len, cctl); | ||
784 | total_bytes += lli_len; | ||
785 | } | ||
786 | |||
787 | |||
788 | if (odd_bytes) { | ||
789 | /* | ||
790 | * Creep past the boundary, maintaining | ||
791 | * master alignment | ||
792 | */ | ||
793 | int j; | ||
794 | for (j = 0; (j < mbus->buswidth) | ||
795 | && (bd.remainder); j++) { | ||
796 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
797 | dev_vdbg(&pl08x->adev->dev, | ||
798 | "%s align with boundary, single byte (remain 0x%08zx)\n", | ||
799 | __func__, bd.remainder); | ||
800 | pl08x_fill_lli_for_desc(&bd, | ||
801 | num_llis++, 1, cctl); | ||
802 | total_bytes++; | ||
803 | } | ||
804 | } | ||
805 | } | ||
806 | |||
807 | /* | ||
808 | * Send any odd bytes | ||
809 | */ | ||
810 | while (bd.remainder) { | ||
811 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
812 | dev_vdbg(&pl08x->adev->dev, | ||
813 | "%s align with boundary, single odd byte (remain %zu)\n", | ||
814 | __func__, bd.remainder); | ||
815 | pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); | ||
816 | total_bytes++; | ||
817 | } | ||
818 | } | ||
819 | if (total_bytes != txd->len) { | ||
820 | dev_err(&pl08x->adev->dev, | ||
821 | "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", | ||
822 | __func__, total_bytes, txd->len); | ||
823 | return 0; | ||
824 | } | ||
825 | |||
826 | if (num_llis >= MAX_NUM_TSFR_LLIS) { | ||
827 | dev_err(&pl08x->adev->dev, | ||
828 | "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", | ||
829 | __func__, (u32) MAX_NUM_TSFR_LLIS); | ||
830 | return 0; | ||
831 | } | ||
832 | |||
833 | llis_va = txd->llis_va; | ||
834 | /* The final LLI terminates the LLI list. */ | ||
835 | llis_va[num_llis - 1].lli = 0; | ||
836 | /* The final LLI element shall also fire an interrupt. */ | ||
837 | llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; | ||
838 | |||
839 | #ifdef VERBOSE_DEBUG | ||
840 | { | ||
841 | int i; | ||
842 | |||
843 | for (i = 0; i < num_llis; i++) { | ||
844 | dev_vdbg(&pl08x->adev->dev, | ||
845 | "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", | ||
846 | i, | ||
847 | &llis_va[i], | ||
848 | llis_va[i].src, | ||
849 | llis_va[i].dst, | ||
850 | llis_va[i].cctl, | ||
851 | llis_va[i].lli | ||
852 | ); | ||
853 | } | ||
854 | } | ||
855 | #endif | ||
856 | |||
857 | return num_llis; | ||
858 | } | ||
859 | |||
860 | /* You should call this with the struct pl08x lock held */ | ||
861 | static void pl08x_free_txd(struct pl08x_driver_data *pl08x, | ||
862 | struct pl08x_txd *txd) | ||
863 | { | ||
864 | /* Free the LLI */ | ||
865 | dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); | ||
866 | |||
867 | pl08x->pool_ctr--; | ||
868 | |||
869 | kfree(txd); | ||
870 | } | ||
871 | |||
872 | static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, | ||
873 | struct pl08x_dma_chan *plchan) | ||
874 | { | ||
875 | struct pl08x_txd *txdi = NULL; | ||
876 | struct pl08x_txd *next; | ||
877 | |||
878 | if (!list_empty(&plchan->pend_list)) { | ||
879 | list_for_each_entry_safe(txdi, | ||
880 | next, &plchan->pend_list, node) { | ||
881 | list_del(&txdi->node); | ||
882 | pl08x_free_txd(pl08x, txdi); | ||
883 | } | ||
884 | } | ||
885 | } | ||
886 | |||
887 | /* | ||
888 | * The DMA ENGINE API | ||
889 | */ | ||
890 | static int pl08x_alloc_chan_resources(struct dma_chan *chan) | ||
891 | { | ||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | static void pl08x_free_chan_resources(struct dma_chan *chan) | ||
896 | { | ||
897 | } | ||
898 | |||
899 | /* | ||
900 | * This should be called with the channel plchan->lock held | ||
901 | */ | ||
902 | static int prep_phy_channel(struct pl08x_dma_chan *plchan, | ||
903 | struct pl08x_txd *txd) | ||
904 | { | ||
905 | struct pl08x_driver_data *pl08x = plchan->host; | ||
906 | struct pl08x_phy_chan *ch; | ||
907 | int ret; | ||
908 | |||
909 | /* Check if we already have a channel */ | ||
910 | if (plchan->phychan) | ||
911 | return 0; | ||
912 | |||
913 | ch = pl08x_get_phy_channel(pl08x, plchan); | ||
914 | if (!ch) { | ||
915 | /* No physical channel available, cope with it */ | ||
916 | dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); | ||
917 | return -EBUSY; | ||
918 | } | ||
919 | |||
920 | /* | ||
921 | * OK we have a physical channel: for memcpy() this is all we | ||
922 | * need, but for slaves the physical signals may be muxed! | ||
923 | * Can the platform allow us to use this channel? | ||
924 | */ | ||
925 | if (plchan->slave && | ||
926 | ch->signal < 0 && | ||
927 | pl08x->pd->get_signal) { | ||
928 | ret = pl08x->pd->get_signal(plchan); | ||
929 | if (ret < 0) { | ||
930 | dev_dbg(&pl08x->adev->dev, | ||
931 | "unable to use physical channel %d for transfer on %s due to platform restrictions\n", | ||
932 | ch->id, plchan->name); | ||
933 | /* Release physical channel & return */ | ||
934 | pl08x_put_phy_channel(pl08x, ch); | ||
935 | return -EBUSY; | ||
936 | } | ||
937 | ch->signal = ret; | ||
938 | |||
939 | /* Assign the flow control signal to this channel */ | ||
940 | if (txd->direction == DMA_TO_DEVICE) | ||
941 | txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; | ||
942 | else if (txd->direction == DMA_FROM_DEVICE) | ||
943 | txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; | ||
944 | } | ||
945 | |||
946 | dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", | ||
947 | ch->id, | ||
948 | ch->signal, | ||
949 | plchan->name); | ||
950 | |||
951 | plchan->phychan_hold++; | ||
952 | plchan->phychan = ch; | ||
953 | |||
954 | return 0; | ||
955 | } | ||
956 | |||
957 | static void release_phy_channel(struct pl08x_dma_chan *plchan) | ||
958 | { | ||
959 | struct pl08x_driver_data *pl08x = plchan->host; | ||
960 | |||
961 | if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) { | ||
962 | pl08x->pd->put_signal(plchan); | ||
963 | plchan->phychan->signal = -1; | ||
964 | } | ||
965 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
966 | plchan->phychan = NULL; | ||
967 | } | ||
968 | |||
969 | static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | ||
970 | { | ||
971 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); | ||
972 | struct pl08x_txd *txd = to_pl08x_txd(tx); | ||
973 | unsigned long flags; | ||
974 | |||
975 | spin_lock_irqsave(&plchan->lock, flags); | ||
976 | |||
977 | plchan->chan.cookie += 1; | ||
978 | if (plchan->chan.cookie < 0) | ||
979 | plchan->chan.cookie = 1; | ||
980 | tx->cookie = plchan->chan.cookie; | ||
981 | |||
982 | /* Put this onto the pending list */ | ||
983 | list_add_tail(&txd->node, &plchan->pend_list); | ||
984 | |||
985 | /* | ||
986 | * If there was no physical channel available for this memcpy, | ||
987 | * stack the request up and indicate that the channel is waiting | ||
988 | * for a free physical channel. | ||
989 | */ | ||
990 | if (!plchan->slave && !plchan->phychan) { | ||
991 | /* Do this memcpy whenever there is a channel ready */ | ||
992 | plchan->state = PL08X_CHAN_WAITING; | ||
993 | plchan->waiting = txd; | ||
994 | } else { | ||
995 | plchan->phychan_hold--; | ||
996 | } | ||
997 | |||
998 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
999 | |||
1000 | return tx->cookie; | ||
1001 | } | ||
1002 | |||
1003 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | ||
1004 | struct dma_chan *chan, unsigned long flags) | ||
1005 | { | ||
1006 | struct dma_async_tx_descriptor *retval = NULL; | ||
1007 | |||
1008 | return retval; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * Code accessing dma_async_is_complete() in a tight loop may give problems. | ||
1013 | * If slaves are relying on interrupts to signal completion this function | ||
1014 | * must not be called with interrupts disabled. | ||
1015 | */ | ||
1016 | static enum dma_status | ||
1017 | pl08x_dma_tx_status(struct dma_chan *chan, | ||
1018 | dma_cookie_t cookie, | ||
1019 | struct dma_tx_state *txstate) | ||
1020 | { | ||
1021 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1022 | dma_cookie_t last_used; | ||
1023 | dma_cookie_t last_complete; | ||
1024 | enum dma_status ret; | ||
1025 | u32 bytesleft = 0; | ||
1026 | |||
1027 | last_used = plchan->chan.cookie; | ||
1028 | last_complete = plchan->lc; | ||
1029 | |||
1030 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
1031 | if (ret == DMA_SUCCESS) { | ||
1032 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1033 | return ret; | ||
1034 | } | ||
1035 | |||
1036 | /* | ||
1037 | * This cookie not complete yet | ||
1038 | */ | ||
1039 | last_used = plchan->chan.cookie; | ||
1040 | last_complete = plchan->lc; | ||
1041 | |||
1042 | /* Get number of bytes left in the active transactions and queue */ | ||
1043 | bytesleft = pl08x_getbytes_chan(plchan); | ||
1044 | |||
1045 | dma_set_tx_state(txstate, last_complete, last_used, | ||
1046 | bytesleft); | ||
1047 | |||
1048 | if (plchan->state == PL08X_CHAN_PAUSED) | ||
1049 | return DMA_PAUSED; | ||
1050 | |||
1051 | /* Whether waiting or running, we're in progress */ | ||
1052 | return DMA_IN_PROGRESS; | ||
1053 | } | ||
1054 | |||
1055 | /* PrimeCell DMA extension */ | ||
1056 | struct burst_table { | ||
1057 | int burstwords; | ||
1058 | u32 reg; | ||
1059 | }; | ||
1060 | |||
1061 | static const struct burst_table burst_sizes[] = { | ||
1062 | { | ||
1063 | .burstwords = 256, | ||
1064 | .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1065 | (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1066 | }, | ||
1067 | { | ||
1068 | .burstwords = 128, | ||
1069 | .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1070 | (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1071 | }, | ||
1072 | { | ||
1073 | .burstwords = 64, | ||
1074 | .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1075 | (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1076 | }, | ||
1077 | { | ||
1078 | .burstwords = 32, | ||
1079 | .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1080 | (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1081 | }, | ||
1082 | { | ||
1083 | .burstwords = 16, | ||
1084 | .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1085 | (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1086 | }, | ||
1087 | { | ||
1088 | .burstwords = 8, | ||
1089 | .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1090 | (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1091 | }, | ||
1092 | { | ||
1093 | .burstwords = 4, | ||
1094 | .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1095 | (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1096 | }, | ||
1097 | { | ||
1098 | .burstwords = 1, | ||
1099 | .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1100 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
1101 | }, | ||
1102 | }; | ||
1103 | |||
1104 | static int dma_set_runtime_config(struct dma_chan *chan, | ||
1105 | struct dma_slave_config *config) | ||
1106 | { | ||
1107 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1108 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1109 | struct pl08x_channel_data *cd = plchan->cd; | ||
1110 | enum dma_slave_buswidth addr_width; | ||
1111 | dma_addr_t addr; | ||
1112 | u32 maxburst; | ||
1113 | u32 cctl = 0; | ||
1114 | int i; | ||
1115 | |||
1116 | if (!plchan->slave) | ||
1117 | return -EINVAL; | ||
1118 | |||
1119 | /* Transfer direction */ | ||
1120 | plchan->runtime_direction = config->direction; | ||
1121 | if (config->direction == DMA_TO_DEVICE) { | ||
1122 | addr = config->dst_addr; | ||
1123 | addr_width = config->dst_addr_width; | ||
1124 | maxburst = config->dst_maxburst; | ||
1125 | } else if (config->direction == DMA_FROM_DEVICE) { | ||
1126 | addr = config->src_addr; | ||
1127 | addr_width = config->src_addr_width; | ||
1128 | maxburst = config->src_maxburst; | ||
1129 | } else { | ||
1130 | dev_err(&pl08x->adev->dev, | ||
1131 | "bad runtime_config: alien transfer direction\n"); | ||
1132 | return -EINVAL; | ||
1133 | } | ||
1134 | |||
1135 | switch (addr_width) { | ||
1136 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
1137 | cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1138 | (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1139 | break; | ||
1140 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
1141 | cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1142 | (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1143 | break; | ||
1144 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
1145 | cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
1146 | (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
1147 | break; | ||
1148 | default: | ||
1149 | dev_err(&pl08x->adev->dev, | ||
1150 | "bad runtime_config: alien address width\n"); | ||
1151 | return -EINVAL; | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * Now decide on a maxburst: | ||
1156 | * If this channel will only request single transfers, set this | ||
1157 | * down to ONE element. Also select one element if no maxburst | ||
1158 | * is specified. | ||
1159 | */ | ||
1160 | if (plchan->cd->single || maxburst == 0) { | ||
1161 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
1162 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | ||
1163 | } else { | ||
1164 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) | ||
1165 | if (burst_sizes[i].burstwords <= maxburst) | ||
1166 | break; | ||
1167 | cctl |= burst_sizes[i].reg; | ||
1168 | } | ||
1169 | |||
1170 | plchan->runtime_addr = addr; | ||
1171 | |||
1172 | /* Modify the default channel data to fit PrimeCell request */ | ||
1173 | cd->cctl = cctl; | ||
1174 | |||
1175 | dev_dbg(&pl08x->adev->dev, | ||
1176 | "configured channel %s (%s) for %s, data width %d, " | ||
1177 | "maxburst %d words, LE, CCTL=0x%08x\n", | ||
1178 | dma_chan_name(chan), plchan->name, | ||
1179 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | ||
1180 | addr_width, | ||
1181 | maxburst, | ||
1182 | cctl); | ||
1183 | |||
1184 | return 0; | ||
1185 | } | ||
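
The maxburst loop above walks the descending burst_sizes[] table and stops at the first entry that does not exceed the requested burst, rounding the request down to a supported size. A standalone sketch of that selection (table values copied from above, register encodings omitted; maxburst == 0 is handled by the "single transfer" path before the loop in the driver):

#include <assert.h>

static const int example_burstwords[] = { 256, 128, 64, 32, 16, 8, 4, 1 };

static int example_pick_burst(int maxburst)
{
	unsigned int i;

	/* Clamp to the last (smallest) entry if nothing else matches. */
	for (i = 0; i < sizeof(example_burstwords) / sizeof(example_burstwords[0]) - 1; i++)
		if (example_burstwords[i] <= maxburst)
			break;
	return example_burstwords[i];
}

int main(void)
{
	assert(example_pick_burst(20) == 16);	/* rounded down to a supported size */
	assert(example_pick_burst(256) == 256);
	assert(example_pick_burst(3) == 1);
	return 0;
}
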
1186 | |||
1187 | /* | ||
1188 | * Slave transactions callback to the slave device to allow | ||
1189 | * synchronization of slave DMA signals with the DMAC enable | ||
1190 | */ | ||
1191 | static void pl08x_issue_pending(struct dma_chan *chan) | ||
1192 | { | ||
1193 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1194 | unsigned long flags; | ||
1195 | |||
1196 | spin_lock_irqsave(&plchan->lock, flags); | ||
1197 | /* Something is already active, or we're waiting for a channel... */ | ||
1198 | if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { | ||
1199 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1200 | return; | ||
1201 | } | ||
1202 | |||
1203 | /* Take the first element in the queue and execute it */ | ||
1204 | if (!list_empty(&plchan->pend_list)) { | ||
1205 | struct pl08x_txd *next; | ||
1206 | |||
1207 | next = list_first_entry(&plchan->pend_list, | ||
1208 | struct pl08x_txd, | ||
1209 | node); | ||
1210 | list_del(&next->node); | ||
1211 | plchan->state = PL08X_CHAN_RUNNING; | ||
1212 | |||
1213 | pl08x_start_txd(plchan, next); | ||
1214 | } | ||
1215 | |||
1216 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1217 | } | ||
1218 | |||
1219 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | ||
1220 | struct pl08x_txd *txd) | ||
1221 | { | ||
1222 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1223 | unsigned long flags; | ||
1224 | int num_llis, ret; | ||
1225 | |||
1226 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | ||
1227 | if (!num_llis) { | ||
1228 | kfree(txd); | ||
1229 | return -EINVAL; | ||
1230 | } | ||
1231 | |||
1232 | spin_lock_irqsave(&plchan->lock, flags); | ||
1233 | |||
1234 | /* | ||
1235 | * See if we already have a physical channel allocated, | ||
1236 | * else this is the time to try to get one. | ||
1237 | */ | ||
1238 | ret = prep_phy_channel(plchan, txd); | ||
1239 | if (ret) { | ||
1240 | /* | ||
1241 | * No physical channel was available. | ||
1242 | * | ||
1243 | * memcpy transfers can be sorted out at submission time. | ||
1244 | * | ||
1245 | * Slave transfers may have been denied due to platform | ||
1246 | * channel muxing restrictions. Since there is no guarantee | ||
1247 | * that this will ever be resolved, and the signal must be | ||
1248 | * acquired AFTER acquiring the physical channel, we will let | ||
1249 | * them be NACK:ed with -EBUSY here. The drivers can retry | ||
1250 | * the prep() call if they are eager on doing this using DMA. | ||
1251 | */ | ||
1252 | if (plchan->slave) { | ||
1253 | pl08x_free_txd_list(pl08x, plchan); | ||
1254 | pl08x_free_txd(pl08x, txd); | ||
1255 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1256 | return -EBUSY; | ||
1257 | } | ||
1258 | } else | ||
1259 | /* | ||
1260 | * Else we're all set, paused and ready to roll, status | ||
1261 | * will switch to PL08X_CHAN_RUNNING when we call | ||
1262 | * issue_pending(). If there is something running on the | ||
1263 | * channel already we don't change its state. | ||
1264 | */ | ||
1265 | if (plchan->state == PL08X_CHAN_IDLE) | ||
1266 | plchan->state = PL08X_CHAN_PAUSED; | ||
1267 | |||
1268 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1269 | |||
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | /* | ||
1274 | * Given the source and destination available bus masks, select which | ||
1275 | * will be routed to each port. We try to have source and destination | ||
1276 | * on separate ports, but always respect the allowable settings. | ||
1277 | */ | ||
1278 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1279 | { | ||
1280 | u32 cctl = 0; | ||
1281 | |||
1282 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1283 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1284 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1285 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1286 | |||
1287 | return cctl; | ||
1288 | } | ||
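
A quick check of the routing rule above, with the AHB masks and the AHB2 control bits replaced by hypothetical stand-in values; it shows that when both endpoints may use either port, the source and destination end up on different AHB masters:

#include <assert.h>
#include <stdint.h>

#define AHB1		(1 << 0)	/* stand-in for PL08X_AHB1 */
#define AHB2		(1 << 1)	/* stand-in for PL08X_AHB2 */
#define DST_ON_AHB2	(1 << 0)	/* stand-in for PL080_CONTROL_DST_AHB2 */
#define SRC_ON_AHB2	(1 << 1)	/* stand-in for PL080_CONTROL_SRC_AHB2 */

static uint32_t example_select_bus(uint8_t src, uint8_t dst)
{
	uint32_t cctl = 0;

	if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
		cctl |= DST_ON_AHB2;
	if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
		cctl |= SRC_ON_AHB2;
	return cctl;
}

int main(void)
{
	/* Both ends may use either port: destination goes to AHB2, source
	 * stays on AHB1, so the two directions use separate masters. */
	assert(example_select_bus(AHB1 | AHB2, AHB1 | AHB2) == DST_ON_AHB2);
	/* Destination restricted to AHB1: keep it there and move the source. */
	assert(example_select_bus(AHB1 | AHB2, AHB1) == SRC_ON_AHB2);
	return 0;
}
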
1289 | |||
1290 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | ||
1291 | unsigned long flags) | ||
1292 | { | ||
1293 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
1294 | |||
1295 | if (txd) { | ||
1296 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); | ||
1297 | txd->tx.flags = flags; | ||
1298 | txd->tx.tx_submit = pl08x_tx_submit; | ||
1299 | INIT_LIST_HEAD(&txd->node); | ||
1300 | |||
1301 | /* Always enable error and terminal interrupts */ | ||
1302 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | | ||
1303 | PL080_CONFIG_TC_IRQ_MASK; | ||
1304 | } | ||
1305 | return txd; | ||
1306 | } | ||
1307 | |||
1308 | /* | ||
1309 | * Initialize a descriptor to be used by memcpy submit | ||
1310 | */ | ||
1311 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | ||
1312 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
1313 | size_t len, unsigned long flags) | ||
1314 | { | ||
1315 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1316 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1317 | struct pl08x_txd *txd; | ||
1318 | int ret; | ||
1319 | |||
1320 | txd = pl08x_get_txd(plchan, flags); | ||
1321 | if (!txd) { | ||
1322 | dev_err(&pl08x->adev->dev, | ||
1323 | "%s no memory for descriptor\n", __func__); | ||
1324 | return NULL; | ||
1325 | } | ||
1326 | |||
1327 | txd->direction = DMA_NONE; | ||
1328 | txd->src_addr = src; | ||
1329 | txd->dst_addr = dest; | ||
1330 | txd->len = len; | ||
1331 | |||
1332 | /* Set platform data for m2m */ | ||
1333 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1334 | txd->cctl = pl08x->pd->memcpy_channel.cctl & | ||
1335 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
1336 | |||
1337 | /* Both src and dst must be incremented or the code will break */ | ||
1338 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | ||
1339 | |||
1340 | if (pl08x->vd->dualmaster) | ||
1341 | txd->cctl |= pl08x_select_bus(pl08x, | ||
1342 | pl08x->mem_buses, pl08x->mem_buses); | ||
1343 | |||
1344 | ret = pl08x_prep_channel_resources(plchan, txd); | ||
1345 | if (ret) | ||
1346 | return NULL; | ||
1347 | |||
1348 | return &txd->tx; | ||
1349 | } | ||
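A minimal sketch of how a client reaches this prep hook through the dmaengine API; dst_phys, src_phys and len are assumed, already DMA-mapped addresses and length.

    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dma_async_tx_descriptor *txd;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    chan = dma_request_channel(mask, NULL, NULL);
    if (!chan)
            return -ENODEV;

    txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
                                               len, DMA_CTRL_ACK);
    if (!txd) {
            /* no LLIs or no physical channel available */
            dma_release_channel(chan);
            return -EBUSY;
    }
    txd->tx_submit(txd);
    chan->device->device_issue_pending(chan);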
1350 | |||
1351 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | ||
1352 | struct dma_chan *chan, struct scatterlist *sgl, | ||
1353 | unsigned int sg_len, enum dma_data_direction direction, | ||
1354 | unsigned long flags) | ||
1355 | { | ||
1356 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1357 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1358 | struct pl08x_txd *txd; | ||
1359 | u8 src_buses, dst_buses; | ||
1360 | int ret; | ||
1361 | |||
1362 | /* | ||
1363 | * Current implementation ASSUMES only one sg | ||
1364 | */ | ||
1365 | if (sg_len != 1) { | ||
1366 | dev_err(&pl08x->adev->dev, "%s prepared too long an sg list\n", | ||
1367 | __func__); | ||
1368 | BUG(); | ||
1369 | } | ||
1370 | |||
1371 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | ||
1372 | __func__, sgl->length, plchan->name); | ||
1373 | |||
1374 | txd = pl08x_get_txd(plchan, flags); | ||
1375 | if (!txd) { | ||
1376 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | ||
1377 | return NULL; | ||
1378 | } | ||
1379 | |||
1380 | if (direction != plchan->runtime_direction) | ||
1381 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | ||
1382 | "the direction configured for the PrimeCell\n", | ||
1383 | __func__); | ||
1384 | |||
1385 | /* | ||
1386 | * Set up addresses, the PrimeCell configured address | ||
1387 | * will take precedence since this may configure the | ||
1388 | * channel target address dynamically at runtime. | ||
1389 | */ | ||
1390 | txd->direction = direction; | ||
1391 | txd->len = sgl->length; | ||
1392 | |||
1393 | txd->cctl = plchan->cd->cctl & | ||
1394 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1395 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1396 | PL080_CONTROL_PROT_MASK); | ||
1397 | |||
1398 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1399 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1400 | |||
1401 | if (direction == DMA_TO_DEVICE) { | ||
1402 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1403 | txd->cctl |= PL080_CONTROL_SRC_INCR; | ||
1404 | txd->src_addr = sgl->dma_address; | ||
1405 | if (plchan->runtime_addr) | ||
1406 | txd->dst_addr = plchan->runtime_addr; | ||
1407 | else | ||
1408 | txd->dst_addr = plchan->cd->addr; | ||
1409 | src_buses = pl08x->mem_buses; | ||
1410 | dst_buses = plchan->cd->periph_buses; | ||
1411 | } else if (direction == DMA_FROM_DEVICE) { | ||
1412 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1413 | txd->cctl |= PL080_CONTROL_DST_INCR; | ||
1414 | if (plchan->runtime_addr) | ||
1415 | txd->src_addr = plchan->runtime_addr; | ||
1416 | else | ||
1417 | txd->src_addr = plchan->cd->addr; | ||
1418 | txd->dst_addr = sgl->dma_address; | ||
1419 | src_buses = plchan->cd->periph_buses; | ||
1420 | dst_buses = pl08x->mem_buses; | ||
1421 | } else { | ||
1422 | dev_err(&pl08x->adev->dev, | ||
1423 | "%s direction unsupported\n", __func__); | ||
1424 | return NULL; | ||
1425 | } | ||
1426 | |||
1427 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); | ||
1428 | |||
1429 | ret = pl08x_prep_channel_resources(plchan, txd); | ||
1430 | if (ret) | ||
1431 | return NULL; | ||
1432 | |||
1433 | return &txd->tx; | ||
1434 | } | ||
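Because of the single-entry limitation above, a slave client has to hand in exactly one scatterlist element. A hedged sketch (buf and len are assumptions, not from this file):

    struct scatterlist sg;
    struct dma_async_tx_descriptor *txd;

    sg_init_one(&sg, buf, len);
    if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
            return -ENOMEM;

    /* exactly one entry, or the prep above will BUG() */
    txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
                                             DMA_TO_DEVICE,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);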
1435 | |||
1436 | static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1437 | unsigned long arg) | ||
1438 | { | ||
1439 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1440 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1441 | unsigned long flags; | ||
1442 | int ret = 0; | ||
1443 | |||
1444 | /* Controls applicable to inactive channels */ | ||
1445 | if (cmd == DMA_SLAVE_CONFIG) { | ||
1446 | return dma_set_runtime_config(chan, | ||
1447 | (struct dma_slave_config *)arg); | ||
1448 | } | ||
1449 | |||
1450 | /* | ||
1451 | * Anything succeeds on channels with no physical allocation and | ||
1452 | * no queued transfers. | ||
1453 | */ | ||
1454 | spin_lock_irqsave(&plchan->lock, flags); | ||
1455 | if (!plchan->phychan && !plchan->at) { | ||
1456 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1457 | return 0; | ||
1458 | } | ||
1459 | |||
1460 | switch (cmd) { | ||
1461 | case DMA_TERMINATE_ALL: | ||
1462 | plchan->state = PL08X_CHAN_IDLE; | ||
1463 | |||
1464 | if (plchan->phychan) { | ||
1465 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); | ||
1466 | |||
1467 | /* | ||
1468 | * Mark physical channel as free and free any slave | ||
1469 | * signal | ||
1470 | */ | ||
1471 | release_phy_channel(plchan); | ||
1472 | } | ||
1473 | /* Dequeue jobs and free LLIs */ | ||
1474 | if (plchan->at) { | ||
1475 | pl08x_free_txd(pl08x, plchan->at); | ||
1476 | plchan->at = NULL; | ||
1477 | } | ||
1478 | /* Dequeue jobs not yet fired as well */ | ||
1479 | pl08x_free_txd_list(pl08x, plchan); | ||
1480 | break; | ||
1481 | case DMA_PAUSE: | ||
1482 | pl08x_pause_phy_chan(plchan->phychan); | ||
1483 | plchan->state = PL08X_CHAN_PAUSED; | ||
1484 | break; | ||
1485 | case DMA_RESUME: | ||
1486 | pl08x_resume_phy_chan(plchan->phychan); | ||
1487 | plchan->state = PL08X_CHAN_RUNNING; | ||
1488 | break; | ||
1489 | default: | ||
1490 | /* Unknown command */ | ||
1491 | ret = -ENXIO; | ||
1492 | break; | ||
1493 | } | ||
1494 | |||
1495 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1496 | |||
1497 | return ret; | ||
1498 | } | ||
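Clients drive this hook through device_control(); a sketch of the commands it accepts, assuming fifo_phys is the peripheral FIFO address configured at runtime:

    struct dma_slave_config cfg = {
            .direction      = DMA_TO_DEVICE,
            .dst_addr       = fifo_phys,        /* assumed FIFO address */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 4,
    };

    /* reconfigure the (inactive) channel, then pause/resume/abort at will */
    chan->device->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);
    chan->device->device_control(chan, DMA_PAUSE, 0);
    chan->device->device_control(chan, DMA_RESUME, 0);
    chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);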
1499 | |||
1500 | bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | ||
1501 | { | ||
1502 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
1503 | char *name = chan_id; | ||
1504 | |||
1505 | /* Check that the channel is not taken! */ | ||
1506 | if (!strcmp(plchan->name, name)) | ||
1507 | return true; | ||
1508 | |||
1509 | return false; | ||
1510 | } | ||
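The filter is meant to be passed to dma_request_channel() so a peripheral driver can grab its named slave channel. Sketch; "uart0_tx" is a made-up bus_id that would have to match a slave_channels[] entry in the platform data:

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
    if (!chan)
            return -EBUSY;      /* no such name, or every channel is taken */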
1511 | |||
1512 | /* | ||
1513 | * Just check that the device is there and active | ||
1514 | * TODO: turn this bit on/off depending on the number of physical channels | ||
1515 | * actually used, if it is zero... well shut it off. That will save some | ||
1516 | * power. Cut the clock at the same time. | ||
1517 | */ | ||
1518 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | ||
1519 | { | ||
1520 | u32 val; | ||
1521 | |||
1522 | val = readl(pl08x->base + PL080_CONFIG); | ||
1523 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | ||
1524 | /* We implicitly clear bit 1 and that means little-endian mode */ | ||
1525 | val |= PL080_CONFIG_ENABLE; | ||
1526 | writel(val, pl08x->base + PL080_CONFIG); | ||
1527 | } | ||
1528 | |||
1529 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
1530 | { | ||
1531 | struct device *dev = txd->tx.chan->device->dev; | ||
1532 | |||
1533 | if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1534 | if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1535 | dma_unmap_single(dev, txd->src_addr, txd->len, | ||
1536 | DMA_TO_DEVICE); | ||
1537 | else | ||
1538 | dma_unmap_page(dev, txd->src_addr, txd->len, | ||
1539 | DMA_TO_DEVICE); | ||
1540 | } | ||
1541 | if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1542 | if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1543 | dma_unmap_single(dev, txd->dst_addr, txd->len, | ||
1544 | DMA_FROM_DEVICE); | ||
1545 | else | ||
1546 | dma_unmap_page(dev, txd->dst_addr, txd->len, | ||
1547 | DMA_FROM_DEVICE); | ||
1548 | } | ||
1549 | } | ||
1550 | |||
1551 | static void pl08x_tasklet(unsigned long data) | ||
1552 | { | ||
1553 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | ||
1554 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1555 | struct pl08x_txd *txd; | ||
1556 | unsigned long flags; | ||
1557 | |||
1558 | spin_lock_irqsave(&plchan->lock, flags); | ||
1559 | |||
1560 | txd = plchan->at; | ||
1561 | plchan->at = NULL; | ||
1562 | |||
1563 | if (txd) { | ||
1564 | /* Update last completed */ | ||
1565 | plchan->lc = txd->tx.cookie; | ||
1566 | } | ||
1567 | |||
1568 | /* If a new descriptor is queued, set it up; plchan->at is NULL here */ | ||
1569 | if (!list_empty(&plchan->pend_list)) { | ||
1570 | struct pl08x_txd *next; | ||
1571 | |||
1572 | next = list_first_entry(&plchan->pend_list, | ||
1573 | struct pl08x_txd, | ||
1574 | node); | ||
1575 | list_del(&next->node); | ||
1576 | |||
1577 | pl08x_start_txd(plchan, next); | ||
1578 | } else if (plchan->phychan_hold) { | ||
1579 | /* | ||
1580 | * This channel is still in use - we have a new txd being | ||
1581 | * prepared and will soon be queued. Don't give up the | ||
1582 | * physical channel. | ||
1583 | */ | ||
1584 | } else { | ||
1585 | struct pl08x_dma_chan *waiting = NULL; | ||
1586 | |||
1587 | /* | ||
1588 | * No more jobs, so free up the physical channel | ||
1589 | * Free any allocated signal on slave transfers too | ||
1590 | */ | ||
1591 | release_phy_channel(plchan); | ||
1592 | plchan->state = PL08X_CHAN_IDLE; | ||
1593 | |||
1594 | /* | ||
1595 | * And NOW, before anyone else can grab that freed-up | ||
1596 | * physical channel, see if there is some memcpy pending | ||
1597 | * that seriously needs to start because of being stacked | ||
1598 | * up while we were choking the physical channels with data. | ||
1599 | */ | ||
1600 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | ||
1601 | chan.device_node) { | ||
1602 | if (waiting->state == PL08X_CHAN_WAITING && | ||
1603 | waiting->waiting != NULL) { | ||
1604 | int ret; | ||
1605 | |||
1606 | /* This should REALLY not fail now */ | ||
1607 | ret = prep_phy_channel(waiting, | ||
1608 | waiting->waiting); | ||
1609 | BUG_ON(ret); | ||
1610 | waiting->phychan_hold--; | ||
1611 | waiting->state = PL08X_CHAN_RUNNING; | ||
1612 | waiting->waiting = NULL; | ||
1613 | pl08x_issue_pending(&waiting->chan); | ||
1614 | break; | ||
1615 | } | ||
1616 | } | ||
1617 | } | ||
1618 | |||
1619 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1620 | |||
1621 | if (txd) { | ||
1622 | dma_async_tx_callback callback = txd->tx.callback; | ||
1623 | void *callback_param = txd->tx.callback_param; | ||
1624 | |||
1625 | /* Don't try to unmap buffers on slave channels */ | ||
1626 | if (!plchan->slave) | ||
1627 | pl08x_unmap_buffers(txd); | ||
1628 | |||
1629 | /* Free the descriptor */ | ||
1630 | spin_lock_irqsave(&plchan->lock, flags); | ||
1631 | pl08x_free_txd(pl08x, txd); | ||
1632 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1633 | |||
1634 | /* Callback to signal completion */ | ||
1635 | if (callback) | ||
1636 | callback(callback_param); | ||
1637 | } | ||
1638 | } | ||
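Since the completion callback fires from this tasklet, i.e. softirq context, clients must not sleep in it; a common pattern is to just signal a completion. A hedged sketch, not from this driver:

    static void my_dma_done(void *arg)
    {
            /* called from pl08x_tasklet(): never sleep here */
            complete(arg);
    }

    /* ... in the submit path ... */
    DECLARE_COMPLETION_ONSTACK(done);

    txd->callback = my_dma_done;
    txd->callback_param = &done;
    txd->tx_submit(txd);
    chan->device->device_issue_pending(chan);

    if (!wait_for_completion_timeout(&done, msecs_to_jiffies(100)))
            chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);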
1639 | |||
1640 | static irqreturn_t pl08x_irq(int irq, void *dev) | ||
1641 | { | ||
1642 | struct pl08x_driver_data *pl08x = dev; | ||
1643 | u32 mask = 0; | ||
1644 | u32 val; | ||
1645 | int i; | ||
1646 | |||
1647 | val = readl(pl08x->base + PL080_ERR_STATUS); | ||
1648 | if (val) { | ||
1649 | /* An error interrupt (on one or more channels) */ | ||
1650 | dev_err(&pl08x->adev->dev, | ||
1651 | "%s error interrupt, register value 0x%08x\n", | ||
1652 | __func__, val); | ||
1653 | /* | ||
1654 | * Simply clear ALL PL08X error interrupts, | ||
1655 | * regardless of channel and cause | ||
1656 | * FIXME: should be 0x00000003 on PL081 really. | ||
1657 | */ | ||
1658 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
1659 | } | ||
1660 | val = readl(pl08x->base + PL080_INT_STATUS); | ||
1661 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
1662 | if ((1 << i) & val) { | ||
1663 | /* Locate physical channel */ | ||
1664 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | ||
1665 | struct pl08x_dma_chan *plchan = phychan->serving; | ||
1666 | |||
1667 | /* Schedule tasklet on this channel */ | ||
1668 | tasklet_schedule(&plchan->tasklet); | ||
1669 | |||
1670 | mask |= (1 << i); | ||
1671 | } | ||
1672 | } | ||
1673 | /* Clear only the terminal interrupts on channels we processed */ | ||
1674 | writel(mask, pl08x->base + PL080_TC_CLEAR); | ||
1675 | |||
1676 | return mask ? IRQ_HANDLED : IRQ_NONE; | ||
1677 | } | ||
1678 | |||
1679 | /* | ||
1680 | * Initialise the DMAC memcpy/slave channels. | ||
1681 | * Make a local wrapper to hold required data | ||
1682 | */ | ||
1683 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | ||
1684 | struct dma_device *dmadev, | ||
1685 | unsigned int channels, | ||
1686 | bool slave) | ||
1687 | { | ||
1688 | struct pl08x_dma_chan *chan; | ||
1689 | int i; | ||
1690 | |||
1691 | INIT_LIST_HEAD(&dmadev->channels); | ||
1692 | |||
1693 | /* | ||
1694 | * Register as many memcpy channels as we have physical channels; | ||
1695 | * we won't always be able to use them all but the code will have | ||
1696 | * to cope with that situation. | ||
1697 | */ | ||
1698 | for (i = 0; i < channels; i++) { | ||
1699 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); | ||
1700 | if (!chan) { | ||
1701 | dev_err(&pl08x->adev->dev, | ||
1702 | "%s no memory for channel\n", __func__); | ||
1703 | return -ENOMEM; | ||
1704 | } | ||
1705 | |||
1706 | chan->host = pl08x; | ||
1707 | chan->state = PL08X_CHAN_IDLE; | ||
1708 | |||
1709 | if (slave) { | ||
1710 | chan->slave = true; | ||
1711 | chan->name = pl08x->pd->slave_channels[i].bus_id; | ||
1712 | chan->cd = &pl08x->pd->slave_channels[i]; | ||
1713 | } else { | ||
1714 | chan->cd = &pl08x->pd->memcpy_channel; | ||
1715 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | ||
1716 | if (!chan->name) { | ||
1717 | kfree(chan); | ||
1718 | return -ENOMEM; | ||
1719 | } | ||
1720 | } | ||
1721 | if (chan->cd->circular_buffer) { | ||
1722 | dev_err(&pl08x->adev->dev, | ||
1723 | "channel %s: circular buffers not supported\n", | ||
1724 | chan->name); | ||
1725 | kfree(chan); | ||
1726 | continue; | ||
1727 | } | ||
1728 | dev_info(&pl08x->adev->dev, | ||
1729 | "initialize virtual channel \"%s\"\n", | ||
1730 | chan->name); | ||
1731 | |||
1732 | chan->chan.device = dmadev; | ||
1733 | chan->chan.cookie = 0; | ||
1734 | chan->lc = 0; | ||
1735 | |||
1736 | spin_lock_init(&chan->lock); | ||
1737 | INIT_LIST_HEAD(&chan->pend_list); | ||
1738 | tasklet_init(&chan->tasklet, pl08x_tasklet, | ||
1739 | (unsigned long) chan); | ||
1740 | |||
1741 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | ||
1742 | } | ||
1743 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | ||
1744 | i, slave ? "slave" : "memcpy"); | ||
1745 | return i; | ||
1746 | } | ||
1747 | |||
1748 | static void pl08x_free_virtual_channels(struct dma_device *dmadev) | ||
1749 | { | ||
1750 | struct pl08x_dma_chan *chan = NULL; | ||
1751 | struct pl08x_dma_chan *next; | ||
1752 | |||
1753 | list_for_each_entry_safe(chan, | ||
1754 | next, &dmadev->channels, chan.device_node) { | ||
1755 | list_del(&chan->chan.device_node); | ||
1756 | kfree(chan); | ||
1757 | } | ||
1758 | } | ||
1759 | |||
1760 | #ifdef CONFIG_DEBUG_FS | ||
1761 | static const char *pl08x_state_str(enum pl08x_dma_chan_state state) | ||
1762 | { | ||
1763 | switch (state) { | ||
1764 | case PL08X_CHAN_IDLE: | ||
1765 | return "idle"; | ||
1766 | case PL08X_CHAN_RUNNING: | ||
1767 | return "running"; | ||
1768 | case PL08X_CHAN_PAUSED: | ||
1769 | return "paused"; | ||
1770 | case PL08X_CHAN_WAITING: | ||
1771 | return "waiting"; | ||
1772 | default: | ||
1773 | break; | ||
1774 | } | ||
1775 | return "UNKNOWN STATE"; | ||
1776 | } | ||
1777 | |||
1778 | static int pl08x_debugfs_show(struct seq_file *s, void *data) | ||
1779 | { | ||
1780 | struct pl08x_driver_data *pl08x = s->private; | ||
1781 | struct pl08x_dma_chan *chan; | ||
1782 | struct pl08x_phy_chan *ch; | ||
1783 | unsigned long flags; | ||
1784 | int i; | ||
1785 | |||
1786 | seq_printf(s, "PL08x physical channels:\n"); | ||
1787 | seq_printf(s, "CHANNEL:\tUSER:\n"); | ||
1788 | seq_printf(s, "--------\t-----\n"); | ||
1789 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
1790 | struct pl08x_dma_chan *virt_chan; | ||
1791 | |||
1792 | ch = &pl08x->phy_chans[i]; | ||
1793 | |||
1794 | spin_lock_irqsave(&ch->lock, flags); | ||
1795 | virt_chan = ch->serving; | ||
1796 | |||
1797 | seq_printf(s, "%d\t\t%s\n", | ||
1798 | ch->id, virt_chan ? virt_chan->name : "(none)"); | ||
1799 | |||
1800 | spin_unlock_irqrestore(&ch->lock, flags); | ||
1801 | } | ||
1802 | |||
1803 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | ||
1804 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | ||
1805 | seq_printf(s, "--------\t------\n"); | ||
1806 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | ||
1807 | seq_printf(s, "%s\t\t%s\n", chan->name, | ||
1808 | pl08x_state_str(chan->state)); | ||
1809 | } | ||
1810 | |||
1811 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | ||
1812 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | ||
1813 | seq_printf(s, "--------\t------\n"); | ||
1814 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | ||
1815 | seq_printf(s, "%s\t\t%s\n", chan->name, | ||
1816 | pl08x_state_str(chan->state)); | ||
1817 | } | ||
1818 | |||
1819 | return 0; | ||
1820 | } | ||
1821 | |||
1822 | static int pl08x_debugfs_open(struct inode *inode, struct file *file) | ||
1823 | { | ||
1824 | return single_open(file, pl08x_debugfs_show, inode->i_private); | ||
1825 | } | ||
1826 | |||
1827 | static const struct file_operations pl08x_debugfs_operations = { | ||
1828 | .open = pl08x_debugfs_open, | ||
1829 | .read = seq_read, | ||
1830 | .llseek = seq_lseek, | ||
1831 | .release = single_release, | ||
1832 | }; | ||
1833 | |||
1834 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | ||
1835 | { | ||
1836 | /* Expose a simple debugfs interface to view the channel state */ | ||
1837 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, | ||
1838 | NULL, pl08x, | ||
1839 | &pl08x_debugfs_operations); | ||
1840 | } | ||
1841 | |||
1842 | #else | ||
1843 | static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | ||
1844 | { | ||
1845 | } | ||
1846 | #endif | ||
1847 | |||
1848 | static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | ||
1849 | { | ||
1850 | struct pl08x_driver_data *pl08x; | ||
1851 | const struct vendor_data *vd = id->data; | ||
1852 | int ret = 0; | ||
1853 | int i; | ||
1854 | |||
1855 | ret = amba_request_regions(adev, NULL); | ||
1856 | if (ret) | ||
1857 | return ret; | ||
1858 | |||
1859 | /* Create the driver state holder */ | ||
1860 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); | ||
1861 | if (!pl08x) { | ||
1862 | ret = -ENOMEM; | ||
1863 | goto out_no_pl08x; | ||
1864 | } | ||
1865 | |||
1866 | /* Initialize memcpy engine */ | ||
1867 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | ||
1868 | pl08x->memcpy.dev = &adev->dev; | ||
1869 | pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; | ||
1870 | pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; | ||
1871 | pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; | ||
1872 | pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | ||
1873 | pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; | ||
1874 | pl08x->memcpy.device_issue_pending = pl08x_issue_pending; | ||
1875 | pl08x->memcpy.device_control = pl08x_control; | ||
1876 | |||
1877 | /* Initialize slave engine */ | ||
1878 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | ||
1879 | pl08x->slave.dev = &adev->dev; | ||
1880 | pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; | ||
1881 | pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; | ||
1882 | pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | ||
1883 | pl08x->slave.device_tx_status = pl08x_dma_tx_status; | ||
1884 | pl08x->slave.device_issue_pending = pl08x_issue_pending; | ||
1885 | pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; | ||
1886 | pl08x->slave.device_control = pl08x_control; | ||
1887 | |||
1888 | /* Get the platform data */ | ||
1889 | pl08x->pd = dev_get_platdata(&adev->dev); | ||
1890 | if (!pl08x->pd) { | ||
1891 | dev_err(&adev->dev, "no platform data supplied\n"); | ||
1892 | goto out_no_platdata; | ||
1893 | } | ||
1894 | |||
1895 | /* Assign useful pointers to the driver state */ | ||
1896 | pl08x->adev = adev; | ||
1897 | pl08x->vd = vd; | ||
1898 | |||
1899 | /* By default, AHB1 only. If dualmaster, from platform */ | ||
1900 | pl08x->lli_buses = PL08X_AHB1; | ||
1901 | pl08x->mem_buses = PL08X_AHB1; | ||
1902 | if (pl08x->vd->dualmaster) { | ||
1903 | pl08x->lli_buses = pl08x->pd->lli_buses; | ||
1904 | pl08x->mem_buses = pl08x->pd->mem_buses; | ||
1905 | } | ||
1906 | |||
1907 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ | ||
1908 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | ||
1909 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | ||
1910 | if (!pl08x->pool) { | ||
1911 | ret = -ENOMEM; | ||
1912 | goto out_no_lli_pool; | ||
1913 | } | ||
1914 | |||
1915 | spin_lock_init(&pl08x->lock); | ||
1916 | |||
1917 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | ||
1918 | if (!pl08x->base) { | ||
1919 | ret = -ENOMEM; | ||
1920 | goto out_no_ioremap; | ||
1921 | } | ||
1922 | |||
1923 | /* Turn on the PL08x */ | ||
1924 | pl08x_ensure_on(pl08x); | ||
1925 | |||
1926 | /* Attach the interrupt handler */ | ||
1927 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
1928 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | ||
1929 | |||
1930 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | ||
1931 | DRIVER_NAME, pl08x); | ||
1932 | if (ret) { | ||
1933 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | ||
1934 | __func__, adev->irq[0]); | ||
1935 | goto out_no_irq; | ||
1936 | } | ||
1937 | |||
1938 | /* Initialize physical channels */ | ||
1939 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), | ||
1940 | GFP_KERNEL); | ||
1941 | if (!pl08x->phy_chans) { | ||
1942 | dev_err(&adev->dev, "%s failed to allocate " | ||
1943 | "physical channel holders\n", | ||
1944 | __func__); | ||
1945 | goto out_no_phychans; | ||
1946 | } | ||
1947 | |||
1948 | for (i = 0; i < vd->channels; i++) { | ||
1949 | struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; | ||
1950 | |||
1951 | ch->id = i; | ||
1952 | ch->base = pl08x->base + PL080_Cx_BASE(i); | ||
1953 | spin_lock_init(&ch->lock); | ||
1954 | ch->serving = NULL; | ||
1955 | ch->signal = -1; | ||
1956 | dev_info(&adev->dev, | ||
1957 | "physical channel %d is %s\n", i, | ||
1958 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | ||
1959 | } | ||
1960 | |||
1961 | /* Register as many memcpy channels as there are physical channels */ | ||
1962 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, | ||
1963 | pl08x->vd->channels, false); | ||
1964 | if (ret <= 0) { | ||
1965 | dev_warn(&pl08x->adev->dev, | ||
1966 | "%s failed to enumerate memcpy channels - %d\n", | ||
1967 | __func__, ret); | ||
1968 | goto out_no_memcpy; | ||
1969 | } | ||
1970 | pl08x->memcpy.chancnt = ret; | ||
1971 | |||
1972 | /* Register slave channels */ | ||
1973 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | ||
1974 | pl08x->pd->num_slave_channels, | ||
1975 | true); | ||
1976 | if (ret <= 0) { | ||
1977 | dev_warn(&pl08x->adev->dev, | ||
1978 | "%s failed to enumerate slave channels - %d\n", | ||
1979 | __func__, ret); | ||
1980 | goto out_no_slave; | ||
1981 | } | ||
1982 | pl08x->slave.chancnt = ret; | ||
1983 | |||
1984 | ret = dma_async_device_register(&pl08x->memcpy); | ||
1985 | if (ret) { | ||
1986 | dev_warn(&pl08x->adev->dev, | ||
1987 | "%s failed to register memcpy as an async device - %d\n", | ||
1988 | __func__, ret); | ||
1989 | goto out_no_memcpy_reg; | ||
1990 | } | ||
1991 | |||
1992 | ret = dma_async_device_register(&pl08x->slave); | ||
1993 | if (ret) { | ||
1994 | dev_warn(&pl08x->adev->dev, | ||
1995 | "%s failed to register slave as an async device - %d\n", | ||
1996 | __func__, ret); | ||
1997 | goto out_no_slave_reg; | ||
1998 | } | ||
1999 | |||
2000 | amba_set_drvdata(adev, pl08x); | ||
2001 | init_pl08x_debugfs(pl08x); | ||
2002 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", | ||
2003 | amba_part(adev), amba_rev(adev), | ||
2004 | (unsigned long long)adev->res.start, adev->irq[0]); | ||
2005 | return 0; | ||
2006 | |||
2007 | out_no_slave_reg: | ||
2008 | dma_async_device_unregister(&pl08x->memcpy); | ||
2009 | out_no_memcpy_reg: | ||
2010 | pl08x_free_virtual_channels(&pl08x->slave); | ||
2011 | out_no_slave: | ||
2012 | pl08x_free_virtual_channels(&pl08x->memcpy); | ||
2013 | out_no_memcpy: | ||
2014 | kfree(pl08x->phy_chans); | ||
2015 | out_no_phychans: | ||
2016 | free_irq(adev->irq[0], pl08x); | ||
2017 | out_no_irq: | ||
2018 | iounmap(pl08x->base); | ||
2019 | out_no_ioremap: | ||
2020 | dma_pool_destroy(pl08x->pool); | ||
2021 | out_no_lli_pool: | ||
2022 | out_no_platdata: | ||
2023 | kfree(pl08x); | ||
2024 | out_no_pl08x: | ||
2025 | amba_release_regions(adev); | ||
2026 | return ret; | ||
2027 | } | ||
2028 | |||
2029 | /* The PL080 has 8 channels and the PL081 has just 2 */ | ||
2030 | static struct vendor_data vendor_pl080 = { | ||
2031 | .channels = 8, | ||
2032 | .dualmaster = true, | ||
2033 | }; | ||
2034 | |||
2035 | static struct vendor_data vendor_pl081 = { | ||
2036 | .channels = 2, | ||
2037 | .dualmaster = false, | ||
2038 | }; | ||
2039 | |||
2040 | static struct amba_id pl08x_ids[] = { | ||
2041 | /* PL080 */ | ||
2042 | { | ||
2043 | .id = 0x00041080, | ||
2044 | .mask = 0x000fffff, | ||
2045 | .data = &vendor_pl080, | ||
2046 | }, | ||
2047 | /* PL081 */ | ||
2048 | { | ||
2049 | .id = 0x00041081, | ||
2050 | .mask = 0x000fffff, | ||
2051 | .data = &vendor_pl081, | ||
2052 | }, | ||
2053 | /* Nomadik 8815 PL080 variant */ | ||
2054 | { | ||
2055 | .id = 0x00280880, | ||
2056 | .mask = 0x00ffffff, | ||
2057 | .data = &vendor_pl080, | ||
2058 | }, | ||
2059 | { 0, 0 }, | ||
2060 | }; | ||
2061 | |||
2062 | static struct amba_driver pl08x_amba_driver = { | ||
2063 | .drv.name = DRIVER_NAME, | ||
2064 | .id_table = pl08x_ids, | ||
2065 | .probe = pl08x_probe, | ||
2066 | }; | ||
2067 | |||
2068 | static int __init pl08x_init(void) | ||
2069 | { | ||
2070 | int retval; | ||
2071 | retval = amba_driver_register(&pl08x_amba_driver); | ||
2072 | if (retval) | ||
2073 | printk(KERN_WARNING DRIVER_NAME | ||
2074 | "failed to register as an AMBA device (%d)\n", | ||
2075 | retval); | ||
2076 | return retval; | ||
2077 | } | ||
2078 | subsys_initcall(pl08x_init); | ||
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index a0f3e6a06e06..36144f88d718 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) | 38 | #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) |
39 | #define ATC_DEFAULT_CTRLA (0) | 39 | #define ATC_DEFAULT_CTRLA (0) |
40 | #define ATC_DEFAULT_CTRLB (ATC_SIF(0) \ | 40 | #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |
41 | |ATC_DIF(1)) | 41 | |ATC_DIF(AT_DMA_MEM_IF)) |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Initial number of descriptors to allocate for each channel. This could | 44 | * Initial number of descriptors to allocate for each channel. This could |
@@ -165,9 +165,32 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) | |||
165 | } | 165 | } |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * atc_desc_chain - build chain by adding a descriptor | ||
169 | * @first: address of first descriptor of the chain | ||
170 | * @prev: address of previous descriptor of the chain | ||
171 | * @desc: descriptor to queue | ||
172 | * | ||
173 | * Called from prep_* functions | ||
174 | */ | ||
175 | static void atc_desc_chain(struct at_desc **first, struct at_desc **prev, | ||
176 | struct at_desc *desc) | ||
177 | { | ||
178 | if (!(*first)) { | ||
179 | *first = desc; | ||
180 | } else { | ||
181 | /* inform the HW lli about chaining */ | ||
182 | (*prev)->lli.dscr = desc->txd.phys; | ||
183 | /* insert the link descriptor to the LD ring */ | ||
184 | list_add_tail(&desc->desc_node, | ||
185 | &(*first)->tx_list); | ||
186 | } | ||
187 | *prev = desc; | ||
188 | } | ||
189 | |||
190 | /** | ||
168 | * atc_assign_cookie - compute and assign new cookie | 191 | * atc_assign_cookie - compute and assign new cookie |
169 | * @atchan: channel we work on | 192 | * @atchan: channel we work on |
170 | * @desc: descriptor to asign cookie for | 193 | * @desc: descriptor to assign cookie for |
171 | * | 194 | * |
172 | * Called with atchan->lock held and bh disabled | 195 | * Called with atchan->lock held and bh disabled |
173 | */ | 196 | */ |
@@ -237,23 +260,19 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) | |||
237 | static void | 260 | static void |
238 | atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | 261 | atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) |
239 | { | 262 | { |
240 | dma_async_tx_callback callback; | ||
241 | void *param; | ||
242 | struct dma_async_tx_descriptor *txd = &desc->txd; | 263 | struct dma_async_tx_descriptor *txd = &desc->txd; |
243 | 264 | ||
244 | dev_vdbg(chan2dev(&atchan->chan_common), | 265 | dev_vdbg(chan2dev(&atchan->chan_common), |
245 | "descriptor %u complete\n", txd->cookie); | 266 | "descriptor %u complete\n", txd->cookie); |
246 | 267 | ||
247 | atchan->completed_cookie = txd->cookie; | 268 | atchan->completed_cookie = txd->cookie; |
248 | callback = txd->callback; | ||
249 | param = txd->callback_param; | ||
250 | 269 | ||
251 | /* move children to free_list */ | 270 | /* move children to free_list */ |
252 | list_splice_init(&desc->tx_list, &atchan->free_list); | 271 | list_splice_init(&desc->tx_list, &atchan->free_list); |
253 | /* move myself to free_list */ | 272 | /* move myself to free_list */ |
254 | list_move(&desc->desc_node, &atchan->free_list); | 273 | list_move(&desc->desc_node, &atchan->free_list); |
255 | 274 | ||
256 | /* unmap dma addresses */ | 275 | /* unmap dma addresses (not on slave channels) */ |
257 | if (!atchan->chan_common.private) { | 276 | if (!atchan->chan_common.private) { |
258 | struct device *parent = chan2parent(&atchan->chan_common); | 277 | struct device *parent = chan2parent(&atchan->chan_common); |
259 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 278 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
278 | } | 297 | } |
279 | } | 298 | } |
280 | 299 | ||
281 | /* | 300 | /* for cyclic transfers, |
282 | * The API requires that no submissions are done from a | 301 | * no need to replay callback function while stopping */ |
283 | * callback, so we don't need to drop the lock here | 302 | if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { |
284 | */ | 303 | dma_async_tx_callback callback = txd->callback; |
285 | if (callback) | 304 | void *param = txd->callback_param; |
286 | callback(param); | 305 | |
306 | /* | ||
307 | * The API requires that no submissions are done from a | ||
308 | * callback, so we don't need to drop the lock here | ||
309 | */ | ||
310 | if (callback) | ||
311 | callback(param); | ||
312 | } | ||
287 | 313 | ||
288 | dma_run_dependencies(txd); | 314 | dma_run_dependencies(txd); |
289 | } | 315 | } |
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan) | |||
419 | atc_chain_complete(atchan, bad_desc); | 445 | atc_chain_complete(atchan, bad_desc); |
420 | } | 446 | } |
421 | 447 | ||
448 | /** | ||
449 | * atc_handle_cyclic - at the end of a period, run callback function | ||
450 | * @atchan: channel used for cyclic operations | ||
451 | * | ||
452 | * Called with atchan->lock held and bh disabled | ||
453 | */ | ||
454 | static void atc_handle_cyclic(struct at_dma_chan *atchan) | ||
455 | { | ||
456 | struct at_desc *first = atc_first_active(atchan); | ||
457 | struct dma_async_tx_descriptor *txd = &first->txd; | ||
458 | dma_async_tx_callback callback = txd->callback; | ||
459 | void *param = txd->callback_param; | ||
460 | |||
461 | dev_vdbg(chan2dev(&atchan->chan_common), | ||
462 | "new cyclic period llp 0x%08x\n", | ||
463 | channel_readl(atchan, DSCR)); | ||
464 | |||
465 | if (callback) | ||
466 | callback(param); | ||
467 | } | ||
422 | 468 | ||
423 | /*-- IRQ & Tasklet ---------------------------------------------------*/ | 469 | /*-- IRQ & Tasklet ---------------------------------------------------*/ |
424 | 470 | ||
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data) | |||
426 | { | 472 | { |
427 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; | 473 | struct at_dma_chan *atchan = (struct at_dma_chan *)data; |
428 | 474 | ||
429 | /* Channel cannot be enabled here */ | ||
430 | if (atc_chan_is_enabled(atchan)) { | ||
431 | dev_err(chan2dev(&atchan->chan_common), | ||
432 | "BUG: channel enabled in tasklet\n"); | ||
433 | return; | ||
434 | } | ||
435 | |||
436 | spin_lock(&atchan->lock); | 475 | spin_lock(&atchan->lock); |
437 | if (test_and_clear_bit(0, &atchan->error_status)) | 476 | if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) |
438 | atc_handle_error(atchan); | 477 | atc_handle_error(atchan); |
478 | else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) | ||
479 | atc_handle_cyclic(atchan); | ||
439 | else | 480 | else |
440 | atc_advance_work(atchan); | 481 | atc_advance_work(atchan); |
441 | 482 | ||
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id) | |||
464 | 505 | ||
465 | for (i = 0; i < atdma->dma_common.chancnt; i++) { | 506 | for (i = 0; i < atdma->dma_common.chancnt; i++) { |
466 | atchan = &atdma->chan[i]; | 507 | atchan = &atdma->chan[i]; |
467 | if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) { | 508 | if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) { |
468 | if (pending & AT_DMA_ERR(i)) { | 509 | if (pending & AT_DMA_ERR(i)) { |
469 | /* Disable channel on AHB error */ | 510 | /* Disable channel on AHB error */ |
470 | dma_writel(atdma, CHDR, atchan->mask); | 511 | dma_writel(atdma, CHDR, |
512 | AT_DMA_RES(i) | atchan->mask); | ||
471 | /* Give information to tasklet */ | 513 | /* Give information to tasklet */ |
472 | set_bit(0, &atchan->error_status); | 514 | set_bit(ATC_IS_ERROR, &atchan->status); |
473 | } | 515 | } |
474 | tasklet_schedule(&atchan->tasklet); | 516 | tasklet_schedule(&atchan->tasklet); |
475 | ret = IRQ_HANDLED; | 517 | ret = IRQ_HANDLED; |
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
549 | } | 591 | } |
550 | 592 | ||
551 | ctrla = ATC_DEFAULT_CTRLA; | 593 | ctrla = ATC_DEFAULT_CTRLA; |
552 | ctrlb = ATC_DEFAULT_CTRLB | 594 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
553 | | ATC_SRC_ADDR_MODE_INCR | 595 | | ATC_SRC_ADDR_MODE_INCR |
554 | | ATC_DST_ADDR_MODE_INCR | 596 | | ATC_DST_ADDR_MODE_INCR |
555 | | ATC_FC_MEM2MEM; | 597 | | ATC_FC_MEM2MEM; |
@@ -583,18 +625,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
583 | desc->lli.ctrlb = ctrlb; | 625 | desc->lli.ctrlb = ctrlb; |
584 | 626 | ||
585 | desc->txd.cookie = 0; | 627 | desc->txd.cookie = 0; |
586 | async_tx_ack(&desc->txd); | ||
587 | 628 | ||
588 | if (!first) { | 629 | atc_desc_chain(&first, &prev, desc); |
589 | first = desc; | ||
590 | } else { | ||
591 | /* inform the HW lli about chaining */ | ||
592 | prev->lli.dscr = desc->txd.phys; | ||
593 | /* insert the link descriptor to the LD ring */ | ||
594 | list_add_tail(&desc->desc_node, | ||
595 | &first->tx_list); | ||
596 | } | ||
597 | prev = desc; | ||
598 | } | 630 | } |
599 | 631 | ||
600 | /* First descriptor of the chain embeds additional information */ | 632 | /* First descriptor of the chain embeds additional information */
@@ -604,7 +636,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
604 | /* set end-of-link to the last link descriptor of list*/ | 636 | /* set end-of-link to the last link descriptor of list*/ |
605 | set_desc_eol(desc); | 637 | set_desc_eol(desc); |
606 | 638 | ||
607 | desc->txd.flags = flags; /* client is in control of this ack */ | 639 | first->txd.flags = flags; /* client is in control of this ack */ |
608 | 640 | ||
609 | return &first->txd; | 641 | return &first->txd; |
610 | 642 | ||
@@ -640,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
640 | struct scatterlist *sg; | 672 | struct scatterlist *sg; |
641 | size_t total_len = 0; | 673 | size_t total_len = 0; |
642 | 674 | ||
643 | dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n", | 675 | dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", |
676 | sg_len, | ||
644 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", | 677 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", |
645 | flags); | 678 | flags); |
646 | 679 | ||
@@ -652,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
652 | reg_width = atslave->reg_width; | 685 | reg_width = atslave->reg_width; |
653 | 686 | ||
654 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; | 687 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; |
655 | ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; | 688 | ctrlb = ATC_IEN; |
656 | 689 | ||
657 | switch (direction) { | 690 | switch (direction) { |
658 | case DMA_TO_DEVICE: | 691 | case DMA_TO_DEVICE: |
659 | ctrla |= ATC_DST_WIDTH(reg_width); | 692 | ctrla |= ATC_DST_WIDTH(reg_width); |
660 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | 693 | ctrlb |= ATC_DST_ADDR_MODE_FIXED |
661 | | ATC_SRC_ADDR_MODE_INCR | 694 | | ATC_SRC_ADDR_MODE_INCR |
662 | | ATC_FC_MEM2PER; | 695 | | ATC_FC_MEM2PER |
696 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); | ||
663 | reg = atslave->tx_reg; | 697 | reg = atslave->tx_reg; |
664 | for_each_sg(sgl, sg, sg_len, i) { | 698 | for_each_sg(sgl, sg, sg_len, i) { |
665 | struct at_desc *desc; | 699 | struct at_desc *desc; |
@@ -670,7 +704,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
670 | if (!desc) | 704 | if (!desc) |
671 | goto err_desc_get; | 705 | goto err_desc_get; |
672 | 706 | ||
673 | mem = sg_phys(sg); | 707 | mem = sg_dma_address(sg); |
674 | len = sg_dma_len(sg); | 708 | len = sg_dma_len(sg); |
675 | mem_width = 2; | 709 | mem_width = 2; |
676 | if (unlikely(mem & 3 || len & 3)) | 710 | if (unlikely(mem & 3 || len & 3)) |
@@ -683,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
683 | | len >> mem_width; | 717 | | len >> mem_width; |
684 | desc->lli.ctrlb = ctrlb; | 718 | desc->lli.ctrlb = ctrlb; |
685 | 719 | ||
686 | if (!first) { | 720 | atc_desc_chain(&first, &prev, desc); |
687 | first = desc; | ||
688 | } else { | ||
689 | /* inform the HW lli about chaining */ | ||
690 | prev->lli.dscr = desc->txd.phys; | ||
691 | /* insert the link descriptor to the LD ring */ | ||
692 | list_add_tail(&desc->desc_node, | ||
693 | &first->tx_list); | ||
694 | } | ||
695 | prev = desc; | ||
696 | total_len += len; | 721 | total_len += len; |
697 | } | 722 | } |
698 | break; | 723 | break; |
@@ -700,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
700 | ctrla |= ATC_SRC_WIDTH(reg_width); | 725 | ctrla |= ATC_SRC_WIDTH(reg_width); |
701 | ctrlb |= ATC_DST_ADDR_MODE_INCR | 726 | ctrlb |= ATC_DST_ADDR_MODE_INCR |
702 | | ATC_SRC_ADDR_MODE_FIXED | 727 | | ATC_SRC_ADDR_MODE_FIXED |
703 | | ATC_FC_PER2MEM; | 728 | | ATC_FC_PER2MEM |
729 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); | ||
704 | 730 | ||
705 | reg = atslave->rx_reg; | 731 | reg = atslave->rx_reg; |
706 | for_each_sg(sgl, sg, sg_len, i) { | 732 | for_each_sg(sgl, sg, sg_len, i) { |
@@ -712,7 +738,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
712 | if (!desc) | 738 | if (!desc) |
713 | goto err_desc_get; | 739 | goto err_desc_get; |
714 | 740 | ||
715 | mem = sg_phys(sg); | 741 | mem = sg_dma_address(sg); |
716 | len = sg_dma_len(sg); | 742 | len = sg_dma_len(sg); |
717 | mem_width = 2; | 743 | mem_width = 2; |
718 | if (unlikely(mem & 3 || len & 3)) | 744 | if (unlikely(mem & 3 || len & 3)) |
@@ -722,19 +748,10 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
722 | desc->lli.daddr = mem; | 748 | desc->lli.daddr = mem; |
723 | desc->lli.ctrla = ctrla | 749 | desc->lli.ctrla = ctrla |
724 | | ATC_DST_WIDTH(mem_width) | 750 | | ATC_DST_WIDTH(mem_width) |
725 | | len >> mem_width; | 751 | | len >> reg_width; |
726 | desc->lli.ctrlb = ctrlb; | 752 | desc->lli.ctrlb = ctrlb; |
727 | 753 | ||
728 | if (!first) { | 754 | atc_desc_chain(&first, &prev, desc); |
729 | first = desc; | ||
730 | } else { | ||
731 | /* inform the HW lli about chaining */ | ||
732 | prev->lli.dscr = desc->txd.phys; | ||
733 | /* insert the link descriptor to the LD ring */ | ||
734 | list_add_tail(&desc->desc_node, | ||
735 | &first->tx_list); | ||
736 | } | ||
737 | prev = desc; | ||
738 | total_len += len; | 755 | total_len += len; |
739 | } | 756 | } |
740 | break; | 757 | break; |
@@ -749,52 +766,222 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
749 | first->txd.cookie = -EBUSY; | 766 | first->txd.cookie = -EBUSY; |
750 | first->len = total_len; | 767 | first->len = total_len; |
751 | 768 | ||
752 | /* last link descriptor of list is responsible for flags */ | 769 | /* first link descriptor of list is responsible for flags */
753 | prev->txd.flags = flags; /* client is in control of this ack */ | 770 | first->txd.flags = flags; /* client is in control of this ack */ |
771 | |||
772 | return &first->txd; | ||
773 | |||
774 | err_desc_get: | ||
775 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | ||
776 | atc_desc_put(atchan, first); | ||
777 | return NULL; | ||
778 | } | ||
779 | |||
780 | /** | ||
781 | * atc_dma_cyclic_check_values | ||
782 | * Check for too big/unaligned periods and unaligned DMA buffer | ||
783 | */ | ||
784 | static int | ||
785 | atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, | ||
786 | size_t period_len, enum dma_data_direction direction) | ||
787 | { | ||
788 | if (period_len > (ATC_BTSIZE_MAX << reg_width)) | ||
789 | goto err_out; | ||
790 | if (unlikely(period_len & ((1 << reg_width) - 1))) | ||
791 | goto err_out; | ||
792 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | ||
793 | goto err_out; | ||
794 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) | ||
795 | goto err_out; | ||
796 | |||
797 | return 0; | ||
798 | |||
799 | err_out: | ||
800 | return -EINVAL; | ||
801 | } | ||
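Read as a sketch, assuming halfword transfers (reg_width == 1, so the unit is 1 << 1 == 2 bytes) and a buf_dma that is itself halfword-aligned, the checks resolve as follows:

    atc_dma_cyclic_check_values(1, buf_dma,     4096, DMA_TO_DEVICE); /* 0: ok                  */
    atc_dma_cyclic_check_values(1, buf_dma,     4095, DMA_TO_DEVICE); /* -EINVAL: odd period    */
    atc_dma_cyclic_check_values(1, buf_dma + 1, 4096, DMA_TO_DEVICE); /* -EINVAL: unaligned buf */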
802 | |||
803 | /** | ||
804 | * atc_dma_cyclic_fill_desc - Fill one period descriptor | ||
805 | */ | ||
806 | static int | ||
807 | atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | ||
808 | unsigned int period_index, dma_addr_t buf_addr, | ||
809 | size_t period_len, enum dma_data_direction direction) | ||
810 | { | ||
811 | u32 ctrla; | ||
812 | unsigned int reg_width = atslave->reg_width; | ||
813 | |||
814 | /* prepare common CTRLA value */ | ||
815 | ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla | ||
816 | | ATC_DST_WIDTH(reg_width) | ||
817 | | ATC_SRC_WIDTH(reg_width) | ||
818 | | period_len >> reg_width; | ||
819 | |||
820 | switch (direction) { | ||
821 | case DMA_TO_DEVICE: | ||
822 | desc->lli.saddr = buf_addr + (period_len * period_index); | ||
823 | desc->lli.daddr = atslave->tx_reg; | ||
824 | desc->lli.ctrla = ctrla; | ||
825 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | ||
826 | | ATC_SRC_ADDR_MODE_INCR | ||
827 | | ATC_FC_MEM2PER | ||
828 | | ATC_SIF(AT_DMA_MEM_IF) | ||
829 | | ATC_DIF(AT_DMA_PER_IF); | ||
830 | break; | ||
831 | |||
832 | case DMA_FROM_DEVICE: | ||
833 | desc->lli.saddr = atslave->rx_reg; | ||
834 | desc->lli.daddr = buf_addr + (period_len * period_index); | ||
835 | desc->lli.ctrla = ctrla; | ||
836 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | ||
837 | | ATC_SRC_ADDR_MODE_FIXED | ||
838 | | ATC_FC_PER2MEM | ||
839 | | ATC_SIF(AT_DMA_PER_IF) | ||
840 | | ATC_DIF(AT_DMA_MEM_IF); | ||
841 | break; | ||
842 | |||
843 | default: | ||
844 | return -EINVAL; | ||
845 | } | ||
846 | |||
847 | return 0; | ||
848 | } | ||
849 | |||
850 | /** | ||
851 | * atc_prep_dma_cyclic - prepare the cyclic DMA transfer | ||
852 | * @chan: the DMA channel to prepare | ||
853 | * @buf_addr: physical DMA address where the buffer starts | ||
854 | * @buf_len: total number of bytes for the entire buffer | ||
855 | * @period_len: number of bytes for each period | ||
856 | * @direction: transfer direction, to or from device | ||
857 | */ | ||
858 | static struct dma_async_tx_descriptor * | ||
859 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
860 | size_t period_len, enum dma_data_direction direction) | ||
861 | { | ||
862 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | ||
863 | struct at_dma_slave *atslave = chan->private; | ||
864 | struct at_desc *first = NULL; | ||
865 | struct at_desc *prev = NULL; | ||
866 | unsigned long was_cyclic; | ||
867 | unsigned int periods = buf_len / period_len; | ||
868 | unsigned int i; | ||
869 | |||
870 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | ||
871 | direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", | ||
872 | buf_addr, | ||
873 | periods, buf_len, period_len); | ||
874 | |||
875 | if (unlikely(!atslave || !buf_len || !period_len)) { | ||
876 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n"); | ||
877 | return NULL; | ||
878 | } | ||
879 | |||
880 | was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status); | ||
881 | if (was_cyclic) { | ||
882 | dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n"); | ||
883 | return NULL; | ||
884 | } | ||
885 | |||
886 | /* Check for too big/unaligned periods and unaligned DMA buffer */ | ||
887 | if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, | ||
888 | period_len, direction)) | ||
889 | goto err_out; | ||
890 | |||
891 | /* build cyclic linked list */ | ||
892 | for (i = 0; i < periods; i++) { | ||
893 | struct at_desc *desc; | ||
894 | |||
895 | desc = atc_desc_get(atchan); | ||
896 | if (!desc) | ||
897 | goto err_desc_get; | ||
898 | |||
899 | if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, | ||
900 | period_len, direction)) | ||
901 | goto err_desc_get; | ||
902 | |||
903 | atc_desc_chain(&first, &prev, desc); | ||
904 | } | ||
905 | |||
906 | /* let's make the list cyclic */ | ||
907 | prev->lli.dscr = first->txd.phys; | ||
908 | |||
909 | /* First descriptor of the chain embeds additional information */ | ||
910 | first->txd.cookie = -EBUSY; | ||
911 | first->len = buf_len; | ||
754 | 912 | ||
755 | return &first->txd; | 913 | return &first->txd; |
756 | 914 | ||
757 | err_desc_get: | 915 | err_desc_get: |
758 | dev_err(chan2dev(chan), "not enough descriptors available\n"); | 916 | dev_err(chan2dev(chan), "not enough descriptors available\n"); |
759 | atc_desc_put(atchan, first); | 917 | atc_desc_put(atchan, first); |
918 | err_out: | ||
919 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | ||
760 | return NULL; | 920 | return NULL; |
761 | } | 921 | } |
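A hedged usage sketch for the new cyclic prep, in the audio-ring style it targets; buf_dma is an assumed, already mapped 4 KiB ring, and period_elapsed/substream are hypothetical names:

    struct dma_async_tx_descriptor *txd;

    /* 4 KiB ring split into four 1 KiB periods */
    txd = chan->device->device_prep_dma_cyclic(chan, buf_dma, 4096, 1024,
                                               DMA_TO_DEVICE);
    if (!txd)
            return -EBUSY;            /* bad parameters or channel already cyclic */

    txd->callback = period_elapsed;   /* replayed at the end of every period */
    txd->callback_param = substream;
    txd->tx_submit(txd);
    /* no issue_pending() kick needed: atc_issue_pending() (below) skips cyclic channels */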
762 | 922 | ||
923 | |||
763 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 924 | static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
764 | unsigned long arg) | 925 | unsigned long arg) |
765 | { | 926 | { |
766 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 927 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
767 | struct at_dma *atdma = to_at_dma(chan->device); | 928 | struct at_dma *atdma = to_at_dma(chan->device); |
768 | struct at_desc *desc, *_desc; | 929 | int chan_id = atchan->chan_common.chan_id; |
930 | |||
769 | LIST_HEAD(list); | 931 | LIST_HEAD(list); |
770 | 932 | ||
771 | /* Only supports DMA_TERMINATE_ALL */ | 933 | dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); |
772 | if (cmd != DMA_TERMINATE_ALL) | ||
773 | return -ENXIO; | ||
774 | 934 | ||
775 | /* | 935 | if (cmd == DMA_PAUSE) { |
776 | * This is only called when something went wrong elsewhere, so | 936 | spin_lock_bh(&atchan->lock); |
777 | * we don't really care about the data. Just disable the | ||
778 | * channel. We still have to poll the channel enable bit due | ||
779 | * to AHB/HSB limitations. | ||
780 | */ | ||
781 | spin_lock_bh(&atchan->lock); | ||
782 | 937 | ||
783 | dma_writel(atdma, CHDR, atchan->mask); | 938 | dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); |
939 | set_bit(ATC_IS_PAUSED, &atchan->status); | ||
784 | 940 | ||
785 | /* confirm that this channel is disabled */ | 941 | spin_unlock_bh(&atchan->lock); |
786 | while (dma_readl(atdma, CHSR) & atchan->mask) | 942 | } else if (cmd == DMA_RESUME) { |
787 | cpu_relax(); | 943 | if (!test_bit(ATC_IS_PAUSED, &atchan->status)) |
944 | return 0; | ||
788 | 945 | ||
789 | /* active_list entries will end up before queued entries */ | 946 | spin_lock_bh(&atchan->lock); |
790 | list_splice_init(&atchan->queue, &list); | ||
791 | list_splice_init(&atchan->active_list, &list); | ||
792 | 947 | ||
793 | /* Flush all pending and queued descriptors */ | 948 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); |
794 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 949 | clear_bit(ATC_IS_PAUSED, &atchan->status); |
795 | atc_chain_complete(atchan, desc); | ||
796 | 950 | ||
797 | spin_unlock_bh(&atchan->lock); | 951 | spin_unlock_bh(&atchan->lock); |
952 | } else if (cmd == DMA_TERMINATE_ALL) { | ||
953 | struct at_desc *desc, *_desc; | ||
954 | /* | ||
955 | * This is only called when something went wrong elsewhere, so | ||
956 | * we don't really care about the data. Just disable the | ||
957 | * channel. We still have to poll the channel enable bit due | ||
958 | * to AHB/HSB limitations. | ||
959 | */ | ||
960 | spin_lock_bh(&atchan->lock); | ||
961 | |||
962 | /* disabling channel: must also remove suspend state */ | ||
963 | dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); | ||
964 | |||
965 | /* confirm that this channel is disabled */ | ||
966 | while (dma_readl(atdma, CHSR) & atchan->mask) | ||
967 | cpu_relax(); | ||
968 | |||
969 | /* active_list entries will end up before queued entries */ | ||
970 | list_splice_init(&atchan->queue, &list); | ||
971 | list_splice_init(&atchan->active_list, &list); | ||
972 | |||
973 | /* Flush all pending and queued descriptors */ | ||
974 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
975 | atc_chain_complete(atchan, desc); | ||
976 | |||
977 | clear_bit(ATC_IS_PAUSED, &atchan->status); | ||
978 | /* if channel dedicated to cyclic operations, free it */ | ||
979 | clear_bit(ATC_IS_CYCLIC, &atchan->status); | ||
980 | |||
981 | spin_unlock_bh(&atchan->lock); | ||
982 | } else { | ||
983 | return -ENXIO; | ||
984 | } | ||
798 | 985 | ||
799 | return 0; | 986 | return 0; |
800 | } | 987 | } |
@@ -836,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan, | |||
836 | 1023 | ||
837 | spin_unlock_bh(&atchan->lock); | 1024 | spin_unlock_bh(&atchan->lock); |
838 | 1025 | ||
839 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 1026 | if (ret != DMA_SUCCESS) |
840 | dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", | 1027 | dma_set_tx_state(txstate, last_complete, last_used, |
841 | cookie, last_complete ? last_complete : 0, | 1028 | atc_first_active(atchan)->len); |
1029 | else | ||
1030 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
1031 | |||
1032 | if (test_bit(ATC_IS_PAUSED, &atchan->status)) | ||
1033 | ret = DMA_PAUSED; | ||
1034 | |||
1035 | dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", | ||
1036 | ret, cookie, last_complete ? last_complete : 0, | ||
842 | last_used ? last_used : 0); | 1037 | last_used ? last_used : 0); |
843 | 1038 | ||
844 | return ret; | 1039 | return ret; |
@@ -854,11 +1049,15 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
854 | 1049 | ||
855 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 1050 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
856 | 1051 | ||
1052 | /* Not needed for cyclic transfers */ | ||
1053 | if (test_bit(ATC_IS_CYCLIC, &atchan->status)) | ||
1054 | return; | ||
1055 | |||
1056 | spin_lock_bh(&atchan->lock); | ||
857 | if (!atc_chan_is_enabled(atchan)) { | 1057 | if (!atc_chan_is_enabled(atchan)) { |
858 | spin_lock_bh(&atchan->lock); | ||
859 | atc_advance_work(atchan); | 1058 | atc_advance_work(atchan); |
860 | spin_unlock_bh(&atchan->lock); | ||
861 | } | 1059 | } |
1060 | spin_unlock_bh(&atchan->lock); | ||
862 | } | 1061 | } |
863 | 1062 | ||
864 | /** | 1063 | /** |
@@ -960,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
960 | } | 1159 | } |
961 | list_splice_init(&atchan->free_list, &list); | 1160 | list_splice_init(&atchan->free_list, &list); |
962 | atchan->descs_allocated = 0; | 1161 | atchan->descs_allocated = 0; |
1162 | atchan->status = 0; | ||
963 | 1163 | ||
964 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1164 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
965 | } | 1165 | } |
@@ -1093,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1093 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) | 1293 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) |
1094 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; | 1294 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; |
1095 | 1295 | ||
1096 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { | 1296 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) |
1097 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; | 1297 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; |
1298 | |||
1299 | if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) | ||
1300 | atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; | ||
1301 | |||
1302 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || | ||
1303 | dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) | ||
1098 | atdma->dma_common.device_control = atc_control; | 1304 | atdma->dma_common.device_control = atc_control; |
1099 | } | ||
1100 | 1305 | ||
1101 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1306 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1102 | 1307 | ||
@@ -1210,7 +1415,7 @@ static int __init at_dma_init(void) | |||
1210 | { | 1415 | { |
1211 | return platform_driver_probe(&at_dma_driver, at_dma_probe); | 1416 | return platform_driver_probe(&at_dma_driver, at_dma_probe); |
1212 | } | 1417 | } |
1213 | module_init(at_dma_init); | 1418 | subsys_initcall(at_dma_init); |
1214 | 1419 | ||
1215 | static void __exit at_dma_exit(void) | 1420 | static void __exit at_dma_exit(void) |
1216 | { | 1421 | { |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 495457e3dc4b..087dbf1dd39c 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -103,6 +103,10 @@ | |||
103 | /* Bitfields in CTRLB */ | 103 | /* Bitfields in CTRLB */ |
104 | #define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ | 104 | #define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */ |
105 | #define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ | 105 | #define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */ |
106 | /* Specify AHB interfaces */ | ||
107 | #define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */ | ||
108 | #define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */ | ||
109 | |||
106 | #define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ | 110 | #define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */ |
107 | #define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ | 111 | #define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */ |
108 | #define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ | 112 | #define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */ |
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |||
181 | /*-- Channels --------------------------------------------------------*/ | 185 | /*-- Channels --------------------------------------------------------*/ |
182 | 186 | ||
183 | /** | 187 | /** |
188 | * atc_status - information bits stored in channel status flag | ||
189 | * | ||
190 | * Manipulated with atomic operations. | ||
191 | */ | ||
192 | enum atc_status { | ||
193 | ATC_IS_ERROR = 0, | ||
194 | ATC_IS_PAUSED = 1, | ||
195 | ATC_IS_CYCLIC = 24, | ||
196 | }; | ||
197 | |||
198 | /** | ||
184 | * struct at_dma_chan - internal representation of an Atmel HDMAC channel | 199 | * struct at_dma_chan - internal representation of an Atmel HDMAC channel |
185 | * @chan_common: common dmaengine channel object members | 200 | * @chan_common: common dmaengine channel object members |
186 | * @device: parent device | 201 | * @device: parent device |
187 | * @ch_regs: memory mapped register base | 202 | * @ch_regs: memory mapped register base |
188 | * @mask: channel index in a mask | 203 | * @mask: channel index in a mask |
189 | * @error_status: transmit error status information from irq handler | 204 | * @status: transmit status information from irq/prep* functions |
190 | * to tasklet (use atomic operations) | 205 | * to tasklet (use atomic operations) |
191 | * @tasklet: bottom half to finish transaction work | 206 | * @tasklet: bottom half to finish transaction work |
192 | * @lock: serializes enqueue/dequeue operations to descriptors lists | 207 | * @lock: serializes enqueue/dequeue operations to descriptors lists |
@@ -201,7 +216,7 @@ struct at_dma_chan { | |||
201 | struct at_dma *device; | 216 | struct at_dma *device; |
202 | void __iomem *ch_regs; | 217 | void __iomem *ch_regs; |
203 | u8 mask; | 218 | u8 mask; |
204 | unsigned long error_status; | 219 | unsigned long status; |
205 | struct tasklet_struct tasklet; | 220 | struct tasklet_struct tasklet; |
206 | 221 | ||
207 | spinlock_t lock; | 222 | spinlock_t lock; |
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on) | |||
309 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); | 324 | struct at_dma *atdma = to_at_dma(atchan->chan_common.device); |
310 | u32 ebci; | 325 | u32 ebci; |
311 | 326 | ||
312 | /* enable interrupts on buffer chain completion & error */ | 327 | /* enable interrupts on buffer transfer completion & error */ |
313 | ebci = AT_DMA_CBTC(atchan->chan_common.chan_id) | 328 | ebci = AT_DMA_BTC(atchan->chan_common.chan_id) |
314 | | AT_DMA_ERR(atchan->chan_common.chan_id); | 329 | | AT_DMA_ERR(atchan->chan_common.chan_id); |
315 | if (on) | 330 | if (on) |
316 | dma_writel(atdma, EBCIER, ebci); | 331 | dma_writel(atdma, EBCIER, ebci); |
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan) | |||
347 | */ | 362 | */ |
348 | static void set_desc_eol(struct at_desc *desc) | 363 | static void set_desc_eol(struct at_desc *desc) |
349 | { | 364 | { |
350 | desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; | 365 | u32 ctrlb = desc->lli.ctrlb; |
366 | |||
367 | ctrlb &= ~ATC_IEN; | ||
368 | ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS; | ||
369 | |||
370 | desc->lli.ctrlb = ctrlb; | ||
351 | desc->lli.dscr = 0; | 371 | desc->lli.dscr = 0; |
352 | } | 372 | } |
353 | 373 | ||
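The AT_DMA_*_IF constants added above give names to the two AHB-Lite interfaces that feed the existing ATC_SIF()/ATC_DIF() field macros, and the reworked set_desc_eol() shows the read-modify-write style now used on CTRLB. A small sketch, under those assumptions, of composing a CTRLB value for a memory-to-peripheral leg (function and variable names are illustrative):

static u32 my_mem_to_per_ctrlb(void)
{
	u32 ctrlb;

	/* source data is fetched over the memory interface (0), destination
	 * writes go out over the peripheral interface (1) */
	ctrlb = ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);

	/* on the last link of a chain, also stop descriptor fetches and drop
	 * the per-buffer interrupt, mirroring set_desc_eol() above */
	ctrlb &= ~ATC_IEN;
	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;

	return ctrlb;
}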
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 557e2272e5b3..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -157,6 +157,7 @@ static const struct file_operations coh901318_debugfs_status_operations = { | |||
157 | .owner = THIS_MODULE, | 157 | .owner = THIS_MODULE, |
158 | .open = coh901318_debugfs_open, | 158 | .open = coh901318_debugfs_open, |
159 | .read = coh901318_debugfs_read, | 159 | .read = coh901318_debugfs_read, |
160 | .llseek = default_llseek, | ||
160 | }; | 161 | }; |
161 | 162 | ||
162 | 163 | ||
@@ -528,7 +529,7 @@ static void coh901318_pause(struct dma_chan *chan) | |||
528 | val = readl(virtbase + COH901318_CX_CFG + | 529 | val = readl(virtbase + COH901318_CX_CFG + |
529 | COH901318_CX_CFG_SPACING * channel); | 530 | COH901318_CX_CFG_SPACING * channel); |
530 | 531 | ||
531 | /* Stopping infinit transfer */ | 532 | /* Stopping infinite transfer */ |
532 | if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && | 533 | if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && |
533 | (val & COH901318_CX_CFG_CH_ENABLE)) | 534 | (val & COH901318_CX_CFG_CH_ENABLE)) |
534 | cohc->stopped = 1; | 535 | cohc->stopped = 1; |
@@ -848,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) | |||
848 | 849 | ||
849 | /* Must clear TC interrupt before calling | 850 | /* Must clear TC interrupt before calling |
850 | * dma_tc_handle | 851 | * dma_tc_handle |
851 | * in case tc_handle initate a new dma job | 852 | * in case tc_handle initiate a new dma job |
852 | */ | 853 | */ |
853 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); | 854 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); |
854 | 855 | ||
@@ -893,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) | |||
893 | } | 894 | } |
894 | /* Must clear TC interrupt before calling | 895 | /* Must clear TC interrupt before calling |
895 | * dma_tc_handle | 896 | * dma_tc_handle |
896 | * in case tc_handle initate a new dma job | 897 | * in case tc_handle initiate a new dma job |
897 | */ | 898 | */ |
898 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); | 899 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); |
899 | 900 | ||
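Besides the comment typo fixes, the coh901318 hunk only adds an explicit .llseek to the debugfs status file so its seek behaviour no longer relies on the implicit VFS default. A generic sketch of such a read-only debugfs file (my_status_read and the message it returns are hypothetical, not from the driver):

static ssize_t my_status_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	static const char msg[] = "ok\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations my_status_fops = {
	.owner	= THIS_MODULE,
	.read	= my_status_read,
	.llseek	= default_llseek,	/* spell out the seek behaviour */
};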
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9d31d5eb95c1..8bcb15fb959d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device) | |||
690 | !device->device_prep_dma_memset); | 690 | !device->device_prep_dma_memset); |
691 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 691 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
692 | !device->device_prep_dma_interrupt); | 692 | !device->device_prep_dma_interrupt); |
693 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | ||
694 | !device->device_prep_dma_sg); | ||
693 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | 695 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && |
694 | !device->device_prep_slave_sg); | 696 | !device->device_prep_slave_sg); |
697 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | ||
698 | !device->device_prep_dma_cyclic); | ||
695 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | 699 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && |
696 | !device->device_control); | 700 | !device->device_control); |
697 | 701 | ||
@@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device) | |||
702 | BUG_ON(!device->dev); | 706 | BUG_ON(!device->dev); |
703 | 707 | ||
704 | /* note: this only matters in the | 708 | /* note: this only matters in the |
705 | * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case | 709 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
706 | */ | 710 | */ |
707 | if (device_has_all_tx_types(device)) | 711 | if (device_has_all_tx_types(device)) |
708 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); | 712 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); |
@@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
976 | struct dma_chan *chan) | 980 | struct dma_chan *chan) |
977 | { | 981 | { |
978 | tx->chan = chan; | 982 | tx->chan = chan; |
979 | #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 983 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
980 | spin_lock_init(&tx->lock); | 984 | spin_lock_init(&tx->lock); |
981 | #endif | 985 | #endif |
982 | } | 986 | } |
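dma_async_device_register() above now cross-checks two more capability bits against their prep callbacks, so a driver advertising DMA_CYCLIC (as at_hdmac and dw_dmac now do) must wire up device_prep_dma_cyclic before registering. A rough sketch of the registration order this implies; the foo_* driver, its embedded dma_device and its callbacks are hypothetical and assumed to be defined elsewhere:

struct foo_dma {
	struct dma_device dma;
};

static int foo_register(struct foo_dma *fd)
{
	struct dma_device *dd = &fd->dma;

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	/* every capability advertised above needs its callback, or the
	 * new BUG_ON()s fire at registration time */
	dd->device_prep_slave_sg   = foo_prep_slave_sg;
	dd->device_prep_dma_cyclic = foo_prep_dma_cyclic;
	dd->device_control         = foo_control;
	dd->device_tx_status       = foo_tx_status;
	dd->device_issue_pending   = foo_issue_pending;

	return dma_async_device_register(dd);
}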
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684d..b4f5c32b6a47 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO); | |||
54 | MODULE_PARM_DESC(pq_sources, | 54 | MODULE_PARM_DESC(pq_sources, |
55 | "Number of p+q source buffers (default: 3)"); | 55 | "Number of p+q source buffers (default: 3)"); |
56 | 56 | ||
57 | static int timeout = 3000; | ||
58 | module_param(timeout, uint, S_IRUGO); | ||
59 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | ||
60 | "Pass -1 for infinite timeout"); | ||
61 | |||
57 | /* | 62 | /* |
58 | * Initialization patterns. All bytes in the source buffer has bit 7 | 63 | * Initialization patterns. All bytes in the source buffer has bit 7 |
59 | * set, all bytes in the destination buffer has bit 7 cleared. | 64 | * set, all bytes in the destination buffer has bit 7 cleared. |
@@ -285,7 +290,12 @@ static int dmatest_func(void *data) | |||
285 | 290 | ||
286 | set_user_nice(current, 10); | 291 | set_user_nice(current, 10); |
287 | 292 | ||
288 | flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; | 293 | /* |
294 | * src buffers are freed by the DMAEngine code with dma_unmap_single() | ||
295 | * dst buffers are freed by ourselves below | ||
296 | */ | ||
297 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | ||
298 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; | ||
289 | 299 | ||
290 | while (!kthread_should_stop() | 300 | while (!kthread_should_stop() |
291 | && !(iterations && total_tests >= iterations)) { | 301 | && !(iterations && total_tests >= iterations)) { |
@@ -294,7 +304,7 @@ static int dmatest_func(void *data) | |||
294 | dma_addr_t dma_srcs[src_cnt]; | 304 | dma_addr_t dma_srcs[src_cnt]; |
295 | dma_addr_t dma_dsts[dst_cnt]; | 305 | dma_addr_t dma_dsts[dst_cnt]; |
296 | struct completion cmp; | 306 | struct completion cmp; |
297 | unsigned long tmo = msecs_to_jiffies(3000); | 307 | unsigned long tmo = msecs_to_jiffies(timeout); |
298 | u8 align = 0; | 308 | u8 align = 0; |
299 | 309 | ||
300 | total_tests++; | 310 | total_tests++; |
@@ -624,5 +634,5 @@ static void __exit dmatest_exit(void) | |||
624 | } | 634 | } |
625 | module_exit(dmatest_exit); | 635 | module_exit(dmatest_exit); |
626 | 636 | ||
627 | MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); | 637 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
628 | MODULE_LICENSE("GPL v2"); | 638 | MODULE_LICENSE("GPL v2"); |
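The new dmatest timeout parameter replaces the hard-coded 3000 ms bound on each transfer wait, so something like 'modprobe dmatest timeout=10000' widens the window (the help text suggests -1 for an effectively unbounded wait). Simplified, the wait it feeds looks roughly like the following inside dmatest.c (my_wait is an illustrative wrapper, not a function in the file):

static int my_wait(struct completion *cmp)
{
	unsigned long tmo = msecs_to_jiffies(timeout);	/* the module parameter */

	tmo = wait_for_completion_timeout(cmp, tmo);
	if (!tmo) {
		pr_warn("dmatest: transfer timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}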
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@ | |||
3 | * AVR32 systems.) | 3 | * AVR32 systems.) |
4 | * | 4 | * |
5 | * Copyright (C) 2007-2008 Atmel Corporation | 5 | * Copyright (C) 2007-2008 Atmel Corporation |
6 | * Copyright (C) 2010-2011 ST Microelectronics | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -32,26 +33,30 @@ | |||
32 | * which does not support descriptor writeback. | 33 | * which does not support descriptor writeback. |
33 | */ | 34 | */ |
34 | 35 | ||
35 | /* NOTE: DMS+SMS is system-specific. We should get this information | 36 | #define DWC_DEFAULT_CTLLO(private) ({ \ |
36 | * from the platform code somehow. | 37 | struct dw_dma_slave *__slave = (private); \ |
37 | */ | 38 | int dms = __slave ? __slave->dst_master : 0; \ |
38 | #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ | 39 | int sms = __slave ? __slave->src_master : 1; \ |
39 | | DWC_CTLL_SRC_MSIZE(0) \ | 40 | u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ |
40 | | DWC_CTLL_DMS(0) \ | 41 | u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ |
41 | | DWC_CTLL_SMS(1) \ | 42 | \ |
42 | | DWC_CTLL_LLP_D_EN \ | 43 | (DWC_CTLL_DST_MSIZE(dmsize) \ |
43 | | DWC_CTLL_LLP_S_EN) | 44 | | DWC_CTLL_SRC_MSIZE(smsize) \ |
45 | | DWC_CTLL_LLP_D_EN \ | ||
46 | | DWC_CTLL_LLP_S_EN \ | ||
47 | | DWC_CTLL_DMS(dms) \ | ||
48 | | DWC_CTLL_SMS(sms)); \ | ||
49 | }) | ||
44 | 50 | ||
45 | /* | 51 | /* |
46 | * This is configuration-dependent and usually a funny size like 4095. | 52 | * This is configuration-dependent and usually a funny size like 4095. |
47 | * Let's round it down to the nearest power of two. | ||
48 | * | 53 | * |
49 | * Note that this is a transfer count, i.e. if we transfer 32-bit | 54 | * Note that this is a transfer count, i.e. if we transfer 32-bit |
50 | * words, we can do 8192 bytes per descriptor. | 55 | * words, we can do 16380 bytes per descriptor. |
51 | * | 56 | * |
52 | * This parameter is also system-specific. | 57 | * This parameter is also system-specific. |
53 | */ | 58 | */ |
54 | #define DWC_MAX_COUNT 2048U | 59 | #define DWC_MAX_COUNT 4095U |
55 | 60 | ||
56 | /* | 61 | /* |
57 | * Number of descriptors to allocate for each channel. This should be | 62 | * Number of descriptors to allocate for each channel. This should be |
@@ -84,18 +89,14 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | |||
84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 89 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
85 | } | 90 | } |
86 | 91 | ||
87 | static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) | ||
88 | { | ||
89 | return list_entry(dwc->queue.next, struct dw_desc, desc_node); | ||
90 | } | ||
91 | |||
92 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 92 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
93 | { | 93 | { |
94 | struct dw_desc *desc, *_desc; | 94 | struct dw_desc *desc, *_desc; |
95 | struct dw_desc *ret = NULL; | 95 | struct dw_desc *ret = NULL; |
96 | unsigned int i = 0; | 96 | unsigned int i = 0; |
97 | unsigned long flags; | ||
97 | 98 | ||
98 | spin_lock_bh(&dwc->lock); | 99 | spin_lock_irqsave(&dwc->lock, flags); |
99 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 100 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
100 | if (async_tx_test_ack(&desc->txd)) { | 101 | if (async_tx_test_ack(&desc->txd)) { |
101 | list_del(&desc->desc_node); | 102 | list_del(&desc->desc_node); |
@@ -105,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | |||
105 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 106 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
106 | i++; | 107 | i++; |
107 | } | 108 | } |
108 | spin_unlock_bh(&dwc->lock); | 109 | spin_unlock_irqrestore(&dwc->lock, flags); |
109 | 110 | ||
110 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); | 111 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
111 | 112 | ||
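This is the first of many bh-to-irqsave conversions in dw_dmac.c: since callbacks are no longer invoked with the channel lock held, the lock may now be taken from contexts where interrupts are already disabled, so every taker saves and restores the interrupt state. The skeleton of the pattern, with a hypothetical lock of its own:

static DEFINE_SPINLOCK(my_lock);

static void my_touch_lists(void)
{
	unsigned long flags;

	/* safe from process, softirq and hardirq context alike, unlike the
	 * previous spin_lock_bh() variant */
	spin_lock_irqsave(&my_lock, flags);
	/* ... manipulate the free/active/queue lists ... */
	spin_unlock_irqrestore(&my_lock, flags);
}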
@@ -131,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
131 | */ | 132 | */ |
132 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | 133 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
133 | { | 134 | { |
135 | unsigned long flags; | ||
136 | |||
134 | if (desc) { | 137 | if (desc) { |
135 | struct dw_desc *child; | 138 | struct dw_desc *child; |
136 | 139 | ||
137 | dwc_sync_desc_for_cpu(dwc, desc); | 140 | dwc_sync_desc_for_cpu(dwc, desc); |
138 | 141 | ||
139 | spin_lock_bh(&dwc->lock); | 142 | spin_lock_irqsave(&dwc->lock, flags); |
140 | list_for_each_entry(child, &desc->tx_list, desc_node) | 143 | list_for_each_entry(child, &desc->tx_list, desc_node) |
141 | dev_vdbg(chan2dev(&dwc->chan), | 144 | dev_vdbg(chan2dev(&dwc->chan), |
142 | "moving child desc %p to freelist\n", | 145 | "moving child desc %p to freelist\n", |
@@ -144,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
144 | list_splice_init(&desc->tx_list, &dwc->free_list); | 147 | list_splice_init(&desc->tx_list, &dwc->free_list); |
145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | 148 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
146 | list_add(&desc->desc_node, &dwc->free_list); | 149 | list_add(&desc->desc_node, &dwc->free_list); |
147 | spin_unlock_bh(&dwc->lock); | 150 | spin_unlock_irqrestore(&dwc->lock, flags); |
148 | } | 151 | } |
149 | } | 152 | } |
150 | 153 | ||
@@ -196,19 +199,31 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
196 | /*----------------------------------------------------------------------*/ | 199 | /*----------------------------------------------------------------------*/ |
197 | 200 | ||
198 | static void | 201 | static void |
199 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | 202 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
203 | bool callback_required) | ||
200 | { | 204 | { |
201 | dma_async_tx_callback callback; | 205 | dma_async_tx_callback callback = NULL; |
202 | void *param; | 206 | void *param = NULL; |
203 | struct dma_async_tx_descriptor *txd = &desc->txd; | 207 | struct dma_async_tx_descriptor *txd = &desc->txd; |
208 | struct dw_desc *child; | ||
209 | unsigned long flags; | ||
204 | 210 | ||
205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 211 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
206 | 212 | ||
213 | spin_lock_irqsave(&dwc->lock, flags); | ||
207 | dwc->completed = txd->cookie; | 214 | dwc->completed = txd->cookie; |
208 | callback = txd->callback; | 215 | if (callback_required) { |
209 | param = txd->callback_param; | 216 | callback = txd->callback; |
217 | param = txd->callback_param; | ||
218 | } | ||
210 | 219 | ||
211 | dwc_sync_desc_for_cpu(dwc, desc); | 220 | dwc_sync_desc_for_cpu(dwc, desc); |
221 | |||
222 | /* async_tx_ack */ | ||
223 | list_for_each_entry(child, &desc->tx_list, desc_node) | ||
224 | async_tx_ack(&child->txd); | ||
225 | async_tx_ack(&desc->txd); | ||
226 | |||
212 | list_splice_init(&desc->tx_list, &dwc->free_list); | 227 | list_splice_init(&desc->tx_list, &dwc->free_list); |
213 | list_move(&desc->desc_node, &dwc->free_list); | 228 | list_move(&desc->desc_node, &dwc->free_list); |
214 | 229 | ||
@@ -232,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
232 | } | 247 | } |
233 | } | 248 | } |
234 | 249 | ||
235 | /* | 250 | spin_unlock_irqrestore(&dwc->lock, flags); |
236 | * The API requires that no submissions are done from a | 251 | |
237 | * callback, so we don't need to drop the lock here | 252 | if (callback_required && callback) |
238 | */ | ||
239 | if (callback) | ||
240 | callback(param); | 253 | callback(param); |
241 | } | 254 | } |
242 | 255 | ||
@@ -244,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
244 | { | 257 | { |
245 | struct dw_desc *desc, *_desc; | 258 | struct dw_desc *desc, *_desc; |
246 | LIST_HEAD(list); | 259 | LIST_HEAD(list); |
260 | unsigned long flags; | ||
247 | 261 | ||
262 | spin_lock_irqsave(&dwc->lock, flags); | ||
248 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 263 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
249 | dev_err(chan2dev(&dwc->chan), | 264 | dev_err(chan2dev(&dwc->chan), |
250 | "BUG: XFER bit set, but channel not idle!\n"); | 265 | "BUG: XFER bit set, but channel not idle!\n"); |
@@ -259,13 +274,16 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
259 | * Submit queued descriptors ASAP, i.e. before we go through | 274 | * Submit queued descriptors ASAP, i.e. before we go through |
260 | * the completed ones. | 275 | * the completed ones. |
261 | */ | 276 | */ |
262 | if (!list_empty(&dwc->queue)) | ||
263 | dwc_dostart(dwc, dwc_first_queued(dwc)); | ||
264 | list_splice_init(&dwc->active_list, &list); | 277 | list_splice_init(&dwc->active_list, &list); |
265 | list_splice_init(&dwc->queue, &dwc->active_list); | 278 | if (!list_empty(&dwc->queue)) { |
279 | list_move(dwc->queue.next, &dwc->active_list); | ||
280 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
281 | } | ||
282 | |||
283 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
266 | 284 | ||
267 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 285 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
268 | dwc_descriptor_complete(dwc, desc); | 286 | dwc_descriptor_complete(dwc, desc, true); |
269 | } | 287 | } |
270 | 288 | ||
271 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 289 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
@@ -274,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
274 | struct dw_desc *desc, *_desc; | 292 | struct dw_desc *desc, *_desc; |
275 | struct dw_desc *child; | 293 | struct dw_desc *child; |
276 | u32 status_xfer; | 294 | u32 status_xfer; |
295 | unsigned long flags; | ||
277 | 296 | ||
297 | spin_lock_irqsave(&dwc->lock, flags); | ||
278 | /* | 298 | /* |
279 | * Clear block interrupt flag before scanning so that we don't | 299 | * Clear block interrupt flag before scanning so that we don't |
280 | * miss any, and read LLP before RAW_XFER to ensure it is | 300 | * miss any, and read LLP before RAW_XFER to ensure it is |
@@ -287,27 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
287 | if (status_xfer & dwc->mask) { | 307 | if (status_xfer & dwc->mask) { |
288 | /* Everything we've submitted is done */ | 308 | /* Everything we've submitted is done */ |
289 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 309 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
310 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
311 | |||
290 | dwc_complete_all(dw, dwc); | 312 | dwc_complete_all(dw, dwc); |
291 | return; | 313 | return; |
292 | } | 314 | } |
293 | 315 | ||
316 | if (list_empty(&dwc->active_list)) { | ||
317 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
318 | return; | ||
319 | } | ||
320 | |||
294 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); | 321 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
295 | 322 | ||
296 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 323 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
297 | if (desc->lli.llp == llp) | 324 | /* check first descriptors addr */ |
325 | if (desc->txd.phys == llp) { | ||
326 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
327 | return; | ||
328 | } | ||
329 | |||
330 | /* check first descriptors llp */ | ||
331 | if (desc->lli.llp == llp) { | ||
298 | /* This one is currently in progress */ | 332 | /* This one is currently in progress */ |
333 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
299 | return; | 334 | return; |
335 | } | ||
300 | 336 | ||
301 | list_for_each_entry(child, &desc->tx_list, desc_node) | 337 | list_for_each_entry(child, &desc->tx_list, desc_node) |
302 | if (child->lli.llp == llp) | 338 | if (child->lli.llp == llp) { |
303 | /* Currently in progress */ | 339 | /* Currently in progress */ |
340 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
304 | return; | 341 | return; |
342 | } | ||
305 | 343 | ||
306 | /* | 344 | /* |
307 | * No descriptors so far seem to be in progress, i.e. | 345 | * No descriptors so far seem to be in progress, i.e. |
308 | * this one must be done. | 346 | * this one must be done. |
309 | */ | 347 | */ |
310 | dwc_descriptor_complete(dwc, desc); | 348 | spin_unlock_irqrestore(&dwc->lock, flags); |
349 | dwc_descriptor_complete(dwc, desc, true); | ||
350 | spin_lock_irqsave(&dwc->lock, flags); | ||
311 | } | 351 | } |
312 | 352 | ||
313 | dev_err(chan2dev(&dwc->chan), | 353 | dev_err(chan2dev(&dwc->chan), |
@@ -319,9 +359,10 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
319 | cpu_relax(); | 359 | cpu_relax(); |
320 | 360 | ||
321 | if (!list_empty(&dwc->queue)) { | 361 | if (!list_empty(&dwc->queue)) { |
322 | dwc_dostart(dwc, dwc_first_queued(dwc)); | 362 | list_move(dwc->queue.next, &dwc->active_list); |
323 | list_splice_init(&dwc->queue, &dwc->active_list); | 363 | dwc_dostart(dwc, dwc_first_active(dwc)); |
324 | } | 364 | } |
365 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
325 | } | 366 | } |
326 | 367 | ||
327 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 368 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
@@ -336,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
336 | { | 377 | { |
337 | struct dw_desc *bad_desc; | 378 | struct dw_desc *bad_desc; |
338 | struct dw_desc *child; | 379 | struct dw_desc *child; |
380 | unsigned long flags; | ||
339 | 381 | ||
340 | dwc_scan_descriptors(dw, dwc); | 382 | dwc_scan_descriptors(dw, dwc); |
341 | 383 | ||
384 | spin_lock_irqsave(&dwc->lock, flags); | ||
385 | |||
342 | /* | 386 | /* |
343 | * The descriptor currently at the head of the active list is | 387 | * The descriptor currently at the head of the active list is |
344 | * borked. Since we don't have any way to report errors, we'll | 388 | * borked. Since we don't have any way to report errors, we'll |
@@ -346,7 +390,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
346 | */ | 390 | */ |
347 | bad_desc = dwc_first_active(dwc); | 391 | bad_desc = dwc_first_active(dwc); |
348 | list_del_init(&bad_desc->desc_node); | 392 | list_del_init(&bad_desc->desc_node); |
349 | list_splice_init(&dwc->queue, dwc->active_list.prev); | 393 | list_move(dwc->queue.next, dwc->active_list.prev); |
350 | 394 | ||
351 | /* Clear the error flag and try to restart the controller */ | 395 | /* Clear the error flag and try to restart the controller */ |
352 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 396 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
@@ -368,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
368 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 412 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
369 | dwc_dump_lli(dwc, &child->lli); | 413 | dwc_dump_lli(dwc, &child->lli); |
370 | 414 | ||
415 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
416 | |||
371 | /* Pretend the descriptor completed successfully */ | 417 | /* Pretend the descriptor completed successfully */ |
372 | dwc_descriptor_complete(dwc, bad_desc); | 418 | dwc_descriptor_complete(dwc, bad_desc, true); |
373 | } | 419 | } |
374 | 420 | ||
375 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 421 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
@@ -392,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr); | |||
392 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 438 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
393 | u32 status_block, u32 status_err, u32 status_xfer) | 439 | u32 status_block, u32 status_err, u32 status_xfer) |
394 | { | 440 | { |
441 | unsigned long flags; | ||
442 | |||
395 | if (status_block & dwc->mask) { | 443 | if (status_block & dwc->mask) { |
396 | void (*callback)(void *param); | 444 | void (*callback)(void *param); |
397 | void *callback_param; | 445 | void *callback_param; |
@@ -402,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
402 | 450 | ||
403 | callback = dwc->cdesc->period_callback; | 451 | callback = dwc->cdesc->period_callback; |
404 | callback_param = dwc->cdesc->period_callback_param; | 452 | callback_param = dwc->cdesc->period_callback_param; |
405 | if (callback) { | 453 | |
406 | spin_unlock(&dwc->lock); | 454 | if (callback) |
407 | callback(callback_param); | 455 | callback(callback_param); |
408 | spin_lock(&dwc->lock); | ||
409 | } | ||
410 | } | 456 | } |
411 | 457 | ||
412 | /* | 458 | /* |
@@ -420,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
420 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " | 466 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " |
421 | "interrupt, stopping DMA transfer\n", | 467 | "interrupt, stopping DMA transfer\n", |
422 | status_xfer ? "xfer" : "error"); | 468 | status_xfer ? "xfer" : "error"); |
469 | |||
470 | spin_lock_irqsave(&dwc->lock, flags); | ||
471 | |||
423 | dev_err(chan2dev(&dwc->chan), | 472 | dev_err(chan2dev(&dwc->chan), |
424 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 473 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
425 | channel_readl(dwc, SAR), | 474 | channel_readl(dwc, SAR), |
@@ -443,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
443 | 492 | ||
444 | for (i = 0; i < dwc->cdesc->periods; i++) | 493 | for (i = 0; i < dwc->cdesc->periods; i++) |
445 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | 494 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); |
495 | |||
496 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
446 | } | 497 | } |
447 | } | 498 | } |
448 | 499 | ||
@@ -466,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data) | |||
466 | 517 | ||
467 | for (i = 0; i < dw->dma.chancnt; i++) { | 518 | for (i = 0; i < dw->dma.chancnt; i++) { |
468 | dwc = &dw->chan[i]; | 519 | dwc = &dw->chan[i]; |
469 | spin_lock(&dwc->lock); | ||
470 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) | 520 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
471 | dwc_handle_cyclic(dw, dwc, status_block, status_err, | 521 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
472 | status_xfer); | 522 | status_xfer); |
@@ -474,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data) | |||
474 | dwc_handle_error(dw, dwc); | 524 | dwc_handle_error(dw, dwc); |
475 | else if ((status_block | status_xfer) & (1 << i)) | 525 | else if ((status_block | status_xfer) & (1 << i)) |
476 | dwc_scan_descriptors(dw, dwc); | 526 | dwc_scan_descriptors(dw, dwc); |
477 | spin_unlock(&dwc->lock); | ||
478 | } | 527 | } |
479 | 528 | ||
480 | /* | 529 | /* |
@@ -529,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
529 | struct dw_desc *desc = txd_to_dw_desc(tx); | 578 | struct dw_desc *desc = txd_to_dw_desc(tx); |
530 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | 579 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
531 | dma_cookie_t cookie; | 580 | dma_cookie_t cookie; |
581 | unsigned long flags; | ||
532 | 582 | ||
533 | spin_lock_bh(&dwc->lock); | 583 | spin_lock_irqsave(&dwc->lock, flags); |
534 | cookie = dwc_assign_cookie(dwc, desc); | 584 | cookie = dwc_assign_cookie(dwc, desc); |
535 | 585 | ||
536 | /* | 586 | /* |
@@ -541,8 +591,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
541 | if (list_empty(&dwc->active_list)) { | 591 | if (list_empty(&dwc->active_list)) { |
542 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 592 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
543 | desc->txd.cookie); | 593 | desc->txd.cookie); |
544 | dwc_dostart(dwc, desc); | ||
545 | list_add_tail(&desc->desc_node, &dwc->active_list); | 594 | list_add_tail(&desc->desc_node, &dwc->active_list); |
595 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
546 | } else { | 596 | } else { |
547 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", | 597 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
548 | desc->txd.cookie); | 598 | desc->txd.cookie); |
@@ -550,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
550 | list_add_tail(&desc->desc_node, &dwc->queue); | 600 | list_add_tail(&desc->desc_node, &dwc->queue); |
551 | } | 601 | } |
552 | 602 | ||
553 | spin_unlock_bh(&dwc->lock); | 603 | spin_unlock_irqrestore(&dwc->lock, flags); |
554 | 604 | ||
555 | return cookie; | 605 | return cookie; |
556 | } | 606 | } |
@@ -581,14 +631,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
581 | * We can be a lot more clever here, but this should take care | 631 | * We can be a lot more clever here, but this should take care |
582 | * of the most common optimization. | 632 | * of the most common optimization. |
583 | */ | 633 | */ |
584 | if (!((src | dest | len) & 3)) | 634 | if (!((src | dest | len) & 7)) |
635 | src_width = dst_width = 3; | ||
636 | else if (!((src | dest | len) & 3)) | ||
585 | src_width = dst_width = 2; | 637 | src_width = dst_width = 2; |
586 | else if (!((src | dest | len) & 1)) | 638 | else if (!((src | dest | len) & 1)) |
587 | src_width = dst_width = 1; | 639 | src_width = dst_width = 1; |
588 | else | 640 | else |
589 | src_width = dst_width = 0; | 641 | src_width = dst_width = 0; |
590 | 642 | ||
591 | ctllo = DWC_DEFAULT_CTLLO | 643 | ctllo = DWC_DEFAULT_CTLLO(chan->private) |
592 | | DWC_CTLL_DST_WIDTH(dst_width) | 644 | | DWC_CTLL_DST_WIDTH(dst_width) |
593 | | DWC_CTLL_SRC_WIDTH(src_width) | 645 | | DWC_CTLL_SRC_WIDTH(src_width) |
594 | | DWC_CTLL_DST_INC | 646 | | DWC_CTLL_DST_INC |
@@ -669,17 +721,23 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
669 | 721 | ||
670 | switch (direction) { | 722 | switch (direction) { |
671 | case DMA_TO_DEVICE: | 723 | case DMA_TO_DEVICE: |
672 | ctllo = (DWC_DEFAULT_CTLLO | 724 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
673 | | DWC_CTLL_DST_WIDTH(reg_width) | 725 | | DWC_CTLL_DST_WIDTH(reg_width) |
674 | | DWC_CTLL_DST_FIX | 726 | | DWC_CTLL_DST_FIX |
675 | | DWC_CTLL_SRC_INC | 727 | | DWC_CTLL_SRC_INC |
676 | | DWC_CTLL_FC_M2P); | 728 | | DWC_CTLL_FC(dws->fc)); |
677 | reg = dws->tx_reg; | 729 | reg = dws->tx_reg; |
678 | for_each_sg(sgl, sg, sg_len, i) { | 730 | for_each_sg(sgl, sg, sg_len, i) { |
679 | struct dw_desc *desc; | 731 | struct dw_desc *desc; |
680 | u32 len; | 732 | u32 len, dlen, mem; |
681 | u32 mem; | ||
682 | 733 | ||
734 | mem = sg_phys(sg); | ||
735 | len = sg_dma_len(sg); | ||
736 | mem_width = 2; | ||
737 | if (unlikely(mem & 3 || len & 3)) | ||
738 | mem_width = 0; | ||
739 | |||
740 | slave_sg_todev_fill_desc: | ||
683 | desc = dwc_desc_get(dwc); | 741 | desc = dwc_desc_get(dwc); |
684 | if (!desc) { | 742 | if (!desc) { |
685 | dev_err(chan2dev(chan), | 743 | dev_err(chan2dev(chan), |
@@ -687,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
687 | goto err_desc_get; | 745 | goto err_desc_get; |
688 | } | 746 | } |
689 | 747 | ||
690 | mem = sg_phys(sg); | ||
691 | len = sg_dma_len(sg); | ||
692 | mem_width = 2; | ||
693 | if (unlikely(mem & 3 || len & 3)) | ||
694 | mem_width = 0; | ||
695 | |||
696 | desc->lli.sar = mem; | 748 | desc->lli.sar = mem; |
697 | desc->lli.dar = reg; | 749 | desc->lli.dar = reg; |
698 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 750 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
699 | desc->lli.ctlhi = len >> mem_width; | 751 | if ((len >> mem_width) > DWC_MAX_COUNT) { |
752 | dlen = DWC_MAX_COUNT << mem_width; | ||
753 | mem += dlen; | ||
754 | len -= dlen; | ||
755 | } else { | ||
756 | dlen = len; | ||
757 | len = 0; | ||
758 | } | ||
759 | |||
760 | desc->lli.ctlhi = dlen >> mem_width; | ||
700 | 761 | ||
701 | if (!first) { | 762 | if (!first) { |
702 | first = desc; | 763 | first = desc; |
@@ -710,28 +771,23 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
710 | &first->tx_list); | 771 | &first->tx_list); |
711 | } | 772 | } |
712 | prev = desc; | 773 | prev = desc; |
713 | total_len += len; | 774 | total_len += dlen; |
775 | |||
776 | if (len) | ||
777 | goto slave_sg_todev_fill_desc; | ||
714 | } | 778 | } |
715 | break; | 779 | break; |
716 | case DMA_FROM_DEVICE: | 780 | case DMA_FROM_DEVICE: |
717 | ctllo = (DWC_DEFAULT_CTLLO | 781 | ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
718 | | DWC_CTLL_SRC_WIDTH(reg_width) | 782 | | DWC_CTLL_SRC_WIDTH(reg_width) |
719 | | DWC_CTLL_DST_INC | 783 | | DWC_CTLL_DST_INC |
720 | | DWC_CTLL_SRC_FIX | 784 | | DWC_CTLL_SRC_FIX |
721 | | DWC_CTLL_FC_P2M); | 785 | | DWC_CTLL_FC(dws->fc)); |
722 | 786 | ||
723 | reg = dws->rx_reg; | 787 | reg = dws->rx_reg; |
724 | for_each_sg(sgl, sg, sg_len, i) { | 788 | for_each_sg(sgl, sg, sg_len, i) { |
725 | struct dw_desc *desc; | 789 | struct dw_desc *desc; |
726 | u32 len; | 790 | u32 len, dlen, mem; |
727 | u32 mem; | ||
728 | |||
729 | desc = dwc_desc_get(dwc); | ||
730 | if (!desc) { | ||
731 | dev_err(chan2dev(chan), | ||
732 | "not enough descriptors available\n"); | ||
733 | goto err_desc_get; | ||
734 | } | ||
735 | 791 | ||
736 | mem = sg_phys(sg); | 792 | mem = sg_phys(sg); |
737 | len = sg_dma_len(sg); | 793 | len = sg_dma_len(sg); |
@@ -739,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
739 | if (unlikely(mem & 3 || len & 3)) | 795 | if (unlikely(mem & 3 || len & 3)) |
740 | mem_width = 0; | 796 | mem_width = 0; |
741 | 797 | ||
798 | slave_sg_fromdev_fill_desc: | ||
799 | desc = dwc_desc_get(dwc); | ||
800 | if (!desc) { | ||
801 | dev_err(chan2dev(chan), | ||
802 | "not enough descriptors available\n"); | ||
803 | goto err_desc_get; | ||
804 | } | ||
805 | |||
742 | desc->lli.sar = reg; | 806 | desc->lli.sar = reg; |
743 | desc->lli.dar = mem; | 807 | desc->lli.dar = mem; |
744 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 808 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
745 | desc->lli.ctlhi = len >> reg_width; | 809 | if ((len >> reg_width) > DWC_MAX_COUNT) { |
810 | dlen = DWC_MAX_COUNT << reg_width; | ||
811 | mem += dlen; | ||
812 | len -= dlen; | ||
813 | } else { | ||
814 | dlen = len; | ||
815 | len = 0; | ||
816 | } | ||
817 | desc->lli.ctlhi = dlen >> reg_width; | ||
746 | 818 | ||
747 | if (!first) { | 819 | if (!first) { |
748 | first = desc; | 820 | first = desc; |
@@ -756,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
756 | &first->tx_list); | 828 | &first->tx_list); |
757 | } | 829 | } |
758 | prev = desc; | 830 | prev = desc; |
759 | total_len += len; | 831 | total_len += dlen; |
832 | |||
833 | if (len) | ||
834 | goto slave_sg_fromdev_fill_desc; | ||
760 | } | 835 | } |
761 | break; | 836 | break; |
762 | default: | 837 | default: |
@@ -787,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
787 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 862 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
788 | struct dw_dma *dw = to_dw_dma(chan->device); | 863 | struct dw_dma *dw = to_dw_dma(chan->device); |
789 | struct dw_desc *desc, *_desc; | 864 | struct dw_desc *desc, *_desc; |
865 | unsigned long flags; | ||
866 | u32 cfglo; | ||
790 | LIST_HEAD(list); | 867 | LIST_HEAD(list); |
791 | 868 | ||
792 | /* Only supports DMA_TERMINATE_ALL */ | 869 | if (cmd == DMA_PAUSE) { |
793 | if (cmd != DMA_TERMINATE_ALL) | 870 | spin_lock_irqsave(&dwc->lock, flags); |
794 | return -ENXIO; | ||
795 | 871 | ||
796 | /* | 872 | cfglo = channel_readl(dwc, CFG_LO); |
797 | * This is only called when something went wrong elsewhere, so | 873 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
798 | * we don't really care about the data. Just disable the | 874 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) |
799 | * channel. We still have to poll the channel enable bit due | 875 | cpu_relax(); |
800 | * to AHB/HSB limitations. | ||
801 | */ | ||
802 | spin_lock_bh(&dwc->lock); | ||
803 | 876 | ||
804 | channel_clear_bit(dw, CH_EN, dwc->mask); | 877 | dwc->paused = true; |
878 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
879 | } else if (cmd == DMA_RESUME) { | ||
880 | if (!dwc->paused) | ||
881 | return 0; | ||
805 | 882 | ||
806 | while (dma_readl(dw, CH_EN) & dwc->mask) | 883 | spin_lock_irqsave(&dwc->lock, flags); |
807 | cpu_relax(); | ||
808 | 884 | ||
809 | /* active_list entries will end up before queued entries */ | 885 | cfglo = channel_readl(dwc, CFG_LO); |
810 | list_splice_init(&dwc->queue, &list); | 886 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); |
811 | list_splice_init(&dwc->active_list, &list); | 887 | dwc->paused = false; |
812 | 888 | ||
813 | spin_unlock_bh(&dwc->lock); | 889 | spin_unlock_irqrestore(&dwc->lock, flags); |
890 | } else if (cmd == DMA_TERMINATE_ALL) { | ||
891 | spin_lock_irqsave(&dwc->lock, flags); | ||
814 | 892 | ||
815 | /* Flush all pending and queued descriptors */ | 893 | channel_clear_bit(dw, CH_EN, dwc->mask); |
816 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 894 | while (dma_readl(dw, CH_EN) & dwc->mask) |
817 | dwc_descriptor_complete(dwc, desc); | 895 | cpu_relax(); |
896 | |||
897 | dwc->paused = false; | ||
898 | |||
899 | /* active_list entries will end up before queued entries */ | ||
900 | list_splice_init(&dwc->queue, &list); | ||
901 | list_splice_init(&dwc->active_list, &list); | ||
902 | |||
903 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
904 | |||
905 | /* Flush all pending and queued descriptors */ | ||
906 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | ||
907 | dwc_descriptor_complete(dwc, desc, false); | ||
908 | } else | ||
909 | return -ENXIO; | ||
818 | 910 | ||
819 | return 0; | 911 | return 0; |
820 | } | 912 | } |
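dwc_control() now handles DMA_PAUSE and DMA_RESUME alongside DMA_TERMINATE_ALL, suspending the channel via DWC_CFGL_CH_SUSP and waiting for the FIFO to drain. From a slave client's side this looks roughly as follows (a sketch assuming chan was obtained through the usual dma_request_channel() path):

static void my_pause_resume(struct dma_chan *chan)
{
	/* suspend without discarding queued descriptors; tx_status reports
	 * DMA_PAUSED while the channel sits in this state */
	chan->device->device_control(chan, DMA_PAUSE, 0);

	/* ... inspect residue, reconfigure, ... */

	chan->device->device_control(chan, DMA_RESUME, 0);
}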
@@ -842,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan, | |||
842 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 934 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
843 | } | 935 | } |
844 | 936 | ||
845 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 937 | if (ret != DMA_SUCCESS) |
938 | dma_set_tx_state(txstate, last_complete, last_used, | ||
939 | dwc_first_active(dwc)->len); | ||
940 | else | ||
941 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
942 | |||
943 | if (dwc->paused) | ||
944 | return DMA_PAUSED; | ||
846 | 945 | ||
847 | return ret; | 946 | return ret; |
848 | } | 947 | } |
@@ -851,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
851 | { | 950 | { |
852 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 951 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
853 | 952 | ||
854 | spin_lock_bh(&dwc->lock); | ||
855 | if (!list_empty(&dwc->queue)) | 953 | if (!list_empty(&dwc->queue)) |
856 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 954 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
857 | spin_unlock_bh(&dwc->lock); | ||
858 | } | 955 | } |
859 | 956 | ||
860 | static int dwc_alloc_chan_resources(struct dma_chan *chan) | 957 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
@@ -866,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
866 | int i; | 963 | int i; |
867 | u32 cfghi; | 964 | u32 cfghi; |
868 | u32 cfglo; | 965 | u32 cfglo; |
966 | unsigned long flags; | ||
869 | 967 | ||
870 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 968 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
871 | 969 | ||
@@ -889,8 +987,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
889 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | 987 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
890 | 988 | ||
891 | cfghi = dws->cfg_hi; | 989 | cfghi = dws->cfg_hi; |
892 | cfglo = dws->cfg_lo; | 990 | cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
893 | } | 991 | } |
992 | |||
993 | cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); | ||
994 | |||
894 | channel_writel(dwc, CFG_LO, cfglo); | 995 | channel_writel(dwc, CFG_LO, cfglo); |
895 | channel_writel(dwc, CFG_HI, cfghi); | 996 | channel_writel(dwc, CFG_HI, cfghi); |
896 | 997 | ||
@@ -900,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
900 | * doesn't mean what you think it means), and status writeback. | 1001 | * doesn't mean what you think it means), and status writeback. |
901 | */ | 1002 | */ |
902 | 1003 | ||
903 | spin_lock_bh(&dwc->lock); | 1004 | spin_lock_irqsave(&dwc->lock, flags); |
904 | i = dwc->descs_allocated; | 1005 | i = dwc->descs_allocated; |
905 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | 1006 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
906 | spin_unlock_bh(&dwc->lock); | 1007 | spin_unlock_irqrestore(&dwc->lock, flags); |
907 | 1008 | ||
908 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 1009 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
909 | if (!desc) { | 1010 | if (!desc) { |
910 | dev_info(chan2dev(chan), | 1011 | dev_info(chan2dev(chan), |
911 | "only allocated %d descriptors\n", i); | 1012 | "only allocated %d descriptors\n", i); |
912 | spin_lock_bh(&dwc->lock); | 1013 | spin_lock_irqsave(&dwc->lock, flags); |
913 | break; | 1014 | break; |
914 | } | 1015 | } |
915 | 1016 | ||
@@ -921,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
921 | sizeof(desc->lli), DMA_TO_DEVICE); | 1022 | sizeof(desc->lli), DMA_TO_DEVICE); |
922 | dwc_desc_put(dwc, desc); | 1023 | dwc_desc_put(dwc, desc); |
923 | 1024 | ||
924 | spin_lock_bh(&dwc->lock); | 1025 | spin_lock_irqsave(&dwc->lock, flags); |
925 | i = ++dwc->descs_allocated; | 1026 | i = ++dwc->descs_allocated; |
926 | } | 1027 | } |
927 | 1028 | ||
@@ -930,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
930 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | 1031 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); |
931 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 1032 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
932 | 1033 | ||
933 | spin_unlock_bh(&dwc->lock); | 1034 | spin_unlock_irqrestore(&dwc->lock, flags); |
934 | 1035 | ||
935 | dev_dbg(chan2dev(chan), | 1036 | dev_dbg(chan2dev(chan), |
936 | "alloc_chan_resources allocated %d descriptors\n", i); | 1037 | "alloc_chan_resources allocated %d descriptors\n", i); |
@@ -943,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
943 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1044 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
944 | struct dw_dma *dw = to_dw_dma(chan->device); | 1045 | struct dw_dma *dw = to_dw_dma(chan->device); |
945 | struct dw_desc *desc, *_desc; | 1046 | struct dw_desc *desc, *_desc; |
1047 | unsigned long flags; | ||
946 | LIST_HEAD(list); | 1048 | LIST_HEAD(list); |
947 | 1049 | ||
948 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", | 1050 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
@@ -953,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
953 | BUG_ON(!list_empty(&dwc->queue)); | 1055 | BUG_ON(!list_empty(&dwc->queue)); |
954 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | 1056 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
955 | 1057 | ||
956 | spin_lock_bh(&dwc->lock); | 1058 | spin_lock_irqsave(&dwc->lock, flags); |
957 | list_splice_init(&dwc->free_list, &list); | 1059 | list_splice_init(&dwc->free_list, &list); |
958 | dwc->descs_allocated = 0; | 1060 | dwc->descs_allocated = 0; |
959 | 1061 | ||
@@ -962,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
962 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | 1064 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); |
963 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | 1065 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
964 | 1066 | ||
965 | spin_unlock_bh(&dwc->lock); | 1067 | spin_unlock_irqrestore(&dwc->lock, flags); |
966 | 1068 | ||
967 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 1069 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
968 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 1070 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
@@ -987,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
987 | { | 1089 | { |
988 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1090 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
989 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1091 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1092 | unsigned long flags; | ||
990 | 1093 | ||
991 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | 1094 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { |
992 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | 1095 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); |
993 | return -ENODEV; | 1096 | return -ENODEV; |
994 | } | 1097 | } |
995 | 1098 | ||
996 | spin_lock(&dwc->lock); | 1099 | spin_lock_irqsave(&dwc->lock, flags); |
997 | 1100 | ||
998 | /* assert channel is idle */ | 1101 | /* assert channel is idle */ |
999 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1102 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
@@ -1006,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1006 | channel_readl(dwc, LLP), | 1109 | channel_readl(dwc, LLP), |
1007 | channel_readl(dwc, CTL_HI), | 1110 | channel_readl(dwc, CTL_HI), |
1008 | channel_readl(dwc, CTL_LO)); | 1111 | channel_readl(dwc, CTL_LO)); |
1009 | spin_unlock(&dwc->lock); | 1112 | spin_unlock_irqrestore(&dwc->lock, flags); |
1010 | return -EBUSY; | 1113 | return -EBUSY; |
1011 | } | 1114 | } |
1012 | 1115 | ||
@@ -1021,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1021 | 1124 | ||
1022 | channel_set_bit(dw, CH_EN, dwc->mask); | 1125 | channel_set_bit(dw, CH_EN, dwc->mask); |
1023 | 1126 | ||
1024 | spin_unlock(&dwc->lock); | 1127 | spin_unlock_irqrestore(&dwc->lock, flags); |
1025 | 1128 | ||
1026 | return 0; | 1129 | return 0; |
1027 | } | 1130 | } |
@@ -1037,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan) | |||
1037 | { | 1140 | { |
1038 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1141 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1039 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1142 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1143 | unsigned long flags; | ||
1040 | 1144 | ||
1041 | spin_lock(&dwc->lock); | 1145 | spin_lock_irqsave(&dwc->lock, flags); |
1042 | 1146 | ||
1043 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1147 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1044 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1148 | while (dma_readl(dw, CH_EN) & dwc->mask) |
1045 | cpu_relax(); | 1149 | cpu_relax(); |
1046 | 1150 | ||
1047 | spin_unlock(&dwc->lock); | 1151 | spin_unlock_irqrestore(&dwc->lock, flags); |
1048 | } | 1152 | } |
1049 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | 1153 | EXPORT_SYMBOL(dw_dma_cyclic_stop); |
1050 | 1154 | ||
@@ -1073,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1073 | unsigned int reg_width; | 1177 | unsigned int reg_width; |
1074 | unsigned int periods; | 1178 | unsigned int periods; |
1075 | unsigned int i; | 1179 | unsigned int i; |
1180 | unsigned long flags; | ||
1076 | 1181 | ||
1077 | spin_lock_bh(&dwc->lock); | 1182 | spin_lock_irqsave(&dwc->lock, flags); |
1078 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1183 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1079 | spin_unlock_bh(&dwc->lock); | 1184 | spin_unlock_irqrestore(&dwc->lock, flags); |
1080 | dev_dbg(chan2dev(&dwc->chan), | 1185 | dev_dbg(chan2dev(&dwc->chan), |
1081 | "queue and/or active list are not empty\n"); | 1186 | "queue and/or active list are not empty\n"); |
1082 | return ERR_PTR(-EBUSY); | 1187 | return ERR_PTR(-EBUSY); |
1083 | } | 1188 | } |
1084 | 1189 | ||
1085 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1190 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1086 | spin_unlock_bh(&dwc->lock); | 1191 | spin_unlock_irqrestore(&dwc->lock, flags); |
1087 | if (was_cyclic) { | 1192 | if (was_cyclic) { |
1088 | dev_dbg(chan2dev(&dwc->chan), | 1193 | dev_dbg(chan2dev(&dwc->chan), |
1089 | "channel already prepared for cyclic DMA\n"); | 1194 | "channel already prepared for cyclic DMA\n"); |
@@ -1126,23 +1231,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1126 | case DMA_TO_DEVICE: | 1231 | case DMA_TO_DEVICE: |
1127 | desc->lli.dar = dws->tx_reg; | 1232 | desc->lli.dar = dws->tx_reg; |
1128 | desc->lli.sar = buf_addr + (period_len * i); | 1233 | desc->lli.sar = buf_addr + (period_len * i); |
1129 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1234 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1130 | | DWC_CTLL_DST_WIDTH(reg_width) | 1235 | | DWC_CTLL_DST_WIDTH(reg_width) |
1131 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1236 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1132 | | DWC_CTLL_DST_FIX | 1237 | | DWC_CTLL_DST_FIX |
1133 | | DWC_CTLL_SRC_INC | 1238 | | DWC_CTLL_SRC_INC |
1134 | | DWC_CTLL_FC_M2P | 1239 | | DWC_CTLL_FC(dws->fc) |
1135 | | DWC_CTLL_INT_EN); | 1240 | | DWC_CTLL_INT_EN); |
1136 | break; | 1241 | break; |
1137 | case DMA_FROM_DEVICE: | 1242 | case DMA_FROM_DEVICE: |
1138 | desc->lli.dar = buf_addr + (period_len * i); | 1243 | desc->lli.dar = buf_addr + (period_len * i); |
1139 | desc->lli.sar = dws->rx_reg; | 1244 | desc->lli.sar = dws->rx_reg; |
1140 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1245 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
1141 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1246 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1142 | | DWC_CTLL_DST_WIDTH(reg_width) | 1247 | | DWC_CTLL_DST_WIDTH(reg_width) |
1143 | | DWC_CTLL_DST_INC | 1248 | | DWC_CTLL_DST_INC |
1144 | | DWC_CTLL_SRC_FIX | 1249 | | DWC_CTLL_SRC_FIX |
1145 | | DWC_CTLL_FC_P2M | 1250 | | DWC_CTLL_FC(dws->fc) |
1146 | | DWC_CTLL_INT_EN); | 1251 | | DWC_CTLL_INT_EN); |
1147 | break; | 1252 | break; |
1148 | default: | 1253 | default: |
@@ -1197,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1197 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1302 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1198 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | 1303 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1199 | int i; | 1304 | int i; |
1305 | unsigned long flags; | ||
1200 | 1306 | ||
1201 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); | 1307 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); |
1202 | 1308 | ||
1203 | if (!cdesc) | 1309 | if (!cdesc) |
1204 | return; | 1310 | return; |
1205 | 1311 | ||
1206 | spin_lock_bh(&dwc->lock); | 1312 | spin_lock_irqsave(&dwc->lock, flags); |
1207 | 1313 | ||
1208 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1314 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1209 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1315 | while (dma_readl(dw, CH_EN) & dwc->mask) |
@@ -1213,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan) | |||
1213 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1319 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1214 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1320 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1215 | 1321 | ||
1216 | spin_unlock_bh(&dwc->lock); | 1322 | spin_unlock_irqrestore(&dwc->lock, flags); |
1217 | 1323 | ||
1218 | for (i = 0; i < cdesc->periods; i++) | 1324 | for (i = 0; i < cdesc->periods; i++) |
1219 | dwc_desc_put(dwc, cdesc->desc[i]); | 1325 | dwc_desc_put(dwc, cdesc->desc[i]); |
@@ -1307,7 +1413,17 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1307 | dwc->chan.device = &dw->dma; | 1413 | dwc->chan.device = &dw->dma; |
1308 | dwc->chan.cookie = dwc->completed = 1; | 1414 | dwc->chan.cookie = dwc->completed = 1; |
1309 | dwc->chan.chan_id = i; | 1415 | dwc->chan.chan_id = i; |
1310 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); | 1416 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1417 | list_add_tail(&dwc->chan.device_node, | ||
1418 | &dw->dma.channels); | ||
1419 | else | ||
1420 | list_add(&dwc->chan.device_node, &dw->dma.channels); | ||
1421 | |||
1422 | /* 7 is highest priority & 0 is lowest. */ | ||
1423 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | ||
1424 | dwc->priority = 7 - i; | ||
1425 | else | ||
1426 | dwc->priority = i; | ||
1311 | 1427 | ||
1312 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; | 1428 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1313 | spin_lock_init(&dwc->lock); | 1429 | spin_lock_init(&dwc->lock); |
@@ -1335,6 +1451,8 @@ static int __init dw_probe(struct platform_device *pdev) | |||
1335 | 1451 | ||
1336 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1452 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1337 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1453 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1454 | if (pdata->is_private) | ||
1455 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | ||
1338 | dw->dma.dev = &pdev->dev; | 1456 | dw->dma.dev = &pdev->dev; |
1339 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1457 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1340 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1458 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
@@ -1447,7 +1565,7 @@ static int __init dw_init(void) | |||
1447 | { | 1565 | { |
1448 | return platform_driver_probe(&dw_driver, dw_probe); | 1566 | return platform_driver_probe(&dw_driver, dw_probe); |
1449 | } | 1567 | } |
1450 | module_init(dw_init); | 1568 | subsys_initcall(dw_init); |
1451 | 1569 | ||
1452 | static void __exit dw_exit(void) | 1570 | static void __exit dw_exit(void) |
1453 | { | 1571 | { |
@@ -1457,4 +1575,5 @@ module_exit(dw_exit); | |||
1457 | 1575 | ||
1458 | MODULE_LICENSE("GPL v2"); | 1576 | MODULE_LICENSE("GPL v2"); |
1459 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1577 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1460 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); | 1578 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1579 | MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); | ||
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index d9a939f67f46..c3419518d701 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * Driver for the Synopsys DesignWare AHB DMA Controller | 2 | * Driver for the Synopsys DesignWare AHB DMA Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2007 Atmel Corporation | 4 | * Copyright (C) 2005-2007 Atmel Corporation |
5 | * Copyright (C) 2010-2011 ST Microelectronics | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -86,6 +87,7 @@ struct dw_dma_regs { | |||
86 | #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) | 87 | #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) |
87 | #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ | 88 | #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ |
88 | #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ | 89 | #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ |
90 | #define DWC_CTLL_FC(n) ((n) << 20) | ||
89 | #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ | 91 | #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ |
90 | #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ | 92 | #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ |
91 | #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ | 93 | #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ |
@@ -101,6 +103,8 @@ struct dw_dma_regs { | |||
101 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff | 103 | #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff |
102 | 104 | ||
103 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ | 105 | /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ |
106 | #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */ | ||
107 | #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */ | ||
104 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ | 108 | #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ |
105 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ | 109 | #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ |
106 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ | 110 | #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ |
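The new CFG_LO definitions give the driver a field for the per-channel priority computed at probe time. A small sketch of inserting and reading back that field; the surrounding CFG_LO value is invented for illustration:

#include <stdio.h>

#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */

int main(void)
{
	unsigned int cfglo = 0x00000001;	/* arbitrary existing CFG_LO bits */
	unsigned int priority = 5;		/* e.g. dwc->priority from probe */

	/* clear the old field, then insert the new priority */
	cfglo = (cfglo & ~DWC_CFGL_CH_PRIOR_MASK) | DWC_CFGL_CH_PRIOR(priority);

	printf("CFG_LO = 0x%08x, priority field = %u\n",
	       cfglo, (cfglo & DWC_CFGL_CH_PRIOR_MASK) >> 5);
	return 0;
}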
@@ -134,6 +138,8 @@ struct dw_dma_chan { | |||
134 | struct dma_chan chan; | 138 | struct dma_chan chan; |
135 | void __iomem *ch_regs; | 139 | void __iomem *ch_regs; |
136 | u8 mask; | 140 | u8 mask; |
141 | u8 priority; | ||
142 | bool paused; | ||
137 | 143 | ||
138 | spinlock_t lock; | 144 | spinlock_t lock; |
139 | 145 | ||
@@ -155,9 +161,9 @@ __dwc_regs(struct dw_dma_chan *dwc) | |||
155 | } | 161 | } |
156 | 162 | ||
157 | #define channel_readl(dwc, name) \ | 163 | #define channel_readl(dwc, name) \ |
158 | __raw_readl(&(__dwc_regs(dwc)->name)) | 164 | readl(&(__dwc_regs(dwc)->name)) |
159 | #define channel_writel(dwc, name, val) \ | 165 | #define channel_writel(dwc, name, val) \ |
160 | __raw_writel((val), &(__dwc_regs(dwc)->name)) | 166 | writel((val), &(__dwc_regs(dwc)->name)) |
161 | 167 | ||
162 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) | 168 | static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) |
163 | { | 169 | { |
@@ -181,9 +187,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) | |||
181 | } | 187 | } |
182 | 188 | ||
183 | #define dma_readl(dw, name) \ | 189 | #define dma_readl(dw, name) \ |
184 | __raw_readl(&(__dw_regs(dw)->name)) | 190 | readl(&(__dw_regs(dw)->name)) |
185 | #define dma_writel(dw, name, val) \ | 191 | #define dma_writel(dw, name, val) \ |
186 | __raw_writel((val), &(__dw_regs(dw)->name)) | 192 | writel((val), &(__dw_regs(dw)->name)) |
187 | 193 | ||
188 | #define channel_set_bit(dw, reg, mask) \ | 194 | #define channel_set_bit(dw, reg, mask) \ |
189 | dma_writel(dw, reg, ((mask) << 8) | (mask)) | 195 | dma_writel(dw, reg, ((mask) << 8) | (mask)) |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index cea08bed9cf9..8a781540590c 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -35,34 +35,18 @@ | |||
35 | #include <linux/dmapool.h> | 35 | #include <linux/dmapool.h> |
36 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
37 | 37 | ||
38 | #include <asm/fsldma.h> | ||
39 | #include "fsldma.h" | 38 | #include "fsldma.h" |
40 | 39 | ||
41 | static void dma_init(struct fsldma_chan *chan) | 40 | #define chan_dbg(chan, fmt, arg...) \ |
42 | { | 41 | dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) |
43 | /* Reset the channel */ | 42 | #define chan_err(chan, fmt, arg...) \ |
44 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | 43 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) |
45 | 44 | ||
46 | switch (chan->feature & FSL_DMA_IP_MASK) { | 45 | static const char msg_ld_oom[] = "No free memory for link descriptor"; |
47 | case FSL_DMA_IP_85XX: | 46 | |
48 | /* Set the channel to below modes: | 47 | /* |
49 | * EIE - Error interrupt enable | 48 | * Register Helpers |
50 | * EOSIE - End of segments interrupt enable (basic mode) | 49 | */ |
51 | * EOLNIE - End of links interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | ||
54 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
55 | break; | ||
56 | case FSL_DMA_IP_83XX: | ||
57 | /* Set the channel to below modes: | ||
58 | * EOTIE - End-of-transfer interrupt enable | ||
59 | * PRC_RM - PCI read multiple | ||
60 | */ | ||
61 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
62 | | FSL_DMA_MR_PRC_RM, 32); | ||
63 | break; | ||
64 | } | ||
65 | } | ||
66 | 50 | ||
67 | static void set_sr(struct fsldma_chan *chan, u32 val) | 51 | static void set_sr(struct fsldma_chan *chan, u32 val) |
68 | { | 52 | { |
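The fsldma changes introduce chan_dbg()/chan_err() wrappers so every message is prefixed with the channel name instead of open-coding dev_dbg(chan->dev, ...) with the channel id each time. A user-space analog of the same macro pattern, with printf standing in for dev_dbg and the struct reduced to what the macro needs (it uses the same GNU-style named variadic macro syntax as the kernel):

#include <stdio.h>

struct fake_chan {
	const char *name;
};

/* same shape as the kernel helpers; printf stands in for dev_dbg/dev_err */
#define chan_dbg(chan, fmt, arg...) \
	printf("%s: " fmt, (chan)->name, ##arg)

int main(void)
{
	struct fake_chan chan = { .name = "chan2" };

	chan_dbg(&chan, "irq: stat = 0x%x\n", 0x84u);
	chan_dbg(&chan, "free all channel resources\n");
	return 0;
}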
@@ -74,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan) | |||
74 | return DMA_IN(chan, &chan->regs->sr, 32); | 58 | return DMA_IN(chan, &chan->regs->sr, 32); |
75 | } | 59 | } |
76 | 60 | ||
61 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | ||
62 | { | ||
63 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | ||
64 | } | ||
65 | |||
66 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | ||
67 | { | ||
68 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
69 | } | ||
70 | |||
71 | static u32 get_bcr(struct fsldma_chan *chan) | ||
72 | { | ||
73 | return DMA_IN(chan, &chan->regs->bcr, 32); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Descriptor Helpers | ||
78 | */ | ||
79 | |||
77 | static void set_desc_cnt(struct fsldma_chan *chan, | 80 | static void set_desc_cnt(struct fsldma_chan *chan, |
78 | struct fsl_dma_ld_hw *hw, u32 count) | 81 | struct fsl_dma_ld_hw *hw, u32 count) |
79 | { | 82 | { |
80 | hw->count = CPU_TO_DMA(chan, count, 32); | 83 | hw->count = CPU_TO_DMA(chan, count, 32); |
81 | } | 84 | } |
82 | 85 | ||
86 | static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) | ||
87 | { | ||
88 | return DMA_TO_CPU(chan, desc->hw.count, 32); | ||
89 | } | ||
90 | |||
83 | static void set_desc_src(struct fsldma_chan *chan, | 91 | static void set_desc_src(struct fsldma_chan *chan, |
84 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 92 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
85 | { | 93 | { |
86 | u64 snoop_bits; | 94 | u64 snoop_bits; |
87 | 95 | ||
@@ -90,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan, | |||
90 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 98 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
91 | } | 99 | } |
92 | 100 | ||
101 | static dma_addr_t get_desc_src(struct fsldma_chan *chan, | ||
102 | struct fsl_desc_sw *desc) | ||
103 | { | ||
104 | u64 snoop_bits; | ||
105 | |||
106 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
107 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
108 | return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; | ||
109 | } | ||
110 | |||
93 | static void set_desc_dst(struct fsldma_chan *chan, | 111 | static void set_desc_dst(struct fsldma_chan *chan, |
94 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 112 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
95 | { | 113 | { |
96 | u64 snoop_bits; | 114 | u64 snoop_bits; |
97 | 115 | ||
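The new get_desc_src()/get_desc_dst() helpers read back the address programmed by set_desc_src()/set_desc_dst() and strip the snoop attribute bits that live in the upper word of the 64-bit descriptor field. A standalone sketch of that round trip; the attribute value is illustrative, not the real FSL_DMA_SATR encoding:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative snoop attribute in the upper 32 bits, as in set_desc_src() */
#define SNOOP_READ_BITS	((uint64_t)0x50000 << 32)

int main(void)
{
	uint64_t src = 0x12345000;			/* DMA address */
	uint64_t hw  = SNOOP_READ_BITS | src;		/* what set_desc_src stores */

	/* what get_desc_src recovers: the address without the attributes */
	uint64_t back = hw & ~SNOOP_READ_BITS;

	assert(back == src);
	printf("stored 0x%016llx, address 0x%08llx\n",
	       (unsigned long long)hw, (unsigned long long)back);
	return 0;
}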
@@ -100,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan, | |||
100 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 118 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
101 | } | 119 | } |
102 | 120 | ||
121 | static dma_addr_t get_desc_dst(struct fsldma_chan *chan, | ||
122 | struct fsl_desc_sw *desc) | ||
123 | { | ||
124 | u64 snoop_bits; | ||
125 | |||
126 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
127 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
128 | return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; | ||
129 | } | ||
130 | |||
103 | static void set_desc_next(struct fsldma_chan *chan, | 131 | static void set_desc_next(struct fsldma_chan *chan, |
104 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 132 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
105 | { | 133 | { |
106 | u64 snoop_bits; | 134 | u64 snoop_bits; |
107 | 135 | ||
@@ -110,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan, | |||
110 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); | 138 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); |
111 | } | 139 | } |
112 | 140 | ||
113 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 141 | static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
114 | { | 142 | { |
115 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 143 | u64 snoop_bits; |
116 | } | ||
117 | 144 | ||
118 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 145 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) |
119 | { | 146 | ? FSL_DMA_SNEN : 0; |
120 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | ||
121 | } | ||
122 | 147 | ||
123 | static dma_addr_t get_ndar(struct fsldma_chan *chan) | 148 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, |
124 | { | 149 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL |
125 | return DMA_IN(chan, &chan->regs->ndar, 64); | 150 | | snoop_bits, 64); |
126 | } | 151 | } |
127 | 152 | ||
128 | static u32 get_bcr(struct fsldma_chan *chan) | 153 | /* |
154 | * DMA Engine Hardware Control Helpers | ||
155 | */ | ||
156 | |||
157 | static void dma_init(struct fsldma_chan *chan) | ||
129 | { | 158 | { |
130 | return DMA_IN(chan, &chan->regs->bcr, 32); | 159 | /* Reset the channel */ |
160 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | ||
161 | |||
162 | switch (chan->feature & FSL_DMA_IP_MASK) { | ||
163 | case FSL_DMA_IP_85XX: | ||
164 | /* Set the channel to below modes: | ||
165 | * EIE - Error interrupt enable | ||
166 | * EOLNIE - End of links interrupt enable | ||
167 | * BWC - Bandwidth sharing among channels | ||
168 | */ | ||
169 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC | ||
170 | | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); | ||
171 | break; | ||
172 | case FSL_DMA_IP_83XX: | ||
173 | /* Set the channel to below modes: | ||
174 | * EOTIE - End-of-transfer interrupt enable | ||
175 | * PRC_RM - PCI read multiple | ||
176 | */ | ||
177 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | ||
178 | | FSL_DMA_MR_PRC_RM, 32); | ||
179 | break; | ||
180 | } | ||
131 | } | 181 | } |
132 | 182 | ||
133 | static int dma_is_idle(struct fsldma_chan *chan) | 183 | static int dma_is_idle(struct fsldma_chan *chan) |
@@ -136,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan) | |||
136 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | 186 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); |
137 | } | 187 | } |
138 | 188 | ||
189 | /* | ||
190 | * Start the DMA controller | ||
191 | * | ||
192 | * Preconditions: | ||
193 | * - the CDAR register must point to the start descriptor | ||
194 | * - the MRn[CS] bit must be cleared | ||
195 | */ | ||
139 | static void dma_start(struct fsldma_chan *chan) | 196 | static void dma_start(struct fsldma_chan *chan) |
140 | { | 197 | { |
141 | u32 mode; | 198 | u32 mode; |
142 | 199 | ||
143 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 200 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
144 | 201 | ||
145 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | 202 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { |
146 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | 203 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); |
147 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); | 204 | mode |= FSL_DMA_MR_EMP_EN; |
148 | mode |= FSL_DMA_MR_EMP_EN; | 205 | } else { |
149 | } else { | 206 | mode &= ~FSL_DMA_MR_EMP_EN; |
150 | mode &= ~FSL_DMA_MR_EMP_EN; | ||
151 | } | ||
152 | } | 207 | } |
153 | 208 | ||
154 | if (chan->feature & FSL_DMA_CHAN_START_EXT) | 209 | if (chan->feature & FSL_DMA_CHAN_START_EXT) { |
155 | mode |= FSL_DMA_MR_EMS_EN; | 210 | mode |= FSL_DMA_MR_EMS_EN; |
156 | else | 211 | } else { |
212 | mode &= ~FSL_DMA_MR_EMS_EN; | ||
157 | mode |= FSL_DMA_MR_CS; | 213 | mode |= FSL_DMA_MR_CS; |
214 | } | ||
158 | 215 | ||
159 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 216 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
160 | } | 217 | } |
@@ -164,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan) | |||
164 | u32 mode; | 221 | u32 mode; |
165 | int i; | 222 | int i; |
166 | 223 | ||
224 | /* read the mode register */ | ||
167 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 225 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
168 | mode |= FSL_DMA_MR_CA; | ||
169 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
170 | 226 | ||
171 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); | 227 | /* |
228 | * The 85xx controller supports channel abort, which will stop | ||
229 | * the current transfer. On 83xx, this bit is the transfer error | ||
230 | * mask bit, which should not be changed. | ||
231 | */ | ||
232 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
233 | mode |= FSL_DMA_MR_CA; | ||
234 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
235 | |||
236 | mode &= ~FSL_DMA_MR_CA; | ||
237 | } | ||
238 | |||
239 | /* stop the DMA controller */ | ||
240 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); | ||
172 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 241 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
173 | 242 | ||
243 | /* wait for the DMA controller to become idle */ | ||
174 | for (i = 0; i < 100; i++) { | 244 | for (i = 0; i < 100; i++) { |
175 | if (dma_is_idle(chan)) | 245 | if (dma_is_idle(chan)) |
176 | return; | 246 | return; |
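dma_halt() now pulses the channel-abort bit only on 85xx, because on 83xx the same bit position is the transfer-error mask and must be left alone; both families then get the start bits cleared. A standalone sketch of that mode-register manipulation with a plain variable standing in for the MR register; the bit positions are illustrative, not the real fsldma.h values:

#include <stdio.h>

/* illustrative bit positions; the real ones live in fsldma.h */
#define MR_CS		(1u << 0)	/* channel start */
#define MR_EMS_EN	(1u << 1)	/* external master start enable */
#define MR_CA		(1u << 3)	/* channel abort (85xx only) */

enum ip { IP_85XX, IP_83XX };

static unsigned int fake_mr;		/* stands in for chan->regs->mr */

static void dma_halt_sketch(enum ip ip)
{
	unsigned int mode = fake_mr;		/* DMA_IN() */

	/* 85xx: pulse the abort bit; 83xx: do not touch that bit */
	if (ip == IP_85XX) {
		mode |= MR_CA;
		fake_mr = mode;			/* DMA_OUT(): abort the channel */
		mode &= ~MR_CA;
	}

	/* stop the controller on both families */
	mode &= ~(MR_CS | MR_EMS_EN);
	fake_mr = mode;				/* DMA_OUT() */
}

int main(void)
{
	fake_mr = MR_CS | MR_EMS_EN | MR_CA;	/* pretend it is running */
	dma_halt_sketch(IP_85XX);
	printf("85xx MR after halt: 0x%x\n", fake_mr);

	fake_mr = MR_CS | MR_EMS_EN;
	dma_halt_sketch(IP_83XX);
	printf("83xx MR after halt: 0x%x\n", fake_mr);
	return 0;
}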
@@ -179,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan) | |||
179 | } | 249 | } |
180 | 250 | ||
181 | if (!dma_is_idle(chan)) | 251 | if (!dma_is_idle(chan)) |
182 | dev_err(chan->dev, "DMA halt timeout!\n"); | 252 | chan_err(chan, "DMA halt timeout!\n"); |
183 | } | ||
184 | |||
185 | static void set_ld_eol(struct fsldma_chan *chan, | ||
186 | struct fsl_desc_sw *desc) | ||
187 | { | ||
188 | u64 snoop_bits; | ||
189 | |||
190 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
191 | ? FSL_DMA_SNEN : 0; | ||
192 | |||
193 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, | ||
194 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | ||
195 | | snoop_bits, 64); | ||
196 | } | 253 | } |
197 | 254 | ||
198 | /** | 255 | /** |
@@ -318,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | |||
318 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 375 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
319 | } | 376 | } |
320 | 377 | ||
321 | static void append_ld_queue(struct fsldma_chan *chan, | 378 | static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) |
322 | struct fsl_desc_sw *desc) | ||
323 | { | 379 | { |
324 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | 380 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); |
325 | 381 | ||
@@ -360,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
360 | cookie = chan->common.cookie; | 416 | cookie = chan->common.cookie; |
361 | list_for_each_entry(child, &desc->tx_list, node) { | 417 | list_for_each_entry(child, &desc->tx_list, node) { |
362 | cookie++; | 418 | cookie++; |
363 | if (cookie < 0) | 419 | if (cookie < DMA_MIN_COOKIE) |
364 | cookie = 1; | 420 | cookie = DMA_MIN_COOKIE; |
365 | 421 | ||
366 | child->async_tx.cookie = cookie; | 422 | child->async_tx.cookie = cookie; |
367 | } | 423 | } |
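The submit path above now wraps the cookie back to DMA_MIN_COOKIE instead of the bare constant 1 when the signed counter overflows. A tiny sketch of that wrap rule; DMA_MIN_COOKIE is taken to be 1 here, as an assumption about the dmaengine header of this era:

#include <limits.h>
#include <stdio.h>

#define DMA_MIN_COOKIE	1

typedef int dma_cookie_t;

static dma_cookie_t next_cookie(dma_cookie_t cookie)
{
	/* advance with unsigned arithmetic (a plain ++ on INT_MAX would be
	 * signed overflow); the cast back is negative on the usual
	 * two's-complement targets, which triggers the wrap below */
	cookie = (dma_cookie_t)((unsigned int)cookie + 1u);
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	return cookie;
}

int main(void)
{
	printf("after 41:      %d\n", next_cookie(41));		/* 42 */
	printf("after INT_MAX: %d\n", next_cookie(INT_MAX));	/* wraps to 1 */
	return 0;
}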
@@ -382,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
382 | * | 438 | * |
383 | * Return - The descriptor allocated. NULL for failed. | 439 | * Return - The descriptor allocated. NULL for failed. |
384 | */ | 440 | */ |
385 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | 441 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) |
386 | struct fsldma_chan *chan) | ||
387 | { | 442 | { |
388 | struct fsl_desc_sw *desc; | 443 | struct fsl_desc_sw *desc; |
389 | dma_addr_t pdesc; | 444 | dma_addr_t pdesc; |
390 | 445 | ||
391 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 446 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
392 | if (!desc) { | 447 | if (!desc) { |
393 | dev_dbg(chan->dev, "out of memory for link desc\n"); | 448 | chan_dbg(chan, "out of memory for link descriptor\n"); |
394 | return NULL; | 449 | return NULL; |
395 | } | 450 | } |
396 | 451 | ||
@@ -400,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |||
400 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 455 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
401 | desc->async_tx.phys = pdesc; | 456 | desc->async_tx.phys = pdesc; |
402 | 457 | ||
458 | #ifdef FSL_DMA_LD_DEBUG | ||
459 | chan_dbg(chan, "LD %p allocated\n", desc); | ||
460 | #endif | ||
461 | |||
403 | return desc; | 462 | return desc; |
404 | } | 463 | } |
405 | 464 | ||
406 | |||
407 | /** | 465 | /** |
408 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 466 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
409 | * @chan : Freescale DMA channel | 467 | * @chan : Freescale DMA channel |
@@ -424,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
424 | * We need the descriptor to be aligned to 32bytes | 482 | * We need the descriptor to be aligned to 32bytes |
425 | * for meeting FSL DMA specification requirement. | 483 | * for meeting FSL DMA specification requirement. |
426 | */ | 484 | */ |
427 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | 485 | chan->desc_pool = dma_pool_create(chan->name, chan->dev, |
428 | chan->dev, | ||
429 | sizeof(struct fsl_desc_sw), | 486 | sizeof(struct fsl_desc_sw), |
430 | __alignof__(struct fsl_desc_sw), 0); | 487 | __alignof__(struct fsl_desc_sw), 0); |
431 | if (!chan->desc_pool) { | 488 | if (!chan->desc_pool) { |
432 | dev_err(chan->dev, "unable to allocate channel %d " | 489 | chan_err(chan, "unable to allocate descriptor pool\n"); |
433 | "descriptor pool\n", chan->id); | ||
434 | return -ENOMEM; | 490 | return -ENOMEM; |
435 | } | 491 | } |
436 | 492 | ||
@@ -452,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan, | |||
452 | 508 | ||
453 | list_for_each_entry_safe(desc, _desc, list, node) { | 509 | list_for_each_entry_safe(desc, _desc, list, node) { |
454 | list_del(&desc->node); | 510 | list_del(&desc->node); |
511 | #ifdef FSL_DMA_LD_DEBUG | ||
512 | chan_dbg(chan, "LD %p free\n", desc); | ||
513 | #endif | ||
455 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 514 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
456 | } | 515 | } |
457 | } | 516 | } |
@@ -463,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | |||
463 | 522 | ||
464 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | 523 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { |
465 | list_del(&desc->node); | 524 | list_del(&desc->node); |
525 | #ifdef FSL_DMA_LD_DEBUG | ||
526 | chan_dbg(chan, "LD %p free\n", desc); | ||
527 | #endif | ||
466 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 528 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
467 | } | 529 | } |
468 | } | 530 | } |
@@ -476,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | |||
476 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 538 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
477 | unsigned long flags; | 539 | unsigned long flags; |
478 | 540 | ||
479 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 541 | chan_dbg(chan, "free all channel resources\n"); |
480 | spin_lock_irqsave(&chan->desc_lock, flags); | 542 | spin_lock_irqsave(&chan->desc_lock, flags); |
481 | fsldma_free_desc_list(chan, &chan->ld_pending); | 543 | fsldma_free_desc_list(chan, &chan->ld_pending); |
482 | fsldma_free_desc_list(chan, &chan->ld_running); | 544 | fsldma_free_desc_list(chan, &chan->ld_running); |
@@ -499,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
499 | 561 | ||
500 | new = fsl_dma_alloc_descriptor(chan); | 562 | new = fsl_dma_alloc_descriptor(chan); |
501 | if (!new) { | 563 | if (!new) { |
502 | dev_err(chan->dev, "No free memory for link descriptor\n"); | 564 | chan_err(chan, "%s\n", msg_ld_oom); |
503 | return NULL; | 565 | return NULL; |
504 | } | 566 | } |
505 | 567 | ||
@@ -509,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
509 | /* Insert the link descriptor to the LD ring */ | 571 | /* Insert the link descriptor to the LD ring */ |
510 | list_add_tail(&new->node, &new->tx_list); | 572 | list_add_tail(&new->node, &new->tx_list); |
511 | 573 | ||
512 | /* Set End-of-link to the last link descriptor of new list*/ | 574 | /* Set End-of-link to the last link descriptor of new list */ |
513 | set_ld_eol(chan, new); | 575 | set_ld_eol(chan, new); |
514 | 576 | ||
515 | return &new->async_tx; | 577 | return &new->async_tx; |
516 | } | 578 | } |
517 | 579 | ||
518 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | 580 | static struct dma_async_tx_descriptor * |
519 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, | 581 | fsl_dma_prep_memcpy(struct dma_chan *dchan, |
582 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
520 | size_t len, unsigned long flags) | 583 | size_t len, unsigned long flags) |
521 | { | 584 | { |
522 | struct fsldma_chan *chan; | 585 | struct fsldma_chan *chan; |
@@ -536,13 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
536 | /* Allocate the link descriptor from DMA pool */ | 599 | /* Allocate the link descriptor from DMA pool */ |
537 | new = fsl_dma_alloc_descriptor(chan); | 600 | new = fsl_dma_alloc_descriptor(chan); |
538 | if (!new) { | 601 | if (!new) { |
539 | dev_err(chan->dev, | 602 | chan_err(chan, "%s\n", msg_ld_oom); |
540 | "No free memory for link descriptor\n"); | ||
541 | goto fail; | 603 | goto fail; |
542 | } | 604 | } |
543 | #ifdef FSL_DMA_LD_DEBUG | ||
544 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
545 | #endif | ||
546 | 605 | ||
547 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | 606 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
548 | 607 | ||
@@ -570,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
570 | new->async_tx.flags = flags; /* client is in control of this ack */ | 629 | new->async_tx.flags = flags; /* client is in control of this ack */ |
571 | new->async_tx.cookie = -EBUSY; | 630 | new->async_tx.cookie = -EBUSY; |
572 | 631 | ||
573 | /* Set End-of-link to the last link descriptor of new list*/ | 632 | /* Set End-of-link to the last link descriptor of new list */ |
574 | set_ld_eol(chan, new); | 633 | set_ld_eol(chan, new); |
575 | 634 | ||
576 | return &first->async_tx; | 635 | return &first->async_tx; |
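The memcpy prep loop above splits a request into pieces no larger than FSL_DMA_BCR_MAX_CNT, allocating one link descriptor per piece. A standalone sketch of just that chunking arithmetic; the 0x03ffffff limit is assumed from fsldma.h:

#include <stdio.h>

#define FSL_DMA_BCR_MAX_CNT	0x03ffffffu	/* per-descriptor byte limit (assumed) */

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t len = (size_t)150 * 1024 * 1024;	/* 150 MiB request */
	unsigned int descriptors = 0;

	while (len) {
		size_t copy = min_size(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		/* the driver allocates and fills one link descriptor here */
		descriptors++;
		len -= copy;
	}

	printf("needs %u link descriptors\n", descriptors);
	return 0;
}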
@@ -583,362 +642,289 @@ fail: | |||
583 | return NULL; | 642 | return NULL; |
584 | } | 643 | } |
585 | 644 | ||
586 | /** | 645 | static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, |
587 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | 646 | struct scatterlist *dst_sg, unsigned int dst_nents, |
588 | * @chan: DMA channel | 647 | struct scatterlist *src_sg, unsigned int src_nents, |
589 | * @sgl: scatterlist to transfer to/from | 648 | unsigned long flags) |
590 | * @sg_len: number of entries in @scatterlist | ||
591 | * @direction: DMA direction | ||
592 | * @flags: DMAEngine flags | ||
593 | * | ||
594 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
595 | * DMA_SLAVE API, this gets the device-specific information from the | ||
596 | * chan->private variable. | ||
597 | */ | ||
598 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
599 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
600 | enum dma_data_direction direction, unsigned long flags) | ||
601 | { | 649 | { |
602 | struct fsldma_chan *chan; | ||
603 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 650 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
604 | struct fsl_dma_slave *slave; | 651 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
605 | size_t copy; | 652 | size_t dst_avail, src_avail; |
606 | 653 | dma_addr_t dst, src; | |
607 | int i; | 654 | size_t len; |
608 | struct scatterlist *sg; | ||
609 | size_t sg_used; | ||
610 | size_t hw_used; | ||
611 | struct fsl_dma_hw_addr *hw; | ||
612 | dma_addr_t dma_dst, dma_src; | ||
613 | 655 | ||
614 | if (!dchan) | 656 | /* basic sanity checks */ |
657 | if (dst_nents == 0 || src_nents == 0) | ||
615 | return NULL; | 658 | return NULL; |
616 | 659 | ||
617 | if (!dchan->private) | 660 | if (dst_sg == NULL || src_sg == NULL) |
618 | return NULL; | 661 | return NULL; |
619 | 662 | ||
620 | chan = to_fsl_chan(dchan); | 663 | /* |
621 | slave = dchan->private; | 664 | * TODO: should we check that both scatterlists have the same |
665 | * TODO: number of bytes in total? Is that really an error? | ||
666 | */ | ||
622 | 667 | ||
623 | if (list_empty(&slave->addresses)) | 668 | /* get prepared for the loop */ |
624 | return NULL; | 669 | dst_avail = sg_dma_len(dst_sg); |
670 | src_avail = sg_dma_len(src_sg); | ||
625 | 671 | ||
626 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); | 672 | /* run until we are out of scatterlist entries */ |
627 | hw_used = 0; | 673 | while (true) { |
628 | 674 | ||
629 | /* | 675 | /* create the largest transaction possible */ |
630 | * Build the hardware transaction to copy from the scatterlist to | 676 | len = min_t(size_t, src_avail, dst_avail); |
631 | * the hardware, or from the hardware to the scatterlist | 677 | len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); |
632 | * | 678 | if (len == 0) |
633 | * If you are copying from the hardware to the scatterlist and it | 679 | goto fetch; |
634 | * takes two hardware entries to fill an entire page, then both | ||
635 | * hardware entries will be coalesced into the same page | ||
636 | * | ||
637 | * If you are copying from the scatterlist to the hardware and a | ||
638 | * single page can fill two hardware entries, then the data will | ||
639 | * be read out of the page into the first hardware entry, and so on | ||
640 | */ | ||
641 | for_each_sg(sgl, sg, sg_len, i) { | ||
642 | sg_used = 0; | ||
643 | |||
644 | /* Loop until the entire scatterlist entry is used */ | ||
645 | while (sg_used < sg_dma_len(sg)) { | ||
646 | |||
647 | /* | ||
648 | * If we've used up the current hardware address/length | ||
649 | * pair, we need to load a new one | ||
650 | * | ||
651 | * This is done in a while loop so that descriptors with | ||
652 | * length == 0 will be skipped | ||
653 | */ | ||
654 | while (hw_used >= hw->length) { | ||
655 | |||
656 | /* | ||
657 | * If the current hardware entry is the last | ||
658 | * entry in the list, we're finished | ||
659 | */ | ||
660 | if (list_is_last(&hw->entry, &slave->addresses)) | ||
661 | goto finished; | ||
662 | |||
663 | /* Get the next hardware address/length pair */ | ||
664 | hw = list_entry(hw->entry.next, | ||
665 | struct fsl_dma_hw_addr, entry); | ||
666 | hw_used = 0; | ||
667 | } | ||
668 | |||
669 | /* Allocate the link descriptor from DMA pool */ | ||
670 | new = fsl_dma_alloc_descriptor(chan); | ||
671 | if (!new) { | ||
672 | dev_err(chan->dev, "No free memory for " | ||
673 | "link descriptor\n"); | ||
674 | goto fail; | ||
675 | } | ||
676 | #ifdef FSL_DMA_LD_DEBUG | ||
677 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | ||
678 | #endif | ||
679 | 680 | ||
680 | /* | 681 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; |
681 | * Calculate the maximum number of bytes to transfer, | 682 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; |
682 | * making sure it is less than the DMA controller limit | 683 | |
683 | */ | 684 | /* allocate and populate the descriptor */ |
684 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | 685 | new = fsl_dma_alloc_descriptor(chan); |
685 | hw->length - hw_used); | 686 | if (!new) { |
686 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); | 687 | chan_err(chan, "%s\n", msg_ld_oom); |
687 | 688 | goto fail; | |
688 | /* | ||
689 | * DMA_FROM_DEVICE | ||
690 | * from the hardware to the scatterlist | ||
691 | * | ||
692 | * DMA_TO_DEVICE | ||
693 | * from the scatterlist to the hardware | ||
694 | */ | ||
695 | if (direction == DMA_FROM_DEVICE) { | ||
696 | dma_src = hw->address + hw_used; | ||
697 | dma_dst = sg_dma_address(sg) + sg_used; | ||
698 | } else { | ||
699 | dma_src = sg_dma_address(sg) + sg_used; | ||
700 | dma_dst = hw->address + hw_used; | ||
701 | } | ||
702 | |||
703 | /* Fill in the descriptor */ | ||
704 | set_desc_cnt(chan, &new->hw, copy); | ||
705 | set_desc_src(chan, &new->hw, dma_src); | ||
706 | set_desc_dst(chan, &new->hw, dma_dst); | ||
707 | |||
708 | /* | ||
709 | * If this is not the first descriptor, chain the | ||
710 | * current descriptor after the previous descriptor | ||
711 | */ | ||
712 | if (!first) { | ||
713 | first = new; | ||
714 | } else { | ||
715 | set_desc_next(chan, &prev->hw, | ||
716 | new->async_tx.phys); | ||
717 | } | ||
718 | |||
719 | new->async_tx.cookie = 0; | ||
720 | async_tx_ack(&new->async_tx); | ||
721 | |||
722 | prev = new; | ||
723 | sg_used += copy; | ||
724 | hw_used += copy; | ||
725 | |||
726 | /* Insert the link descriptor into the LD ring */ | ||
727 | list_add_tail(&new->node, &first->tx_list); | ||
728 | } | 689 | } |
729 | } | ||
730 | 690 | ||
731 | finished: | 691 | set_desc_cnt(chan, &new->hw, len); |
692 | set_desc_src(chan, &new->hw, src); | ||
693 | set_desc_dst(chan, &new->hw, dst); | ||
732 | 694 | ||
733 | /* All of the hardware address/length pairs had length == 0 */ | 695 | if (!first) |
734 | if (!first || !new) | 696 | first = new; |
735 | return NULL; | 697 | else |
698 | set_desc_next(chan, &prev->hw, new->async_tx.phys); | ||
736 | 699 | ||
737 | new->async_tx.flags = flags; | 700 | new->async_tx.cookie = 0; |
738 | new->async_tx.cookie = -EBUSY; | 701 | async_tx_ack(&new->async_tx); |
702 | prev = new; | ||
739 | 703 | ||
740 | /* Set End-of-link to the last link descriptor of new list */ | 704 | /* Insert the link descriptor to the LD ring */ |
741 | set_ld_eol(chan, new); | 705 | list_add_tail(&new->node, &first->tx_list); |
742 | 706 | ||
743 | /* Enable extra controller features */ | 707 | /* update metadata */ |
744 | if (chan->set_src_loop_size) | 708 | dst_avail -= len; |
745 | chan->set_src_loop_size(chan, slave->src_loop_size); | 709 | src_avail -= len; |
746 | 710 | ||
747 | if (chan->set_dst_loop_size) | 711 | fetch: |
748 | chan->set_dst_loop_size(chan, slave->dst_loop_size); | 712 | /* fetch the next dst scatterlist entry */ |
713 | if (dst_avail == 0) { | ||
749 | 714 | ||
750 | if (chan->toggle_ext_start) | 715 | /* no more entries: we're done */ |
751 | chan->toggle_ext_start(chan, slave->external_start); | 716 | if (dst_nents == 0) |
717 | break; | ||
718 | |||
719 | /* fetch the next entry: if there are no more: done */ | ||
720 | dst_sg = sg_next(dst_sg); | ||
721 | if (dst_sg == NULL) | ||
722 | break; | ||
723 | |||
724 | dst_nents--; | ||
725 | dst_avail = sg_dma_len(dst_sg); | ||
726 | } | ||
752 | 727 | ||
753 | if (chan->toggle_ext_pause) | 728 | /* fetch the next src scatterlist entry */ |
754 | chan->toggle_ext_pause(chan, slave->external_pause); | 729 | if (src_avail == 0) { |
755 | 730 | ||
756 | if (chan->set_request_count) | 731 | /* no more entries: we're done */ |
757 | chan->set_request_count(chan, slave->request_count); | 732 | if (src_nents == 0) |
733 | break; | ||
734 | |||
735 | /* fetch the next entry: if there are no more: done */ | ||
736 | src_sg = sg_next(src_sg); | ||
737 | if (src_sg == NULL) | ||
738 | break; | ||
739 | |||
740 | src_nents--; | ||
741 | src_avail = sg_dma_len(src_sg); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | new->async_tx.flags = flags; /* client is in control of this ack */ | ||
746 | new->async_tx.cookie = -EBUSY; | ||
747 | |||
748 | /* Set End-of-link to the last link descriptor of new list */ | ||
749 | set_ld_eol(chan, new); | ||
758 | 750 | ||
759 | return &first->async_tx; | 751 | return &first->async_tx; |
760 | 752 | ||
761 | fail: | 753 | fail: |
762 | /* If first was not set, then we failed to allocate the very first | ||
763 | * descriptor, and we're done */ | ||
764 | if (!first) | 754 | if (!first) |
765 | return NULL; | 755 | return NULL; |
766 | 756 | ||
757 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
758 | return NULL; | ||
759 | } | ||
760 | |||
761 | /** | ||
762 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
763 | * @chan: DMA channel | ||
764 | * @sgl: scatterlist to transfer to/from | ||
765 | * @sg_len: number of entries in @scatterlist | ||
766 | * @direction: DMA direction | ||
767 | * @flags: DMAEngine flags | ||
768 | * | ||
769 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
770 | * DMA_SLAVE API, this gets the device-specific information from the | ||
771 | * chan->private variable. | ||
772 | */ | ||
773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
775 | enum dma_data_direction direction, unsigned long flags) | ||
776 | { | ||
767 | /* | 777 | /* |
768 | * First is set, so all of the descriptors we allocated have been added | 778 | * This operation is not supported on the Freescale DMA controller |
769 | * to first->tx_list, INCLUDING "first" itself. Therefore we | ||
770 | * must traverse the list backwards freeing each descriptor in turn | ||
771 | * | 779 | * |
772 | * We're re-using variables for the loop, oh well | 780 | * However, we need to provide the function pointer to allow the |
781 | * device_control() method to work. | ||
773 | */ | 782 | */ |
774 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
775 | return NULL; | 783 | return NULL; |
776 | } | 784 | } |
777 | 785 | ||
778 | static int fsl_dma_device_control(struct dma_chan *dchan, | 786 | static int fsl_dma_device_control(struct dma_chan *dchan, |
779 | enum dma_ctrl_cmd cmd, unsigned long arg) | 787 | enum dma_ctrl_cmd cmd, unsigned long arg) |
780 | { | 788 | { |
789 | struct dma_slave_config *config; | ||
781 | struct fsldma_chan *chan; | 790 | struct fsldma_chan *chan; |
782 | unsigned long flags; | 791 | unsigned long flags; |
783 | 792 | int size; | |
784 | /* Only supports DMA_TERMINATE_ALL */ | ||
785 | if (cmd != DMA_TERMINATE_ALL) | ||
786 | return -ENXIO; | ||
787 | 793 | ||
788 | if (!dchan) | 794 | if (!dchan) |
789 | return -EINVAL; | 795 | return -EINVAL; |
790 | 796 | ||
791 | chan = to_fsl_chan(dchan); | 797 | chan = to_fsl_chan(dchan); |
792 | 798 | ||
793 | /* Halt the DMA engine */ | 799 | switch (cmd) { |
794 | dma_halt(chan); | 800 | case DMA_TERMINATE_ALL: |
801 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
795 | 802 | ||
796 | spin_lock_irqsave(&chan->desc_lock, flags); | 803 | /* Halt the DMA engine */ |
804 | dma_halt(chan); | ||
797 | 805 | ||
798 | /* Remove and free all of the descriptors in the LD queue */ | 806 | /* Remove and free all of the descriptors in the LD queue */ |
799 | fsldma_free_desc_list(chan, &chan->ld_pending); | 807 | fsldma_free_desc_list(chan, &chan->ld_pending); |
800 | fsldma_free_desc_list(chan, &chan->ld_running); | 808 | fsldma_free_desc_list(chan, &chan->ld_running); |
809 | chan->idle = true; | ||
801 | 810 | ||
802 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 811 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
812 | return 0; | ||
803 | 813 | ||
804 | return 0; | 814 | case DMA_SLAVE_CONFIG: |
805 | } | 815 | config = (struct dma_slave_config *)arg; |
806 | 816 | ||
807 | /** | 817 | /* make sure the channel supports setting burst size */ |
808 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 818 | if (!chan->set_request_count) |
809 | * @chan : Freescale DMA channel | 819 | return -ENXIO; |
810 | * | ||
811 | * CONTEXT: hardirq | ||
812 | */ | ||
813 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) | ||
814 | { | ||
815 | struct fsl_desc_sw *desc; | ||
816 | unsigned long flags; | ||
817 | dma_cookie_t cookie; | ||
818 | 820 | ||
819 | spin_lock_irqsave(&chan->desc_lock, flags); | 821 | /* we set the controller burst size depending on direction */ |
822 | if (config->direction == DMA_TO_DEVICE) | ||
823 | size = config->dst_addr_width * config->dst_maxburst; | ||
824 | else | ||
825 | size = config->src_addr_width * config->src_maxburst; | ||
820 | 826 | ||
821 | if (list_empty(&chan->ld_running)) { | 827 | chan->set_request_count(chan, size); |
822 | dev_dbg(chan->dev, "no running descriptors\n"); | 828 | return 0; |
823 | goto out_unlock; | ||
824 | } | ||
825 | 829 | ||
826 | /* Get the last descriptor, update the cookie to that */ | 830 | case FSLDMA_EXTERNAL_START: |
827 | desc = to_fsl_desc(chan->ld_running.prev); | ||
828 | if (dma_is_idle(chan)) | ||
829 | cookie = desc->async_tx.cookie; | ||
830 | else { | ||
831 | cookie = desc->async_tx.cookie - 1; | ||
832 | if (unlikely(cookie < DMA_MIN_COOKIE)) | ||
833 | cookie = DMA_MAX_COOKIE; | ||
834 | } | ||
835 | 831 | ||
836 | chan->completed_cookie = cookie; | 832 | /* make sure the channel supports external start */ |
833 | if (!chan->toggle_ext_start) | ||
834 | return -ENXIO; | ||
837 | 835 | ||
838 | out_unlock: | 836 | chan->toggle_ext_start(chan, arg); |
839 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 837 | return 0; |
840 | } | ||
841 | 838 | ||
842 | /** | 839 | default: |
843 | * fsldma_desc_status - Check the status of a descriptor | 840 | return -ENXIO; |
844 | * @chan: Freescale DMA channel | 841 | } |
845 | * @desc: DMA SW descriptor | 842 | |
846 | * | 843 | return 0; |
847 | * This function will return the status of the given descriptor | ||
848 | */ | ||
849 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, | ||
850 | struct fsl_desc_sw *desc) | ||
851 | { | ||
852 | return dma_async_is_complete(desc->async_tx.cookie, | ||
853 | chan->completed_cookie, | ||
854 | chan->common.cookie); | ||
855 | } | 844 | } |
856 | 845 | ||
857 | /** | 846 | /** |
858 | * fsl_chan_ld_cleanup - Clean up link descriptors | 847 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor |
859 | * @chan : Freescale DMA channel | 848 | * @chan: Freescale DMA channel |
849 | * @desc: descriptor to cleanup and free | ||
860 | * | 850 | * |
861 | * This function clean up the ld_queue of DMA channel. | 851 | * This function is used on a descriptor which has been executed by the DMA |
852 | * controller. It will run any callbacks, submit any dependencies, and then | ||
853 | * free the descriptor. | ||
862 | */ | 854 | */ |
863 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) | 855 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, |
856 | struct fsl_desc_sw *desc) | ||
864 | { | 857 | { |
865 | struct fsl_desc_sw *desc, *_desc; | 858 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
866 | unsigned long flags; | 859 | struct device *dev = chan->common.device->dev; |
867 | 860 | dma_addr_t src = get_desc_src(chan, desc); | |
868 | spin_lock_irqsave(&chan->desc_lock, flags); | 861 | dma_addr_t dst = get_desc_dst(chan, desc); |
869 | 862 | u32 len = get_desc_cnt(chan, desc); | |
870 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); | 863 | |
871 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { | 864 | /* Run the link descriptor callback function */ |
872 | dma_async_tx_callback callback; | 865 | if (txd->callback) { |
873 | void *callback_param; | 866 | #ifdef FSL_DMA_LD_DEBUG |
874 | 867 | chan_dbg(chan, "LD %p callback\n", desc); | |
875 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) | 868 | #endif |
876 | break; | 869 | txd->callback(txd->callback_param); |
870 | } | ||
877 | 871 | ||
878 | /* Remove from the list of running transactions */ | 872 | /* Run any dependencies */ |
879 | list_del(&desc->node); | 873 | dma_run_dependencies(txd); |
880 | 874 | ||
881 | /* Run the link descriptor callback function */ | 875 | /* Unmap the dst buffer, if requested */ |
882 | callback = desc->async_tx.callback; | 876 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
883 | callback_param = desc->async_tx.callback_param; | 877 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
884 | if (callback) { | 878 | dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); |
885 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 879 | else |
886 | dev_dbg(chan->dev, "LD %p callback\n", desc); | 880 | dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); |
887 | callback(callback_param); | 881 | } |
888 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
889 | } | ||
890 | 882 | ||
891 | /* Run any dependencies, then free the descriptor */ | 883 | /* Unmap the src buffer, if requested */ |
892 | dma_run_dependencies(&desc->async_tx); | 884 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
893 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 885 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
886 | dma_unmap_single(dev, src, len, DMA_TO_DEVICE); | ||
887 | else | ||
888 | dma_unmap_page(dev, src, len, DMA_TO_DEVICE); | ||
894 | } | 889 | } |
895 | 890 | ||
896 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 891 | #ifdef FSL_DMA_LD_DEBUG |
892 | chan_dbg(chan, "LD %p free\n", desc); | ||
893 | #endif | ||
894 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
897 | } | 895 | } |
898 | 896 | ||
899 | /** | 897 | /** |
900 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | 898 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
901 | * @chan : Freescale DMA channel | 899 | * @chan : Freescale DMA channel |
902 | * | 900 | * |
903 | * This will make sure that any pending transactions will be run. | 901 | * HARDWARE STATE: idle |
904 | * If the DMA controller is idle, it will be started. Otherwise, | 902 | * LOCKING: must hold chan->desc_lock |
905 | * the DMA controller's interrupt handler will start any pending | ||
906 | * transactions when it becomes idle. | ||
907 | */ | 903 | */ |
908 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | 904 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) |
909 | { | 905 | { |
910 | struct fsl_desc_sw *desc; | 906 | struct fsl_desc_sw *desc; |
911 | unsigned long flags; | ||
912 | |||
913 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
914 | 907 | ||
915 | /* | 908 | /* |
916 | * If the list of pending descriptors is empty, then we | 909 | * If the list of pending descriptors is empty, then we |
917 | * don't need to do any work at all | 910 | * don't need to do any work at all |
918 | */ | 911 | */ |
919 | if (list_empty(&chan->ld_pending)) { | 912 | if (list_empty(&chan->ld_pending)) { |
920 | dev_dbg(chan->dev, "no pending LDs\n"); | 913 | chan_dbg(chan, "no pending LDs\n"); |
921 | goto out_unlock; | 914 | return; |
922 | } | 915 | } |
923 | 916 | ||
924 | /* | 917 | /* |
925 | * The DMA controller is not idle, which means the interrupt | 918 | * The DMA controller is not idle, which means that the interrupt |
926 | * handler will start any queued transactions when it runs | 919 | * handler will start any queued transactions when it runs after |
927 | * at the end of the current transaction | 920 | * this transaction finishes |
928 | */ | 921 | */ |
929 | if (!dma_is_idle(chan)) { | 922 | if (!chan->idle) { |
930 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 923 | chan_dbg(chan, "DMA controller still busy\n"); |
931 | goto out_unlock; | 924 | return; |
932 | } | 925 | } |
933 | 926 | ||
934 | /* | 927 | /* |
935 | * TODO: | ||
936 | * make sure the dma_halt() function really un-wedges the | ||
937 | * controller as much as possible | ||
938 | */ | ||
939 | dma_halt(chan); | ||
940 | |||
941 | /* | ||
942 | * If there are some link descriptors which have not been | 928 | * If there are some link descriptors which have not been |
943 | * transferred, we need to start the controller | 929 | * transferred, we need to start the controller |
944 | */ | 930 | */ |
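The new fsl_dma_prep_sg() above walks the source and destination scatterlists in parallel: each step emits a descriptor for min(src_avail, dst_avail, FSL_DMA_BCR_MAX_CNT) bytes and then advances whichever side ran dry. The same walk over two plain arrays of segment lengths, as a standalone sketch (MAX_CNT is shrunk so the demo produces several chunks):

#include <stdio.h>

#define MAX_CNT 64	/* stands in for FSL_DMA_BCR_MAX_CNT, shrunk for the demo */

static size_t min3(size_t a, size_t b, size_t c)
{
	size_t m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* segment lengths of the "dst" and "src" scatterlists */
	size_t dst_seg[] = { 100, 60 };
	size_t src_seg[] = { 40, 120 };
	size_t di = 0, si = 0;
	size_t dst_avail = dst_seg[0], src_avail = src_seg[0];

	for (;;) {
		size_t len = min3(dst_avail, src_avail, MAX_CNT);

		if (len) {
			/* the driver builds one link descriptor for 'len' bytes here */
			printf("descriptor: %zu bytes (dst seg %zu, src seg %zu)\n",
			       len, di, si);
			dst_avail -= len;
			src_avail -= len;
		}

		if (dst_avail == 0) {		/* fetch the next dst segment */
			if (++di == sizeof(dst_seg) / sizeof(dst_seg[0]))
				break;
			dst_avail = dst_seg[di];
		}
		if (src_avail == 0) {		/* fetch the next src segment */
			if (++si == sizeof(src_seg) / sizeof(src_seg[0]))
				break;
			src_avail = src_seg[si];
		}
	}
	return 0;
}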
@@ -947,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | |||
947 | * Move all elements from the queue of pending transactions | 933 | * Move all elements from the queue of pending transactions |
948 | * onto the list of running transactions | 934 | * onto the list of running transactions |
949 | */ | 935 | */ |
936 | chan_dbg(chan, "idle, starting controller\n"); | ||
950 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | 937 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); |
951 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | 938 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); |
952 | 939 | ||
953 | /* | 940 | /* |
941 | * The 85xx DMA controller doesn't clear the channel start bit | ||
942 | * automatically at the end of a transfer. Therefore we must clear | ||
943 | * it in software before starting the transfer. | ||
944 | */ | ||
945 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | ||
946 | u32 mode; | ||
947 | |||
948 | mode = DMA_IN(chan, &chan->regs->mr, 32); | ||
949 | mode &= ~FSL_DMA_MR_CS; | ||
950 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | ||
951 | } | ||
952 | |||
953 | /* | ||
954 | * Program the descriptor's address into the DMA controller, | 954 | * Program the descriptor's address into the DMA controller, |
955 | * then start the DMA transaction | 955 | * then start the DMA transaction |
956 | */ | 956 | */ |
957 | set_cdar(chan, desc->async_tx.phys); | 957 | set_cdar(chan, desc->async_tx.phys); |
958 | dma_start(chan); | 958 | get_cdar(chan); |
959 | 959 | ||
960 | out_unlock: | 960 | dma_start(chan); |
961 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 961 | chan->idle = false; |
962 | } | 962 | } |
963 | 963 | ||
964 | /** | 964 | /** |
@@ -968,7 +968,11 @@ out_unlock: | |||
968 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | 968 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
969 | { | 969 | { |
970 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 970 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
971 | unsigned long flags; | ||
972 | |||
973 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
971 | fsl_chan_xfer_ld_queue(chan); | 974 | fsl_chan_xfer_ld_queue(chan); |
975 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
972 | } | 976 | } |
973 | 977 | ||
974 | /** | 978 | /** |
@@ -980,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
980 | struct dma_tx_state *txstate) | 984 | struct dma_tx_state *txstate) |
981 | { | 985 | { |
982 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 986 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
983 | dma_cookie_t last_used; | ||
984 | dma_cookie_t last_complete; | 987 | dma_cookie_t last_complete; |
988 | dma_cookie_t last_used; | ||
989 | unsigned long flags; | ||
985 | 990 | ||
986 | fsl_chan_ld_cleanup(chan); | 991 | spin_lock_irqsave(&chan->desc_lock, flags); |
987 | 992 | ||
988 | last_used = dchan->cookie; | ||
989 | last_complete = chan->completed_cookie; | 993 | last_complete = chan->completed_cookie; |
994 | last_used = dchan->cookie; | ||
990 | 995 | ||
991 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 996 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
992 | 997 | ||
998 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
993 | return dma_async_is_complete(cookie, last_complete, last_used); | 999 | return dma_async_is_complete(cookie, last_complete, last_used); |
994 | } | 1000 | } |
995 | 1001 | ||
@@ -1000,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
1000 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | 1006 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
1001 | { | 1007 | { |
1002 | struct fsldma_chan *chan = data; | 1008 | struct fsldma_chan *chan = data; |
1003 | int update_cookie = 0; | ||
1004 | int xfer_ld_q = 0; | ||
1005 | u32 stat; | 1009 | u32 stat; |
1006 | 1010 | ||
1007 | /* save and clear the status register */ | 1011 | /* save and clear the status register */ |
1008 | stat = get_sr(chan); | 1012 | stat = get_sr(chan); |
1009 | set_sr(chan, stat); | 1013 | set_sr(chan, stat); |
1010 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); | 1014 | chan_dbg(chan, "irq: stat = 0x%x\n", stat); |
1011 | 1015 | ||
1016 | /* check that this was really our device */ | ||
1012 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 1017 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
1013 | if (!stat) | 1018 | if (!stat) |
1014 | return IRQ_NONE; | 1019 | return IRQ_NONE; |
1015 | 1020 | ||
1016 | if (stat & FSL_DMA_SR_TE) | 1021 | if (stat & FSL_DMA_SR_TE) |
1017 | dev_err(chan->dev, "Transfer Error!\n"); | 1022 | chan_err(chan, "Transfer Error!\n"); |
1018 | 1023 | ||
1019 | /* | 1024 | /* |
1020 | * Programming Error | 1025 | * Programming Error |
@@ -1022,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1022 | * trigger a PE interrupt. | 1027 | * trigger a PE interrupt. |
1023 | */ | 1028 | */ |
1024 | if (stat & FSL_DMA_SR_PE) { | 1029 | if (stat & FSL_DMA_SR_PE) { |
1025 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); | 1030 | chan_dbg(chan, "irq: Programming Error INT\n"); |
1026 | if (get_bcr(chan) == 0) { | ||
1027 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | ||
1028 | * Now, update the completed cookie, and continue the | ||
1029 | * next uncompleted transfer. | ||
1030 | */ | ||
1031 | update_cookie = 1; | ||
1032 | xfer_ld_q = 1; | ||
1033 | } | ||
1034 | stat &= ~FSL_DMA_SR_PE; | 1031 | stat &= ~FSL_DMA_SR_PE; |
1035 | } | 1032 | if (get_bcr(chan) != 0) |
1036 | 1033 | chan_err(chan, "Programming Error!\n"); | |
1037 | /* | ||
1038 | * If the link descriptor segment transfer finishes, | ||
1039 | * we will recycle the used descriptor. | ||
1040 | */ | ||
1041 | if (stat & FSL_DMA_SR_EOSI) { | ||
1042 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); | ||
1043 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", | ||
1044 | (unsigned long long)get_cdar(chan), | ||
1045 | (unsigned long long)get_ndar(chan)); | ||
1046 | stat &= ~FSL_DMA_SR_EOSI; | ||
1047 | update_cookie = 1; | ||
1048 | } | 1034 | } |
1049 | 1035 | ||
1050 | /* | 1036 | /* |
@@ -1052,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1052 | * and start the next transfer if it exists. | 1038 | * and start the next transfer if it exists. |
1053 | */ | 1039 | */ |
1054 | if (stat & FSL_DMA_SR_EOCDI) { | 1040 | if (stat & FSL_DMA_SR_EOCDI) { |
1055 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); | 1041 | chan_dbg(chan, "irq: End-of-Chain link INT\n"); |
1056 | stat &= ~FSL_DMA_SR_EOCDI; | 1042 | stat &= ~FSL_DMA_SR_EOCDI; |
1057 | update_cookie = 1; | ||
1058 | xfer_ld_q = 1; | ||
1059 | } | 1043 | } |
1060 | 1044 | ||
1061 | /* | 1045 | /* |
@@ -1064,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
1064 | * prepare next transfer. | 1048 | * prepare next transfer. |
1065 | */ | 1049 | */ |
1066 | if (stat & FSL_DMA_SR_EOLNI) { | 1050 | if (stat & FSL_DMA_SR_EOLNI) { |
1067 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); | 1051 | chan_dbg(chan, "irq: End-of-link INT\n"); |
1068 | stat &= ~FSL_DMA_SR_EOLNI; | 1052 | stat &= ~FSL_DMA_SR_EOLNI; |
1069 | xfer_ld_q = 1; | ||
1070 | } | 1053 | } |
1071 | 1054 | ||
1072 | if (update_cookie) | 1055 | /* check that the DMA controller is really idle */ |
1073 | fsl_dma_update_completed_cookie(chan); | 1056 | if (!dma_is_idle(chan)) |
1074 | if (xfer_ld_q) | 1057 | chan_err(chan, "irq: controller not idle!\n"); |
1075 | fsl_chan_xfer_ld_queue(chan); | 1058 | |
1059 | /* check that we handled all of the bits */ | ||
1076 | if (stat) | 1060 | if (stat) |
1077 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); | 1061 | chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); |
1078 | 1062 | ||
1079 | dev_dbg(chan->dev, "irq: Exit\n"); | 1063 | /* |
1064 | * Schedule the tasklet to handle all cleanup of the current | ||
1065 | * transaction. It will start a new transaction if there is | ||
1066 | * one pending. | ||
1067 | */ | ||
1080 | tasklet_schedule(&chan->tasklet); | 1068 | tasklet_schedule(&chan->tasklet); |
1069 | chan_dbg(chan, "irq: Exit\n"); | ||
1081 | return IRQ_HANDLED; | 1070 | return IRQ_HANDLED; |
1082 | } | 1071 | } |
1083 | 1072 | ||
1084 | static void dma_do_tasklet(unsigned long data) | 1073 | static void dma_do_tasklet(unsigned long data) |
1085 | { | 1074 | { |
1086 | struct fsldma_chan *chan = (struct fsldma_chan *)data; | 1075 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
1087 | fsl_chan_ld_cleanup(chan); | 1076 | struct fsl_desc_sw *desc, *_desc; |
1077 | LIST_HEAD(ld_cleanup); | ||
1078 | unsigned long flags; | ||
1079 | |||
1080 | chan_dbg(chan, "tasklet entry\n"); | ||
1081 | |||
1082 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
1083 | |||
1084 | /* update the cookie if we have some descriptors to cleanup */ | ||
1085 | if (!list_empty(&chan->ld_running)) { | ||
1086 | dma_cookie_t cookie; | ||
1087 | |||
1088 | desc = to_fsl_desc(chan->ld_running.prev); | ||
1089 | cookie = desc->async_tx.cookie; | ||
1090 | |||
1091 | chan->completed_cookie = cookie; | ||
1092 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | ||
1093 | } | ||
1094 | |||
1095 | /* | ||
1096 | * move the descriptors to a temporary list so we can drop the lock | ||
1097 | * during the entire cleanup operation | ||
1098 | */ | ||
1099 | list_splice_tail_init(&chan->ld_running, &ld_cleanup); | ||
1100 | |||
1101 | /* the hardware is now idle and ready for more */ | ||
1102 | chan->idle = true; | ||
1103 | |||
1104 | /* | ||
1105 | * Start any pending transactions automatically | ||
1106 | * | ||
1107 | * In the ideal case, we keep the DMA controller busy while we go | ||
1108 | * ahead and free the descriptors below. | ||
1109 | */ | ||
1110 | fsl_chan_xfer_ld_queue(chan); | ||
1111 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
1112 | |||
1113 | /* Run the callback for each descriptor, in order */ | ||
1114 | list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { | ||
1115 | |||
1116 | /* Remove from the list of transactions */ | ||
1117 | list_del(&desc->node); | ||
1118 | |||
1119 | /* Run all cleanup for this descriptor */ | ||
1120 | fsldma_cleanup_descriptor(chan, desc); | ||
1121 | } | ||
1122 | |||
1123 | chan_dbg(chan, "tasklet exit\n"); | ||
1088 | } | 1124 | } |
1089 | 1125 | ||
1090 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | 1126 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
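The reworked dma_do_tasklet() above shows a common locking pattern: hold desc_lock only long enough to record the completed cookie, splice ld_running onto a private list with list_splice_tail_init(), and restart the hardware, then drop the lock before running the (potentially slow) descriptor callbacks. A rough user-space analogy of that "splice under the lock, process outside it" idea, using pthreads and a hand-rolled singly linked list (every name below is illustrative, none of it is driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *running;		/* filled in by the "interrupt" side */

static void cleanup_pass(void)
{
	struct node *local, *n;

	/* take the whole list while holding the lock ... */
	pthread_mutex_lock(&lock);
	local = running;
	running = NULL;
	pthread_mutex_unlock(&lock);

	/* ... and run the slow per-descriptor work unlocked */
	while ((n = local) != NULL) {
		local = n->next;
		printf("callback for descriptor %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {	/* pretend the IRQ path queued these */
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = running;
		running = n;
	}
	cleanup_pass();
	return 0;
}

The payoff is the same as in the driver: the callbacks never run under desc_lock, and fsl_chan_xfer_ld_queue() has already restarted the controller before the first callback fires.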
@@ -1132,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) | |||
1132 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1168 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1133 | chan = fdev->chan[i]; | 1169 | chan = fdev->chan[i]; |
1134 | if (chan && chan->irq != NO_IRQ) { | 1170 | if (chan && chan->irq != NO_IRQ) { |
1135 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); | 1171 | chan_dbg(chan, "free per-channel IRQ\n"); |
1136 | free_irq(chan->irq, chan); | 1172 | free_irq(chan->irq, chan); |
1137 | } | 1173 | } |
1138 | } | 1174 | } |
@@ -1159,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) | |||
1159 | continue; | 1195 | continue; |
1160 | 1196 | ||
1161 | if (chan->irq == NO_IRQ) { | 1197 | if (chan->irq == NO_IRQ) { |
1162 | dev_err(fdev->dev, "no interrupts property defined for " | 1198 | chan_err(chan, "interrupts property missing in device tree\n"); |
1163 | "DMA channel %d. Please fix your " | ||
1164 | "device tree\n", chan->id); | ||
1165 | ret = -ENODEV; | 1199 | ret = -ENODEV; |
1166 | goto out_unwind; | 1200 | goto out_unwind; |
1167 | } | 1201 | } |
1168 | 1202 | ||
1169 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); | 1203 | chan_dbg(chan, "request per-channel IRQ\n"); |
1170 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, | 1204 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, |
1171 | "fsldma-chan", chan); | 1205 | "fsldma-chan", chan); |
1172 | if (ret) { | 1206 | if (ret) { |
1173 | dev_err(fdev->dev, "unable to request IRQ for DMA " | 1207 | chan_err(chan, "unable to request per-channel IRQ\n"); |
1174 | "channel %d\n", chan->id); | ||
1175 | goto out_unwind; | 1208 | goto out_unwind; |
1176 | } | 1209 | } |
1177 | } | 1210 | } |
@@ -1246,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1246 | 1279 | ||
1247 | fdev->chan[chan->id] = chan; | 1280 | fdev->chan[chan->id] = chan; |
1248 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | 1281 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
1282 | snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); | ||
1249 | 1283 | ||
1250 | /* Initialize the channel */ | 1284 | /* Initialize the channel */ |
1251 | dma_init(chan); | 1285 | dma_init(chan); |
@@ -1266,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1266 | spin_lock_init(&chan->desc_lock); | 1300 | spin_lock_init(&chan->desc_lock); |
1267 | INIT_LIST_HEAD(&chan->ld_pending); | 1301 | INIT_LIST_HEAD(&chan->ld_pending); |
1268 | INIT_LIST_HEAD(&chan->ld_running); | 1302 | INIT_LIST_HEAD(&chan->ld_running); |
1303 | chan->idle = true; | ||
1269 | 1304 | ||
1270 | chan->common.device = &fdev->common; | 1305 | chan->common.device = &fdev->common; |
1271 | 1306 | ||
@@ -1297,8 +1332,7 @@ static void fsl_dma_chan_remove(struct fsldma_chan *chan) | |||
1297 | kfree(chan); | 1332 | kfree(chan); |
1298 | } | 1333 | } |
1299 | 1334 | ||
1300 | static int __devinit fsldma_of_probe(struct platform_device *op, | 1335 | static int __devinit fsldma_of_probe(struct platform_device *op) |
1301 | const struct of_device_id *match) | ||
1302 | { | 1336 | { |
1303 | struct fsldma_device *fdev; | 1337 | struct fsldma_device *fdev; |
1304 | struct device_node *child; | 1338 | struct device_node *child; |
@@ -1327,17 +1361,21 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
1327 | 1361 | ||
1328 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1362 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
1329 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1363 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
1364 | dma_cap_set(DMA_SG, fdev->common.cap_mask); | ||
1330 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1365 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
1331 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1366 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
1332 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1367 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
1333 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | 1368 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
1334 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1369 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
1370 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | ||
1335 | fdev->common.device_tx_status = fsl_tx_status; | 1371 | fdev->common.device_tx_status = fsl_tx_status; |
1336 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1372 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1337 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1373 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; |
1338 | fdev->common.device_control = fsl_dma_device_control; | 1374 | fdev->common.device_control = fsl_dma_device_control; |
1339 | fdev->common.dev = &op->dev; | 1375 | fdev->common.dev = &op->dev; |
1340 | 1376 | ||
1377 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | ||
1378 | |||
1341 | dev_set_drvdata(&op->dev, fdev); | 1379 | dev_set_drvdata(&op->dev, fdev); |
1342 | 1380 | ||
1343 | /* | 1381 | /* |
@@ -1410,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = { | |||
1410 | {} | 1448 | {} |
1411 | }; | 1449 | }; |
1412 | 1450 | ||
1413 | static struct of_platform_driver fsldma_of_driver = { | 1451 | static struct platform_driver fsldma_of_driver = { |
1414 | .driver = { | 1452 | .driver = { |
1415 | .name = "fsl-elo-dma", | 1453 | .name = "fsl-elo-dma", |
1416 | .owner = THIS_MODULE, | 1454 | .owner = THIS_MODULE, |
@@ -1426,20 +1464,13 @@ static struct of_platform_driver fsldma_of_driver = { | |||
1426 | 1464 | ||
1427 | static __init int fsldma_init(void) | 1465 | static __init int fsldma_init(void) |
1428 | { | 1466 | { |
1429 | int ret; | ||
1430 | |||
1431 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1467 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); |
1432 | 1468 | return platform_driver_register(&fsldma_of_driver); | |
1433 | ret = of_register_platform_driver(&fsldma_of_driver); | ||
1434 | if (ret) | ||
1435 | pr_err("fsldma: failed to register platform driver\n"); | ||
1436 | |||
1437 | return ret; | ||
1438 | } | 1469 | } |
1439 | 1470 | ||
1440 | static void __exit fsldma_exit(void) | 1471 | static void __exit fsldma_exit(void) |
1441 | { | 1472 | { |
1442 | of_unregister_platform_driver(&fsldma_of_driver); | 1473 | platform_driver_unregister(&fsldma_of_driver); |
1443 | } | 1474 | } |
1444 | 1475 | ||
1445 | subsys_initcall(fsldma_init); | 1476 | subsys_initcall(fsldma_init); |
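One small but functional addition in fsldma_of_probe() above is dma_set_mask(&(op->dev), DMA_BIT_MASK(36)), declaring that the Elo controller can address 36 bits of physical memory. In the kernel headers DMA_BIT_MASK(n) is essentially ((1ULL << n) - 1), with a special case for n == 64; a tiny stand-alone program with a local re-definition shows what the 36-bit mask actually is:

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the kernel macro; n == 64 needs the special case */
#define BIT_MASK_N(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = BIT_MASK_N(36);

	/* a 36-bit mask covers 64 GiB of DMA-addressable memory */
	printf("36-bit DMA mask: 0x%llx (%llu GiB)\n",
	       (unsigned long long)mask,
	       (unsigned long long)((mask + 1) >> 30));
	return 0;
}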
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index cb4d6ff51597..9cb5aa57c677 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: | 4 | * Author: |
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -36,6 +36,13 @@ | |||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | 36 | #define FSL_DMA_MR_DAHE 0x00002000 |
37 | #define FSL_DMA_MR_SAHE 0x00001000 | 37 | #define FSL_DMA_MR_SAHE 0x00001000 |
38 | 38 | ||
39 | /* | ||
40 | * Bandwidth/pause control determines how many bytes a given | ||
41 | * channel is allowed to transfer before the DMA engine pauses | ||
42 | * the current channel and switches to the next channel | ||
43 | */ | ||
44 | #define FSL_DMA_MR_BWC 0x08000000 | ||
45 | |||
39 | /* Special MR definition for MPC8349 */ | 46 | /* Special MR definition for MPC8349 */ |
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | 47 | #define FSL_DMA_MR_EOTIE 0x00000080 |
41 | #define FSL_DMA_MR_PRC_RM 0x00000800 | 48 | #define FSL_DMA_MR_PRC_RM 0x00000800 |
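The new FSL_DMA_MR_BWC definition and its comment describe a bandwidth/pause control bit in the channel mode register. Programming a flag like this is an ordinary read-modify-write of the 32-bit MR value; a trivial stand-alone illustration (the register is simulated with a plain variable, not real MMIO, and the initial value is made up):

#include <stdio.h>
#include <stdint.h>

#define FSL_DMA_MR_BWC	0x08000000

int main(void)
{
	uint32_t mr = 0x00000004;	/* pretend this was read back from MR */

	mr |= FSL_DMA_MR_BWC;		/* enable bandwidth/pause control */
	printf("mr = 0x%08x\n", mr);	/* 0x08000004 */

	mr &= ~FSL_DMA_MR_BWC;		/* clear it again */
	printf("mr = 0x%08x\n", mr);	/* 0x00000004 */
	return 0;
}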
@@ -95,8 +102,8 @@ struct fsl_desc_sw { | |||
95 | } __attribute__((aligned(32))); | 102 | } __attribute__((aligned(32))); |
96 | 103 | ||
97 | struct fsldma_chan_regs { | 104 | struct fsldma_chan_regs { |
98 | u32 mr; /* 0x00 - Mode Register */ | 105 | u32 mr; /* 0x00 - Mode Register */ |
99 | u32 sr; /* 0x04 - Status Register */ | 106 | u32 sr; /* 0x04 - Status Register */ |
100 | u64 cdar; /* 0x08 - Current descriptor address register */ | 107 | u64 cdar; /* 0x08 - Current descriptor address register */ |
101 | u64 sar; /* 0x10 - Source Address Register */ | 108 | u64 sar; /* 0x10 - Source Address Register */ |
102 | u64 dar; /* 0x18 - Destination Address Register */ | 109 | u64 dar; /* 0x18 - Destination Address Register */ |
@@ -128,6 +135,7 @@ struct fsldma_device { | |||
128 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | 135 | #define FSL_DMA_CHAN_START_EXT 0x00002000 |
129 | 136 | ||
130 | struct fsldma_chan { | 137 | struct fsldma_chan { |
138 | char name[8]; /* Channel name */ | ||
131 | struct fsldma_chan_regs __iomem *regs; | 139 | struct fsldma_chan_regs __iomem *regs; |
132 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | 140 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ |
133 | spinlock_t desc_lock; /* Descriptor operation lock */ | 141 | spinlock_t desc_lock; /* Descriptor operation lock */ |
@@ -140,6 +148,7 @@ struct fsldma_chan { | |||
140 | int id; /* Raw id of this channel */ | 148 | int id; /* Raw id of this channel */ |
141 | struct tasklet_struct tasklet; | 149 | struct tasklet_struct tasklet; |
142 | u32 feature; | 150 | u32 feature; |
151 | bool idle; /* DMA controller is idle */ | ||
143 | 152 | ||
144 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); | 153 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); |
145 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); | 154 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); |
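The header also gains an 8-byte name[] field, which fsl_dma_chan_probe() fills with snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id) and which is presumably what the chan_dbg()/chan_err() messages in this patch print. snprintf() always NUL-terminates and truncates rather than overflows, which is what makes the fixed-size buffer safe; a quick stand-alone check of that behaviour:

#include <stdio.h>

int main(void)
{
	char name[8];

	for (int id = 0; id < 3; id++) {
		/* same bounded, always NUL-terminated pattern as the driver */
		snprintf(name, sizeof(name), "chan%d", id);
		printf("%s\n", name);
	}

	/* an id that does not fit is silently truncated, never overflowed */
	snprintf(name, sizeof(name), "chan%d", 1234567);
	printf("%s\n", name);		/* prints "chan123" */
	return 0;
}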
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c new file mode 100644 index 000000000000..e18eaabe92b9 --- /dev/null +++ b/drivers/dma/imx-dma.c | |||
@@ -0,0 +1,442 @@ | |||
1 | /* | ||
2 | * drivers/dma/imx-dma.c | ||
3 | * | ||
4 | * This file contains a driver for the Freescale i.MX DMA engine | ||
5 | * found on i.MX1/21/27 | ||
6 | * | ||
7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | ||
8 | * | ||
9 | * The code contained herein is licensed under the GNU General Public | ||
10 | * License. You may obtain a copy of the GNU General Public License | ||
11 | * Version 2 or later at the following locations: | ||
12 | * | ||
13 | * http://www.opensource.org/licenses/gpl-license.html | ||
14 | * http://www.gnu.org/copyleft/gpl.html | ||
15 | */ | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/dmaengine.h> | ||
26 | |||
27 | #include <asm/irq.h> | ||
28 | #include <mach/dma-v1.h> | ||
29 | #include <mach/hardware.h> | ||
30 | |||
31 | struct imxdma_channel { | ||
32 | struct imxdma_engine *imxdma; | ||
33 | unsigned int channel; | ||
34 | unsigned int imxdma_channel; | ||
35 | |||
36 | enum dma_slave_buswidth word_size; | ||
37 | dma_addr_t per_address; | ||
38 | u32 watermark_level; | ||
39 | struct dma_chan chan; | ||
40 | spinlock_t lock; | ||
41 | struct dma_async_tx_descriptor desc; | ||
42 | dma_cookie_t last_completed; | ||
43 | enum dma_status status; | ||
44 | int dma_request; | ||
45 | struct scatterlist *sg_list; | ||
46 | }; | ||
47 | |||
48 | #define MAX_DMA_CHANNELS 8 | ||
49 | |||
50 | struct imxdma_engine { | ||
51 | struct device *dev; | ||
52 | struct device_dma_parameters dma_parms; | ||
53 | struct dma_device dma_device; | ||
54 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | ||
55 | }; | ||
56 | |||
57 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) | ||
58 | { | ||
59 | return container_of(chan, struct imxdma_channel, chan); | ||
60 | } | ||
61 | |||
62 | static void imxdma_handle(struct imxdma_channel *imxdmac) | ||
63 | { | ||
64 | if (imxdmac->desc.callback) | ||
65 | imxdmac->desc.callback(imxdmac->desc.callback_param); | ||
66 | imxdmac->last_completed = imxdmac->desc.cookie; | ||
67 | } | ||
68 | |||
69 | static void imxdma_irq_handler(int channel, void *data) | ||
70 | { | ||
71 | struct imxdma_channel *imxdmac = data; | ||
72 | |||
73 | imxdmac->status = DMA_SUCCESS; | ||
74 | imxdma_handle(imxdmac); | ||
75 | } | ||
76 | |||
77 | static void imxdma_err_handler(int channel, void *data, int error) | ||
78 | { | ||
79 | struct imxdma_channel *imxdmac = data; | ||
80 | |||
81 | imxdmac->status = DMA_ERROR; | ||
82 | imxdma_handle(imxdmac); | ||
83 | } | ||
84 | |||
85 | static void imxdma_progression(int channel, void *data, | ||
86 | struct scatterlist *sg) | ||
87 | { | ||
88 | struct imxdma_channel *imxdmac = data; | ||
89 | |||
90 | imxdmac->status = DMA_SUCCESS; | ||
91 | imxdma_handle(imxdmac); | ||
92 | } | ||
93 | |||
94 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
95 | unsigned long arg) | ||
96 | { | ||
97 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
98 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
99 | int ret; | ||
100 | unsigned int mode = 0; | ||
101 | |||
102 | switch (cmd) { | ||
103 | case DMA_TERMINATE_ALL: | ||
104 | imxdmac->status = DMA_ERROR; | ||
105 | imx_dma_disable(imxdmac->imxdma_channel); | ||
106 | return 0; | ||
107 | case DMA_SLAVE_CONFIG: | ||
108 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | ||
109 | imxdmac->per_address = dmaengine_cfg->src_addr; | ||
110 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | ||
111 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | ||
112 | } else { | ||
113 | imxdmac->per_address = dmaengine_cfg->dst_addr; | ||
114 | imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
115 | imxdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
116 | } | ||
117 | |||
118 | switch (imxdmac->word_size) { | ||
119 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
120 | mode = IMX_DMA_MEMSIZE_8; | ||
121 | break; | ||
122 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
123 | mode = IMX_DMA_MEMSIZE_16; | ||
124 | break; | ||
125 | default: | ||
126 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
127 | mode = IMX_DMA_MEMSIZE_32; | ||
128 | break; | ||
129 | } | ||
130 | ret = imx_dma_config_channel(imxdmac->imxdma_channel, | ||
131 | mode | IMX_DMA_TYPE_FIFO, | ||
132 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | ||
133 | imxdmac->dma_request, 1); | ||
134 | |||
135 | if (ret) | ||
136 | return ret; | ||
137 | |||
138 | imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level); | ||
139 | |||
140 | return 0; | ||
141 | default: | ||
142 | return -ENOSYS; | ||
143 | } | ||
144 | |||
145 | return -EINVAL; | ||
146 | } | ||
147 | |||
148 | static enum dma_status imxdma_tx_status(struct dma_chan *chan, | ||
149 | dma_cookie_t cookie, | ||
150 | struct dma_tx_state *txstate) | ||
151 | { | ||
152 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
153 | dma_cookie_t last_used; | ||
154 | enum dma_status ret; | ||
155 | |||
156 | last_used = chan->cookie; | ||
157 | |||
158 | ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); | ||
159 | dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); | ||
160 | |||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) | ||
165 | { | ||
166 | dma_cookie_t cookie = imxdma->chan.cookie; | ||
167 | |||
168 | if (++cookie < 0) | ||
169 | cookie = 1; | ||
170 | |||
171 | imxdma->chan.cookie = cookie; | ||
172 | imxdma->desc.cookie = cookie; | ||
173 | |||
174 | return cookie; | ||
175 | } | ||
176 | |||
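imxdma_assign_cookie() above keeps the per-channel cookie strictly positive: dma_cookie_t is a signed 32-bit type, and when the increment would go negative the counter restarts at 1. The in-kernel code leans on the kernel's signed-wrap build settings for the ++cookie overflow; the stand-alone sketch below expresses the same rule without relying on signed overflow at all (the function name is local, not the driver's):

#include <stdio.h>
#include <limits.h>

typedef int dma_cookie_t;	/* mirrors the kernel's signed 32-bit cookie */

/* explicit-wrap version of "if (++cookie < 0) cookie = 1;" */
static dma_cookie_t next_cookie(dma_cookie_t cookie)
{
	if (cookie >= INT_MAX || cookie < 0)
		return 1;
	return cookie + 1;
}

int main(void)
{
	printf("%d -> %d\n", 41, next_cookie(41));		/* 42 */
	printf("%d -> %d\n", INT_MAX, next_cookie(INT_MAX));	/* wraps to 1, never 0 */
	return 0;
}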
177 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
178 | { | ||
179 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); | ||
180 | dma_cookie_t cookie; | ||
181 | |||
182 | spin_lock_irq(&imxdmac->lock); | ||
183 | |||
184 | cookie = imxdma_assign_cookie(imxdmac); | ||
185 | |||
186 | imx_dma_enable(imxdmac->imxdma_channel); | ||
187 | |||
188 | spin_unlock_irq(&imxdmac->lock); | ||
189 | |||
190 | return cookie; | ||
191 | } | ||
192 | |||
193 | static int imxdma_alloc_chan_resources(struct dma_chan *chan) | ||
194 | { | ||
195 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
196 | struct imx_dma_data *data = chan->private; | ||
197 | |||
198 | imxdmac->dma_request = data->dma_request; | ||
199 | |||
200 | dma_async_tx_descriptor_init(&imxdmac->desc, chan); | ||
201 | imxdmac->desc.tx_submit = imxdma_tx_submit; | ||
202 | /* txd.flags will be overwritten in prep funcs */ | ||
203 | imxdmac->desc.flags = DMA_CTRL_ACK; | ||
204 | |||
205 | imxdmac->status = DMA_SUCCESS; | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static void imxdma_free_chan_resources(struct dma_chan *chan) | ||
211 | { | ||
212 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
213 | |||
214 | imx_dma_disable(imxdmac->imxdma_channel); | ||
215 | |||
216 | if (imxdmac->sg_list) { | ||
217 | kfree(imxdmac->sg_list); | ||
218 | imxdmac->sg_list = NULL; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | ||
223 | struct dma_chan *chan, struct scatterlist *sgl, | ||
224 | unsigned int sg_len, enum dma_data_direction direction, | ||
225 | unsigned long flags) | ||
226 | { | ||
227 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
228 | struct scatterlist *sg; | ||
229 | int i, ret, dma_length = 0; | ||
230 | unsigned int dmamode; | ||
231 | |||
232 | if (imxdmac->status == DMA_IN_PROGRESS) | ||
233 | return NULL; | ||
234 | |||
235 | imxdmac->status = DMA_IN_PROGRESS; | ||
236 | |||
237 | for_each_sg(sgl, sg, sg_len, i) { | ||
238 | dma_length += sg->length; | ||
239 | } | ||
240 | |||
241 | if (direction == DMA_FROM_DEVICE) | ||
242 | dmamode = DMA_MODE_READ; | ||
243 | else | ||
244 | dmamode = DMA_MODE_WRITE; | ||
245 | |||
246 | switch (imxdmac->word_size) { | ||
247 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
248 | if (sgl->length & 3 || sgl->dma_address & 3) | ||
249 | return NULL; | ||
250 | break; | ||
251 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
252 | if (sgl->length & 1 || sgl->dma_address & 1) | ||
253 | return NULL; | ||
254 | break; | ||
255 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
256 | break; | ||
257 | default: | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
261 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | ||
262 | dma_length, imxdmac->per_address, dmamode); | ||
263 | if (ret) | ||
264 | return NULL; | ||
265 | |||
266 | return &imxdmac->desc; | ||
267 | } | ||
268 | |||
269 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | ||
270 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
271 | size_t period_len, enum dma_data_direction direction) | ||
272 | { | ||
273 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
274 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
275 | int i, ret; | ||
276 | unsigned int periods = buf_len / period_len; | ||
277 | unsigned int dmamode; | ||
278 | |||
279 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", | ||
280 | __func__, imxdmac->channel, buf_len, period_len); | ||
281 | |||
282 | if (imxdmac->status == DMA_IN_PROGRESS) | ||
283 | return NULL; | ||
284 | imxdmac->status = DMA_IN_PROGRESS; | ||
285 | |||
286 | ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, | ||
287 | imxdma_progression); | ||
288 | if (ret) { | ||
289 | dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); | ||
290 | return NULL; | ||
291 | } | ||
292 | |||
293 | if (imxdmac->sg_list) | ||
294 | kfree(imxdmac->sg_list); | ||
295 | |||
296 | imxdmac->sg_list = kcalloc(periods + 1, | ||
297 | sizeof(struct scatterlist), GFP_KERNEL); | ||
298 | if (!imxdmac->sg_list) | ||
299 | return NULL; | ||
300 | |||
301 | sg_init_table(imxdmac->sg_list, periods); | ||
302 | |||
303 | for (i = 0; i < periods; i++) { | ||
304 | imxdmac->sg_list[i].page_link = 0; | ||
305 | imxdmac->sg_list[i].offset = 0; | ||
306 | imxdmac->sg_list[i].dma_address = dma_addr; | ||
307 | imxdmac->sg_list[i].length = period_len; | ||
308 | dma_addr += period_len; | ||
309 | } | ||
310 | |||
311 | /* close the loop */ | ||
312 | imxdmac->sg_list[periods].offset = 0; | ||
313 | imxdmac->sg_list[periods].length = 0; | ||
314 | imxdmac->sg_list[periods].page_link = | ||
315 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | ||
316 | |||
317 | if (direction == DMA_FROM_DEVICE) | ||
318 | dmamode = DMA_MODE_READ; | ||
319 | else | ||
320 | dmamode = DMA_MODE_WRITE; | ||
321 | |||
322 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, | ||
323 | IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); | ||
324 | if (ret) | ||
325 | return NULL; | ||
326 | |||
327 | return &imxdmac->desc; | ||
328 | } | ||
329 | |||
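imxdma_prep_dma_cyclic() above carves the buffer into buf_len / period_len scatterlist entries and then adds a terminating zero-length entry whose page_link points back to the start of the list (the low bit marks it as a chain pointer), so the hardware loops over the periods forever. The address arithmetic itself is simple; a stand-alone sketch that prints the per-period addresses and then walks the ring the way a completion handler would (all values below are made up):

#include <stdio.h>

int main(void)
{
	unsigned long dma_addr = 0x80000000UL;	/* illustrative buffer address */
	unsigned int buf_len = 4096, period_len = 1024;
	unsigned int periods = buf_len / period_len;
	unsigned int i, tail = 0;

	/* one descriptor per period, exactly like the prep_dma_cyclic loop */
	for (i = 0; i < periods; i++)
		printf("period %u: addr=0x%lx len=%u\n",
		       i, dma_addr + (unsigned long)i * period_len, period_len);

	/* completions then advance a tail index modulo the period count,
	 * much like buf_tail in the SDMA driver further down this patch */
	for (i = 0; i < 6; i++) {
		printf("completion %u -> descriptor %u\n", i, tail);
		tail = (tail + 1) % periods;
	}
	return 0;
}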
330 | static void imxdma_issue_pending(struct dma_chan *chan) | ||
331 | { | ||
332 | /* | ||
333 | * Nothing to do. We only have a single descriptor | ||
334 | */ | ||
335 | } | ||
336 | |||
337 | static int __init imxdma_probe(struct platform_device *pdev) | ||
338 | { | ||
339 | struct imxdma_engine *imxdma; | ||
340 | int ret, i; | ||
341 | |||
342 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); | ||
343 | if (!imxdma) | ||
344 | return -ENOMEM; | ||
345 | |||
346 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | ||
347 | |||
348 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
349 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
350 | |||
351 | /* Initialize channel parameters */ | ||
352 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
353 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
354 | |||
355 | imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", | ||
356 | DMA_PRIO_MEDIUM); | ||
357 | if ((int)imxdmac->channel < 0) { | ||
358 | ret = -ENODEV; | ||
359 | goto err_init; | ||
360 | } | ||
361 | |||
362 | imx_dma_setup_handlers(imxdmac->imxdma_channel, | ||
363 | imxdma_irq_handler, imxdma_err_handler, imxdmac); | ||
364 | |||
365 | imxdmac->imxdma = imxdma; | ||
366 | spin_lock_init(&imxdmac->lock); | ||
367 | |||
368 | imxdmac->chan.device = &imxdma->dma_device; | ||
369 | imxdmac->channel = i; | ||
370 | |||
371 | /* Add the channel to the DMAC list */ | ||
372 | list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); | ||
373 | } | ||
374 | |||
375 | imxdma->dev = &pdev->dev; | ||
376 | imxdma->dma_device.dev = &pdev->dev; | ||
377 | |||
378 | imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; | ||
379 | imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources; | ||
380 | imxdma->dma_device.device_tx_status = imxdma_tx_status; | ||
381 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; | ||
382 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; | ||
383 | imxdma->dma_device.device_control = imxdma_control; | ||
384 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; | ||
385 | |||
386 | platform_set_drvdata(pdev, imxdma); | ||
387 | |||
388 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | ||
389 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | ||
390 | |||
391 | ret = dma_async_device_register(&imxdma->dma_device); | ||
392 | if (ret) { | ||
393 | dev_err(&pdev->dev, "unable to register\n"); | ||
394 | goto err_init; | ||
395 | } | ||
396 | |||
397 | return 0; | ||
398 | |||
399 | err_init: | ||
400 | while (--i >= 0) { | ||
401 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
402 | imx_dma_free(imxdmac->imxdma_channel); | ||
403 | } | ||
404 | |||
405 | kfree(imxdma); | ||
406 | return ret; | ||
407 | } | ||
408 | |||
409 | static int __exit imxdma_remove(struct platform_device *pdev) | ||
410 | { | ||
411 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); | ||
412 | int i; | ||
413 | |||
414 | dma_async_device_unregister(&imxdma->dma_device); | ||
415 | |||
416 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
417 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
418 | |||
419 | imx_dma_free(imxdmac->imxdma_channel); | ||
420 | } | ||
421 | |||
422 | kfree(imxdma); | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static struct platform_driver imxdma_driver = { | ||
428 | .driver = { | ||
429 | .name = "imx-dma", | ||
430 | }, | ||
431 | .remove = __exit_p(imxdma_remove), | ||
432 | }; | ||
433 | |||
434 | static int __init imxdma_module_init(void) | ||
435 | { | ||
436 | return platform_driver_probe(&imxdma_driver, imxdma_probe); | ||
437 | } | ||
438 | subsys_initcall(imxdma_module_init); | ||
439 | |||
440 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | ||
441 | MODULE_DESCRIPTION("i.MX dma driver"); | ||
442 | MODULE_LICENSE("GPL"); | ||
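The probe error path in imxdma_probe() above (err_init: while (--i >= 0) ...) is the usual partial-initialization rollback: only the channels that were successfully requested before the failure are released, newest first. A stand-alone sketch of that idiom; acquire() and release() are stand-ins, not driver functions:

#include <stdio.h>

#define NCHAN 8

static int acquire(int i)
{
	if (i == 5)			/* simulate a failure on channel 5 */
		return -1;
	printf("acquired %d\n", i);
	return 0;
}

static void release(int i)
{
	printf("released %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NCHAN; i++)
		if (acquire(i) < 0)
			goto err_init;
	return 0;

err_init:
	while (--i >= 0)		/* roll back only what succeeded */
		release(i);
	return 1;
}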
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c new file mode 100644 index 000000000000..b6d1455fa936 --- /dev/null +++ b/drivers/dma/imx-sdma.c | |||
@@ -0,0 +1,1380 @@ | |||
1 | /* | ||
2 | * drivers/dma/imx-sdma.c | ||
3 | * | ||
4 | * This file contains a driver for the Freescale Smart DMA engine | ||
5 | * | ||
6 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | ||
7 | * | ||
8 | * Based on code from Freescale: | ||
9 | * | ||
10 | * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. | ||
11 | * | ||
12 | * The code contained herein is licensed under the GNU General Public | ||
13 | * License. You may obtain a copy of the GNU General Public License | ||
14 | * Version 2 or later at the following locations: | ||
15 | * | ||
16 | * http://www.opensource.org/licenses/gpl-license.html | ||
17 | * http://www.gnu.org/copyleft/gpl.html | ||
18 | */ | ||
19 | |||
20 | #include <linux/init.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/wait.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/semaphore.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/device.h> | ||
30 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/firmware.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/dmaengine.h> | ||
35 | |||
36 | #include <asm/irq.h> | ||
37 | #include <mach/sdma.h> | ||
38 | #include <mach/dma.h> | ||
39 | #include <mach/hardware.h> | ||
40 | |||
41 | /* SDMA registers */ | ||
42 | #define SDMA_H_C0PTR 0x000 | ||
43 | #define SDMA_H_INTR 0x004 | ||
44 | #define SDMA_H_STATSTOP 0x008 | ||
45 | #define SDMA_H_START 0x00c | ||
46 | #define SDMA_H_EVTOVR 0x010 | ||
47 | #define SDMA_H_DSPOVR 0x014 | ||
48 | #define SDMA_H_HOSTOVR 0x018 | ||
49 | #define SDMA_H_EVTPEND 0x01c | ||
50 | #define SDMA_H_DSPENBL 0x020 | ||
51 | #define SDMA_H_RESET 0x024 | ||
52 | #define SDMA_H_EVTERR 0x028 | ||
53 | #define SDMA_H_INTRMSK 0x02c | ||
54 | #define SDMA_H_PSW 0x030 | ||
55 | #define SDMA_H_EVTERRDBG 0x034 | ||
56 | #define SDMA_H_CONFIG 0x038 | ||
57 | #define SDMA_ONCE_ENB 0x040 | ||
58 | #define SDMA_ONCE_DATA 0x044 | ||
59 | #define SDMA_ONCE_INSTR 0x048 | ||
60 | #define SDMA_ONCE_STAT 0x04c | ||
61 | #define SDMA_ONCE_CMD 0x050 | ||
62 | #define SDMA_EVT_MIRROR 0x054 | ||
63 | #define SDMA_ILLINSTADDR 0x058 | ||
64 | #define SDMA_CHN0ADDR 0x05c | ||
65 | #define SDMA_ONCE_RTB 0x060 | ||
66 | #define SDMA_XTRIG_CONF1 0x070 | ||
67 | #define SDMA_XTRIG_CONF2 0x074 | ||
68 | #define SDMA_CHNENBL0_V2 0x200 | ||
69 | #define SDMA_CHNENBL0_V1 0x080 | ||
70 | #define SDMA_CHNPRI_0 0x100 | ||
71 | |||
72 | /* | ||
73 | * Buffer descriptor status values. | ||
74 | */ | ||
75 | #define BD_DONE 0x01 | ||
76 | #define BD_WRAP 0x02 | ||
77 | #define BD_CONT 0x04 | ||
78 | #define BD_INTR 0x08 | ||
79 | #define BD_RROR 0x10 | ||
80 | #define BD_LAST 0x20 | ||
81 | #define BD_EXTD 0x80 | ||
82 | |||
83 | /* | ||
84 | * Data Node descriptor status values. | ||
85 | */ | ||
86 | #define DND_END_OF_FRAME 0x80 | ||
87 | #define DND_END_OF_XFER 0x40 | ||
88 | #define DND_DONE 0x20 | ||
89 | #define DND_UNUSED 0x01 | ||
90 | |||
91 | /* | ||
92 | * IPCV2 descriptor status values. | ||
93 | */ | ||
94 | #define BD_IPCV2_END_OF_FRAME 0x40 | ||
95 | |||
96 | #define IPCV2_MAX_NODES 50 | ||
97 | /* | ||
98 | * Error bit set in the CCB status field by the SDMA, | ||
99 | * in setbd routine, in case of a transfer error | ||
100 | */ | ||
101 | #define DATA_ERROR 0x10000000 | ||
102 | |||
103 | /* | ||
104 | * Buffer descriptor commands. | ||
105 | */ | ||
106 | #define C0_ADDR 0x01 | ||
107 | #define C0_LOAD 0x02 | ||
108 | #define C0_DUMP 0x03 | ||
109 | #define C0_SETCTX 0x07 | ||
110 | #define C0_GETCTX 0x03 | ||
111 | #define C0_SETDM 0x01 | ||
112 | #define C0_SETPM 0x04 | ||
113 | #define C0_GETDM 0x02 | ||
114 | #define C0_GETPM 0x08 | ||
115 | /* | ||
116 | * Change endianness indicator in the BD command field | ||
117 | */ | ||
118 | #define CHANGE_ENDIANNESS 0x80 | ||
119 | |||
120 | /* | ||
121 | * Mode/Count of data node descriptors - IPCv2 | ||
122 | */ | ||
123 | struct sdma_mode_count { | ||
124 | u32 count : 16; /* size of the buffer pointed by this BD */ | ||
125 | u32 status : 8; /* E,R,I,C,W,D status bits stored here */ | ||
126 | u32 command : 8; /* command mostly used for channel 0 */ | ||
126 | u32 command : 8; /* command mostly used for channel 0 */ | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * Buffer descriptor | ||
131 | */ | ||
132 | struct sdma_buffer_descriptor { | ||
133 | struct sdma_mode_count mode; | ||
134 | u32 buffer_addr; /* address of the buffer described */ | ||
135 | u32 ext_buffer_addr; /* extended buffer address */ | ||
136 | } __attribute__ ((packed)); | ||
137 | |||
138 | /** | ||
139 | * struct sdma_channel_control - Channel control Block | ||
140 | * | ||
141 | * @current_bd_ptr current buffer descriptor processed | ||
142 | * @base_bd_ptr first element of buffer descriptor array | ||
143 | * @unused padding. The SDMA engine expects an array of 128 byte | ||
144 | * control blocks | ||
145 | */ | ||
146 | struct sdma_channel_control { | ||
147 | u32 current_bd_ptr; | ||
148 | u32 base_bd_ptr; | ||
149 | u32 unused[2]; | ||
150 | } __attribute__ ((packed)); | ||
151 | |||
152 | /** | ||
153 | * struct sdma_state_registers - SDMA context for a channel | ||
154 | * | ||
155 | * @pc: program counter | ||
156 | * @t: test bit: status of arithmetic & test instruction | ||
157 | * @rpc: return program counter | ||
158 | * @sf: source fault while loading data | ||
159 | * @spc: loop start program counter | ||
160 | * @df: destination fault while storing data | ||
161 | * @epc: loop end program counter | ||
162 | * @lm: loop mode | ||
163 | */ | ||
164 | struct sdma_state_registers { | ||
165 | u32 pc :14; | ||
166 | u32 unused1: 1; | ||
167 | u32 t : 1; | ||
168 | u32 rpc :14; | ||
169 | u32 unused0: 1; | ||
170 | u32 sf : 1; | ||
171 | u32 spc :14; | ||
172 | u32 unused2: 1; | ||
173 | u32 df : 1; | ||
174 | u32 epc :14; | ||
175 | u32 lm : 2; | ||
176 | } __attribute__ ((packed)); | ||
177 | |||
178 | /** | ||
179 | * struct sdma_context_data - sdma context specific to a channel | ||
180 | * | ||
181 | * @channel_state: channel state bits | ||
182 | * @gReg: general registers | ||
183 | * @mda: burst dma destination address register | ||
184 | * @msa: burst dma source address register | ||
185 | * @ms: burst dma status register | ||
186 | * @md: burst dma data register | ||
187 | * @pda: peripheral dma destination address register | ||
188 | * @psa: peripheral dma source address register | ||
189 | * @ps: peripheral dma status register | ||
190 | * @pd: peripheral dma data register | ||
191 | * @ca: CRC polynomial register | ||
192 | * @cs: CRC accumulator register | ||
193 | * @dda: dedicated core destination address register | ||
194 | * @dsa: dedicated core source address register | ||
195 | * @ds: dedicated core status register | ||
196 | * @dd: dedicated core data register | ||
197 | */ | ||
198 | struct sdma_context_data { | ||
199 | struct sdma_state_registers channel_state; | ||
200 | u32 gReg[8]; | ||
201 | u32 mda; | ||
202 | u32 msa; | ||
203 | u32 ms; | ||
204 | u32 md; | ||
205 | u32 pda; | ||
206 | u32 psa; | ||
207 | u32 ps; | ||
208 | u32 pd; | ||
209 | u32 ca; | ||
210 | u32 cs; | ||
211 | u32 dda; | ||
212 | u32 dsa; | ||
213 | u32 ds; | ||
214 | u32 dd; | ||
215 | u32 scratch0; | ||
216 | u32 scratch1; | ||
217 | u32 scratch2; | ||
218 | u32 scratch3; | ||
219 | u32 scratch4; | ||
220 | u32 scratch5; | ||
221 | u32 scratch6; | ||
222 | u32 scratch7; | ||
223 | } __attribute__ ((packed)); | ||
224 | |||
225 | #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) | ||
226 | |||
227 | struct sdma_engine; | ||
228 | |||
229 | /** | ||
230 | * struct sdma_channel - housekeeping for a SDMA channel | ||
231 | * | ||
232 | * @sdma pointer to the SDMA engine for this channel | ||
233 | * @channel the channel number, matches dmaengine chan_id + 1 | ||
234 | * @direction transfer type. Needed for setting SDMA script | ||
235 | * @peripheral_type Peripheral type. Needed for setting SDMA script | ||
236 | * @event_id0 aka dma request line | ||
237 | * @event_id1 for channels that use 2 events | ||
238 | * @word_size peripheral access size | ||
239 | * @buf_tail ID of the buffer that was processed | ||
240 | * @done channel completion | ||
241 | * @num_bd max NUM_BD. number of descriptors currently being handled | ||
242 | */ | ||
243 | struct sdma_channel { | ||
244 | struct sdma_engine *sdma; | ||
245 | unsigned int channel; | ||
246 | enum dma_data_direction direction; | ||
247 | enum sdma_peripheral_type peripheral_type; | ||
248 | unsigned int event_id0; | ||
249 | unsigned int event_id1; | ||
250 | enum dma_slave_buswidth word_size; | ||
251 | unsigned int buf_tail; | ||
252 | struct completion done; | ||
253 | unsigned int num_bd; | ||
254 | struct sdma_buffer_descriptor *bd; | ||
255 | dma_addr_t bd_phys; | ||
256 | unsigned int pc_from_device, pc_to_device; | ||
257 | unsigned long flags; | ||
258 | dma_addr_t per_address; | ||
259 | u32 event_mask0, event_mask1; | ||
260 | u32 watermark_level; | ||
261 | u32 shp_addr, per_addr; | ||
262 | struct dma_chan chan; | ||
263 | spinlock_t lock; | ||
264 | struct dma_async_tx_descriptor desc; | ||
265 | dma_cookie_t last_completed; | ||
266 | enum dma_status status; | ||
267 | }; | ||
268 | |||
269 | #define IMX_DMA_SG_LOOP (1 << 0) | ||
270 | |||
271 | #define MAX_DMA_CHANNELS 32 | ||
272 | #define MXC_SDMA_DEFAULT_PRIORITY 1 | ||
273 | #define MXC_SDMA_MIN_PRIORITY 1 | ||
274 | #define MXC_SDMA_MAX_PRIORITY 7 | ||
275 | |||
276 | #define SDMA_FIRMWARE_MAGIC 0x414d4453 | ||
277 | |||
278 | /** | ||
279 | * struct sdma_firmware_header - Layout of the firmware image | ||
280 | * | ||
281 | * @magic "SDMA" | ||
282 | * @version_major increased whenever layout of struct sdma_script_start_addrs | ||
283 | * changes. | ||
284 | * @version_minor firmware minor version (for binary compatible changes) | ||
285 | * @script_addrs_start offset of struct sdma_script_start_addrs in this image | ||
286 | * @num_script_addrs Number of script addresses in this image | ||
287 | * @ram_code_start offset of SDMA ram image in this firmware image | ||
288 | * @ram_code_size size of SDMA ram image | ||
289 | * @script_addrs Stores the start address of the SDMA scripts | ||
290 | * (in SDMA memory space) | ||
291 | */ | ||
292 | struct sdma_firmware_header { | ||
293 | u32 magic; | ||
294 | u32 version_major; | ||
295 | u32 version_minor; | ||
296 | u32 script_addrs_start; | ||
297 | u32 num_script_addrs; | ||
298 | u32 ram_code_start; | ||
299 | u32 ram_code_size; | ||
300 | }; | ||
301 | |||
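SDMA_FIRMWARE_MAGIC (0x414d4453) is just the ASCII string "SDMA" read as a little-endian 32-bit word, which lets the firmware loader reject an image before it trusts any of the offsets in sdma_firmware_header. A stand-alone check of that encoding (only the magic bytes of a header are mocked up here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SDMA_FIRMWARE_MAGIC 0x414d4453

int main(void)
{
	const unsigned char image[] = { 'S', 'D', 'M', 'A' };	/* first 4 header bytes */
	uint32_t magic;

	/* on a little-endian CPU this reproduces the 0x414d4453 constant */
	memcpy(&magic, image, sizeof(magic));
	printf("magic = 0x%08x -> %s\n", (unsigned int)magic,
	       magic == SDMA_FIRMWARE_MAGIC ? "valid SDMA image" : "not SDMA firmware");
	return 0;
}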
302 | struct sdma_engine { | ||
303 | struct device *dev; | ||
304 | struct device_dma_parameters dma_parms; | ||
305 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | ||
306 | struct sdma_channel_control *channel_control; | ||
307 | void __iomem *regs; | ||
308 | unsigned int version; | ||
309 | unsigned int num_events; | ||
310 | struct sdma_context_data *context; | ||
311 | dma_addr_t context_phys; | ||
312 | struct dma_device dma_device; | ||
313 | struct clk *clk; | ||
314 | struct sdma_script_start_addrs *script_addrs; | ||
315 | }; | ||
316 | |||
317 | #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ | ||
318 | #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ | ||
319 | #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ | ||
320 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ | ||
321 | |||
322 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | ||
323 | { | ||
324 | u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1); | ||
325 | |||
326 | return chnenbl0 + event * 4; | ||
327 | } | ||
328 | |||
329 | static int sdma_config_ownership(struct sdma_channel *sdmac, | ||
330 | bool event_override, bool mcu_override, bool dsp_override) | ||
331 | { | ||
332 | struct sdma_engine *sdma = sdmac->sdma; | ||
333 | int channel = sdmac->channel; | ||
334 | u32 evt, mcu, dsp; | ||
335 | |||
336 | if (event_override && mcu_override && dsp_override) | ||
337 | return -EINVAL; | ||
338 | |||
339 | evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); | ||
340 | mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); | ||
341 | dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); | ||
342 | |||
343 | if (dsp_override) | ||
344 | dsp &= ~(1 << channel); | ||
345 | else | ||
346 | dsp |= (1 << channel); | ||
347 | |||
348 | if (event_override) | ||
349 | evt &= ~(1 << channel); | ||
350 | else | ||
351 | evt |= (1 << channel); | ||
352 | |||
353 | if (mcu_override) | ||
354 | mcu &= ~(1 << channel); | ||
355 | else | ||
356 | mcu |= (1 << channel); | ||
357 | |||
358 | __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); | ||
359 | __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); | ||
360 | __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * sdma_run_channel - run a channel and wait till it's done | ||
367 | */ | ||
368 | static int sdma_run_channel(struct sdma_channel *sdmac) | ||
369 | { | ||
370 | struct sdma_engine *sdma = sdmac->sdma; | ||
371 | int channel = sdmac->channel; | ||
372 | int ret; | ||
373 | |||
374 | init_completion(&sdmac->done); | ||
375 | |||
376 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
377 | |||
378 | ret = wait_for_completion_timeout(&sdmac->done, HZ); | ||
379 | |||
380 | return ret ? 0 : -ETIMEDOUT; | ||
381 | } | ||
382 | |||
383 | static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | ||
384 | u32 address) | ||
385 | { | ||
386 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | ||
387 | void *buf_virt; | ||
388 | dma_addr_t buf_phys; | ||
389 | int ret; | ||
390 | |||
391 | buf_virt = dma_alloc_coherent(NULL, | ||
392 | size, | ||
393 | &buf_phys, GFP_KERNEL); | ||
394 | if (!buf_virt) | ||
395 | return -ENOMEM; | ||
396 | |||
397 | bd0->mode.command = C0_SETPM; | ||
398 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; | ||
399 | bd0->mode.count = size / 2; | ||
400 | bd0->buffer_addr = buf_phys; | ||
401 | bd0->ext_buffer_addr = address; | ||
402 | |||
403 | memcpy(buf_virt, buf, size); | ||
404 | |||
405 | ret = sdma_run_channel(&sdma->channel[0]); | ||
406 | |||
407 | dma_free_coherent(NULL, size, buf_virt, buf_phys); | ||
408 | |||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) | ||
413 | { | ||
414 | struct sdma_engine *sdma = sdmac->sdma; | ||
415 | int channel = sdmac->channel; | ||
416 | u32 val; | ||
417 | u32 chnenbl = chnenbl_ofs(sdma, event); | ||
418 | |||
419 | val = __raw_readl(sdma->regs + chnenbl); | ||
420 | val |= (1 << channel); | ||
421 | __raw_writel(val, sdma->regs + chnenbl); | ||
422 | } | ||
423 | |||
424 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | ||
425 | { | ||
426 | struct sdma_engine *sdma = sdmac->sdma; | ||
427 | int channel = sdmac->channel; | ||
428 | u32 chnenbl = chnenbl_ofs(sdma, event); | ||
429 | u32 val; | ||
430 | |||
431 | val = __raw_readl(sdma->regs + chnenbl); | ||
432 | val &= ~(1 << channel); | ||
433 | __raw_writel(val, sdma->regs + chnenbl); | ||
434 | } | ||
435 | |||
436 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | ||
437 | { | ||
438 | struct sdma_buffer_descriptor *bd; | ||
439 | |||
440 | /* | ||
441 | * loop mode. Iterate over descriptors, re-setup them and | ||
442 | * call callback function. | ||
443 | */ | ||
444 | while (1) { | ||
445 | bd = &sdmac->bd[sdmac->buf_tail]; | ||
446 | |||
447 | if (bd->mode.status & BD_DONE) | ||
448 | break; | ||
449 | |||
450 | if (bd->mode.status & BD_RROR) | ||
451 | sdmac->status = DMA_ERROR; | ||
452 | else | ||
453 | sdmac->status = DMA_IN_PROGRESS; | ||
454 | |||
455 | bd->mode.status |= BD_DONE; | ||
456 | sdmac->buf_tail++; | ||
457 | sdmac->buf_tail %= sdmac->num_bd; | ||
458 | |||
459 | if (sdmac->desc.callback) | ||
460 | sdmac->desc.callback(sdmac->desc.callback_param); | ||
461 | } | ||
462 | } | ||
463 | |||
464 | static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | ||
465 | { | ||
466 | struct sdma_buffer_descriptor *bd; | ||
467 | int i, error = 0; | ||
468 | |||
469 | /* | ||
470 | * non loop mode. Iterate over all descriptors, collect | ||
471 | * errors and call callback function | ||
472 | */ | ||
473 | for (i = 0; i < sdmac->num_bd; i++) { | ||
474 | bd = &sdmac->bd[i]; | ||
475 | |||
476 | if (bd->mode.status & (BD_DONE | BD_RROR)) | ||
477 | error = -EIO; | ||
478 | } | ||
479 | |||
480 | if (error) | ||
481 | sdmac->status = DMA_ERROR; | ||
482 | else | ||
483 | sdmac->status = DMA_SUCCESS; | ||
484 | |||
485 | if (sdmac->desc.callback) | ||
486 | sdmac->desc.callback(sdmac->desc.callback_param); | ||
487 | sdmac->last_completed = sdmac->desc.cookie; | ||
488 | } | ||
489 | |||
490 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | ||
491 | { | ||
492 | complete(&sdmac->done); | ||
493 | |||
494 | /* not interested in channel 0 interrupts */ | ||
495 | if (sdmac->channel == 0) | ||
496 | return; | ||
497 | |||
498 | if (sdmac->flags & IMX_DMA_SG_LOOP) | ||
499 | sdma_handle_channel_loop(sdmac); | ||
500 | else | ||
501 | mxc_sdma_handle_channel_normal(sdmac); | ||
502 | } | ||
503 | |||
504 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) | ||
505 | { | ||
506 | struct sdma_engine *sdma = dev_id; | ||
507 | u32 stat; | ||
508 | |||
509 | stat = __raw_readl(sdma->regs + SDMA_H_INTR); | ||
510 | __raw_writel(stat, sdma->regs + SDMA_H_INTR); | ||
511 | |||
512 | while (stat) { | ||
513 | int channel = fls(stat) - 1; | ||
514 | struct sdma_channel *sdmac = &sdma->channel[channel]; | ||
515 | |||
516 | mxc_sdma_handle_channel(sdmac); | ||
517 | |||
518 | stat &= ~(1 << channel); | ||
519 | } | ||
520 | |||
521 | return IRQ_HANDLED; | ||
522 | } | ||
523 | |||
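sdma_int_handler() above acknowledges all pending bits by writing the status word back to SDMA_H_INTR, then services the channels one at a time, always taking the highest-numbered set bit via fls() (the kernel helper returns the 1-based index of the most significant set bit, or 0 for 0). A stand-alone version of that walk using the GCC/Clang builtin; the status value is invented:

#include <stdio.h>

/* 1-based "find last set", like the kernel's fls(); returns 0 for 0 */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int stat = (1u << 14) | (1u << 3) | (1u << 0);	/* channels 14, 3, 0 pending */

	while (stat) {
		int channel = fls32(stat) - 1;

		printf("servicing channel %d\n", channel);	/* 14, then 3, then 0 */
		stat &= ~(1u << channel);
	}
	return 0;
}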
524 | /* | ||
525 | * sets the pc of SDMA script according to the peripheral type | ||
526 | */ | ||
527 | static void sdma_get_pc(struct sdma_channel *sdmac, | ||
528 | enum sdma_peripheral_type peripheral_type) | ||
529 | { | ||
530 | struct sdma_engine *sdma = sdmac->sdma; | ||
531 | int per_2_emi = 0, emi_2_per = 0; | ||
532 | /* | ||
533 | * These are needed once we start to support transfers between | ||
534 | * two peripherals or memory-to-memory transfers | ||
535 | */ | ||
536 | int per_2_per = 0, emi_2_emi = 0; | ||
537 | |||
538 | sdmac->pc_from_device = 0; | ||
539 | sdmac->pc_to_device = 0; | ||
540 | |||
541 | switch (peripheral_type) { | ||
542 | case IMX_DMATYPE_MEMORY: | ||
543 | emi_2_emi = sdma->script_addrs->ap_2_ap_addr; | ||
544 | break; | ||
545 | case IMX_DMATYPE_DSP: | ||
546 | emi_2_per = sdma->script_addrs->bp_2_ap_addr; | ||
547 | per_2_emi = sdma->script_addrs->ap_2_bp_addr; | ||
548 | break; | ||
549 | case IMX_DMATYPE_FIRI: | ||
550 | per_2_emi = sdma->script_addrs->firi_2_mcu_addr; | ||
551 | emi_2_per = sdma->script_addrs->mcu_2_firi_addr; | ||
552 | break; | ||
553 | case IMX_DMATYPE_UART: | ||
554 | per_2_emi = sdma->script_addrs->uart_2_mcu_addr; | ||
555 | emi_2_per = sdma->script_addrs->mcu_2_app_addr; | ||
556 | break; | ||
557 | case IMX_DMATYPE_UART_SP: | ||
558 | per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr; | ||
559 | emi_2_per = sdma->script_addrs->mcu_2_shp_addr; | ||
560 | break; | ||
561 | case IMX_DMATYPE_ATA: | ||
562 | per_2_emi = sdma->script_addrs->ata_2_mcu_addr; | ||
563 | emi_2_per = sdma->script_addrs->mcu_2_ata_addr; | ||
564 | break; | ||
565 | case IMX_DMATYPE_CSPI: | ||
566 | case IMX_DMATYPE_EXT: | ||
567 | case IMX_DMATYPE_SSI: | ||
568 | per_2_emi = sdma->script_addrs->app_2_mcu_addr; | ||
569 | emi_2_per = sdma->script_addrs->mcu_2_app_addr; | ||
570 | break; | ||
571 | case IMX_DMATYPE_SSI_SP: | ||
572 | case IMX_DMATYPE_MMC: | ||
573 | case IMX_DMATYPE_SDHC: | ||
574 | case IMX_DMATYPE_CSPI_SP: | ||
575 | case IMX_DMATYPE_ESAI: | ||
576 | case IMX_DMATYPE_MSHC_SP: | ||
577 | per_2_emi = sdma->script_addrs->shp_2_mcu_addr; | ||
578 | emi_2_per = sdma->script_addrs->mcu_2_shp_addr; | ||
579 | break; | ||
580 | case IMX_DMATYPE_ASRC: | ||
581 | per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; | ||
582 | emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; | ||
583 | per_2_per = sdma->script_addrs->per_2_per_addr; | ||
584 | break; | ||
585 | case IMX_DMATYPE_MSHC: | ||
586 | per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; | ||
587 | emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; | ||
588 | break; | ||
589 | case IMX_DMATYPE_CCM: | ||
590 | per_2_emi = sdma->script_addrs->dptc_dvfs_addr; | ||
591 | break; | ||
592 | case IMX_DMATYPE_SPDIF: | ||
593 | per_2_emi = sdma->script_addrs->spdif_2_mcu_addr; | ||
594 | emi_2_per = sdma->script_addrs->mcu_2_spdif_addr; | ||
595 | break; | ||
596 | case IMX_DMATYPE_IPU_MEMORY: | ||
597 | emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; | ||
598 | break; | ||
599 | default: | ||
600 | break; | ||
601 | } | ||
602 | |||
603 | sdmac->pc_from_device = per_2_emi; | ||
604 | sdmac->pc_to_device = emi_2_per; | ||
605 | } | ||
606 | |||
607 | static int sdma_load_context(struct sdma_channel *sdmac) | ||
608 | { | ||
609 | struct sdma_engine *sdma = sdmac->sdma; | ||
610 | int channel = sdmac->channel; | ||
611 | int load_address; | ||
612 | struct sdma_context_data *context = sdma->context; | ||
613 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | ||
614 | int ret; | ||
615 | |||
616 | if (sdmac->direction == DMA_FROM_DEVICE) { | ||
617 | load_address = sdmac->pc_from_device; | ||
618 | } else { | ||
619 | load_address = sdmac->pc_to_device; | ||
620 | } | ||
621 | |||
622 | if (load_address < 0) | ||
623 | return load_address; | ||
624 | |||
625 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); | ||
626 | dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); | ||
627 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); | ||
628 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); | ||
629 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); | ||
630 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); | ||
631 | |||
632 | memset(context, 0, sizeof(*context)); | ||
633 | context->channel_state.pc = load_address; | ||
634 | |||
635 | /* Pass the event masks, the peripheral base address and the | ||
636 | * watermark level to the channel via its context | ||
637 | */ | ||
638 | context->gReg[0] = sdmac->event_mask1; | ||
639 | context->gReg[1] = sdmac->event_mask0; | ||
640 | context->gReg[2] = sdmac->per_addr; | ||
641 | context->gReg[6] = sdmac->shp_addr; | ||
642 | context->gReg[7] = sdmac->watermark_level; | ||
643 | |||
644 | bd0->mode.command = C0_SETDM; | ||
645 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; | ||
646 | bd0->mode.count = sizeof(*context) / 4; | ||
647 | bd0->buffer_addr = sdma->context_phys; | ||
648 | bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; | ||
649 | |||
650 | ret = sdma_run_channel(&sdma->channel[0]); | ||
651 | |||
652 | return ret; | ||
653 | } | ||
654 | |||
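sdma_load_context() above ships the channel context through channel 0: bd0->mode.count is the context size in 32-bit words and bd0->ext_buffer_addr is its destination, word offset 2048 plus one context-sized slot per channel. With the packed layout shown earlier the context works out to 32 words (2 for the state registers, 8 general registers, 14 DMA registers, 8 scratch words), so the slots land at 2048, 2080, 2112, and so on; a quick stand-alone check of that arithmetic:

#include <stdio.h>

#define SDMA_CONTEXT_BYTES 128	/* 32 x u32, per the struct sdma_context_data layout above */

int main(void)
{
	unsigned int words = SDMA_CONTEXT_BYTES / 4;	/* what goes into bd0->mode.count */

	for (int channel = 0; channel < 4; channel++)
		printf("channel %d context at word offset %u\n",
		       channel, 2048 + words * channel);
	return 0;
}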
655 | static void sdma_disable_channel(struct sdma_channel *sdmac) | ||
656 | { | ||
657 | struct sdma_engine *sdma = sdmac->sdma; | ||
658 | int channel = sdmac->channel; | ||
659 | |||
660 | __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); | ||
661 | sdmac->status = DMA_ERROR; | ||
662 | } | ||
663 | |||
664 | static int sdma_config_channel(struct sdma_channel *sdmac) | ||
665 | { | ||
666 | int ret; | ||
667 | |||
668 | sdma_disable_channel(sdmac); | ||
669 | |||
670 | sdmac->event_mask0 = 0; | ||
671 | sdmac->event_mask1 = 0; | ||
672 | sdmac->shp_addr = 0; | ||
673 | sdmac->per_addr = 0; | ||
674 | |||
675 | if (sdmac->event_id0) { | ||
676 | if (sdmac->event_id0 > 32) | ||
677 | return -EINVAL; | ||
678 | sdma_event_enable(sdmac, sdmac->event_id0); | ||
679 | } | ||
680 | |||
681 | switch (sdmac->peripheral_type) { | ||
682 | case IMX_DMATYPE_DSP: | ||
683 | sdma_config_ownership(sdmac, false, true, true); | ||
684 | break; | ||
685 | case IMX_DMATYPE_MEMORY: | ||
686 | sdma_config_ownership(sdmac, false, true, false); | ||
687 | break; | ||
688 | default: | ||
689 | sdma_config_ownership(sdmac, true, true, false); | ||
690 | break; | ||
691 | } | ||
692 | |||
693 | sdma_get_pc(sdmac, sdmac->peripheral_type); | ||
694 | |||
695 | if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && | ||
696 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { | ||
697 | /* Handle multiple event channels differently */ | ||
698 | if (sdmac->event_id1) { | ||
699 | sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); | ||
700 | if (sdmac->event_id1 > 31) | ||
701 | sdmac->watermark_level |= 1 << 31; | ||
702 | sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); | ||
703 | if (sdmac->event_id0 > 31) | ||
704 | sdmac->watermark_level |= 1 << 30; | ||
705 | } else { | ||
706 | sdmac->event_mask0 = 1 << sdmac->event_id0; | ||
707 | sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); | ||
708 | } | ||
709 | /* Watermark Level */ | ||
710 | sdmac->watermark_level |= sdmac->watermark_level; | ||
711 | /* Address */ | ||
712 | sdmac->shp_addr = sdmac->per_address; | ||
713 | } else { | ||
714 | sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ | ||
715 | } | ||
716 | |||
717 | ret = sdma_load_context(sdmac); | ||
718 | |||
719 | return ret; | ||
720 | } | ||
721 | |||
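For peripherals with real DMA request lines, sdma_config_channel() above packs the event numbers into two 32-bit masks: in the dual-event branch the bit index is event_id % 32, and for IDs above 31 an extra flag (bit 31 or 30) in watermark_level records that the event lives in the upper bank. A stand-alone reproduction of just that arithmetic, with arbitrarily chosen event IDs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int event_id0 = 35, event_id1 = 7;	/* made-up request lines */
	uint32_t event_mask0, event_mask1, watermark_level = 0;

	event_mask1 = 1u << (event_id1 % 32);
	if (event_id1 > 31)
		watermark_level |= 1u << 31;
	event_mask0 = 1u << (event_id0 % 32);
	if (event_id0 > 31)
		watermark_level |= 1u << 30;

	/* prints mask0=0x00000008 mask1=0x00000080 wml=0x40000000 */
	printf("mask0=0x%08x mask1=0x%08x wml=0x%08x\n",
	       event_mask0, event_mask1, watermark_level);
	return 0;
}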
722 | static int sdma_set_channel_priority(struct sdma_channel *sdmac, | ||
723 | unsigned int priority) | ||
724 | { | ||
725 | struct sdma_engine *sdma = sdmac->sdma; | ||
726 | int channel = sdmac->channel; | ||
727 | |||
728 | if (priority < MXC_SDMA_MIN_PRIORITY | ||
729 | || priority > MXC_SDMA_MAX_PRIORITY) { | ||
730 | return -EINVAL; | ||
731 | } | ||
732 | |||
733 | __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int sdma_request_channel(struct sdma_channel *sdmac) | ||
739 | { | ||
740 | struct sdma_engine *sdma = sdmac->sdma; | ||
741 | int channel = sdmac->channel; | ||
742 | int ret = -EBUSY; | ||
743 | |||
744 | sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); | ||
745 | if (!sdmac->bd) { | ||
746 | ret = -ENOMEM; | ||
747 | goto out; | ||
748 | } | ||
749 | |||
750 | memset(sdmac->bd, 0, PAGE_SIZE); | ||
751 | |||
752 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | ||
753 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
754 | |||
755 | clk_enable(sdma->clk); | ||
756 | |||
757 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | ||
758 | |||
759 | init_completion(&sdmac->done); | ||
760 | |||
761 | sdmac->buf_tail = 0; | ||
762 | |||
763 | return 0; | ||
764 | out: | ||
765 | |||
766 | return ret; | ||
767 | } | ||
768 | |||
769 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
770 | { | ||
771 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
772 | } | ||
773 | |||
774 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) | ||
775 | { | ||
776 | dma_cookie_t cookie = sdmac->chan.cookie; | ||
777 | |||
778 | if (++cookie < 0) | ||
779 | cookie = 1; | ||
780 | |||
781 | sdmac->chan.cookie = cookie; | ||
782 | sdmac->desc.cookie = cookie; | ||
783 | |||
784 | return cookie; | ||
785 | } | ||
786 | |||
787 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | ||
788 | { | ||
789 | return container_of(chan, struct sdma_channel, chan); | ||
790 | } | ||
791 | |||
792 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
793 | { | ||
794 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | ||
795 | struct sdma_engine *sdma = sdmac->sdma; | ||
796 | dma_cookie_t cookie; | ||
797 | |||
798 | spin_lock_irq(&sdmac->lock); | ||
799 | |||
800 | cookie = sdma_assign_cookie(sdmac); | ||
801 | |||
802 | sdma_enable_channel(sdma, sdmac->channel); | ||
803 | |||
804 | spin_unlock_irq(&sdmac->lock); | ||
805 | |||
806 | return cookie; | ||
807 | } | ||
808 | |||
809 | static int sdma_alloc_chan_resources(struct dma_chan *chan) | ||
810 | { | ||
811 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
812 | struct imx_dma_data *data = chan->private; | ||
813 | int prio, ret; | ||
814 | |||
815 | if (!data) | ||
816 | return -EINVAL; | ||
817 | |||
818 | switch (data->priority) { | ||
819 | case DMA_PRIO_HIGH: | ||
820 | prio = 3; | ||
821 | break; | ||
822 | case DMA_PRIO_MEDIUM: | ||
823 | prio = 2; | ||
824 | break; | ||
825 | case DMA_PRIO_LOW: | ||
826 | default: | ||
827 | prio = 1; | ||
828 | break; | ||
829 | } | ||
830 | |||
831 | sdmac->peripheral_type = data->peripheral_type; | ||
832 | sdmac->event_id0 = data->dma_request; | ||
833 | ret = sdma_set_channel_priority(sdmac, prio); | ||
834 | if (ret) | ||
835 | return ret; | ||
836 | |||
837 | ret = sdma_request_channel(sdmac); | ||
838 | if (ret) | ||
839 | return ret; | ||
840 | |||
841 | dma_async_tx_descriptor_init(&sdmac->desc, chan); | ||
842 | sdmac->desc.tx_submit = sdma_tx_submit; | ||
843 | /* txd.flags will be overwritten in prep funcs */ | ||
844 | sdmac->desc.flags = DMA_CTRL_ACK; | ||
845 | |||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static void sdma_free_chan_resources(struct dma_chan *chan) | ||
850 | { | ||
851 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
852 | struct sdma_engine *sdma = sdmac->sdma; | ||
853 | |||
854 | sdma_disable_channel(sdmac); | ||
855 | |||
856 | if (sdmac->event_id0) | ||
857 | sdma_event_disable(sdmac, sdmac->event_id0); | ||
858 | if (sdmac->event_id1) | ||
859 | sdma_event_disable(sdmac, sdmac->event_id1); | ||
860 | |||
861 | sdmac->event_id0 = 0; | ||
862 | sdmac->event_id1 = 0; | ||
863 | |||
864 | sdma_set_channel_priority(sdmac, 0); | ||
865 | |||
866 | dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); | ||
867 | |||
868 | clk_disable(sdma->clk); | ||
869 | } | ||
870 | |||
871 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | ||
872 | struct dma_chan *chan, struct scatterlist *sgl, | ||
873 | unsigned int sg_len, enum dma_data_direction direction, | ||
874 | unsigned long flags) | ||
875 | { | ||
876 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
877 | struct sdma_engine *sdma = sdmac->sdma; | ||
878 | int ret, i, count; | ||
879 | int channel = sdmac->channel; | ||
880 | struct scatterlist *sg; | ||
881 | |||
882 | if (sdmac->status == DMA_IN_PROGRESS) | ||
883 | return NULL; | ||
884 | sdmac->status = DMA_IN_PROGRESS; | ||
885 | |||
886 | sdmac->flags = 0; | ||
887 | |||
888 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", | ||
889 | sg_len, channel); | ||
890 | |||
891 | sdmac->direction = direction; | ||
892 | ret = sdma_load_context(sdmac); | ||
893 | if (ret) | ||
894 | goto err_out; | ||
895 | |||
896 | if (sg_len > NUM_BD) { | ||
897 | dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", | ||
898 | channel, sg_len, NUM_BD); | ||
899 | ret = -EINVAL; | ||
900 | goto err_out; | ||
901 | } | ||
902 | |||
903 | for_each_sg(sgl, sg, sg_len, i) { | ||
904 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | ||
905 | int param; | ||
906 | |||
907 | bd->buffer_addr = sg->dma_address; | ||
908 | |||
909 | count = sg->length; | ||
910 | |||
911 | if (count > 0xffff) { | ||
912 | dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", | ||
913 | channel, count, 0xffff); | ||
914 | ret = -EINVAL; | ||
915 | goto err_out; | ||
916 | } | ||
917 | |||
918 | bd->mode.count = count; | ||
919 | |||
920 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { | ||
921 | ret = -EINVAL; | ||
922 | goto err_out; | ||
923 | } | ||
924 | |||
925 | switch (sdmac->word_size) { | ||
926 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
927 | bd->mode.command = 0; | ||
928 | if (count & 3 || sg->dma_address & 3) | ||
929 | return NULL; | ||
930 | break; | ||
931 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
932 | bd->mode.command = 2; | ||
933 | if (count & 1 || sg->dma_address & 1) | ||
934 | return NULL; | ||
935 | break; | ||
936 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
937 | bd->mode.command = 1; | ||
938 | break; | ||
939 | default: | ||
940 | return NULL; | ||
941 | } | ||
942 | |||
943 | param = BD_DONE | BD_EXTD | BD_CONT; | ||
944 | |||
945 | if (i + 1 == sg_len) { | ||
946 | param |= BD_INTR; | ||
947 | param |= BD_LAST; | ||
948 | param &= ~BD_CONT; | ||
949 | } | ||
950 | |||
951 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | ||
952 | i, count, sg->dma_address, | ||
953 | param & BD_WRAP ? "wrap" : "", | ||
954 | param & BD_INTR ? " intr" : ""); | ||
955 | |||
956 | bd->mode.status = param; | ||
957 | } | ||
958 | |||
959 | sdmac->num_bd = sg_len; | ||
960 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
961 | |||
962 | return &sdmac->desc; | ||
963 | err_out: | ||
964 | sdmac->status = DMA_ERROR; | ||
965 | return NULL; | ||
966 | } | ||
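To make the buffer-descriptor flag handling above concrete, this is what the loop produces for a three-entry scatterlist (read directly off the code, no extra assumptions):

/*
 * bd[0].mode.status = BD_DONE | BD_EXTD | BD_CONT
 * bd[1].mode.status = BD_DONE | BD_EXTD | BD_CONT
 * bd[2].mode.status = BD_DONE | BD_EXTD | BD_INTR | BD_LAST
 *
 * Only the final descriptor raises an interrupt (BD_INTR) and closes the
 * chain (BD_LAST, BD_CONT cleared); the earlier ones just hand over to
 * the next descriptor.
 */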
967 | |||
968 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | ||
969 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
970 | size_t period_len, enum dma_data_direction direction) | ||
971 | { | ||
972 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
973 | struct sdma_engine *sdma = sdmac->sdma; | ||
974 | int num_periods = buf_len / period_len; | ||
975 | int channel = sdmac->channel; | ||
976 | int ret, i = 0, buf = 0; | ||
977 | |||
978 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); | ||
979 | |||
980 | if (sdmac->status == DMA_IN_PROGRESS) | ||
981 | return NULL; | ||
982 | |||
983 | sdmac->status = DMA_IN_PROGRESS; | ||
984 | |||
985 | sdmac->flags |= IMX_DMA_SG_LOOP; | ||
986 | sdmac->direction = direction; | ||
987 | ret = sdma_load_context(sdmac); | ||
988 | if (ret) | ||
989 | goto err_out; | ||
990 | |||
991 | if (num_periods > NUM_BD) { | ||
992 | dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", | ||
993 | channel, num_periods, NUM_BD); | ||
994 | goto err_out; | ||
995 | } | ||
996 | |||
997 | if (period_len > 0xffff) { | ||
998 | dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", | ||
999 | channel, period_len, 0xffff); | ||
1000 | goto err_out; | ||
1001 | } | ||
1002 | |||
1003 | while (buf < buf_len) { | ||
1004 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | ||
1005 | int param; | ||
1006 | |||
1007 | bd->buffer_addr = dma_addr; | ||
1008 | |||
1009 | bd->mode.count = period_len; | ||
1010 | |||
1011 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
1012 | goto err_out; | ||
1013 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
1014 | bd->mode.command = 0; | ||
1015 | else | ||
1016 | bd->mode.command = sdmac->word_size; | ||
1017 | |||
1018 | param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; | ||
1019 | if (i + 1 == num_periods) | ||
1020 | param |= BD_WRAP; | ||
1021 | |||
1022 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | ||
1023 | i, period_len, dma_addr, | ||
1024 | param & BD_WRAP ? "wrap" : "", | ||
1025 | param & BD_INTR ? " intr" : ""); | ||
1026 | |||
1027 | bd->mode.status = param; | ||
1028 | |||
1029 | dma_addr += period_len; | ||
1030 | buf += period_len; | ||
1031 | |||
1032 | i++; | ||
1033 | } | ||
1034 | |||
1035 | sdmac->num_bd = num_periods; | ||
1036 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
1037 | |||
1038 | return &sdmac->desc; | ||
1039 | err_out: | ||
1040 | sdmac->status = DMA_ERROR; | ||
1041 | return NULL; | ||
1042 | } | ||
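A typical consumer of sdma_prep_dma_cyclic() is an audio-style driver that wants an interrupt after every period (BD_INTR is set on every descriptor, BD_WRAP only on the last). Below is a hedged caller-side sketch using only operations this file registers; the buffer geometry and the callback are illustrative, and the completion callback is assumed to be invoked by the interrupt path, which is not part of this excerpt.

/* dma_phys describes a coherent buffer of four 4 KiB periods. */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t dma_phys,
				void (*period_cb)(void *), void *cb_arg)
{
	size_t period_len = 4096;		/* must stay <= 0xffff */
	size_t buf_len = 4 * period_len;	/* at most NUM_BD periods */
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, dma_phys, buf_len,
						    period_len, DMA_FROM_DEVICE);
	if (!desc)
		return -EINVAL;

	desc->callback = period_cb;
	desc->callback_param = cb_arg;
	desc->tx_submit(desc);		/* sdma_tx_submit() starts the channel */
	return 0;
}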
1043 | |||
1044 | static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
1045 | unsigned long arg) | ||
1046 | { | ||
1047 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
1048 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
1049 | |||
1050 | switch (cmd) { | ||
1051 | case DMA_TERMINATE_ALL: | ||
1052 | sdma_disable_channel(sdmac); | ||
1053 | return 0; | ||
1054 | case DMA_SLAVE_CONFIG: | ||
1055 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | ||
1056 | sdmac->per_address = dmaengine_cfg->src_addr; | ||
1057 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; | ||
1058 | sdmac->word_size = dmaengine_cfg->src_addr_width; | ||
1059 | } else { | ||
1060 | sdmac->per_address = dmaengine_cfg->dst_addr; | ||
1061 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
1062 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
1063 | } | ||
1064 | return sdma_config_channel(sdmac); | ||
1065 | default: | ||
1066 | return -ENOSYS; | ||
1067 | } | ||
1068 | |||
1069 | return -EINVAL; | ||
1070 | } | ||
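The DMA_SLAVE_CONFIG case above only consumes the direction, the peripheral address, the maxburst watermark and the bus width, so configuring a channel needs nothing more than a plain struct dma_slave_config. A sketch for the receive direction; the FIFO address and burst size are made-up example values:

static int example_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,			/* sdmac->per_address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* sdmac->word_size */
		.src_maxburst	= 6,				/* sdmac->watermark_level */
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}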
1071 | |||
1072 | static enum dma_status sdma_tx_status(struct dma_chan *chan, | ||
1073 | dma_cookie_t cookie, | ||
1074 | struct dma_tx_state *txstate) | ||
1075 | { | ||
1076 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
1077 | dma_cookie_t last_used; | ||
1078 | |||
1079 | last_used = chan->cookie; | ||
1080 | |||
1081 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | ||
1082 | |||
1083 | return sdmac->status; | ||
1084 | } | ||
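sdma_tx_status() reports only last_completed, last_used and the channel state, always with a residue of 0. A client can therefore poll completion as sketched below, assuming the interrupt path (outside this excerpt) marks the channel DMA_SUCCESS when the last descriptor finishes:

static bool example_sdma_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	/* state.residue is always 0 for this driver */
	return status == DMA_SUCCESS;
}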
1085 | |||
1086 | static void sdma_issue_pending(struct dma_chan *chan) | ||
1087 | { | ||
1088 | /* | ||
1089 | * Nothing to do. We only have a single descriptor | ||
1090 | */ | ||
1091 | } | ||
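sdma_issue_pending() can be empty because sdma_tx_submit() already enables the channel when the descriptor is submitted. The standard dmaengine sequence still applies unchanged, as sketched below for a descriptor returned by one of the prep routines above:

/* desc was returned by device_prep_slave_sg() or device_prep_dma_cyclic() */
static dma_cookie_t example_submit(struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie = desc->tx_submit(desc);	/* starts the SDMA channel */

	/* A no-op for this driver, but portable dmaengine users
	 * are still expected to call it. */
	dma_async_issue_pending(desc->chan);

	return cookie;
}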
1092 | |||
1093 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | ||
1094 | |||
1095 | static void sdma_add_scripts(struct sdma_engine *sdma, | ||
1096 | const struct sdma_script_start_addrs *addr) | ||
1097 | { | ||
1098 | s32 *addr_arr = (u32 *)addr; | ||
1099 | s32 *saddr_arr = (u32 *)sdma->script_addrs; | ||
1100 | int i; | ||
1101 | |||
1102 | for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) | ||
1103 | if (addr_arr[i] > 0) | ||
1104 | saddr_arr[i] = addr_arr[i]; | ||
1105 | } | ||
1106 | |||
1107 | static int __init sdma_get_firmware(struct sdma_engine *sdma, | ||
1108 | const char *cpu_name, int to_version) | ||
1109 | { | ||
1110 | const struct firmware *fw; | ||
1111 | char *fwname; | ||
1112 | const struct sdma_firmware_header *header; | ||
1113 | int ret; | ||
1114 | const struct sdma_script_start_addrs *addr; | ||
1115 | unsigned short *ram_code; | ||
1116 | |||
1117 | fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version); | ||
1118 | if (!fwname) | ||
1119 | return -ENOMEM; | ||
1120 | |||
1121 | ret = request_firmware(&fw, fwname, sdma->dev); | ||
1122 | if (ret) { | ||
1123 | kfree(fwname); | ||
1124 | return ret; | ||
1125 | } | ||
1126 | kfree(fwname); | ||
1127 | |||
1128 | if (fw->size < sizeof(*header)) | ||
1129 | goto err_firmware; | ||
1130 | |||
1131 | header = (struct sdma_firmware_header *)fw->data; | ||
1132 | |||
1133 | if (header->magic != SDMA_FIRMWARE_MAGIC) | ||
1134 | goto err_firmware; | ||
1135 | if (header->ram_code_start + header->ram_code_size > fw->size) | ||
1136 | goto err_firmware; | ||
1137 | |||
1138 | addr = (void *)header + header->script_addrs_start; | ||
1139 | ram_code = (void *)header + header->ram_code_start; | ||
1140 | |||
1141 | clk_enable(sdma->clk); | ||
1142 | /* download the RAM image for SDMA */ | ||
1143 | sdma_load_script(sdma, ram_code, | ||
1144 | header->ram_code_size, | ||
1145 | addr->ram_code_start_addr); | ||
1146 | clk_disable(sdma->clk); | ||
1147 | |||
1148 | sdma_add_scripts(sdma, addr); | ||
1149 | |||
1150 | dev_info(sdma->dev, "loaded firmware %d.%d\n", | ||
1151 | header->version_major, | ||
1152 | header->version_minor); | ||
1153 | |||
1154 | err_firmware: | ||
1155 | release_firmware(fw); | ||
1156 | |||
1157 | return ret; | ||
1158 | } | ||
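sdma_get_firmware() requests a blob named after the "sdma-%s-to%d.bin" pattern, so a platform passing cpu_name "imx35" and to_version 2 would look for "sdma-imx35-to2.bin" (the concrete names come from platform data, not from this file). The sketch below lists only the header fields dereferenced above; the real struct sdma_firmware_header is defined elsewhere in this driver and may have additional members, a different order and different field widths, so treat this purely as a reading aid.

/* Assumed layout, reconstructed from the accesses in sdma_get_firmware(). */
struct sdma_firmware_header_sketch {
	u32 magic;			/* must equal SDMA_FIRMWARE_MAGIC */
	u32 version_major;		/* reported in the dev_info() above */
	u32 version_minor;
	u32 script_addrs_start;		/* byte offset of sdma_script_start_addrs */
	u32 ram_code_start;		/* byte offset of the RAM code image */
	u32 ram_code_size;		/* bytes loaded by sdma_load_script() */
};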
1159 | |||
1160 | static int __init sdma_init(struct sdma_engine *sdma) | ||
1161 | { | ||
1162 | int i, ret; | ||
1163 | dma_addr_t ccb_phys; | ||
1164 | |||
1165 | switch (sdma->version) { | ||
1166 | case 1: | ||
1167 | sdma->num_events = 32; | ||
1168 | break; | ||
1169 | case 2: | ||
1170 | sdma->num_events = 48; | ||
1171 | break; | ||
1172 | default: | ||
1173 | dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version); | ||
1174 | return -ENODEV; | ||
1175 | } | ||
1176 | |||
1177 | clk_enable(sdma->clk); | ||
1178 | |||
1179 | /* Be sure SDMA has not started yet */ | ||
1180 | __raw_writel(0, sdma->regs + SDMA_H_C0PTR); | ||
1181 | |||
1182 | sdma->channel_control = dma_alloc_coherent(NULL, | ||
1183 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | ||
1184 | sizeof(struct sdma_context_data), | ||
1185 | &ccb_phys, GFP_KERNEL); | ||
1186 | |||
1187 | if (!sdma->channel_control) { | ||
1188 | ret = -ENOMEM; | ||
1189 | goto err_dma_alloc; | ||
1190 | } | ||
1191 | |||
1192 | sdma->context = (void *)sdma->channel_control + | ||
1193 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); | ||
1194 | sdma->context_phys = ccb_phys + | ||
1195 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); | ||
1196 | |||
1197 | /* Zero-out the CCB structures array just allocated */ | ||
1198 | memset(sdma->channel_control, 0, | ||
1199 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); | ||
1200 | |||
1201 | /* disable all channels */ | ||
1202 | for (i = 0; i < sdma->num_events; i++) | ||
1203 | __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); | ||
1204 | |||
1205 | /* All channels have priority 0 */ | ||
1206 | for (i = 0; i < MAX_DMA_CHANNELS; i++) | ||
1207 | __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); | ||
1208 | |||
1209 | ret = sdma_request_channel(&sdma->channel[0]); | ||
1210 | if (ret) | ||
1211 | goto err_dma_alloc; | ||
1212 | |||
1213 | sdma_config_ownership(&sdma->channel[0], false, true, false); | ||
1214 | |||
1215 | /* Set Command Channel (Channel Zero) */ | ||
1216 | __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); | ||
1217 | |||
1218 | /* Set bits of CONFIG register but with static context switching */ | ||
1219 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | ||
1220 | __raw_writel(0, sdma->regs + SDMA_H_CONFIG); | ||
1221 | |||
1222 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); | ||
1223 | |||
1224 | /* Set bits of CONFIG register with given context switching mode */ | ||
1225 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
1226 | |||
1227 | /* Initializes channel's priorities */ | ||
1228 | sdma_set_channel_priority(&sdma->channel[0], 7); | ||
1229 | |||
1230 | clk_disable(sdma->clk); | ||
1231 | |||
1232 | return 0; | ||
1233 | |||
1234 | err_dma_alloc: | ||
1235 | clk_disable(sdma->clk); | ||
1236 | dev_err(sdma->dev, "initialisation failed with %d\n", ret); | ||
1237 | return ret; | ||
1238 | } | ||
1239 | |||
1240 | static int __init sdma_probe(struct platform_device *pdev) | ||
1241 | { | ||
1242 | int ret; | ||
1243 | int irq; | ||
1244 | struct resource *iores; | ||
1245 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | ||
1246 | int i; | ||
1247 | struct sdma_engine *sdma; | ||
1248 | |||
1249 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | ||
1250 | if (!sdma) | ||
1251 | return -ENOMEM; | ||
1252 | |||
1253 | sdma->dev = &pdev->dev; | ||
1254 | |||
1255 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1256 | irq = platform_get_irq(pdev, 0); | ||
1257 | if (!iores || irq < 0 || !pdata) { | ||
1258 | ret = -EINVAL; | ||
1259 | goto err_irq; | ||
1260 | } | ||
1261 | |||
1262 | if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { | ||
1263 | ret = -EBUSY; | ||
1264 | goto err_request_region; | ||
1265 | } | ||
1266 | |||
1267 | sdma->clk = clk_get(&pdev->dev, NULL); | ||
1268 | if (IS_ERR(sdma->clk)) { | ||
1269 | ret = PTR_ERR(sdma->clk); | ||
1270 | goto err_clk; | ||
1271 | } | ||
1272 | |||
1273 | sdma->regs = ioremap(iores->start, resource_size(iores)); | ||
1274 | if (!sdma->regs) { | ||
1275 | ret = -ENOMEM; | ||
1276 | goto err_ioremap; | ||
1277 | } | ||
1278 | |||
1279 | ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); | ||
1280 | if (ret) | ||
1281 | goto err_request_irq; | ||
1282 | |||
1283 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); | ||
1284 | if (!sdma->script_addrs) | ||
1285 | goto err_alloc; | ||
1286 | |||
1287 | sdma->version = pdata->sdma_version; | ||
1288 | |||
1289 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1290 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1291 | |||
1292 | INIT_LIST_HEAD(&sdma->dma_device.channels); | ||
1293 | /* Initialize channel parameters */ | ||
1294 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
1295 | struct sdma_channel *sdmac = &sdma->channel[i]; | ||
1296 | |||
1297 | sdmac->sdma = sdma; | ||
1298 | spin_lock_init(&sdmac->lock); | ||
1299 | |||
1300 | sdmac->chan.device = &sdma->dma_device; | ||
1301 | sdmac->channel = i; | ||
1302 | |||
1303 | /* | ||
1304 | * Add the channel to the DMAC list. Do not add channel 0 though | ||
1305 | * because we need it internally in the SDMA driver. This also means | ||
1306 | * that channel 0 in dmaengine counting matches sdma channel 1. | ||
1307 | */ | ||
1308 | if (i) | ||
1309 | list_add_tail(&sdmac->chan.device_node, | ||
1310 | &sdma->dma_device.channels); | ||
1311 | } | ||
1312 | |||
1313 | ret = sdma_init(sdma); | ||
1314 | if (ret) | ||
1315 | goto err_init; | ||
1316 | |||
1317 | if (pdata->script_addrs) | ||
1318 | sdma_add_scripts(sdma, pdata->script_addrs); | ||
1319 | |||
1320 | sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version); | ||
1321 | |||
1322 | sdma->dma_device.dev = &pdev->dev; | ||
1323 | |||
1324 | sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; | ||
1325 | sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; | ||
1326 | sdma->dma_device.device_tx_status = sdma_tx_status; | ||
1327 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; | ||
1328 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | ||
1329 | sdma->dma_device.device_control = sdma_control; | ||
1330 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | ||
1331 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | ||
1332 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | ||
1333 | |||
1334 | ret = dma_async_device_register(&sdma->dma_device); | ||
1335 | if (ret) { | ||
1336 | dev_err(&pdev->dev, "unable to register\n"); | ||
1337 | goto err_init; | ||
1338 | } | ||
1339 | |||
1340 | dev_info(sdma->dev, "initialized\n"); | ||
1341 | |||
1342 | return 0; | ||
1343 | |||
1344 | err_init: | ||
1345 | kfree(sdma->script_addrs); | ||
1346 | err_alloc: | ||
1347 | free_irq(irq, sdma); | ||
1348 | err_request_irq: | ||
1349 | iounmap(sdma->regs); | ||
1350 | err_ioremap: | ||
1351 | clk_put(sdma->clk); | ||
1352 | err_clk: | ||
1353 | release_mem_region(iores->start, resource_size(iores)); | ||
1354 | err_request_region: | ||
1355 | err_irq: | ||
1356 | kfree(sdma); | ||
1357 | return ret; | ||
1358 | } | ||
1359 | |||
1360 | static int __exit sdma_remove(struct platform_device *pdev) | ||
1361 | { | ||
1362 | return -EBUSY; | ||
1363 | } | ||
1364 | |||
1365 | static struct platform_driver sdma_driver = { | ||
1366 | .driver = { | ||
1367 | .name = "imx-sdma", | ||
1368 | }, | ||
1369 | .remove = __exit_p(sdma_remove), | ||
1370 | }; | ||
1371 | |||
1372 | static int __init sdma_module_init(void) | ||
1373 | { | ||
1374 | return platform_driver_probe(&sdma_driver, sdma_probe); | ||
1375 | } | ||
1376 | module_init(sdma_module_init); | ||
1377 | |||
1378 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | ||
1379 | MODULE_DESCRIPTION("i.MX SDMA driver"); | ||
1380 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index c2591e8d9b6e..f653517ef744 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/pm_runtime.h> | ||
28 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
29 | 30 | ||
30 | #define MAX_CHAN 4 /*max ch across controllers*/ | 31 | #define MAX_CHAN 4 /*max ch across controllers*/ |
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size) | |||
91 | int byte_width = 0, block_ts = 0; | 92 | int byte_width = 0, block_ts = 0; |
92 | 93 | ||
93 | switch (tx_width) { | 94 | switch (tx_width) { |
94 | case LNW_DMA_WIDTH_8BIT: | 95 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
95 | byte_width = 1; | 96 | byte_width = 1; |
96 | break; | 97 | break; |
97 | case LNW_DMA_WIDTH_16BIT: | 98 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
98 | byte_width = 2; | 99 | byte_width = 2; |
99 | break; | 100 | break; |
100 | case LNW_DMA_WIDTH_32BIT: | 101 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
101 | default: | 102 | default: |
102 | byte_width = 4; | 103 | byte_width = 4; |
103 | break; | 104 | break; |
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
247 | struct middma_device *mid = to_middma_device(midc->chan.device); | 248 | struct middma_device *mid = to_middma_device(midc->chan.device); |
248 | 249 | ||
249 | /* channel is idle */ | 250 | /* channel is idle */ |
250 | if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { | 251 | if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { |
251 | /*error*/ | 252 | /*error*/ |
252 | pr_err("ERR_MDMA: channel is busy in start\n"); | 253 | pr_err("ERR_MDMA: channel is busy in start\n"); |
253 | /* The tasklet will hopefully advance the queue... */ | 254 | /* The tasklet will hopefully advance the queue... */ |
254 | return; | 255 | return; |
255 | } | 256 | } |
256 | 257 | midc->busy = true; | |
257 | /*write registers and en*/ | 258 | /*write registers and en*/ |
258 | iowrite32(first->sar, midc->ch_regs + SAR); | 259 | iowrite32(first->sar, midc->ch_regs + SAR); |
259 | iowrite32(first->dar, midc->ch_regs + DAR); | 260 | iowrite32(first->dar, midc->ch_regs + DAR); |
261 | iowrite32(first->lli_phys, midc->ch_regs + LLP); | ||
260 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); | 262 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); |
261 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); | 263 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); |
262 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); | 264 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); |
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
264 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", | 266 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", |
265 | (int)first->sar, (int)first->dar, first->cfg_hi, | 267 | (int)first->sar, (int)first->dar, first->cfg_hi, |
266 | first->cfg_lo, first->ctl_hi, first->ctl_lo); | 268 | first->cfg_lo, first->ctl_hi, first->ctl_lo); |
269 | first->status = DMA_IN_PROGRESS; | ||
267 | 270 | ||
268 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | 271 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); |
269 | first->status = DMA_IN_PROGRESS; | ||
270 | } | 272 | } |
271 | 273 | ||
272 | /** | 274 | /** |
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
283 | { | 285 | { |
284 | struct dma_async_tx_descriptor *txd = &desc->txd; | 286 | struct dma_async_tx_descriptor *txd = &desc->txd; |
285 | dma_async_tx_callback callback_txd = NULL; | 287 | dma_async_tx_callback callback_txd = NULL; |
288 | struct intel_mid_dma_lli *llitem; | ||
286 | void *param_txd = NULL; | 289 | void *param_txd = NULL; |
287 | 290 | ||
288 | midc->completed = txd->cookie; | 291 | midc->completed = txd->cookie; |
289 | callback_txd = txd->callback; | 292 | callback_txd = txd->callback; |
290 | param_txd = txd->callback_param; | 293 | param_txd = txd->callback_param; |
291 | 294 | ||
292 | list_move(&desc->desc_node, &midc->free_list); | 295 | if (desc->lli != NULL) { |
293 | 296 | /*clear the DONE bit of completed LLI in memory*/ | |
297 | llitem = desc->lli + desc->current_lli; | ||
298 | llitem->ctl_hi &= CLEAR_DONE; | ||
299 | if (desc->current_lli < desc->lli_length-1) | ||
300 | (desc->current_lli)++; | ||
301 | else | ||
302 | desc->current_lli = 0; | ||
303 | } | ||
294 | spin_unlock_bh(&midc->lock); | 304 | spin_unlock_bh(&midc->lock); |
295 | if (callback_txd) { | 305 | if (callback_txd) { |
296 | pr_debug("MDMA: TXD callback set ... calling\n"); | 306 | pr_debug("MDMA: TXD callback set ... calling\n"); |
297 | callback_txd(param_txd); | 307 | callback_txd(param_txd); |
298 | spin_lock_bh(&midc->lock); | 308 | } |
299 | return; | 309 | if (midc->raw_tfr) { |
310 | desc->status = DMA_SUCCESS; | ||
311 | if (desc->lli != NULL) { | ||
312 | pci_pool_free(desc->lli_pool, desc->lli, | ||
313 | desc->lli_phys); | ||
314 | pci_pool_destroy(desc->lli_pool); | ||
315 | } | ||
316 | list_move(&desc->desc_node, &midc->free_list); | ||
317 | midc->busy = false; | ||
300 | } | 318 | } |
301 | spin_lock_bh(&midc->lock); | 319 | spin_lock_bh(&midc->lock); |
302 | 320 | ||
@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid, | |||
317 | 335 | ||
318 | /*tx is complete*/ | 336 | /*tx is complete*/ |
319 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 337 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
320 | if (desc->status == DMA_IN_PROGRESS) { | 338 | if (desc->status == DMA_IN_PROGRESS) |
321 | desc->status = DMA_SUCCESS; | ||
322 | midc_descriptor_complete(midc, desc); | 339 | midc_descriptor_complete(midc, desc); |
323 | } | ||
324 | } | 340 | } |
325 | return; | 341 | return; |
326 | } | 342 | } |
343 | /** | ||
344 | * midc_lli_fill_sg - Helper function to convert | ||
345 | * SG list to Linked List Items. | ||
346 | *@midc: Channel | ||
347 | *@desc: DMA descriptor | ||
348 | *@sglist: Pointer to SG list | ||
349 | *@sglen: SG list length | ||
350 | *@flags: DMA transaction flags | ||
351 | * | ||
352 | * Walk through the SG list and convert the SG list into Linked | ||
353 | * List Items (LLI). | ||
354 | */ | ||
355 | static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | ||
356 | struct intel_mid_dma_desc *desc, | ||
357 | struct scatterlist *sglist, | ||
358 | unsigned int sglen, | ||
359 | unsigned int flags) | ||
360 | { | ||
361 | struct intel_mid_dma_slave *mids; | ||
362 | struct scatterlist *sg; | ||
363 | dma_addr_t lli_next, sg_phy_addr; | ||
364 | struct intel_mid_dma_lli *lli_bloc_desc; | ||
365 | union intel_mid_dma_ctl_lo ctl_lo; | ||
366 | union intel_mid_dma_ctl_hi ctl_hi; | ||
367 | int i; | ||
368 | |||
369 | pr_debug("MDMA: Entered midc_lli_fill_sg\n"); | ||
370 | mids = midc->mid_slave; | ||
327 | 371 | ||
372 | lli_bloc_desc = desc->lli; | ||
373 | lli_next = desc->lli_phys; | ||
374 | |||
375 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
376 | ctl_hi.ctl_hi = desc->ctl_hi; | ||
377 | for_each_sg(sglist, sg, sglen, i) { | ||
378 | /*Populate CTL_LOW and LLI values*/ | ||
379 | if (i != sglen - 1) { | ||
380 | lli_next = lli_next + | ||
381 | sizeof(struct intel_mid_dma_lli); | ||
382 | } else { | ||
383 | /*Check for circular list, otherwise terminate LLI to ZERO*/ | ||
384 | if (flags & DMA_PREP_CIRCULAR_LIST) { | ||
385 | pr_debug("MDMA: LLI is configured in circular mode\n"); | ||
386 | lli_next = desc->lli_phys; | ||
387 | } else { | ||
388 | lli_next = 0; | ||
389 | ctl_lo.ctlx.llp_dst_en = 0; | ||
390 | ctl_lo.ctlx.llp_src_en = 0; | ||
391 | } | ||
392 | } | ||
393 | /*Populate CTL_HI values*/ | ||
394 | ctl_hi.ctlx.block_ts = get_block_ts(sg->length, | ||
395 | desc->width, | ||
396 | midc->dma->block_size); | ||
397 | /*Populate SAR and DAR values*/ | ||
398 | sg_phy_addr = sg_phys(sg); | ||
399 | if (desc->dirn == DMA_TO_DEVICE) { | ||
400 | lli_bloc_desc->sar = sg_phy_addr; | ||
401 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | ||
402 | } else if (desc->dirn == DMA_FROM_DEVICE) { | ||
403 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | ||
404 | lli_bloc_desc->dar = sg_phy_addr; | ||
405 | } | ||
406 | /*Copy values into block descriptor in system memory*/ | ||
407 | lli_bloc_desc->llp = lli_next; | ||
408 | lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; | ||
409 | lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; | ||
410 | |||
411 | lli_bloc_desc++; | ||
412 | } | ||
413 | /*Copy very first LLI values to descriptor*/ | ||
414 | desc->ctl_lo = desc->lli->ctl_lo; | ||
415 | desc->ctl_hi = desc->lli->ctl_hi; | ||
416 | desc->sar = desc->lli->sar; | ||
417 | desc->dar = desc->lli->dar; | ||
418 | |||
419 | return 0; | ||
420 | } | ||
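The loop above chains the linked-list items through their llp members: every item points at the physical address of its successor, and the last item either loops back to desc->lli_phys (DMA_PREP_CIRCULAR_LIST) or terminates with llp = 0 and the llp_src_en/llp_dst_en bits cleared. Schematically, for a three-entry scatterlist:

/*
 * desc->lli_phys -> lli[0].llp = phys(&lli[1])
 *                   lli[1].llp = phys(&lli[2])
 *                   lli[2].llp = desc->lli_phys   (circular list)
 *                            or 0                 (terminated list,
 *                                                  llp_src_en = llp_dst_en = 0)
 */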
328 | /***************************************************************************** | 421 | /***************************************************************************** |
329 | DMA engine callback Functions*/ | 422 | DMA engine callback Functions*/ |
330 | /** | 423 | /** |
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
349 | desc->txd.cookie = cookie; | 442 | desc->txd.cookie = cookie; |
350 | 443 | ||
351 | 444 | ||
352 | if (list_empty(&midc->active_list)) { | 445 | if (list_empty(&midc->active_list)) |
353 | midc_dostart(midc, desc); | ||
354 | list_add_tail(&desc->desc_node, &midc->active_list); | 446 | list_add_tail(&desc->desc_node, &midc->active_list); |
355 | } else { | 447 | else |
356 | list_add_tail(&desc->desc_node, &midc->queue); | 448 | list_add_tail(&desc->desc_node, &midc->queue); |
357 | } | 449 | |
450 | midc_dostart(midc, desc); | ||
358 | spin_unlock_bh(&midc->lock); | 451 | spin_unlock_bh(&midc->lock); |
359 | 452 | ||
360 | return cookie; | 453 | return cookie; |
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
414 | return ret; | 507 | return ret; |
415 | } | 508 | } |
416 | 509 | ||
510 | static int dma_slave_control(struct dma_chan *chan, unsigned long arg) | ||
511 | { | ||
512 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
513 | struct dma_slave_config *slave = (struct dma_slave_config *)arg; | ||
514 | struct intel_mid_dma_slave *mid_slave; | ||
515 | |||
516 | BUG_ON(!midc); | ||
517 | BUG_ON(!slave); | ||
518 | pr_debug("MDMA: slave control called\n"); | ||
519 | |||
520 | mid_slave = to_intel_mid_dma_slave(slave); | ||
521 | |||
522 | BUG_ON(!mid_slave); | ||
523 | |||
524 | midc->mid_slave = mid_slave; | ||
525 | return 0; | ||
526 | } | ||
417 | /** | 527 | /** |
418 | * intel_mid_dma_device_control - DMA device control | 528 | * intel_mid_dma_device_control - DMA device control |
419 | * @chan: chan for DMA control | 529 | * @chan: chan for DMA control |
@@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
428 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 538 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
429 | struct middma_device *mid = to_middma_device(chan->device); | 539 | struct middma_device *mid = to_middma_device(chan->device); |
430 | struct intel_mid_dma_desc *desc, *_desc; | 540 | struct intel_mid_dma_desc *desc, *_desc; |
431 | LIST_HEAD(list); | 541 | union intel_mid_dma_cfg_lo cfg_lo; |
542 | |||
543 | if (cmd == DMA_SLAVE_CONFIG) | ||
544 | return dma_slave_control(chan, arg); | ||
432 | 545 | ||
433 | if (cmd != DMA_TERMINATE_ALL) | 546 | if (cmd != DMA_TERMINATE_ALL) |
434 | return -ENXIO; | 547 | return -ENXIO; |
435 | 548 | ||
436 | spin_lock_bh(&midc->lock); | 549 | spin_lock_bh(&midc->lock); |
437 | if (midc->in_use == false) { | 550 | if (midc->busy == false) { |
438 | spin_unlock_bh(&midc->lock); | 551 | spin_unlock_bh(&midc->lock); |
439 | return 0; | 552 | return 0; |
440 | } | 553 | } |
441 | list_splice_init(&midc->free_list, &list); | 554 | /*Suspend and disable the channel*/ |
442 | midc->descs_allocated = 0; | 555 | cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); |
443 | midc->slave = NULL; | 556 | cfg_lo.cfgx.ch_susp = 1; |
444 | 557 | iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); | |
558 | iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
559 | midc->busy = false; | ||
445 | /* Disable interrupts */ | 560 | /* Disable interrupts */ |
446 | disable_dma_interrupt(midc); | 561 | disable_dma_interrupt(midc); |
562 | midc->descs_allocated = 0; | ||
447 | 563 | ||
448 | spin_unlock_bh(&midc->lock); | 564 | spin_unlock_bh(&midc->lock); |
449 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 565 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
450 | pr_debug("MDMA: freeing descriptor %p\n", desc); | 566 | if (desc->lli != NULL) { |
451 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | 567 | pci_pool_free(desc->lli_pool, desc->lli, |
568 | desc->lli_phys); | ||
569 | pci_pool_destroy(desc->lli_pool); | ||
570 | } | ||
571 | list_move(&desc->desc_node, &midc->free_list); | ||
452 | } | 572 | } |
453 | return 0; | 573 | return 0; |
454 | } | 574 | } |
455 | 575 | ||
456 | /** | ||
457 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
458 | * @chan: chan for DMA transfer | ||
459 | * @sgl: scatter gather list | ||
460 | * @sg_len: length of sg txn | ||
461 | * @direction: DMA transfer dirtn | ||
462 | * @flags: DMA flags | ||
463 | * | ||
464 | * Do DMA sg txn: NOT supported now | ||
465 | */ | ||
466 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
467 | struct dma_chan *chan, struct scatterlist *sgl, | ||
468 | unsigned int sg_len, enum dma_data_direction direction, | ||
469 | unsigned long flags) | ||
470 | { | ||
471 | /*not supported now*/ | ||
472 | return NULL; | ||
473 | } | ||
474 | 576 | ||
475 | /** | 577 | /** |
476 | * intel_mid_dma_prep_memcpy - Prep memcpy txn | 578 | * intel_mid_dma_prep_memcpy - Prep memcpy txn |
@@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
495 | union intel_mid_dma_ctl_hi ctl_hi; | 597 | union intel_mid_dma_ctl_hi ctl_hi; |
496 | union intel_mid_dma_cfg_lo cfg_lo; | 598 | union intel_mid_dma_cfg_lo cfg_lo; |
497 | union intel_mid_dma_cfg_hi cfg_hi; | 599 | union intel_mid_dma_cfg_hi cfg_hi; |
498 | enum intel_mid_dma_width width = 0; | 600 | enum dma_slave_buswidth width; |
499 | 601 | ||
500 | pr_debug("MDMA: Prep for memcpy\n"); | 602 | pr_debug("MDMA: Prep for memcpy\n"); |
501 | WARN_ON(!chan); | 603 | BUG_ON(!chan); |
502 | if (!len) | 604 | if (!len) |
503 | return NULL; | 605 | return NULL; |
504 | 606 | ||
505 | mids = chan->private; | ||
506 | WARN_ON(!mids); | ||
507 | |||
508 | midc = to_intel_mid_dma_chan(chan); | 607 | midc = to_intel_mid_dma_chan(chan); |
509 | WARN_ON(!midc); | 608 | BUG_ON(!midc); |
609 | |||
610 | mids = midc->mid_slave; | ||
611 | BUG_ON(!mids); | ||
510 | 612 | ||
511 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", | 613 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", |
512 | midc->dma->pci_id, midc->ch_id, len); | 614 | midc->dma->pci_id, midc->ch_id, len); |
513 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", | 615 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", |
514 | mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); | 616 | mids->cfg_mode, mids->dma_slave.direction, |
617 | mids->hs_mode, mids->dma_slave.src_addr_width); | ||
515 | 618 | ||
516 | /*calculate CFG_LO*/ | 619 | /*calculate CFG_LO*/ |
517 | if (mids->hs_mode == LNW_DMA_SW_HS) { | 620 | if (mids->hs_mode == LNW_DMA_SW_HS) { |
@@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
530 | if (midc->dma->pimr_mask) { | 633 | if (midc->dma->pimr_mask) { |
531 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | 634 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ |
532 | cfg_hi.cfgx.fifo_mode = 1; | 635 | cfg_hi.cfgx.fifo_mode = 1; |
533 | if (mids->dirn == DMA_TO_DEVICE) { | 636 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
534 | cfg_hi.cfgx.src_per = 0; | 637 | cfg_hi.cfgx.src_per = 0; |
535 | if (mids->device_instance == 0) | 638 | if (mids->device_instance == 0) |
536 | cfg_hi.cfgx.dst_per = 3; | 639 | cfg_hi.cfgx.dst_per = 3; |
537 | if (mids->device_instance == 1) | 640 | if (mids->device_instance == 1) |
538 | cfg_hi.cfgx.dst_per = 1; | 641 | cfg_hi.cfgx.dst_per = 1; |
539 | } else if (mids->dirn == DMA_FROM_DEVICE) { | 642 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
540 | if (mids->device_instance == 0) | 643 | if (mids->device_instance == 0) |
541 | cfg_hi.cfgx.src_per = 2; | 644 | cfg_hi.cfgx.src_per = 2; |
542 | if (mids->device_instance == 1) | 645 | if (mids->device_instance == 1) |
@@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
552 | 655 | ||
553 | /*calculate CTL_HI*/ | 656 | /*calculate CTL_HI*/ |
554 | ctl_hi.ctlx.reser = 0; | 657 | ctl_hi.ctlx.reser = 0; |
555 | width = mids->src_width; | 658 | ctl_hi.ctlx.done = 0; |
659 | width = mids->dma_slave.src_addr_width; | ||
556 | 660 | ||
557 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); | 661 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); |
558 | pr_debug("MDMA:calc len %d for block size %d\n", | 662 | pr_debug("MDMA:calc len %d for block size %d\n", |
@@ -560,21 +664,30 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
560 | /*calculate CTL_LO*/ | 664 | /*calculate CTL_LO*/ |
561 | ctl_lo.ctl_lo = 0; | 665 | ctl_lo.ctl_lo = 0; |
562 | ctl_lo.ctlx.int_en = 1; | 666 | ctl_lo.ctlx.int_en = 1; |
563 | ctl_lo.ctlx.dst_tr_width = mids->dst_width; | 667 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; |
564 | ctl_lo.ctlx.src_tr_width = mids->src_width; | 668 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; |
565 | ctl_lo.ctlx.dst_msize = mids->src_msize; | 669 | |
566 | ctl_lo.ctlx.src_msize = mids->dst_msize; | 670 | /* |
671 | * Here we need some translation from "enum dma_slave_buswidth" | ||
672 | * to the format for our dma controller | ||
673 | * standard intel_mid_dmac's format | ||
674 | * 1 Byte 0b000 | ||
675 | * 2 Bytes 0b001 | ||
676 | * 4 Bytes 0b010 | ||
677 | */ | ||
678 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
679 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
567 | 680 | ||
568 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | 681 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { |
569 | ctl_lo.ctlx.tt_fc = 0; | 682 | ctl_lo.ctlx.tt_fc = 0; |
570 | ctl_lo.ctlx.sinc = 0; | 683 | ctl_lo.ctlx.sinc = 0; |
571 | ctl_lo.ctlx.dinc = 0; | 684 | ctl_lo.ctlx.dinc = 0; |
572 | } else { | 685 | } else { |
573 | if (mids->dirn == DMA_TO_DEVICE) { | 686 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
574 | ctl_lo.ctlx.sinc = 0; | 687 | ctl_lo.ctlx.sinc = 0; |
575 | ctl_lo.ctlx.dinc = 2; | 688 | ctl_lo.ctlx.dinc = 2; |
576 | ctl_lo.ctlx.tt_fc = 1; | 689 | ctl_lo.ctlx.tt_fc = 1; |
577 | } else if (mids->dirn == DMA_FROM_DEVICE) { | 690 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
578 | ctl_lo.ctlx.sinc = 2; | 691 | ctl_lo.ctlx.sinc = 2; |
579 | ctl_lo.ctlx.dinc = 0; | 692 | ctl_lo.ctlx.dinc = 0; |
580 | ctl_lo.ctlx.tt_fc = 2; | 693 | ctl_lo.ctlx.tt_fc = 2; |
@@ -597,7 +710,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
597 | desc->ctl_lo = ctl_lo.ctl_lo; | 710 | desc->ctl_lo = ctl_lo.ctl_lo; |
598 | desc->ctl_hi = ctl_hi.ctl_hi; | 711 | desc->ctl_hi = ctl_hi.ctl_hi; |
599 | desc->width = width; | 712 | desc->width = width; |
600 | desc->dirn = mids->dirn; | 713 | desc->dirn = mids->dma_slave.direction; |
714 | desc->lli_phys = 0; | ||
715 | desc->lli = NULL; | ||
716 | desc->lli_pool = NULL; | ||
601 | return &desc->txd; | 717 | return &desc->txd; |
602 | 718 | ||
603 | err_desc_get: | 719 | err_desc_get: |
@@ -605,6 +721,96 @@ err_desc_get: | |||
605 | midc_desc_put(midc, desc); | 721 | midc_desc_put(midc, desc); |
606 | return NULL; | 722 | return NULL; |
607 | } | 723 | } |
724 | /** | ||
725 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
726 | * @chan: chan for DMA transfer | ||
727 | * @sgl: scatter gather list | ||
728 | * @sg_len: length of sg txn | ||
729 | * @direction: DMA transfer direction | ||
730 | * @flags: DMA flags | ||
731 | * | ||
732 | * Prepares an LLI-based peripheral transfer | ||
733 | */ | ||
734 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
735 | struct dma_chan *chan, struct scatterlist *sgl, | ||
736 | unsigned int sg_len, enum dma_data_direction direction, | ||
737 | unsigned long flags) | ||
738 | { | ||
739 | struct intel_mid_dma_chan *midc = NULL; | ||
740 | struct intel_mid_dma_slave *mids = NULL; | ||
741 | struct intel_mid_dma_desc *desc = NULL; | ||
742 | struct dma_async_tx_descriptor *txd = NULL; | ||
743 | union intel_mid_dma_ctl_lo ctl_lo; | ||
744 | |||
745 | pr_debug("MDMA: Prep for slave SG\n"); | ||
746 | |||
747 | if (!sg_len) { | ||
748 | pr_err("MDMA: Invalid SG length\n"); | ||
749 | return NULL; | ||
750 | } | ||
751 | midc = to_intel_mid_dma_chan(chan); | ||
752 | BUG_ON(!midc); | ||
753 | |||
754 | mids = midc->mid_slave; | ||
755 | BUG_ON(!mids); | ||
756 | |||
757 | if (!midc->dma->pimr_mask) { | ||
758 | /* We can still handle sg list with only one item */ | ||
759 | if (sg_len == 1) { | ||
760 | txd = intel_mid_dma_prep_memcpy(chan, | ||
761 | mids->dma_slave.dst_addr, | ||
762 | mids->dma_slave.src_addr, | ||
763 | sgl->length, | ||
764 | flags); | ||
765 | return txd; | ||
766 | } else { | ||
767 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
768 | return NULL; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | ||
773 | sg_len, direction, flags); | ||
774 | |||
775 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); | ||
776 | if (NULL == txd) { | ||
777 | pr_err("MDMA: Prep memcpy failed\n"); | ||
778 | return NULL; | ||
779 | } | ||
780 | |||
781 | desc = to_intel_mid_dma_desc(txd); | ||
782 | desc->dirn = direction; | ||
783 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
784 | ctl_lo.ctlx.llp_dst_en = 1; | ||
785 | ctl_lo.ctlx.llp_src_en = 1; | ||
786 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
787 | desc->lli_length = sg_len; | ||
788 | desc->current_lli = 0; | ||
789 | /* DMA coherent memory pool for LLI descriptors*/ | ||
790 | desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", | ||
791 | midc->dma->pdev, | ||
792 | (sizeof(struct intel_mid_dma_lli)*sg_len), | ||
793 | 32, 0); | ||
794 | if (NULL == desc->lli_pool) { | ||
795 | pr_err("MID_DMA:LLI pool create failed\n"); | ||
796 | return NULL; | ||
797 | } | ||
798 | |||
799 | desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); | ||
800 | if (!desc->lli) { | ||
801 | pr_err("MID_DMA: LLI alloc failed\n"); | ||
802 | pci_pool_destroy(desc->lli_pool); | ||
803 | return NULL; | ||
804 | } | ||
805 | |||
806 | midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); | ||
807 | if (flags & DMA_PREP_INTERRUPT) { | ||
808 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
809 | midc->dma_base + MASK_BLOCK); | ||
810 | pr_debug("MDMA:Enabled Block interrupt\n"); | ||
811 | } | ||
812 | return &desc->txd; | ||
813 | } | ||
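On controllers with pimr_mask set, this new slave-sg path builds a per-descriptor LLI pool and honours two flags used above: DMA_PREP_CIRCULAR_LIST (defined in the intel_mid_dma header) loops the list, and DMA_PREP_INTERRUPT unmasks the block interrupt. A hedged caller-side sketch, assuming the channel was already configured via DMA_SLAVE_CONFIG:

static struct dma_async_tx_descriptor *
example_prep_circular_rx(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len)
{
	unsigned long flags = DMA_PREP_CIRCULAR_LIST | DMA_PREP_INTERRUPT;

	/* Builds sg_len linked-list items; returns NULL if the LLI pool
	 * cannot be created or the list cannot be allocated. */
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE, flags);
}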
608 | 814 | ||
609 | /** | 815 | /** |
610 | * intel_mid_dma_free_chan_resources - Frees dma resources | 816 | * intel_mid_dma_free_chan_resources - Frees dma resources |
@@ -618,11 +824,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
618 | struct middma_device *mid = to_middma_device(chan->device); | 824 | struct middma_device *mid = to_middma_device(chan->device); |
619 | struct intel_mid_dma_desc *desc, *_desc; | 825 | struct intel_mid_dma_desc *desc, *_desc; |
620 | 826 | ||
621 | if (true == midc->in_use) { | 827 | if (true == midc->busy) { |
622 | /*trying to free ch in use!!!!!*/ | 828 | /*trying to free ch in use!!!!!*/ |
623 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 829 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
624 | } | 830 | } |
625 | 831 | pm_runtime_put(&mid->pdev->dev); | |
626 | spin_lock_bh(&midc->lock); | 832 | spin_lock_bh(&midc->lock); |
627 | midc->descs_allocated = 0; | 833 | midc->descs_allocated = 0; |
628 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 834 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
@@ -639,6 +845,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
639 | } | 845 | } |
640 | spin_unlock_bh(&midc->lock); | 846 | spin_unlock_bh(&midc->lock); |
641 | midc->in_use = false; | 847 | midc->in_use = false; |
848 | midc->busy = false; | ||
642 | /* Disable CH interrupts */ | 849 | /* Disable CH interrupts */ |
643 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 850 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
644 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 851 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
@@ -659,11 +866,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
659 | dma_addr_t phys; | 866 | dma_addr_t phys; |
660 | int i = 0; | 867 | int i = 0; |
661 | 868 | ||
869 | pm_runtime_get_sync(&mid->pdev->dev); | ||
870 | |||
871 | if (mid->state == SUSPENDED) { | ||
872 | if (dma_resume(mid->pdev)) { | ||
873 | pr_err("ERR_MDMA: resume failed"); | ||
874 | return -EFAULT; | ||
875 | } | ||
876 | } | ||
662 | 877 | ||
663 | /* ASSERT: channel is idle */ | 878 | /* ASSERT: channel is idle */ |
664 | if (test_ch_en(mid->dma_base, midc->ch_id)) { | 879 | if (test_ch_en(mid->dma_base, midc->ch_id)) { |
665 | /*ch is not idle*/ | 880 | /*ch is not idle*/ |
666 | pr_err("ERR_MDMA: ch not idle\n"); | 881 | pr_err("ERR_MDMA: ch not idle\n"); |
882 | pm_runtime_put(&mid->pdev->dev); | ||
667 | return -EIO; | 883 | return -EIO; |
668 | } | 884 | } |
669 | midc->completed = chan->cookie = 1; | 885 | midc->completed = chan->cookie = 1; |
@@ -674,6 +890,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
674 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); | 890 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); |
675 | if (!desc) { | 891 | if (!desc) { |
676 | pr_err("ERR_MDMA: desc failed\n"); | 892 | pr_err("ERR_MDMA: desc failed\n"); |
893 | pm_runtime_put(&mid->pdev->dev); | ||
677 | return -ENOMEM; | 894 | return -ENOMEM; |
678 | /*check*/ | 895 | /*check*/ |
679 | } | 896 | } |
@@ -686,15 +903,16 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
686 | list_add_tail(&desc->desc_node, &midc->free_list); | 903 | list_add_tail(&desc->desc_node, &midc->free_list); |
687 | } | 904 | } |
688 | spin_unlock_bh(&midc->lock); | 905 | spin_unlock_bh(&midc->lock); |
689 | midc->in_use = false; | 906 | midc->in_use = true; |
907 | midc->busy = false; | ||
690 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); | 908 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); |
691 | return i; | 909 | return i; |
692 | } | 910 | } |
693 | 911 | ||
694 | /** | 912 | /** |
695 | * midc_handle_error - Handle DMA txn error | 913 | * midc_handle_error - Handle DMA txn error |
696 | * @mid: controller where error occured | 914 | * @mid: controller where error occurred |
697 | * @midc: chan where error occured | 915 | * @midc: chan where error occurred |
698 | * | 916 | * |
699 | * Scan the descriptor for error | 917 | * Scan the descriptor for error |
700 | */ | 918 | */ |
@@ -715,7 +933,7 @@ static void dma_tasklet(unsigned long data) | |||
715 | { | 933 | { |
716 | struct middma_device *mid = NULL; | 934 | struct middma_device *mid = NULL; |
717 | struct intel_mid_dma_chan *midc = NULL; | 935 | struct intel_mid_dma_chan *midc = NULL; |
718 | u32 status; | 936 | u32 status, raw_tfr, raw_block; |
719 | int i; | 937 | int i; |
720 | 938 | ||
721 | mid = (struct middma_device *)data; | 939 | mid = (struct middma_device *)data; |
@@ -724,8 +942,9 @@ static void dma_tasklet(unsigned long data) | |||
724 | return; | 942 | return; |
725 | } | 943 | } |
726 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); | 944 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); |
727 | status = ioread32(mid->dma_base + RAW_TFR); | 945 | raw_tfr = ioread32(mid->dma_base + RAW_TFR); |
728 | pr_debug("MDMA:RAW_TFR %x\n", status); | 946 | raw_block = ioread32(mid->dma_base + RAW_BLOCK); |
947 | status = raw_tfr | raw_block; | ||
729 | status &= mid->intr_mask; | 948 | status &= mid->intr_mask; |
730 | while (status) { | 949 | while (status) { |
731 | /*txn interrupt*/ | 950 | /*txn interrupt*/ |
@@ -741,15 +960,23 @@ static void dma_tasklet(unsigned long data) | |||
741 | } | 960 | } |
742 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | 961 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", |
743 | status, midc->ch_id, i); | 962 | status, midc->ch_id, i); |
963 | midc->raw_tfr = raw_tfr; | ||
964 | midc->raw_block = raw_block; | ||
965 | spin_lock_bh(&midc->lock); | ||
744 | /*clearing this interrupts first*/ | 966 | /*clearing this interrupts first*/ |
745 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); | 967 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); |
746 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); | 968 | if (raw_block) { |
747 | 969 | iowrite32((1 << midc->ch_id), | |
748 | spin_lock_bh(&midc->lock); | 970 | mid->dma_base + CLEAR_BLOCK); |
971 | } | ||
749 | midc_scan_descriptors(mid, midc); | 972 | midc_scan_descriptors(mid, midc); |
750 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); | 973 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); |
751 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | 974 | iowrite32(UNMASK_INTR_REG(midc->ch_id), |
752 | mid->dma_base + MASK_TFR); | 975 | mid->dma_base + MASK_TFR); |
976 | if (raw_block) { | ||
977 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
978 | mid->dma_base + MASK_BLOCK); | ||
979 | } | ||
753 | spin_unlock_bh(&midc->lock); | 980 | spin_unlock_bh(&midc->lock); |
754 | } | 981 | } |
755 | 982 | ||
@@ -804,29 +1031,28 @@ static void dma_tasklet2(unsigned long data) | |||
804 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | 1031 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) |
805 | { | 1032 | { |
806 | struct middma_device *mid = data; | 1033 | struct middma_device *mid = data; |
807 | u32 status; | 1034 | u32 tfr_status, err_status; |
808 | int call_tasklet = 0; | 1035 | int call_tasklet = 0; |
809 | 1036 | ||
1037 | tfr_status = ioread32(mid->dma_base + RAW_TFR); | ||
1038 | err_status = ioread32(mid->dma_base + RAW_ERR); | ||
1039 | if (!tfr_status && !err_status) | ||
1040 | return IRQ_NONE; | ||
1041 | |||
810 | /*DMA Interrupt*/ | 1042 | /*DMA Interrupt*/ |
811 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | 1043 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); |
812 | if (!mid) { | 1044 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); |
813 | pr_err("ERR_MDMA:null pointer mid\n"); | 1045 | tfr_status &= mid->intr_mask; |
814 | return -EINVAL; | 1046 | if (tfr_status) { |
815 | } | ||
816 | |||
817 | status = ioread32(mid->dma_base + RAW_TFR); | ||
818 | pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); | ||
819 | status &= mid->intr_mask; | ||
820 | if (status) { | ||
821 | /*need to disable intr*/ | 1047 | /*need to disable intr*/ |
822 | iowrite32((status << 8), mid->dma_base + MASK_TFR); | 1048 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); |
823 | pr_debug("MDMA: Calling tasklet %x\n", status); | 1049 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); |
1050 | pr_debug("MDMA: Calling tasklet %x\n", tfr_status); | ||
824 | call_tasklet = 1; | 1051 | call_tasklet = 1; |
825 | } | 1052 | } |
826 | status = ioread32(mid->dma_base + RAW_ERR); | 1053 | err_status &= mid->intr_mask; |
827 | status &= mid->intr_mask; | 1054 | if (err_status) { |
828 | if (status) { | 1055 | iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); |
829 | iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR); | ||
830 | call_tasklet = 1; | 1056 | call_tasklet = 1; |
831 | } | 1057 | } |
832 | if (call_tasklet) | 1058 | if (call_tasklet) |
@@ -849,14 +1075,13 @@ static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) | |||
849 | * mid_setup_dma - Setup the DMA controller | 1075 | * mid_setup_dma - Setup the DMA controller |
850 | * @pdev: Controller PCI device structure | 1076 | * @pdev: Controller PCI device structure |
851 | * | 1077 | * |
852 | * Initilize the DMA controller, channels, registers with DMA engine, | 1078 | * Initialize the DMA controller, channels, registers with DMA engine, |
853 | * ISR. Initilize DMA controller channels. | 1079 | * ISR. Initialize DMA controller channels. |
854 | */ | 1080 | */ |
855 | static int mid_setup_dma(struct pci_dev *pdev) | 1081 | static int mid_setup_dma(struct pci_dev *pdev) |
856 | { | 1082 | { |
857 | struct middma_device *dma = pci_get_drvdata(pdev); | 1083 | struct middma_device *dma = pci_get_drvdata(pdev); |
858 | int err, i; | 1084 | int err, i; |
859 | unsigned int irq_level; | ||
860 | 1085 | ||
861 | /* DMA coherent memory pool for DMA descriptor allocations */ | 1086 | /* DMA coherent memory pool for DMA descriptor allocations */ |
862 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, | 1087 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, |
@@ -865,7 +1090,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
865 | if (NULL == dma->dma_pool) { | 1090 | if (NULL == dma->dma_pool) { |
866 | pr_err("ERR_MDMA:pci_pool_create failed\n"); | 1091 | pr_err("ERR_MDMA:pci_pool_create failed\n"); |
867 | err = -ENOMEM; | 1092 | err = -ENOMEM; |
868 | kfree(dma); | ||
869 | goto err_dma_pool; | 1093 | goto err_dma_pool; |
870 | } | 1094 | } |
871 | 1095 | ||
@@ -875,7 +1099,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
875 | dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, | 1099 | dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, |
876 | LNW_PERIPHRAL_MASK_SIZE); | 1100 | LNW_PERIPHRAL_MASK_SIZE); |
877 | if (dma->mask_reg == NULL) { | 1101 | if (dma->mask_reg == NULL) { |
878 | pr_err("ERR_MDMA:Cant map periphral intr space !!\n"); | 1102 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); |
879 | return -ENOMEM; | 1103 | return -ENOMEM; |
880 | } | 1104 | } |
881 | } else | 1105 | } else |
@@ -884,6 +1108,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
884 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); | 1108 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); |
885 | /*init CH structures*/ | 1109 | /*init CH structures*/ |
886 | dma->intr_mask = 0; | 1110 | dma->intr_mask = 0; |
1111 | dma->state = RUNNING; | ||
887 | for (i = 0; i < dma->max_chan; i++) { | 1112 | for (i = 0; i < dma->max_chan; i++) { |
888 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 1113 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
889 | 1114 | ||
@@ -943,7 +1168,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
943 | 1168 | ||
944 | /*register irq */ | 1169 | /*register irq */ |
945 | if (dma->pimr_mask) { | 1170 | if (dma->pimr_mask) { |
946 | irq_level = IRQF_SHARED; | ||
947 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); | 1171 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); |
948 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, | 1172 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, |
949 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); | 1173 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); |
@@ -951,10 +1175,9 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
951 | goto err_irq; | 1175 | goto err_irq; |
952 | } else { | 1176 | } else { |
953 | dma->intr_mask = 0x03; | 1177 | dma->intr_mask = 0x03; |
954 | irq_level = 0; | ||
955 | pr_debug("MDMA:Requesting irq for DMAC2\n"); | 1178 | pr_debug("MDMA:Requesting irq for DMAC2\n"); |
956 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, | 1179 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, |
957 | 0, "INTEL_MID_DMAC2", dma); | 1180 | IRQF_SHARED, "INTEL_MID_DMAC2", dma); |
958 | if (0 != err) | 1181 | if (0 != err) |
959 | goto err_irq; | 1182 | goto err_irq; |
960 | } | 1183 | } |
@@ -977,7 +1200,6 @@ err_engine: | |||
977 | free_irq(pdev->irq, dma); | 1200 | free_irq(pdev->irq, dma); |
978 | err_irq: | 1201 | err_irq: |
979 | pci_pool_destroy(dma->dma_pool); | 1202 | pci_pool_destroy(dma->dma_pool); |
980 | kfree(dma); | ||
981 | err_dma_pool: | 1203 | err_dma_pool: |
982 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1204 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
983 | return err; | 1205 | return err; |
@@ -1010,7 +1232,7 @@ static void middma_shutdown(struct pci_dev *pdev) | |||
1010 | * @pdev: Controller PCI device structure | 1232 | * @pdev: Controller PCI device structure |
1011 | * @id: pci device id structure | 1233 | * @id: pci device id structure |
1012 | * | 1234 | * |
1013 | * Initilize the PCI device, map BARs, query driver data. | 1235 | * Initialize the PCI device, map BARs, query driver data. |
1014 | * Call setup_dma to complete controller and channel initialization | 1236 | * Call setup_dma to complete controller and channel initialization |
1015 | */ | 1237 | */ |
1016 | static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | 1238 | static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, |
@@ -1070,6 +1292,8 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | |||
1070 | if (err) | 1292 | if (err) |
1071 | goto err_dma; | 1293 | goto err_dma; |
1072 | 1294 | ||
1295 | pm_runtime_put_noidle(&pdev->dev); | ||
1296 | pm_runtime_allow(&pdev->dev); | ||
1073 | return 0; | 1297 | return 0; |
1074 | 1298 | ||
1075 | err_dma: | 1299 | err_dma: |
@@ -1097,6 +1321,9 @@ err_enable_device: | |||
1097 | static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | 1321 | static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) |
1098 | { | 1322 | { |
1099 | struct middma_device *device = pci_get_drvdata(pdev); | 1323 | struct middma_device *device = pci_get_drvdata(pdev); |
1324 | |||
1325 | pm_runtime_get_noresume(&pdev->dev); | ||
1326 | pm_runtime_forbid(&pdev->dev); | ||
1100 | middma_shutdown(pdev); | 1327 | middma_shutdown(pdev); |
1101 | pci_dev_put(pdev); | 1328 | pci_dev_put(pdev); |
1102 | kfree(device); | 1329 | kfree(device); |
@@ -1104,6 +1331,92 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | |||
1104 | pci_disable_device(pdev); | 1331 | pci_disable_device(pdev); |
1105 | } | 1332 | } |
1106 | 1333 | ||
1334 | /* Power Management */ | ||
1335 | /* | ||
1336 | * dma_suspend - PCI suspend function | ||
1337 | * | ||
1338 | * @pci: PCI device structure | ||
1339 | * @state: PM message | ||
1340 | * | ||
1341 | * This function is called by OS when a power event occurs | ||
1342 | */ | ||
1343 | int dma_suspend(struct pci_dev *pci, pm_message_t state) | ||
1344 | { | ||
1345 | int i; | ||
1346 | struct middma_device *device = pci_get_drvdata(pci); | ||
1347 | pr_debug("MDMA: dma_suspend called\n"); | ||
1348 | |||
1349 | for (i = 0; i < device->max_chan; i++) { | ||
1350 | if (device->ch[i].in_use) | ||
1351 | return -EAGAIN; | ||
1352 | } | ||
1353 | device->state = SUSPENDED; | ||
1354 | pci_set_drvdata(pci, device); | ||
1355 | pci_save_state(pci); | ||
1356 | pci_disable_device(pci); | ||
1357 | pci_set_power_state(pci, PCI_D3hot); | ||
1358 | return 0; | ||
1359 | } | ||
1360 | |||
1361 | /** | ||
1362 | * dma_resume - PCI resume function | ||
1363 | * | ||
1364 | * @pci: PCI device structure | ||
1365 | * | ||
1366 | * This function is called by OS when a power event occurs | ||
1367 | */ | ||
1368 | int dma_resume(struct pci_dev *pci) | ||
1369 | { | ||
1370 | int ret; | ||
1371 | struct middma_device *device = pci_get_drvdata(pci); | ||
1372 | |||
1373 | pr_debug("MDMA: dma_resume called\n"); | ||
1374 | pci_set_power_state(pci, PCI_D0); | ||
1375 | pci_restore_state(pci); | ||
1376 | ret = pci_enable_device(pci); | ||
1377 | if (ret) { | ||
1378 | pr_err("MDMA: device can't be enabled for %x\n", pci->device); | ||
1379 | return ret; | ||
1380 | } | ||
1381 | device->state = RUNNING; | ||
1382 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1383 | pci_set_drvdata(pci, device); | ||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static int dma_runtime_suspend(struct device *dev) | ||
1388 | { | ||
1389 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1390 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1391 | |||
1392 | device->state = SUSPENDED; | ||
1393 | return 0; | ||
1394 | } | ||
1395 | |||
1396 | static int dma_runtime_resume(struct device *dev) | ||
1397 | { | ||
1398 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
1399 | struct middma_device *device = pci_get_drvdata(pci_dev); | ||
1400 | |||
1401 | device->state = RUNNING; | ||
1402 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
1403 | return 0; | ||
1404 | } | ||
1405 | |||
1406 | static int dma_runtime_idle(struct device *dev) | ||
1407 | { | ||
1408 | struct pci_dev *pdev = to_pci_dev(dev); | ||
1409 | struct middma_device *device = pci_get_drvdata(pdev); | ||
1410 | int i; | ||
1411 | |||
1412 | for (i = 0; i < device->max_chan; i++) { | ||
1413 | if (device->ch[i].in_use) | ||
1414 | return -EAGAIN; | ||
1415 | } | ||
1416 | |||
1417 | return pm_schedule_suspend(dev, 0); | ||
1418 | } | ||
1419 | |||
1107 | /****************************************************************************** | 1420 | /****************************************************************************** |
1108 | * PCI stuff | 1421 | * PCI stuff |
1109 | */ | 1422 | */ |
@@ -1116,24 +1429,37 @@ static struct pci_device_id intel_mid_dma_ids[] = { | |||
1116 | }; | 1429 | }; |
1117 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); | 1430 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); |
1118 | 1431 | ||
1119 | static struct pci_driver intel_mid_dma_pci = { | 1432 | static const struct dev_pm_ops intel_mid_dma_pm = { |
1433 | .runtime_suspend = dma_runtime_suspend, | ||
1434 | .runtime_resume = dma_runtime_resume, | ||
1435 | .runtime_idle = dma_runtime_idle, | ||
1436 | }; | ||
1437 | |||
1438 | static struct pci_driver intel_mid_dma_pci_driver = { | ||
1120 | .name = "Intel MID DMA", | 1439 | .name = "Intel MID DMA", |
1121 | .id_table = intel_mid_dma_ids, | 1440 | .id_table = intel_mid_dma_ids, |
1122 | .probe = intel_mid_dma_probe, | 1441 | .probe = intel_mid_dma_probe, |
1123 | .remove = __devexit_p(intel_mid_dma_remove), | 1442 | .remove = __devexit_p(intel_mid_dma_remove), |
1443 | #ifdef CONFIG_PM | ||
1444 | .suspend = dma_suspend, | ||
1445 | .resume = dma_resume, | ||
1446 | .driver = { | ||
1447 | .pm = &intel_mid_dma_pm, | ||
1448 | }, | ||
1449 | #endif | ||
1124 | }; | 1450 | }; |
1125 | 1451 | ||
1126 | static int __init intel_mid_dma_init(void) | 1452 | static int __init intel_mid_dma_init(void) |
1127 | { | 1453 | { |
1128 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", | 1454 | pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", |
1129 | INTEL_MID_DMA_DRIVER_VERSION); | 1455 | INTEL_MID_DMA_DRIVER_VERSION); |
1130 | return pci_register_driver(&intel_mid_dma_pci); | 1456 | return pci_register_driver(&intel_mid_dma_pci_driver); |
1131 | } | 1457 | } |
1132 | fs_initcall(intel_mid_dma_init); | 1458 | fs_initcall(intel_mid_dma_init); |
1133 | 1459 | ||
1134 | static void __exit intel_mid_dma_exit(void) | 1460 | static void __exit intel_mid_dma_exit(void) |
1135 | { | 1461 | { |
1136 | pci_unregister_driver(&intel_mid_dma_pci); | 1462 | pci_unregister_driver(&intel_mid_dma_pci_driver); |
1137 | } | 1463 | } |
1138 | module_exit(intel_mid_dma_exit); | 1464 | module_exit(intel_mid_dma_exit); |
1139 | 1465 | ||
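The remainder of the intel_mid_dma.c changes wire the new callbacks into the driver structure: legacy .suspend/.resume stay on the pci_driver (under CONFIG_PM), the runtime callbacks are exposed through a dev_pm_ops hung off .driver.pm, and the renamed intel_mid_dma_pci_driver is what init/exit now register. The runtime-idle handler refuses to power down while any channel is in use and otherwise asks for an immediate suspend. A hedged sketch of that idle logic (example_channels_busy() stands in for the driver's per-channel in_use scan):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/pm_runtime.h>

    /* Placeholder for the "any channel in_use?" check. */
    static bool example_channels_busy(struct device *dev)
    {
        return false;
    }

    static int example_runtime_idle(struct device *dev)
    {
        if (example_channels_busy(dev))
            return -EAGAIN;                  /* keep the device powered for now */
        return pm_schedule_suspend(dev, 0);  /* request suspend with zero delay */
    }

    static const struct dev_pm_ops example_pm_ops = {
        .runtime_idle = example_runtime_idle,
    };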
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index d81aa658ab09..aea5ee88ce03 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -29,11 +29,12 @@ | |||
29 | #include <linux/dmapool.h> | 29 | #include <linux/dmapool.h> |
30 | #include <linux/pci_ids.h> | 30 | #include <linux/pci_ids.h> |
31 | 31 | ||
32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" | 32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" |
33 | 33 | ||
34 | #define REG_BIT0 0x00000001 | 34 | #define REG_BIT0 0x00000001 |
35 | #define REG_BIT8 0x00000100 | 35 | #define REG_BIT8 0x00000100 |
36 | 36 | #define INT_MASK_WE 0x8 | |
37 | #define CLEAR_DONE 0xFFFFEFFF | ||
37 | #define UNMASK_INTR_REG(chan_num) \ | 38 | #define UNMASK_INTR_REG(chan_num) \ |
38 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | 39 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) |
39 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) | 40 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) |
@@ -41,6 +42,9 @@ | |||
41 | #define ENABLE_CHANNEL(chan_num) \ | 42 | #define ENABLE_CHANNEL(chan_num) \ |
42 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | 43 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) |
43 | 44 | ||
45 | #define DISABLE_CHANNEL(chan_num) \ | ||
46 | (REG_BIT8 << chan_num) | ||
47 | |||
44 | #define DESCS_PER_CHANNEL 16 | 48 | #define DESCS_PER_CHANNEL 16 |
45 | /*DMA Registers*/ | 49 | /*DMA Registers*/ |
46 | /*registers associated with channel programming*/ | 50 | /*registers associated with channel programming*/ |
@@ -50,6 +54,7 @@ | |||
50 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ | 54 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ |
51 | #define SAR 0x00 /* Source Address Register*/ | 55 | #define SAR 0x00 /* Source Address Register*/ |
52 | #define DAR 0x08 /* Destination Address Register*/ | 56 | #define DAR 0x08 /* Destination Address Register*/ |
57 | #define LLP 0x10 /* Linked List Pointer Register*/ | ||
53 | #define CTL_LOW 0x18 /* Control Register*/ | 58 | #define CTL_LOW 0x18 /* Control Register*/ |
54 | #define CTL_HIGH 0x1C /* Control Register*/ | 59 | #define CTL_HIGH 0x1C /* Control Register*/ |
55 | #define CFG_LOW 0x40 /* Configuration Register Low*/ | 60 | #define CFG_LOW 0x40 /* Configuration Register Low*/ |
@@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo { | |||
112 | union intel_mid_dma_ctl_hi { | 117 | union intel_mid_dma_ctl_hi { |
113 | struct { | 118 | struct { |
114 | u32 block_ts:12; /*block transfer size*/ | 119 | u32 block_ts:12; /*block transfer size*/ |
115 | /*configured by DMAC*/ | 120 | u32 done:1; /*Done - updated by DMAC*/ |
116 | u32 reser:20; | 121 | u32 reser:19; /*configured by DMAC*/ |
117 | } ctlx; | 122 | } ctlx; |
118 | u32 ctl_hi; | 123 | u32 ctl_hi; |
119 | 124 | ||
@@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi { | |||
152 | u32 cfg_hi; | 157 | u32 cfg_hi; |
153 | }; | 158 | }; |
154 | 159 | ||
160 | |||
155 | /** | 161 | /** |
156 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel | 162 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel |
157 | * @chan: dma_chan strcture represetation for mid chan | 163 | * @chan: dma_chan strcture represetation for mid chan |
@@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi { | |||
166 | * @slave: dma slave struture | 172 | * @slave: dma slave struture |
167 | * @descs_allocated: total number of decsiptors allocated | 173 | * @descs_allocated: total number of decsiptors allocated |
168 | * @dma: dma device struture pointer | 174 | * @dma: dma device struture pointer |
175 | * @busy: bool representing if ch is busy (active txn) or not | ||
169 | * @in_use: bool representing if ch is in use or not | 176 | * @in_use: bool representing if ch is in use or not |
177 | * @raw_tfr: raw trf interrupt received | ||
178 | * @raw_block: raw block interrupt received | ||
170 | */ | 179 | */ |
171 | struct intel_mid_dma_chan { | 180 | struct intel_mid_dma_chan { |
172 | struct dma_chan chan; | 181 | struct dma_chan chan; |
@@ -178,10 +187,13 @@ struct intel_mid_dma_chan { | |||
178 | struct list_head active_list; | 187 | struct list_head active_list; |
179 | struct list_head queue; | 188 | struct list_head queue; |
180 | struct list_head free_list; | 189 | struct list_head free_list; |
181 | struct intel_mid_dma_slave *slave; | ||
182 | unsigned int descs_allocated; | 190 | unsigned int descs_allocated; |
183 | struct middma_device *dma; | 191 | struct middma_device *dma; |
192 | bool busy; | ||
184 | bool in_use; | 193 | bool in_use; |
194 | u32 raw_tfr; | ||
195 | u32 raw_block; | ||
196 | struct intel_mid_dma_slave *mid_slave; | ||
185 | }; | 197 | }; |
186 | 198 | ||
187 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | 199 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( |
@@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
190 | return container_of(chan, struct intel_mid_dma_chan, chan); | 202 | return container_of(chan, struct intel_mid_dma_chan, chan); |
191 | } | 203 | } |
192 | 204 | ||
205 | enum intel_mid_dma_state { | ||
206 | RUNNING = 0, | ||
207 | SUSPENDED, | ||
208 | }; | ||
193 | /** | 209 | /** |
194 | * struct middma_device - internal representation of a DMA device | 210 | * struct middma_device - internal representation of a DMA device |
195 | * @pdev: PCI device | 211 | * @pdev: PCI device |
@@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
205 | * @max_chan: max number of chs supported (from drv_data) | 221 | * @max_chan: max number of chs supported (from drv_data) |
206 | * @block_size: Block size of DMA transfer supported (from drv_data) | 222 | * @block_size: Block size of DMA transfer supported (from drv_data) |
207 | * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) | 223 | * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) |
224 | * @state: dma PM device state | ||
208 | */ | 225 | */ |
209 | struct middma_device { | 226 | struct middma_device { |
210 | struct pci_dev *pdev; | 227 | struct pci_dev *pdev; |
@@ -220,6 +237,7 @@ struct middma_device { | |||
220 | int max_chan; | 237 | int max_chan; |
221 | int block_size; | 238 | int block_size; |
222 | unsigned int pimr_mask; | 239 | unsigned int pimr_mask; |
240 | enum intel_mid_dma_state state; | ||
223 | }; | 241 | }; |
224 | 242 | ||
225 | static inline struct middma_device *to_middma_device(struct dma_device *common) | 243 | static inline struct middma_device *to_middma_device(struct dma_device *common) |
@@ -238,14 +256,27 @@ struct intel_mid_dma_desc { | |||
238 | u32 cfg_lo; | 256 | u32 cfg_lo; |
239 | u32 ctl_lo; | 257 | u32 ctl_lo; |
240 | u32 ctl_hi; | 258 | u32 ctl_hi; |
259 | struct pci_pool *lli_pool; | ||
260 | struct intel_mid_dma_lli *lli; | ||
261 | dma_addr_t lli_phys; | ||
262 | unsigned int lli_length; | ||
263 | unsigned int current_lli; | ||
241 | dma_addr_t next; | 264 | dma_addr_t next; |
242 | enum dma_data_direction dirn; | 265 | enum dma_data_direction dirn; |
243 | enum dma_status status; | 266 | enum dma_status status; |
244 | enum intel_mid_dma_width width; /*width of DMA txn*/ | 267 | enum dma_slave_buswidth width; /*width of DMA txn*/ |
245 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
246 | 269 | ||
247 | }; | 270 | }; |
248 | 271 | ||
272 | struct intel_mid_dma_lli { | ||
273 | dma_addr_t sar; | ||
274 | dma_addr_t dar; | ||
275 | dma_addr_t llp; | ||
276 | u32 ctl_lo; | ||
277 | u32 ctl_hi; | ||
278 | } __attribute__ ((packed)); | ||
279 | |||
249 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) | 280 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) |
250 | { | 281 | { |
251 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); | 282 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); |
@@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc | |||
257 | { | 288 | { |
258 | return container_of(txd, struct intel_mid_dma_desc, txd); | 289 | return container_of(txd, struct intel_mid_dma_desc, txd); |
259 | } | 290 | } |
291 | |||
292 | static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | ||
293 | (struct dma_slave_config *slave) | ||
294 | { | ||
295 | return container_of(slave, struct intel_mid_dma_slave, dma_slave); | ||
296 | } | ||
297 | |||
298 | |||
299 | int dma_resume(struct pci_dev *pci); | ||
300 | |||
260 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |
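The header now models hardware scatter/gather: LLP (offset 0x10) is the channel's Linked List Pointer register, and the packed struct intel_mid_dma_lli is the in-memory descriptor the engine reads (source, destination, next-descriptor pointer, control words). A minimal sketch, assuming the elements have already been allocated contiguously from a DMA-coherent pool, of how such descriptors are chained so the controller can follow llp from one to the next:

    /* Uses struct intel_mid_dma_lli from the header above; lli_phys is the
     * bus address of lli[0]. The last element terminates the list with 0. */
    static void example_link_lli(struct intel_mid_dma_lli *lli,
                                 dma_addr_t lli_phys, unsigned int count)
    {
        unsigned int i;

        for (i = 0; i < count; i++)
            lli[i].llp = (i + 1 < count) ?
                         lli_phys + (i + 1) * sizeof(*lli) : 0;
    }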
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 8997d3fb9051..0ff7270af25b 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile | |||
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 1 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
2 | ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o | 2 | ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index c9213ead4a26..a4d6cb0c0343 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/prefetch.h> | ||
37 | #include <linux/i7300_idle.h> | 38 | #include <linux/i7300_idle.h> |
38 | #include "dma.h" | 39 | #include "dma.h" |
39 | #include "registers.h" | 40 | #include "registers.h" |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index effd140fc042..5d65f8377971 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/dma-mapping.h> | 35 | #include <linux/dma-mapping.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/prefetch.h> | ||
37 | #include <linux/i7300_idle.h> | 38 | #include <linux/i7300_idle.h> |
38 | #include "dma.h" | 39 | #include "dma.h" |
39 | #include "dma_v2.h" | 40 | #include "dma_v2.h" |
@@ -507,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
507 | struct ioat_ring_ent **ring; | 508 | struct ioat_ring_ent **ring; |
508 | u64 status; | 509 | u64 status; |
509 | int order; | 510 | int order; |
511 | int i = 0; | ||
510 | 512 | ||
511 | /* have we already been set up? */ | 513 | /* have we already been set up? */ |
512 | if (ioat->ring) | 514 | if (ioat->ring) |
@@ -547,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
547 | ioat2_start_null_desc(ioat); | 549 | ioat2_start_null_desc(ioat); |
548 | 550 | ||
549 | /* check that we got off the ground */ | 551 | /* check that we got off the ground */ |
550 | udelay(5); | 552 | do { |
551 | status = ioat_chansts(chan); | 553 | udelay(1); |
554 | status = ioat_chansts(chan); | ||
555 | } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); | ||
556 | |||
552 | if (is_ioat_active(status) || is_ioat_idle(status)) { | 557 | if (is_ioat_active(status) || is_ioat_idle(status)) { |
553 | set_bit(IOAT_RUN, &chan->state); | 558 | set_bit(IOAT_RUN, &chan->state); |
554 | return 1 << ioat->alloc_order; | 559 | return 1 << ioat->alloc_order; |
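Besides the linux/prefetch.h includes (presumably needed because prefetch() is used and no longer reaches these files indirectly), the dma_v2.c hunk replaces a single fixed 5 µs wait with a bounded poll: the channel status is re-read in 1 µs steps, up to roughly twenty tries, until the engine reports active or idle. The same pattern in general form, as a sketch:

    #include <linux/delay.h>
    #include <linux/types.h>

    /* Generic bounded-poll helper (illustrative, not the ioat code itself):
     * re-read a status word until the caller's predicate holds or the retry
     * budget runs out, then return the last value seen. */
    static u64 example_poll_status(u64 (*read_status)(void *ctx), void *ctx,
                                   bool (*done)(u64 status), int max_tries)
    {
        u64 status;
        int i = 0;

        do {
            udelay(1);                 /* give the hardware a moment to start */
            status = read_status(ctx);
        } while (i++ < max_tries && !done(status));

        return status;
    }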
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d0f499098479..d845dc4b7103 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <linux/gfp.h> | 60 | #include <linux/gfp.h> |
61 | #include <linux/dmaengine.h> | 61 | #include <linux/dmaengine.h> |
62 | #include <linux/dma-mapping.h> | 62 | #include <linux/dma-mapping.h> |
63 | #include <linux/prefetch.h> | ||
63 | #include "registers.h" | 64 | #include "registers.h" |
64 | #include "hw.h" | 65 | #include "hw.h" |
65 | #include "dma.h" | 66 | #include "dma.h" |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 161c452923b8..e03f811a83dd 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
619 | 619 | ||
620 | if (unlikely(!len)) | 620 | if (unlikely(!len)) |
621 | return NULL; | 621 | return NULL; |
622 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 622 | BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); |
623 | 623 | ||
624 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 624 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
625 | __func__, len); | 625 | __func__, len); |
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | |||
652 | 652 | ||
653 | if (unlikely(!len)) | 653 | if (unlikely(!len)) |
654 | return NULL; | 654 | return NULL; |
655 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 655 | BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT); |
656 | 656 | ||
657 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 657 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
658 | __func__, len); | 658 | __func__, len); |
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
686 | 686 | ||
687 | if (unlikely(!len)) | 687 | if (unlikely(!len)) |
688 | return NULL; | 688 | return NULL; |
689 | BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT)); | 689 | BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); |
690 | 690 | ||
691 | dev_dbg(iop_chan->device->common.dev, | 691 | dev_dbg(iop_chan->device->common.dev, |
692 | "%s src_cnt: %d len: %u flags: %lx\n", | 692 | "%s src_cnt: %d len: %u flags: %lx\n", |
@@ -1261,7 +1261,7 @@ out: | |||
1261 | return err; | 1261 | return err; |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | #ifdef CONFIG_MD_RAID6_PQ | 1264 | #ifdef CONFIG_RAID6_PQ |
1265 | static int __devinit | 1265 | static int __devinit |
1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | 1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) |
1267 | { | 1267 | { |
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1584 | 1584 | ||
1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && | 1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && |
1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { | 1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { |
1587 | #ifdef CONFIG_MD_RAID6_PQ | 1587 | #ifdef CONFIG_RAID6_PQ |
1588 | ret = iop_adma_pq_zero_sum_self_test(adev); | 1588 | ret = iop_adma_pq_zero_sum_self_test(adev); |
1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); | 1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); |
1590 | #else | 1590 | #else |
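Two independent cleanups run through iop-adma.c (and mv_xor.c further down): the RAID6 config symbol is now CONFIG_RAID6_PQ rather than CONFIG_MD_RAID6_PQ, and the unlikely() wrapped around BUG_ON() conditions is dropped because BUG_ON() already applies the branch hint itself. The generic definition is roughly:

    /* Approximate generic form (include/asm-generic/bug.h); the condition is
     * already marked unlikely(), so BUG_ON(unlikely(x)) nested the hint twice. */
    #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)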
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb26ee9773d6..c1a125e7d1df 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | |||
1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | 1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); |
1147 | 1147 | ||
1148 | /* | ||
1149 | * Problem (observed with channel DMAIC_7): after enabling the channel | ||
1150 | * and initialising buffers, there comes an interrupt with current still | ||
1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only | ||
1152 | * generate an interrupt when it is done, then current should already | ||
1153 | * point to buffer 1. This spurious interrupt also comes on channel | ||
1154 | * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the | ||
1155 | * first interrupt, there comes the second with current correctly | ||
1156 | * pointing to buffer 1 this time. But sometimes this second interrupt | ||
1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | ||
1158 | * the channel seems to prevent the channel from hanging, but it doesn't | ||
1159 | * prevent the spurious interrupt. This might also be unsafe. Think | ||
1160 | * about the IDMAC controller trying to switch to a buffer, when we | ||
1161 | * clear the ready bit, and re-enable it a moment later. | ||
1162 | */ | ||
1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | ||
1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | ||
1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | ||
1166 | |||
1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | ||
1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | ||
1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | ||
1170 | |||
1171 | spin_unlock_irqrestore(&ipu->lock, flags); | 1148 | spin_unlock_irqrestore(&ipu->lock, flags); |
1172 | 1149 | ||
1173 | return 0; | 1150 | return 0; |
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1246 | 1223 | ||
1247 | /* Other interrupts do not interfere with this channel */ | 1224 | /* Other interrupts do not interfere with this channel */ |
1248 | spin_lock(&ichan->lock); | 1225 | spin_lock(&ichan->lock); |
1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | ||
1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && | ||
1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { | ||
1252 | int i = 100; | ||
1253 | |||
1254 | /* This doesn't help. See comment in ipu_disable_channel() */ | ||
1255 | while (--i) { | ||
1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | ||
1258 | break; | ||
1259 | cpu_relax(); | ||
1260 | } | ||
1261 | |||
1262 | if (!i) { | ||
1263 | spin_unlock(&ichan->lock); | ||
1264 | dev_dbg(dev, | ||
1265 | "IRQ on active buffer on channel %x, active " | ||
1266 | "%d, ready %x, %x, current %x!\n", chan_id, | ||
1267 | ichan->active_buffer, ready0, ready1, curbuf); | ||
1268 | return IRQ_NONE; | ||
1269 | } else | ||
1270 | dev_dbg(dev, | ||
1271 | "Buffer deactivated on channel %x, active " | ||
1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, | ||
1273 | ichan->active_buffer, ready0, ready1, curbuf, i); | ||
1274 | } | ||
1275 | |||
1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | 1226 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || |
1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | 1227 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) |
1278 | )) { | 1228 | )) { |
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index dd8ebc75b667..ab8a4eff072a 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c | |||
@@ -94,9 +94,9 @@ static struct ipu_irq_map *src2map(unsigned int src) | |||
94 | return NULL; | 94 | return NULL; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void ipu_irq_unmask(unsigned int irq) | 97 | static void ipu_irq_unmask(struct irq_data *d) |
98 | { | 98 | { |
99 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 99 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
100 | struct ipu_irq_bank *bank; | 100 | struct ipu_irq_bank *bank; |
101 | uint32_t reg; | 101 | uint32_t reg; |
102 | unsigned long lock_flags; | 102 | unsigned long lock_flags; |
@@ -106,7 +106,7 @@ static void ipu_irq_unmask(unsigned int irq) | |||
106 | bank = map->bank; | 106 | bank = map->bank; |
107 | if (!bank) { | 107 | if (!bank) { |
108 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 108 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 109 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
110 | return; | 110 | return; |
111 | } | 111 | } |
112 | 112 | ||
@@ -117,9 +117,9 @@ static void ipu_irq_unmask(unsigned int irq) | |||
117 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 117 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void ipu_irq_mask(unsigned int irq) | 120 | static void ipu_irq_mask(struct irq_data *d) |
121 | { | 121 | { |
122 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 122 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
123 | struct ipu_irq_bank *bank; | 123 | struct ipu_irq_bank *bank; |
124 | uint32_t reg; | 124 | uint32_t reg; |
125 | unsigned long lock_flags; | 125 | unsigned long lock_flags; |
@@ -129,7 +129,7 @@ static void ipu_irq_mask(unsigned int irq) | |||
129 | bank = map->bank; | 129 | bank = map->bank; |
130 | if (!bank) { | 130 | if (!bank) { |
131 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 131 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 132 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
133 | return; | 133 | return; |
134 | } | 134 | } |
135 | 135 | ||
@@ -140,9 +140,9 @@ static void ipu_irq_mask(unsigned int irq) | |||
140 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 140 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ipu_irq_ack(unsigned int irq) | 143 | static void ipu_irq_ack(struct irq_data *d) |
144 | { | 144 | { |
145 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 145 | struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); |
146 | struct ipu_irq_bank *bank; | 146 | struct ipu_irq_bank *bank; |
147 | unsigned long lock_flags; | 147 | unsigned long lock_flags; |
148 | 148 | ||
@@ -151,7 +151,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
151 | bank = map->bank; | 151 | bank = map->bank; |
152 | if (!bank) { | 152 | if (!bank) { |
153 | spin_unlock_irqrestore(&bank_lock, lock_flags); | 153 | spin_unlock_irqrestore(&bank_lock, lock_flags); |
154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq); | 154 | pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); |
155 | return; | 155 | return; |
156 | } | 156 | } |
157 | 157 | ||
@@ -167,7 +167,7 @@ static void ipu_irq_ack(unsigned int irq) | |||
167 | */ | 167 | */ |
168 | bool ipu_irq_status(unsigned int irq) | 168 | bool ipu_irq_status(unsigned int irq) |
169 | { | 169 | { |
170 | struct ipu_irq_map *map = get_irq_chip_data(irq); | 170 | struct ipu_irq_map *map = irq_get_chip_data(irq); |
171 | struct ipu_irq_bank *bank; | 171 | struct ipu_irq_bank *bank; |
172 | unsigned long lock_flags; | 172 | unsigned long lock_flags; |
173 | bool ret; | 173 | bool ret; |
@@ -269,7 +269,7 @@ int ipu_irq_unmap(unsigned int source) | |||
269 | /* Chained IRQ handler for IPU error interrupt */ | 269 | /* Chained IRQ handler for IPU error interrupt */ |
270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | 270 | static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) |
271 | { | 271 | { |
272 | struct ipu *ipu = get_irq_data(irq); | 272 | struct ipu *ipu = irq_get_handler_data(irq); |
273 | u32 status; | 273 | u32 status; |
274 | int i, line; | 274 | int i, line; |
275 | 275 | ||
@@ -310,7 +310,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc) | |||
310 | /* Chained IRQ handler for IPU function interrupt */ | 310 | /* Chained IRQ handler for IPU function interrupt */ |
311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | 311 | static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) |
312 | { | 312 | { |
313 | struct ipu *ipu = get_irq_data(irq); | 313 | struct ipu *ipu = irq_desc_get_handler_data(desc); |
314 | u32 status; | 314 | u32 status; |
315 | int i, line; | 315 | int i, line; |
316 | 316 | ||
@@ -345,10 +345,10 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc) | |||
345 | } | 345 | } |
346 | 346 | ||
347 | static struct irq_chip ipu_irq_chip = { | 347 | static struct irq_chip ipu_irq_chip = { |
348 | .name = "ipu_irq", | 348 | .name = "ipu_irq", |
349 | .ack = ipu_irq_ack, | 349 | .irq_ack = ipu_irq_ack, |
350 | .mask = ipu_irq_mask, | 350 | .irq_mask = ipu_irq_mask, |
351 | .unmask = ipu_irq_unmask, | 351 | .irq_unmask = ipu_irq_unmask, |
352 | }; | 352 | }; |
353 | 353 | ||
354 | /* Install the IRQ handler */ | 354 | /* Install the IRQ handler */ |
@@ -366,26 +366,26 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) | |||
366 | int ret; | 366 | int ret; |
367 | 367 | ||
368 | irq = irq_base + i; | 368 | irq = irq_base + i; |
369 | ret = set_irq_chip(irq, &ipu_irq_chip); | 369 | ret = irq_set_chip(irq, &ipu_irq_chip); |
370 | if (ret < 0) | 370 | if (ret < 0) |
371 | return ret; | 371 | return ret; |
372 | ret = set_irq_chip_data(irq, irq_map + i); | 372 | ret = irq_set_chip_data(irq, irq_map + i); |
373 | if (ret < 0) | 373 | if (ret < 0) |
374 | return ret; | 374 | return ret; |
375 | irq_map[i].ipu = ipu; | 375 | irq_map[i].ipu = ipu; |
376 | irq_map[i].irq = irq; | 376 | irq_map[i].irq = irq; |
377 | irq_map[i].source = -EINVAL; | 377 | irq_map[i].source = -EINVAL; |
378 | set_irq_handler(irq, handle_level_irq); | 378 | irq_set_handler(irq, handle_level_irq); |
379 | #ifdef CONFIG_ARM | 379 | #ifdef CONFIG_ARM |
380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 380 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); |
381 | #endif | 381 | #endif |
382 | } | 382 | } |
383 | 383 | ||
384 | set_irq_data(ipu->irq_fn, ipu); | 384 | irq_set_handler_data(ipu->irq_fn, ipu); |
385 | set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn); | 385 | irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn); |
386 | 386 | ||
387 | set_irq_data(ipu->irq_err, ipu); | 387 | irq_set_handler_data(ipu->irq_err, ipu); |
388 | set_irq_chained_handler(ipu->irq_err, ipu_irq_err); | 388 | irq_set_chained_handler(ipu->irq_err, ipu_irq_err); |
389 | 389 | ||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
@@ -397,17 +397,17 @@ void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev) | |||
397 | 397 | ||
398 | irq_base = pdata->irq_base; | 398 | irq_base = pdata->irq_base; |
399 | 399 | ||
400 | set_irq_chained_handler(ipu->irq_fn, NULL); | 400 | irq_set_chained_handler(ipu->irq_fn, NULL); |
401 | set_irq_data(ipu->irq_fn, NULL); | 401 | irq_set_handler_data(ipu->irq_fn, NULL); |
402 | 402 | ||
403 | set_irq_chained_handler(ipu->irq_err, NULL); | 403 | irq_set_chained_handler(ipu->irq_err, NULL); |
404 | set_irq_data(ipu->irq_err, NULL); | 404 | irq_set_handler_data(ipu->irq_err, NULL); |
405 | 405 | ||
406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { | 406 | for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) { |
407 | #ifdef CONFIG_ARM | 407 | #ifdef CONFIG_ARM |
408 | set_irq_flags(irq, 0); | 408 | set_irq_flags(irq, 0); |
409 | #endif | 409 | #endif |
410 | set_irq_chip(irq, NULL); | 410 | irq_set_chip(irq, NULL); |
411 | set_irq_chip_data(irq, NULL); | 411 | irq_set_chip_data(irq, NULL); |
412 | } | 412 | } |
413 | } | 413 | } |
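The ipu_irq.c changes are a mechanical conversion to the irq_data-based genirq interfaces: the chip callbacks now receive a struct irq_data * and look up their per-interrupt cookie with irq_data_get_irq_chip_data(), the chip's fields are the .irq_* variants, and setup/teardown use irq_set_chip()/irq_set_chip_data()/irq_set_handler_data()/irq_set_chained_handler() in place of the removed set_irq_*() family. A skeleton of a chip written against the new interface, with placeholder hardware accesses:

    #include <linux/irq.h>

    /* Hypothetical per-interrupt bookkeeping installed via irq_set_chip_data(). */
    struct example_irq_map {
        int source;
    };

    static void example_irq_mask(struct irq_data *d)
    {
        struct example_irq_map *map = irq_data_get_irq_chip_data(d);
        /* ... set the hardware mask bit for map->source ... */
        (void)map;
    }

    static void example_irq_unmask(struct irq_data *d)
    {
        struct example_irq_map *map = irq_data_get_irq_chip_data(d);
        /* ... clear the hardware mask bit for map->source ... */
        (void)map;
    }

    static struct irq_chip example_irq_chip = {
        .name       = "example_irq",
        .irq_mask   = example_irq_mask,
        .irq_unmask = example_irq_unmask,
    };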
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 4e9cbf300594..b9bae94f2015 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. | 2 | * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. |
3 | * Copyright (C) Semihalf 2009 | 3 | * Copyright (C) Semihalf 2009 |
4 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | ||
4 | * | 5 | * |
5 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | 6 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description |
6 | * (defines, structures and comments) was taken from MPC5121 DMA driver | 7 | * (defines, structures and comments) was taken from MPC5121 DMA driver |
@@ -70,6 +71,8 @@ | |||
70 | #define MPC_DMA_DMAES_SBE (1 << 1) | 71 | #define MPC_DMA_DMAES_SBE (1 << 1) |
71 | #define MPC_DMA_DMAES_DBE (1 << 0) | 72 | #define MPC_DMA_DMAES_DBE (1 << 0) |
72 | 73 | ||
74 | #define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6) | ||
75 | |||
73 | #define MPC_DMA_TSIZE_1 0x00 | 76 | #define MPC_DMA_TSIZE_1 0x00 |
74 | #define MPC_DMA_TSIZE_2 0x01 | 77 | #define MPC_DMA_TSIZE_2 0x01 |
75 | #define MPC_DMA_TSIZE_4 0x02 | 78 | #define MPC_DMA_TSIZE_4 0x02 |
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs { | |||
104 | /* 0x30 */ | 107 | /* 0x30 */ |
105 | u32 dmahrsh; /* DMA hw request status high(ch63~32) */ | 108 | u32 dmahrsh; /* DMA hw request status high(ch63~32) */ |
106 | u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ | 109 | u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ |
107 | u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ | 110 | union { |
111 | u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ | ||
112 | u32 dmagpor; /* (General purpose register on MPC8308) */ | ||
113 | }; | ||
108 | u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ | 114 | u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ |
109 | /* 0x40 ~ 0xff */ | 115 | /* 0x40 ~ 0xff */ |
110 | u32 reserve0[48]; /* Reserved */ | 116 | u32 reserve0[48]; /* Reserved */ |
@@ -195,7 +201,9 @@ struct mpc_dma { | |||
195 | struct mpc_dma_regs __iomem *regs; | 201 | struct mpc_dma_regs __iomem *regs; |
196 | struct mpc_dma_tcd __iomem *tcd; | 202 | struct mpc_dma_tcd __iomem *tcd; |
197 | int irq; | 203 | int irq; |
204 | int irq2; | ||
198 | uint error_status; | 205 | uint error_status; |
206 | int is_mpc8308; | ||
199 | 207 | ||
200 | /* Lock for error_status field in this structure */ | 208 | /* Lock for error_status field in this structure */ |
201 | spinlock_t error_status_lock; | 209 | spinlock_t error_status_lock; |
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan) | |||
252 | prev = mdesc; | 260 | prev = mdesc; |
253 | } | 261 | } |
254 | 262 | ||
255 | prev->tcd->start = 0; | ||
256 | prev->tcd->int_maj = 1; | 263 | prev->tcd->int_maj = 1; |
257 | 264 | ||
258 | /* Send first descriptor in chain into hardware */ | 265 | /* Send first descriptor in chain into hardware */ |
259 | memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); | 266 | memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); |
267 | |||
268 | if (first != prev) | ||
269 | mdma->tcd[cid].e_sg = 1; | ||
260 | out_8(&mdma->regs->dmassrt, cid); | 270 | out_8(&mdma->regs->dmassrt, cid); |
261 | } | 271 | } |
262 | 272 | ||
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) | |||
274 | 284 | ||
275 | spin_lock(&mchan->lock); | 285 | spin_lock(&mchan->lock); |
276 | 286 | ||
287 | out_8(&mdma->regs->dmacint, ch + off); | ||
288 | out_8(&mdma->regs->dmacerr, ch + off); | ||
289 | |||
277 | /* Check error status */ | 290 | /* Check error status */ |
278 | if (es & (1 << ch)) | 291 | if (es & (1 << ch)) |
279 | list_for_each_entry(mdesc, &mchan->active, node) | 292 | list_for_each_entry(mdesc, &mchan->active, node) |
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data) | |||
302 | spin_unlock(&mdma->error_status_lock); | 315 | spin_unlock(&mdma->error_status_lock); |
303 | 316 | ||
304 | /* Handle interrupt on each channel */ | 317 | /* Handle interrupt on each channel */ |
305 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), | 318 | if (mdma->dma.chancnt > 32) { |
319 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), | ||
306 | in_be32(&mdma->regs->dmaerrh), 32); | 320 | in_be32(&mdma->regs->dmaerrh), 32); |
321 | } | ||
307 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), | 322 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), |
308 | in_be32(&mdma->regs->dmaerrl), 0); | 323 | in_be32(&mdma->regs->dmaerrl), 0); |
309 | 324 | ||
310 | /* Ack interrupt on all channels */ | ||
311 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | ||
312 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | ||
313 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | ||
314 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
315 | |||
316 | /* Schedule tasklet */ | 325 | /* Schedule tasklet */ |
317 | tasklet_schedule(&mdma->tasklet); | 326 | tasklet_schedule(&mdma->tasklet); |
318 | 327 | ||
319 | return IRQ_HANDLED; | 328 | return IRQ_HANDLED; |
320 | } | 329 | } |
321 | 330 | ||
322 | /* DMA Tasklet */ | 331 | /* process completed descriptors */ |
323 | static void mpc_dma_tasklet(unsigned long data) | 332 | static void mpc_dma_process_completed(struct mpc_dma *mdma) |
324 | { | 333 | { |
325 | struct mpc_dma *mdma = (void *)data; | ||
326 | dma_cookie_t last_cookie = 0; | 334 | dma_cookie_t last_cookie = 0; |
327 | struct mpc_dma_chan *mchan; | 335 | struct mpc_dma_chan *mchan; |
328 | struct mpc_dma_desc *mdesc; | 336 | struct mpc_dma_desc *mdesc; |
329 | struct dma_async_tx_descriptor *desc; | 337 | struct dma_async_tx_descriptor *desc; |
330 | unsigned long flags; | 338 | unsigned long flags; |
331 | LIST_HEAD(list); | 339 | LIST_HEAD(list); |
332 | uint es; | ||
333 | int i; | 340 | int i; |
334 | 341 | ||
342 | for (i = 0; i < mdma->dma.chancnt; i++) { | ||
343 | mchan = &mdma->channels[i]; | ||
344 | |||
345 | /* Get all completed descriptors */ | ||
346 | spin_lock_irqsave(&mchan->lock, flags); | ||
347 | if (!list_empty(&mchan->completed)) | ||
348 | list_splice_tail_init(&mchan->completed, &list); | ||
349 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
350 | |||
351 | if (list_empty(&list)) | ||
352 | continue; | ||
353 | |||
354 | /* Execute callbacks and run dependencies */ | ||
355 | list_for_each_entry(mdesc, &list, node) { | ||
356 | desc = &mdesc->desc; | ||
357 | |||
358 | if (desc->callback) | ||
359 | desc->callback(desc->callback_param); | ||
360 | |||
361 | last_cookie = desc->cookie; | ||
362 | dma_run_dependencies(desc); | ||
363 | } | ||
364 | |||
365 | /* Free descriptors */ | ||
366 | spin_lock_irqsave(&mchan->lock, flags); | ||
367 | list_splice_tail_init(&list, &mchan->free); | ||
368 | mchan->completed_cookie = last_cookie; | ||
369 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | /* DMA Tasklet */ | ||
374 | static void mpc_dma_tasklet(unsigned long data) | ||
375 | { | ||
376 | struct mpc_dma *mdma = (void *)data; | ||
377 | unsigned long flags; | ||
378 | uint es; | ||
379 | |||
335 | spin_lock_irqsave(&mdma->error_status_lock, flags); | 380 | spin_lock_irqsave(&mdma->error_status_lock, flags); |
336 | es = mdma->error_status; | 381 | es = mdma->error_status; |
337 | mdma->error_status = 0; | 382 | mdma->error_status = 0; |
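The mpc512x_dma.c interrupt path is reorganized: each channel's interrupt and error bits are now acknowledged inside mpc_dma_irq_process() (via dmacint/dmacerr) instead of blanket-clearing all four status registers afterwards, the high-channel registers are only touched when the controller actually has more than 32 channels, and the completed-descriptor walk moves out of the tasklet into mpc_dma_process_completed() so other paths can call it too. The core of that helper is the usual splice-under-lock, process-outside-lock idiom, sketched here with placeholder types:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Placeholder descriptor; the real driver uses struct mpc_dma_desc. */
    struct example_desc {
        struct list_head node;
        void (*callback)(void *arg);
        void *arg;
    };

    static void example_process_completed(struct list_head *completed,
                                          spinlock_t *lock)
    {
        struct example_desc *d;
        unsigned long flags;
        LIST_HEAD(list);

        /* Grab everything that finished, holding the lock only briefly. */
        spin_lock_irqsave(lock, flags);
        list_splice_tail_init(completed, &list);
        spin_unlock_irqrestore(lock, flags);

        /* Run callbacks without the lock held. */
        list_for_each_entry(d, &list, node)
            if (d->callback)
                d->callback(d->arg);

        /* The real code then splices the list back onto the free list
         * under the lock and records the last completed cookie. */
    }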
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data) | |||
370 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); | 415 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); |
371 | } | 416 | } |
372 | 417 | ||
373 | for (i = 0; i < mdma->dma.chancnt; i++) { | 418 | mpc_dma_process_completed(mdma); |
374 | mchan = &mdma->channels[i]; | ||
375 | |||
376 | /* Get all completed descriptors */ | ||
377 | spin_lock_irqsave(&mchan->lock, flags); | ||
378 | if (!list_empty(&mchan->completed)) | ||
379 | list_splice_tail_init(&mchan->completed, &list); | ||
380 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
381 | |||
382 | if (list_empty(&list)) | ||
383 | continue; | ||
384 | |||
385 | /* Execute callbacks and run dependencies */ | ||
386 | list_for_each_entry(mdesc, &list, node) { | ||
387 | desc = &mdesc->desc; | ||
388 | |||
389 | if (desc->callback) | ||
390 | desc->callback(desc->callback_param); | ||
391 | |||
392 | last_cookie = desc->cookie; | ||
393 | dma_run_dependencies(desc); | ||
394 | } | ||
395 | |||
396 | /* Free descriptors */ | ||
397 | spin_lock_irqsave(&mchan->lock, flags); | ||
398 | list_splice_tail_init(&list, &mchan->free); | ||
399 | mchan->completed_cookie = last_cookie; | ||
400 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
401 | } | ||
402 | } | 419 | } |
403 | 420 | ||
404 | /* Submit descriptor to hardware */ | 421 | /* Submit descriptor to hardware */ |
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor * | |||
563 | mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | 580 | mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, |
564 | size_t len, unsigned long flags) | 581 | size_t len, unsigned long flags) |
565 | { | 582 | { |
583 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); | ||
566 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | 584 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); |
567 | struct mpc_dma_desc *mdesc = NULL; | 585 | struct mpc_dma_desc *mdesc = NULL; |
568 | struct mpc_dma_tcd *tcd; | 586 | struct mpc_dma_tcd *tcd; |
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
577 | } | 595 | } |
578 | spin_unlock_irqrestore(&mchan->lock, iflags); | 596 | spin_unlock_irqrestore(&mchan->lock, iflags); |
579 | 597 | ||
580 | if (!mdesc) | 598 | if (!mdesc) { |
599 | /* try to free completed descriptors */ | ||
600 | mpc_dma_process_completed(mdma); | ||
581 | return NULL; | 601 | return NULL; |
602 | } | ||
582 | 603 | ||
583 | mdesc->error = 0; | 604 | mdesc->error = 0; |
584 | tcd = mdesc->tcd; | 605 | tcd = mdesc->tcd; |
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
591 | tcd->dsize = MPC_DMA_TSIZE_32; | 612 | tcd->dsize = MPC_DMA_TSIZE_32; |
592 | tcd->soff = 32; | 613 | tcd->soff = 32; |
593 | tcd->doff = 32; | 614 | tcd->doff = 32; |
594 | } else if (IS_ALIGNED(src | dst | len, 16)) { | 615 | } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) { |
616 | /* MPC8308 doesn't support 16 byte transfers */ | ||
595 | tcd->ssize = MPC_DMA_TSIZE_16; | 617 | tcd->ssize = MPC_DMA_TSIZE_16; |
596 | tcd->dsize = MPC_DMA_TSIZE_16; | 618 | tcd->dsize = MPC_DMA_TSIZE_16; |
597 | tcd->soff = 16; | 619 | tcd->soff = 16; |
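mpc_dma_prep_memcpy() picks the widest transfer size that the combined alignment of source, destination and length allows; the new !mdma->is_mpc8308 guard skips the 16-byte case on MPC8308, whose engine does not support it, and the prep path now tries to reclaim completed descriptors before giving up when the free list is empty. The selection logic, condensed into a sketch that returns the chosen width in bytes (the smaller fallback steps are assumed from the pattern, since they are outside this hunk):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Condensed form of the size selection above; the real code programs
     * MPC_DMA_TSIZE_* plus soff/doff instead of returning a width. */
    static unsigned int example_pick_xfer_width(dma_addr_t src, dma_addr_t dst,
                                                size_t len, bool is_mpc8308)
    {
        if (IS_ALIGNED(src | dst | len, 32))
            return 32;
        if (!is_mpc8308 && IS_ALIGNED(src | dst | len, 16))
            return 16;        /* MPC8308 lacks 16-byte transfers */
        if (IS_ALIGNED(src | dst | len, 4))
            return 4;
        if (IS_ALIGNED(src | dst | len, 2))
            return 2;
        return 1;
    }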
@@ -627,8 +649,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | |||
627 | return &mdesc->desc; | 649 | return &mdesc->desc; |
628 | } | 650 | } |
629 | 651 | ||
630 | static int __devinit mpc_dma_probe(struct platform_device *op, | 652 | static int __devinit mpc_dma_probe(struct platform_device *op) |
631 | const struct of_device_id *match) | ||
632 | { | 653 | { |
633 | struct device_node *dn = op->dev.of_node; | 654 | struct device_node *dn = op->dev.of_node; |
634 | struct device *dev = &op->dev; | 655 | struct device *dev = &op->dev; |
@@ -651,6 +672,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
651 | return -EINVAL; | 672 | return -EINVAL; |
652 | } | 673 | } |
653 | 674 | ||
675 | if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { | ||
676 | mdma->is_mpc8308 = 1; | ||
677 | mdma->irq2 = irq_of_parse_and_map(dn, 1); | ||
678 | if (mdma->irq2 == NO_IRQ) { | ||
679 | dev_err(dev, "Error mapping IRQ!\n"); | ||
680 | return -EINVAL; | ||
681 | } | ||
682 | } | ||
683 | |||
654 | retval = of_address_to_resource(dn, 0, &res); | 684 | retval = of_address_to_resource(dn, 0, &res); |
655 | if (retval) { | 685 | if (retval) { |
656 | dev_err(dev, "Error parsing memory region!\n"); | 686 | dev_err(dev, "Error parsing memory region!\n"); |
@@ -681,11 +711,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
681 | return -EINVAL; | 711 | return -EINVAL; |
682 | } | 712 | } |
683 | 713 | ||
714 | if (mdma->is_mpc8308) { | ||
715 | retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0, | ||
716 | DRV_NAME, mdma); | ||
717 | if (retval) { | ||
718 | dev_err(dev, "Error requesting IRQ2!\n"); | ||
719 | return -EINVAL; | ||
720 | } | ||
721 | } | ||
722 | |||
684 | spin_lock_init(&mdma->error_status_lock); | 723 | spin_lock_init(&mdma->error_status_lock); |
685 | 724 | ||
686 | dma = &mdma->dma; | 725 | dma = &mdma->dma; |
687 | dma->dev = dev; | 726 | dma->dev = dev; |
688 | dma->chancnt = MPC_DMA_CHANNELS; | 727 | if (!mdma->is_mpc8308) |
728 | dma->chancnt = MPC_DMA_CHANNELS; | ||
729 | else | ||
730 | dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */ | ||
689 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; | 731 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; |
690 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; | 732 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; |
691 | dma->device_issue_pending = mpc_dma_issue_pending; | 733 | dma->device_issue_pending = mpc_dma_issue_pending; |
@@ -721,26 +763,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op, | |||
721 | * - Round-robin group arbitration, | 763 | * - Round-robin group arbitration, |
722 | * - Round-robin channel arbitration. | 764 | * - Round-robin channel arbitration. |
723 | */ | 765 | */ |
724 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | 766 | if (!mdma->is_mpc8308) { |
725 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | 767 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | |
726 | 768 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | |
727 | /* Disable hardware DMA requests */ | 769 | |
728 | out_be32(&mdma->regs->dmaerqh, 0); | 770 | /* Disable hardware DMA requests */ |
729 | out_be32(&mdma->regs->dmaerql, 0); | 771 | out_be32(&mdma->regs->dmaerqh, 0); |
730 | 772 | out_be32(&mdma->regs->dmaerql, 0); | |
731 | /* Disable error interrupts */ | 773 | |
732 | out_be32(&mdma->regs->dmaeeih, 0); | 774 | /* Disable error interrupts */ |
733 | out_be32(&mdma->regs->dmaeeil, 0); | 775 | out_be32(&mdma->regs->dmaeeih, 0); |
776 | out_be32(&mdma->regs->dmaeeil, 0); | ||
777 | |||
778 | /* Clear interrupts status */ | ||
779 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | ||
780 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | ||
781 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | ||
782 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
783 | |||
784 | /* Route interrupts to IPIC */ | ||
785 | out_be32(&mdma->regs->dmaihsa, 0); | ||
786 | out_be32(&mdma->regs->dmailsa, 0); | ||
787 | } else { | ||
788 | /* MPC8308 has 16 channels and lacks some registers */ | ||
789 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA); | ||
734 | 790 | ||
735 | /* Clear interrupts status */ | 791 | /* enable snooping */ |
736 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | 792 | out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE); |
737 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | 793 | /* Disable error interrupts */ |
738 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | 794 | out_be32(&mdma->regs->dmaeeil, 0); |
739 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
740 | 795 | ||
741 | /* Route interrupts to IPIC */ | 796 | /* Clear interrupts status */ |
742 | out_be32(&mdma->regs->dmaihsa, 0); | 797 | out_be32(&mdma->regs->dmaintl, 0xFFFF); |
743 | out_be32(&mdma->regs->dmailsa, 0); | 798 | out_be32(&mdma->regs->dmaerrl, 0xFFFF); |
799 | } | ||
744 | 800 | ||
745 | /* Register DMA engine */ | 801 | /* Register DMA engine */ |
746 | dev_set_drvdata(dev, mdma); | 802 | dev_set_drvdata(dev, mdma); |
@@ -770,7 +826,7 @@ static struct of_device_id mpc_dma_match[] = { | |||
770 | {}, | 826 | {}, |
771 | }; | 827 | }; |
772 | 828 | ||
773 | static struct of_platform_driver mpc_dma_driver = { | 829 | static struct platform_driver mpc_dma_driver = { |
774 | .probe = mpc_dma_probe, | 830 | .probe = mpc_dma_probe, |
775 | .remove = __devexit_p(mpc_dma_remove), | 831 | .remove = __devexit_p(mpc_dma_remove), |
776 | .driver = { | 832 | .driver = { |
@@ -782,13 +838,13 @@ static struct of_platform_driver mpc_dma_driver = { | |||
782 | 838 | ||
783 | static int __init mpc_dma_init(void) | 839 | static int __init mpc_dma_init(void) |
784 | { | 840 | { |
785 | return of_register_platform_driver(&mpc_dma_driver); | 841 | return platform_driver_register(&mpc_dma_driver); |
786 | } | 842 | } |
787 | module_init(mpc_dma_init); | 843 | module_init(mpc_dma_init); |
788 | 844 | ||
789 | static void __exit mpc_dma_exit(void) | 845 | static void __exit mpc_dma_exit(void) |
790 | { | 846 | { |
791 | of_unregister_platform_driver(&mpc_dma_driver); | 847 | platform_driver_unregister(&mpc_dma_driver); |
792 | } | 848 | } |
793 | module_exit(mpc_dma_exit); | 849 | module_exit(mpc_dma_exit); |
794 | 850 | ||
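Finally, the driver moves off the removed of_platform_driver infrastructure: probe loses its const struct of_device_id *match argument, the driver object becomes a plain struct platform_driver, and registration goes through platform_driver_register()/platform_driver_unregister() (the mpc_dma_match table stays and is typically attached through the driver's OF match table). A minimal skeleton in the converted style, with placeholder names:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        /* map registers, request IRQs, register the dmaengine device ... */
        return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
        return 0;
    }

    static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
            .name = "example-dma",
        },
    };

    static int __init example_init(void)
    {
        return platform_driver_register(&example_driver);
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
        platform_driver_unregister(&example_driver);
    }
    module_exit(example_exit);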
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 411d5bf50fc4..954e334e01bb 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -449,7 +449,7 @@ mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) | |||
449 | static void mv_xor_tasklet(unsigned long data) | 449 | static void mv_xor_tasklet(unsigned long data) |
450 | { | 450 | { |
451 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; | 451 | struct mv_xor_chan *chan = (struct mv_xor_chan *) data; |
452 | __mv_xor_slot_cleanup(chan); | 452 | mv_xor_slot_cleanup(chan); |
453 | } | 453 | } |
454 | 454 | ||
455 | static struct mv_xor_desc_slot * | 455 | static struct mv_xor_desc_slot * |
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
671 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 671 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
672 | return NULL; | 672 | return NULL; |
673 | 673 | ||
674 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 674 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
675 | 675 | ||
676 | spin_lock_bh(&mv_chan->lock); | 676 | spin_lock_bh(&mv_chan->lock); |
677 | slot_cnt = mv_chan_memcpy_slot_count(len); | 677 | slot_cnt = mv_chan_memcpy_slot_count(len); |
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
710 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 710 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
711 | return NULL; | 711 | return NULL; |
712 | 712 | ||
713 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 713 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
714 | 714 | ||
715 | spin_lock_bh(&mv_chan->lock); | 715 | spin_lock_bh(&mv_chan->lock); |
716 | slot_cnt = mv_chan_memset_slot_count(len); | 716 | slot_cnt = mv_chan_memset_slot_count(len); |
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
744 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) | 744 | if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) |
745 | return NULL; | 745 | return NULL; |
746 | 746 | ||
747 | BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT)); | 747 | BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); |
748 | 748 | ||
749 | dev_dbg(mv_chan->device->common.dev, | 749 | dev_dbg(mv_chan->device->common.dev, |
750 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", | 750 | "%s src_cnt: %d len: dest %x %u flags: %ld\n", |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c new file mode 100644 index 000000000000..88aad4f54002 --- /dev/null +++ b/drivers/dma/mxs-dma.c | |||
@@ -0,0 +1,724 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * Refer to drivers/dma/imx-sdma.c | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/clk.h> | ||
16 | #include <linux/wait.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/semaphore.h> | ||
19 | #include <linux/device.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/delay.h> | ||
25 | |||
26 | #include <asm/irq.h> | ||
27 | #include <mach/mxs.h> | ||
28 | #include <mach/dma.h> | ||
29 | #include <mach/common.h> | ||
30 | |||
31 | /* | ||
32 | * NOTE: The term "PIO" throughout the mxs-dma implementation means | ||
33 | * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, | ||
34 | * dma can program the controller registers of peripheral devices. | ||
35 | */ | ||
36 | |||
37 | #define MXS_DMA_APBH 0 | ||
38 | #define MXS_DMA_APBX 1 | ||
39 | #define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) | ||
40 | |||
41 | #define APBH_VERSION_LATEST 3 | ||
42 | #define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) | ||
43 | |||
44 | #define HW_APBHX_CTRL0 0x000 | ||
45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) | ||
46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) | ||
47 | #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 | ||
48 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 | ||
49 | #define HW_APBHX_CTRL1 0x010 | ||
50 | #define HW_APBHX_CTRL2 0x020 | ||
51 | #define HW_APBHX_CHANNEL_CTRL 0x030 | ||
52 | #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 | ||
53 | #define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) | ||
54 | #define HW_APBX_VERSION 0x800 | ||
55 | #define BP_APBHX_VERSION_MAJOR 24 | ||
56 | #define HW_APBHX_CHn_NXTCMDAR(n) \ | ||
57 | (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) | ||
58 | #define HW_APBHX_CHn_SEMA(n) \ | ||
59 | (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70) | ||
60 | |||
61 | /* | ||
62 | * ccw bits definitions | ||
63 | * | ||
64 | * COMMAND: 0..1 (2) | ||
65 | * CHAIN: 2 (1) | ||
66 | * IRQ: 3 (1) | ||
67 | * NAND_LOCK: 4 (1) - not implemented | ||
68 | * NAND_WAIT4READY: 5 (1) - not implemented | ||
69 | * DEC_SEM: 6 (1) | ||
70 | * WAIT4END: 7 (1) | ||
71 | * HALT_ON_TERMINATE: 8 (1) | ||
72 | * TERMINATE_FLUSH: 9 (1) | ||
73 | * RESERVED: 10..11 (2) | ||
74 | * PIO_NUM: 12..15 (4) | ||
75 | */ | ||
76 | #define BP_CCW_COMMAND 0 | ||
77 | #define BM_CCW_COMMAND (3 << 0) | ||
78 | #define CCW_CHAIN (1 << 2) | ||
79 | #define CCW_IRQ (1 << 3) | ||
80 | #define CCW_DEC_SEM (1 << 6) | ||
81 | #define CCW_WAIT4END (1 << 7) | ||
82 | #define CCW_HALT_ON_TERM (1 << 8) | ||
83 | #define CCW_TERM_FLUSH (1 << 9) | ||
84 | #define BP_CCW_PIO_NUM 12 | ||
85 | #define BM_CCW_PIO_NUM (0xf << 12) | ||
86 | |||
87 | #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) | ||
88 | |||
89 | #define MXS_DMA_CMD_NO_XFER 0 | ||
90 | #define MXS_DMA_CMD_WRITE 1 | ||
91 | #define MXS_DMA_CMD_READ 2 | ||
92 | #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ | ||
93 | |||
94 | struct mxs_dma_ccw { | ||
95 | u32 next; | ||
96 | u16 bits; | ||
97 | u16 xfer_bytes; | ||
98 | #define MAX_XFER_BYTES 0xff00 | ||
99 | u32 bufaddr; | ||
100 | #define MXS_PIO_WORDS 16 | ||
101 | u32 pio_words[MXS_PIO_WORDS]; | ||
102 | }; | ||
103 | |||
104 | #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) | ||
105 | |||
106 | struct mxs_dma_chan { | ||
107 | struct mxs_dma_engine *mxs_dma; | ||
108 | struct dma_chan chan; | ||
109 | struct dma_async_tx_descriptor desc; | ||
110 | struct tasklet_struct tasklet; | ||
111 | int chan_irq; | ||
112 | struct mxs_dma_ccw *ccw; | ||
113 | dma_addr_t ccw_phys; | ||
114 | dma_cookie_t last_completed; | ||
115 | enum dma_status status; | ||
116 | unsigned int flags; | ||
117 | #define MXS_DMA_SG_LOOP (1 << 0) | ||
118 | }; | ||
119 | |||
120 | #define MXS_DMA_CHANNELS 16 | ||
121 | #define MXS_DMA_CHANNELS_MASK 0xffff | ||
122 | |||
123 | struct mxs_dma_engine { | ||
124 | int dev_id; | ||
125 | unsigned int version; | ||
126 | void __iomem *base; | ||
127 | struct clk *clk; | ||
128 | struct dma_device dma_device; | ||
129 | struct device_dma_parameters dma_parms; | ||
130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; | ||
131 | }; | ||
132 | |||
133 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | ||
134 | { | ||
135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
136 | int chan_id = mxs_chan->chan.chan_id; | ||
137 | |||
138 | if (dma_is_apbh() && apbh_is_old()) | ||
139 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), | ||
140 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
141 | else | ||
142 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), | ||
143 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); | ||
144 | } | ||
145 | |||
146 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | ||
147 | { | ||
148 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
149 | int chan_id = mxs_chan->chan.chan_id; | ||
150 | |||
151 | /* set cmd_addr up */ | ||
152 | writel(mxs_chan->ccw_phys, | ||
153 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); | ||
154 | |||
155 | /* enable apbh channel clock */ | ||
156 | if (dma_is_apbh()) { | ||
157 | if (apbh_is_old()) | ||
158 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
159 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
160 | else | ||
161 | writel(1 << chan_id, | ||
162 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
163 | } | ||
164 | |||
165 | /* write 1 to SEMA to kick off the channel */ | ||
166 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); | ||
167 | } | ||
168 | |||
169 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | ||
170 | { | ||
171 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
172 | int chan_id = mxs_chan->chan.chan_id; | ||
173 | |||
174 | /* disable apbh channel clock */ | ||
175 | if (dma_is_apbh()) { | ||
176 | if (apbh_is_old()) | ||
177 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
178 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
179 | else | ||
180 | writel(1 << chan_id, | ||
181 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
182 | } | ||
183 | |||
184 | mxs_chan->status = DMA_SUCCESS; | ||
185 | } | ||
186 | |||
187 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | ||
188 | { | ||
189 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
190 | int chan_id = mxs_chan->chan.chan_id; | ||
191 | |||
192 | /* freeze the channel */ | ||
193 | if (dma_is_apbh() && apbh_is_old()) | ||
194 | writel(1 << chan_id, | ||
195 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
196 | else | ||
197 | writel(1 << chan_id, | ||
198 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); | ||
199 | |||
200 | mxs_chan->status = DMA_PAUSED; | ||
201 | } | ||
202 | |||
203 | static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) | ||
204 | { | ||
205 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
206 | int chan_id = mxs_chan->chan.chan_id; | ||
207 | |||
208 | /* unfreeze the channel */ | ||
209 | if (dma_is_apbh() && apbh_is_old()) | ||
210 | writel(1 << chan_id, | ||
211 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); | ||
212 | else | ||
213 | writel(1 << chan_id, | ||
214 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); | ||
215 | |||
216 | mxs_chan->status = DMA_IN_PROGRESS; | ||
217 | } | ||
218 | |||
219 | static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) | ||
220 | { | ||
221 | dma_cookie_t cookie = mxs_chan->chan.cookie; | ||
222 | |||
223 | if (++cookie < 0) | ||
224 | cookie = 1; | ||
225 | |||
226 | mxs_chan->chan.cookie = cookie; | ||
227 | mxs_chan->desc.cookie = cookie; | ||
228 | |||
229 | return cookie; | ||
230 | } | ||
231 | |||
232 | static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) | ||
233 | { | ||
234 | return container_of(chan, struct mxs_dma_chan, chan); | ||
235 | } | ||
236 | |||
237 | static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
238 | { | ||
239 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); | ||
240 | |||
241 | mxs_dma_enable_chan(mxs_chan); | ||
242 | |||
243 | return mxs_dma_assign_cookie(mxs_chan); | ||
244 | } | ||
245 | |||
246 | static void mxs_dma_tasklet(unsigned long data) | ||
247 | { | ||
248 | struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; | ||
249 | |||
250 | if (mxs_chan->desc.callback) | ||
251 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); | ||
252 | } | ||
253 | |||
254 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | ||
255 | { | ||
256 | struct mxs_dma_engine *mxs_dma = dev_id; | ||
257 | u32 stat1, stat2; | ||
258 | |||
259 | /* completion status */ | ||
260 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); | ||
261 | stat1 &= MXS_DMA_CHANNELS_MASK; | ||
262 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); | ||
263 | |||
264 | /* error status */ | ||
265 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); | ||
266 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); | ||
267 | |||
268 | /* | ||
269 | * When both the completion and error-of-termination bits are set at | ||
270 | * the same time, we do not take it as an error. IOW, it only becomes | ||
271 | * an error we need to handle here if it is either (1) a bus error | ||
272 | * or (2) a termination error with no completion. | ||
273 | */ | ||
274 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | ||
275 | (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ | ||
276 | |||
277 | /* combine error and completion status for checking */ | ||
278 | stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; | ||
279 | while (stat1) { | ||
280 | int channel = fls(stat1) - 1; | ||
281 | struct mxs_dma_chan *mxs_chan = | ||
282 | &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; | ||
283 | |||
284 | if (channel >= MXS_DMA_CHANNELS) { | ||
285 | dev_dbg(mxs_dma->dma_device.dev, | ||
286 | "%s: error in channel %d\n", __func__, | ||
287 | channel - MXS_DMA_CHANNELS); | ||
288 | mxs_chan->status = DMA_ERROR; | ||
289 | mxs_dma_reset_chan(mxs_chan); | ||
290 | } else { | ||
291 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) | ||
292 | mxs_chan->status = DMA_IN_PROGRESS; | ||
293 | else | ||
294 | mxs_chan->status = DMA_SUCCESS; | ||
295 | } | ||
296 | |||
297 | stat1 &= ~(1 << channel); | ||
298 | |||
299 | if (mxs_chan->status == DMA_SUCCESS) | ||
300 | mxs_chan->last_completed = mxs_chan->desc.cookie; | ||
301 | |||
302 | /* schedule tasklet on this channel */ | ||
303 | tasklet_schedule(&mxs_chan->tasklet); | ||
304 | } | ||
305 | |||
306 | return IRQ_HANDLED; | ||
307 | } | ||
308 | |||
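/*
 * Editor's sketch (not part of the driver): a standalone walk-through of
 * the status folding done in mxs_dma_int_handler() above. Error bits are
 * shifted into the upper half-word, so fls() yields an index >=
 * MXS_DMA_CHANNELS exactly for the channels that need error handling.
 * The helper name is an assumption.
 */
static void example_decode_status(u32 completion, u32 error)
{
	u32 combined = (error << MXS_DMA_CHANNELS) | completion;

	while (combined) {
		int idx = fls(combined) - 1;
		int chan = idx % MXS_DMA_CHANNELS;

		if (idx >= MXS_DMA_CHANNELS)
			pr_info("channel %d: error\n", chan);
		else
			pr_info("channel %d: completed\n", chan);

		combined &= ~(1U << idx);
	}
}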
309 | static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | ||
310 | { | ||
311 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
312 | struct mxs_dma_data *data = chan->private; | ||
313 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
314 | int ret; | ||
315 | |||
316 | if (!data) | ||
317 | return -EINVAL; | ||
318 | |||
319 | mxs_chan->chan_irq = data->chan_irq; | ||
320 | |||
321 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
322 | &mxs_chan->ccw_phys, GFP_KERNEL); | ||
323 | if (!mxs_chan->ccw) { | ||
324 | ret = -ENOMEM; | ||
325 | goto err_alloc; | ||
326 | } | ||
327 | |||
328 | memset(mxs_chan->ccw, 0, PAGE_SIZE); | ||
329 | |||
330 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | ||
331 | 0, "mxs-dma", mxs_dma); | ||
332 | if (ret) | ||
333 | goto err_irq; | ||
334 | |||
335 | ret = clk_enable(mxs_dma->clk); | ||
336 | if (ret) | ||
337 | goto err_clk; | ||
338 | |||
339 | mxs_dma_reset_chan(mxs_chan); | ||
340 | |||
341 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); | ||
342 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; | ||
343 | |||
344 | /* the descriptor is ready */ | ||
345 | async_tx_ack(&mxs_chan->desc); | ||
346 | |||
347 | return 0; | ||
348 | |||
349 | err_clk: | ||
350 | free_irq(mxs_chan->chan_irq, mxs_dma); | ||
351 | err_irq: | ||
352 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
353 | mxs_chan->ccw, mxs_chan->ccw_phys); | ||
354 | err_alloc: | ||
355 | return ret; | ||
356 | } | ||
357 | |||
358 | static void mxs_dma_free_chan_resources(struct dma_chan *chan) | ||
359 | { | ||
360 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
361 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
362 | |||
363 | mxs_dma_disable_chan(mxs_chan); | ||
364 | |||
365 | free_irq(mxs_chan->chan_irq, mxs_dma); | ||
366 | |||
367 | dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, | ||
368 | mxs_chan->ccw, mxs_chan->ccw_phys); | ||
369 | |||
370 | clk_disable(mxs_dma->clk); | ||
371 | } | ||
372 | |||
373 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | ||
374 | struct dma_chan *chan, struct scatterlist *sgl, | ||
375 | unsigned int sg_len, enum dma_data_direction direction, | ||
376 | unsigned long append) | ||
377 | { | ||
378 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
379 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
380 | struct mxs_dma_ccw *ccw; | ||
381 | struct scatterlist *sg; | ||
382 | int i, j; | ||
383 | u32 *pio; | ||
384 | static int idx; | ||
385 | |||
386 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | ||
387 | return NULL; | ||
388 | |||
389 | if (sg_len + (append ? idx : 0) > NUM_CCW) { | ||
390 | dev_err(mxs_dma->dma_device.dev, | ||
391 | "maximum number of sg exceeded: %d > %d\n", | ||
392 | sg_len, NUM_CCW); | ||
393 | goto err_out; | ||
394 | } | ||
395 | |||
396 | mxs_chan->status = DMA_IN_PROGRESS; | ||
397 | mxs_chan->flags = 0; | ||
398 | |||
399 | /* | ||
400 | * If the sg is prepared with append flag set, the sg | ||
401 | * will be appended to the last prepared sg. | ||
402 | */ | ||
403 | if (append) { | ||
404 | BUG_ON(idx < 1); | ||
405 | ccw = &mxs_chan->ccw[idx - 1]; | ||
406 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; | ||
407 | ccw->bits |= CCW_CHAIN; | ||
408 | ccw->bits &= ~CCW_IRQ; | ||
409 | ccw->bits &= ~CCW_DEC_SEM; | ||
410 | ccw->bits &= ~CCW_WAIT4END; | ||
411 | } else { | ||
412 | idx = 0; | ||
413 | } | ||
414 | |||
415 | if (direction == DMA_NONE) { | ||
416 | ccw = &mxs_chan->ccw[idx++]; | ||
417 | pio = (u32 *) sgl; | ||
418 | |||
419 | for (j = 0; j < sg_len;) | ||
420 | ccw->pio_words[j++] = *pio++; | ||
421 | |||
422 | ccw->bits = 0; | ||
423 | ccw->bits |= CCW_IRQ; | ||
424 | ccw->bits |= CCW_DEC_SEM; | ||
425 | ccw->bits |= CCW_WAIT4END; | ||
426 | ccw->bits |= CCW_HALT_ON_TERM; | ||
427 | ccw->bits |= CCW_TERM_FLUSH; | ||
428 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); | ||
429 | ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); | ||
430 | } else { | ||
431 | for_each_sg(sgl, sg, sg_len, i) { | ||
432 | if (sg->length > MAX_XFER_BYTES) { | ||
433 | dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", | ||
434 | sg->length, MAX_XFER_BYTES); | ||
435 | goto err_out; | ||
436 | } | ||
437 | |||
438 | ccw = &mxs_chan->ccw[idx++]; | ||
439 | |||
440 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; | ||
441 | ccw->bufaddr = sg->dma_address; | ||
442 | ccw->xfer_bytes = sg->length; | ||
443 | |||
444 | ccw->bits = 0; | ||
445 | ccw->bits |= CCW_CHAIN; | ||
446 | ccw->bits |= CCW_HALT_ON_TERM; | ||
447 | ccw->bits |= CCW_TERM_FLUSH; | ||
448 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | ||
449 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, | ||
450 | COMMAND); | ||
451 | |||
452 | if (i + 1 == sg_len) { | ||
453 | ccw->bits &= ~CCW_CHAIN; | ||
454 | ccw->bits |= CCW_IRQ; | ||
455 | ccw->bits |= CCW_DEC_SEM; | ||
456 | ccw->bits |= CCW_WAIT4END; | ||
457 | } | ||
458 | } | ||
459 | } | ||
460 | |||
461 | return &mxs_chan->desc; | ||
462 | |||
463 | err_out: | ||
464 | mxs_chan->status = DMA_ERROR; | ||
465 | return NULL; | ||
466 | } | ||
467 | |||
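/*
 * Editor's sketch (not part of the driver): how a client is expected to
 * use the "append" argument handled above - the first call starts a new
 * chain, the second call links onto it, and submitting the returned
 * descriptor kicks off the whole chain. Function and parameter names
 * are illustrative assumptions.
 */
static void example_chained_prep(struct dma_chan *chan,
				 struct scatterlist *cmd_sgl, unsigned int cmd_len,
				 struct scatterlist *data_sgl, unsigned int data_len)
{
	struct dma_async_tx_descriptor *desc;

	/* append == 0: start a fresh chain */
	desc = chan->device->device_prep_slave_sg(chan, cmd_sgl, cmd_len,
						  DMA_TO_DEVICE, 0);
	if (!desc)
		return;

	/* append == 1: link onto the previously prepared entry */
	desc = chan->device->device_prep_slave_sg(chan, data_sgl, data_len,
						  DMA_FROM_DEVICE, 1);
	if (!desc)
		return;

	desc->tx_submit(desc);	/* mxs_dma_tx_submit() starts the channel */
}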
468 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | ||
469 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
470 | size_t period_len, enum dma_data_direction direction) | ||
471 | { | ||
472 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
473 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
474 | int num_periods = buf_len / period_len; | ||
475 | int i = 0, buf = 0; | ||
476 | |||
477 | if (mxs_chan->status == DMA_IN_PROGRESS) | ||
478 | return NULL; | ||
479 | |||
480 | mxs_chan->status = DMA_IN_PROGRESS; | ||
481 | mxs_chan->flags |= MXS_DMA_SG_LOOP; | ||
482 | |||
483 | if (num_periods > NUM_CCW) { | ||
484 | dev_err(mxs_dma->dma_device.dev, | ||
485 | "maximum number of sg exceeded: %d > %d\n", | ||
486 | num_periods, NUM_CCW); | ||
487 | goto err_out; | ||
488 | } | ||
489 | |||
490 | if (period_len > MAX_XFER_BYTES) { | ||
491 | dev_err(mxs_dma->dma_device.dev, | ||
492 | "maximum period size exceeded: %d > %d\n", | ||
493 | period_len, MAX_XFER_BYTES); | ||
494 | goto err_out; | ||
495 | } | ||
496 | |||
497 | while (buf < buf_len) { | ||
498 | struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; | ||
499 | |||
500 | if (i + 1 == num_periods) | ||
501 | ccw->next = mxs_chan->ccw_phys; | ||
502 | else | ||
503 | ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); | ||
504 | |||
505 | ccw->bufaddr = dma_addr; | ||
506 | ccw->xfer_bytes = period_len; | ||
507 | |||
508 | ccw->bits = 0; | ||
509 | ccw->bits |= CCW_CHAIN; | ||
510 | ccw->bits |= CCW_IRQ; | ||
511 | ccw->bits |= CCW_HALT_ON_TERM; | ||
512 | ccw->bits |= CCW_TERM_FLUSH; | ||
513 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | ||
514 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | ||
515 | |||
516 | dma_addr += period_len; | ||
517 | buf += period_len; | ||
518 | |||
519 | i++; | ||
520 | } | ||
521 | |||
522 | return &mxs_chan->desc; | ||
523 | |||
524 | err_out: | ||
525 | mxs_chan->status = DMA_ERROR; | ||
526 | return NULL; | ||
527 | } | ||
528 | |||
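/*
 * Editor's sketch (not part of the driver): hypothetical client-side use
 * of the cyclic ring built above (e.g. by an audio driver). Because every
 * CCW in the ring has CCW_IRQ set and the last one points back to the
 * first, the callback fires once per period until the channel is
 * terminated. Names are illustrative assumptions.
 */
static void example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				 size_t buf_len, size_t period_len,
				 dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len, DMA_FROM_DEVICE);
	if (!desc)
		return;

	desc->callback = cb;
	desc->callback_param = cb_param;
	desc->tx_submit(desc);	/* kicks off the first period */
}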
529 | static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
530 | unsigned long arg) | ||
531 | { | ||
532 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
533 | int ret = 0; | ||
534 | |||
535 | switch (cmd) { | ||
536 | case DMA_TERMINATE_ALL: | ||
537 | mxs_dma_disable_chan(mxs_chan); | ||
538 | break; | ||
539 | case DMA_PAUSE: | ||
540 | mxs_dma_pause_chan(mxs_chan); | ||
541 | break; | ||
542 | case DMA_RESUME: | ||
543 | mxs_dma_resume_chan(mxs_chan); | ||
544 | break; | ||
545 | default: | ||
546 | ret = -ENOSYS; | ||
547 | } | ||
548 | |||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | ||
553 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
554 | { | ||
555 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | ||
556 | dma_cookie_t last_used; | ||
557 | |||
558 | last_used = chan->cookie; | ||
559 | dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); | ||
560 | |||
561 | return mxs_chan->status; | ||
562 | } | ||
563 | |||
564 | static void mxs_dma_issue_pending(struct dma_chan *chan) | ||
565 | { | ||
566 | /* | ||
567 | * Nothing to do. We only have a single descriptor. | ||
568 | */ | ||
569 | } | ||
570 | |||
571 | static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | ||
572 | { | ||
573 | int ret; | ||
574 | |||
575 | ret = clk_enable(mxs_dma->clk); | ||
576 | if (ret) | ||
577 | goto err_out; | ||
578 | |||
579 | ret = mxs_reset_block(mxs_dma->base); | ||
580 | if (ret) | ||
581 | goto err_out; | ||
582 | |||
583 | /* only major version matters */ | ||
584 | mxs_dma->version = readl(mxs_dma->base + | ||
585 | ((mxs_dma->dev_id == MXS_DMA_APBX) ? | ||
586 | HW_APBX_VERSION : HW_APBH_VERSION)) >> | ||
587 | BP_APBHX_VERSION_MAJOR; | ||
588 | |||
589 | /* enable apbh burst */ | ||
590 | if (dma_is_apbh()) { | ||
591 | writel(BM_APBH_CTRL0_APB_BURST_EN, | ||
592 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
593 | writel(BM_APBH_CTRL0_APB_BURST8_EN, | ||
594 | mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); | ||
595 | } | ||
596 | |||
597 | /* enable irq for all the channels */ | ||
598 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, | ||
599 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); | ||
600 | |||
601 | clk_disable(mxs_dma->clk); | ||
602 | |||
603 | return 0; | ||
604 | |||
605 | err_out: | ||
606 | return ret; | ||
607 | } | ||
608 | |||
609 | static int __init mxs_dma_probe(struct platform_device *pdev) | ||
610 | { | ||
611 | const struct platform_device_id *id_entry = | ||
612 | platform_get_device_id(pdev); | ||
613 | struct mxs_dma_engine *mxs_dma; | ||
614 | struct resource *iores; | ||
615 | int ret, i; | ||
616 | |||
617 | mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); | ||
618 | if (!mxs_dma) | ||
619 | return -ENOMEM; | ||
620 | |||
621 | mxs_dma->dev_id = id_entry->driver_data; | ||
622 | |||
623 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
624 | |||
625 | if (!request_mem_region(iores->start, resource_size(iores), | ||
626 | pdev->name)) { | ||
627 | ret = -EBUSY; | ||
628 | goto err_request_region; | ||
629 | } | ||
630 | |||
631 | mxs_dma->base = ioremap(iores->start, resource_size(iores)); | ||
632 | if (!mxs_dma->base) { | ||
633 | ret = -ENOMEM; | ||
634 | goto err_ioremap; | ||
635 | } | ||
636 | |||
637 | mxs_dma->clk = clk_get(&pdev->dev, NULL); | ||
638 | if (IS_ERR(mxs_dma->clk)) { | ||
639 | ret = PTR_ERR(mxs_dma->clk); | ||
640 | goto err_clk; | ||
641 | } | ||
642 | |||
643 | dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); | ||
644 | dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); | ||
645 | |||
646 | INIT_LIST_HEAD(&mxs_dma->dma_device.channels); | ||
647 | |||
648 | /* Initialize channel parameters */ | ||
649 | for (i = 0; i < MXS_DMA_CHANNELS; i++) { | ||
650 | struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; | ||
651 | |||
652 | mxs_chan->mxs_dma = mxs_dma; | ||
653 | mxs_chan->chan.device = &mxs_dma->dma_device; | ||
654 | |||
655 | tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, | ||
656 | (unsigned long) mxs_chan); | ||
657 | |||
658 | |||
659 | /* Add the channel to mxs_chan list */ | ||
660 | list_add_tail(&mxs_chan->chan.device_node, | ||
661 | &mxs_dma->dma_device.channels); | ||
662 | } | ||
663 | |||
664 | ret = mxs_dma_init(mxs_dma); | ||
665 | if (ret) | ||
666 | goto err_init; | ||
667 | |||
668 | mxs_dma->dma_device.dev = &pdev->dev; | ||
669 | |||
670 | /* mxs_dma gets 65280 (0xff00) bytes maximum sg size */ | ||
671 | mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; | ||
672 | dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); | ||
673 | |||
674 | mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; | ||
675 | mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; | ||
676 | mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; | ||
677 | mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; | ||
678 | mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; | ||
679 | mxs_dma->dma_device.device_control = mxs_dma_control; | ||
680 | mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; | ||
681 | |||
682 | ret = dma_async_device_register(&mxs_dma->dma_device); | ||
683 | if (ret) { | ||
684 | dev_err(mxs_dma->dma_device.dev, "unable to register\n"); | ||
685 | goto err_init; | ||
686 | } | ||
687 | |||
688 | dev_info(mxs_dma->dma_device.dev, "initialized\n"); | ||
689 | |||
690 | return 0; | ||
691 | |||
692 | err_init: | ||
693 | clk_put(mxs_dma->clk); | ||
694 | err_clk: | ||
695 | iounmap(mxs_dma->base); | ||
696 | err_ioremap: | ||
697 | release_mem_region(iores->start, resource_size(iores)); | ||
698 | err_request_region: | ||
699 | kfree(mxs_dma); | ||
700 | return ret; | ||
701 | } | ||
702 | |||
703 | static struct platform_device_id mxs_dma_type[] = { | ||
704 | { | ||
705 | .name = "mxs-dma-apbh", | ||
706 | .driver_data = MXS_DMA_APBH, | ||
707 | }, { | ||
708 | .name = "mxs-dma-apbx", | ||
709 | .driver_data = MXS_DMA_APBX, | ||
710 | } | ||
711 | }; | ||
712 | |||
713 | static struct platform_driver mxs_dma_driver = { | ||
714 | .driver = { | ||
715 | .name = "mxs-dma", | ||
716 | }, | ||
717 | .id_table = mxs_dma_type, | ||
718 | }; | ||
719 | |||
720 | static int __init mxs_dma_module_init(void) | ||
721 | { | ||
722 | return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); | ||
723 | } | ||
724 | subsys_initcall(mxs_dma_module_init); | ||
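/*
 * Editor's sketch (not part of the patch): how a peripheral driver of the
 * time was expected to bind to this engine. A struct mxs_dma_data carrying
 * the channel IRQ is handed over through chan->private (consumed by
 * mxs_dma_alloc_chan_resources() above), and peripheral registers can be
 * programmed by preparing a DMA_NONE "transfer" whose sgl pointer really
 * is an array of PIO words, exactly as mxs_dma_prep_slave_sg() interprets
 * it. The function names below are illustrative assumptions.
 */
static bool example_filter(struct dma_chan *chan, void *param)
{
	/* param would be a struct mxs_dma_data *, typically supplied via
	 * dma_request_channel(mask, example_filter, &pdata) */
	chan->private = param;
	return true;
}

static void example_program_pio(struct dma_chan *chan, u32 *pio,
				unsigned int count)
{
	struct dma_async_tx_descriptor *desc;

	/* "sgl" carries the PIO words themselves, "sg_len" their count */
	desc = chan->device->device_prep_slave_sg(chan,
			(struct scatterlist *)pio, count, DMA_NONE, 0);
	if (desc)
		desc->tx_submit(desc);
}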
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 3533948b88ba..ff5b38f9d45b 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -76,12 +77,12 @@ struct pch_dma_regs { | |||
76 | u32 dma_ctl0; | 77 | u32 dma_ctl0; |
77 | u32 dma_ctl1; | 78 | u32 dma_ctl1; |
78 | u32 dma_ctl2; | 79 | u32 dma_ctl2; |
79 | u32 reserved1; | 80 | u32 dma_ctl3; |
80 | u32 dma_sts0; | 81 | u32 dma_sts0; |
81 | u32 dma_sts1; | 82 | u32 dma_sts1; |
82 | u32 reserved2; | 83 | u32 dma_sts2; |
83 | u32 reserved3; | 84 | u32 reserved3; |
84 | struct pch_dma_desc_regs desc[0]; | 85 | struct pch_dma_desc_regs desc[MAX_CHAN_NR]; |
85 | }; | 86 | }; |
86 | 87 | ||
87 | struct pch_dma_desc { | 88 | struct pch_dma_desc { |
@@ -123,12 +124,13 @@ struct pch_dma { | |||
123 | struct pci_pool *pool; | 124 | struct pci_pool *pool; |
124 | struct pch_dma_regs regs; | 125 | struct pch_dma_regs regs; |
125 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; | 126 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; |
126 | struct pch_dma_chan channels[0]; | 127 | struct pch_dma_chan channels[MAX_CHAN_NR]; |
127 | }; | 128 | }; |
128 | 129 | ||
129 | #define PCH_DMA_CTL0 0x00 | 130 | #define PCH_DMA_CTL0 0x00 |
130 | #define PCH_DMA_CTL1 0x04 | 131 | #define PCH_DMA_CTL1 0x04 |
131 | #define PCH_DMA_CTL2 0x08 | 132 | #define PCH_DMA_CTL2 0x08 |
133 | #define PCH_DMA_CTL3 0x0C | ||
132 | #define PCH_DMA_STS0 0x10 | 134 | #define PCH_DMA_STS0 0x10 |
133 | #define PCH_DMA_STS1 0x14 | 135 | #define PCH_DMA_STS1 0x14 |
134 | 136 | ||
@@ -137,7 +139,8 @@ struct pch_dma { | |||
137 | #define dma_writel(pd, name, val) \ | 139 | #define dma_writel(pd, name, val) \ |
138 | writel((val), (pd)->membase + PCH_DMA_##name) | 140 | writel((val), (pd)->membase + PCH_DMA_##name) |
139 | 141 | ||
140 | static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) | 142 | static inline |
143 | struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd) | ||
141 | { | 144 | { |
142 | return container_of(txd, struct pch_dma_desc, txd); | 145 | return container_of(txd, struct pch_dma_desc, txd); |
143 | } | 146 | } |
@@ -162,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan) | |||
162 | return chan->dev->device.parent; | 165 | return chan->dev->device.parent; |
163 | } | 166 | } |
164 | 167 | ||
165 | static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) | 168 | static inline |
169 | struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) | ||
166 | { | 170 | { |
167 | return list_first_entry(&pd_chan->active_list, | 171 | return list_first_entry(&pd_chan->active_list, |
168 | struct pch_dma_desc, desc_node); | 172 | struct pch_dma_desc, desc_node); |
169 | } | 173 | } |
170 | 174 | ||
171 | static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) | 175 | static inline |
176 | struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan) | ||
172 | { | 177 | { |
173 | return list_first_entry(&pd_chan->queue, | 178 | return list_first_entry(&pd_chan->queue, |
174 | struct pch_dma_desc, desc_node); | 179 | struct pch_dma_desc, desc_node); |
@@ -198,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
198 | struct pch_dma *pd = to_pd(chan->device); | 203 | struct pch_dma *pd = to_pd(chan->device); |
199 | u32 val; | 204 | u32 val; |
200 | 205 | ||
201 | val = dma_readl(pd, CTL0); | 206 | if (chan->chan_id < 8) { |
207 | val = dma_readl(pd, CTL0); | ||
202 | 208 | ||
203 | if (pd_chan->dir == DMA_TO_DEVICE) | 209 | if (pd_chan->dir == DMA_TO_DEVICE) |
204 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 210 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
205 | DMA_CTL0_DIR_SHIFT_BITS); | 211 | DMA_CTL0_DIR_SHIFT_BITS); |
206 | else | 212 | else |
207 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 213 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
208 | DMA_CTL0_DIR_SHIFT_BITS)); | 214 | DMA_CTL0_DIR_SHIFT_BITS)); |
215 | |||
216 | dma_writel(pd, CTL0, val); | ||
217 | } else { | ||
218 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | ||
219 | val = dma_readl(pd, CTL3); | ||
209 | 220 | ||
210 | dma_writel(pd, CTL0, val); | 221 | if (pd_chan->dir == DMA_TO_DEVICE) |
222 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | ||
223 | DMA_CTL0_DIR_SHIFT_BITS); | ||
224 | else | ||
225 | val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + | ||
226 | DMA_CTL0_DIR_SHIFT_BITS)); | ||
227 | |||
228 | dma_writel(pd, CTL3, val); | ||
229 | } | ||
211 | 230 | ||
212 | dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", | 231 | dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n", |
213 | chan->chan_id, val); | 232 | chan->chan_id, val); |
@@ -218,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) | |||
218 | struct pch_dma *pd = to_pd(chan->device); | 237 | struct pch_dma *pd = to_pd(chan->device); |
219 | u32 val; | 238 | u32 val; |
220 | 239 | ||
221 | val = dma_readl(pd, CTL0); | 240 | if (chan->chan_id < 8) { |
241 | val = dma_readl(pd, CTL0); | ||
222 | 242 | ||
223 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | 243 | val &= ~(DMA_CTL0_MODE_MASK_BITS << |
224 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | 244 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); |
225 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); | 245 | val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); |
226 | 246 | ||
227 | dma_writel(pd, CTL0, val); | 247 | dma_writel(pd, CTL0, val); |
248 | } else { | ||
249 | int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ | ||
250 | |||
251 | val = dma_readl(pd, CTL3); | ||
252 | |||
253 | val &= ~(DMA_CTL0_MODE_MASK_BITS << | ||
254 | (DMA_CTL0_BITS_PER_CH * ch)); | ||
255 | val |= mode << (DMA_CTL0_BITS_PER_CH * ch); | ||
256 | |||
257 | dma_writel(pd, CTL3, val); | ||
258 | |||
259 | } | ||
228 | 260 | ||
229 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", | 261 | dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", |
230 | chan->chan_id, val); | 262 | chan->chan_id, val); |
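/*
 * Editor's sketch (not part of the patch): the register selection that the
 * two hunks above implement, pulled into one helper for clarity - channels
 * 0-7 are configured through CTL0, while channels 8-11 reuse the same
 * per-channel bit layout in the new CTL3 register. The helper name is an
 * assumption; the returned offset would be used as
 * readl/writel(pd->membase + offset).
 */
static unsigned int example_ctl_offset(unsigned int chan_id,
				       unsigned int *bit_base)
{
	if (chan_id < 8) {
		*bit_base = DMA_CTL0_BITS_PER_CH * chan_id;
		return PCH_DMA_CTL0;
	}

	/* ch8 -> 0, ch9 -> 1, ... ch11 -> 3 */
	*bit_base = DMA_CTL0_BITS_PER_CH * (chan_id - 8);
	return PCH_DMA_CTL3;
}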
@@ -250,20 +282,12 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan) | |||
250 | 282 | ||
251 | static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | 283 | static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) |
252 | { | 284 | { |
253 | struct pch_dma *pd = to_pd(pd_chan->chan.device); | ||
254 | u32 val; | ||
255 | |||
256 | if (!pdc_is_idle(pd_chan)) { | 285 | if (!pdc_is_idle(pd_chan)) { |
257 | dev_err(chan2dev(&pd_chan->chan), | 286 | dev_err(chan2dev(&pd_chan->chan), |
258 | "BUG: Attempt to start non-idle channel\n"); | 287 | "BUG: Attempt to start non-idle channel\n"); |
259 | return; | 288 | return; |
260 | } | 289 | } |
261 | 290 | ||
262 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
263 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
264 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
265 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
266 | |||
267 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", | 291 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", |
268 | pd_chan->chan.chan_id, desc->regs.dev_addr); | 292 | pd_chan->chan.chan_id, desc->regs.dev_addr); |
269 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", | 293 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", |
@@ -273,14 +297,16 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) | |||
273 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", | 297 | dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", |
274 | pd_chan->chan.chan_id, desc->regs.next); | 298 | pd_chan->chan.chan_id, desc->regs.next); |
275 | 299 | ||
276 | if (list_empty(&desc->tx_list)) | 300 | if (list_empty(&desc->tx_list)) { |
301 | channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); | ||
302 | channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); | ||
303 | channel_writel(pd_chan, SIZE, desc->regs.size); | ||
304 | channel_writel(pd_chan, NEXT, desc->regs.next); | ||
277 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); | 305 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); |
278 | else | 306 | } else { |
307 | channel_writel(pd_chan, NEXT, desc->txd.phys); | ||
279 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); | 308 | pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); |
280 | 309 | } | |
281 | val = dma_readl(pd, CTL2); | ||
282 | val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); | ||
283 | dma_writel(pd, CTL2, val); | ||
284 | } | 310 | } |
285 | 311 | ||
286 | static void pdc_chain_complete(struct pch_dma_chan *pd_chan, | 312 | static void pdc_chain_complete(struct pch_dma_chan *pd_chan, |
@@ -364,7 +390,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
364 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); | 390 | struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); |
365 | dma_cookie_t cookie; | 391 | dma_cookie_t cookie; |
366 | 392 | ||
367 | spin_lock_bh(&pd_chan->lock); | 393 | spin_lock(&pd_chan->lock); |
368 | cookie = pdc_assign_cookie(pd_chan, desc); | 394 | cookie = pdc_assign_cookie(pd_chan, desc); |
369 | 395 | ||
370 | if (list_empty(&pd_chan->active_list)) { | 396 | if (list_empty(&pd_chan->active_list)) { |
@@ -374,7 +400,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) | |||
374 | list_add_tail(&desc->desc_node, &pd_chan->queue); | 400 | list_add_tail(&desc->desc_node, &pd_chan->queue); |
375 | } | 401 | } |
376 | 402 | ||
377 | spin_unlock_bh(&pd_chan->lock); | 403 | spin_unlock(&pd_chan->lock); |
378 | return 0; | 404 | return 0; |
379 | } | 405 | } |
380 | 406 | ||
@@ -384,7 +410,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) | |||
384 | struct pch_dma *pd = to_pd(chan->device); | 410 | struct pch_dma *pd = to_pd(chan->device); |
385 | dma_addr_t addr; | 411 | dma_addr_t addr; |
386 | 412 | ||
387 | desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); | 413 | desc = pci_pool_alloc(pd->pool, flags, &addr); |
388 | if (desc) { | 414 | if (desc) { |
389 | memset(desc, 0, sizeof(struct pch_dma_desc)); | 415 | memset(desc, 0, sizeof(struct pch_dma_desc)); |
390 | INIT_LIST_HEAD(&desc->tx_list); | 416 | INIT_LIST_HEAD(&desc->tx_list); |
@@ -401,9 +427,9 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
401 | { | 427 | { |
402 | struct pch_dma_desc *desc, *_d; | 428 | struct pch_dma_desc *desc, *_d; |
403 | struct pch_dma_desc *ret = NULL; | 429 | struct pch_dma_desc *ret = NULL; |
404 | int i; | 430 | int i = 0; |
405 | 431 | ||
406 | spin_lock_bh(&pd_chan->lock); | 432 | spin_lock(&pd_chan->lock); |
407 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { | 433 | list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { |
408 | i++; | 434 | i++; |
409 | if (async_tx_test_ack(&desc->txd)) { | 435 | if (async_tx_test_ack(&desc->txd)) { |
@@ -413,15 +439,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
413 | } | 439 | } |
414 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); | 440 | dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); |
415 | } | 441 | } |
416 | spin_unlock_bh(&pd_chan->lock); | 442 | spin_unlock(&pd_chan->lock); |
417 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); | 443 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); |
418 | 444 | ||
419 | if (!ret) { | 445 | if (!ret) { |
420 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); | 446 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); |
421 | if (ret) { | 447 | if (ret) { |
422 | spin_lock_bh(&pd_chan->lock); | 448 | spin_lock(&pd_chan->lock); |
423 | pd_chan->descs_allocated++; | 449 | pd_chan->descs_allocated++; |
424 | spin_unlock_bh(&pd_chan->lock); | 450 | spin_unlock(&pd_chan->lock); |
425 | } else { | 451 | } else { |
426 | dev_err(chan2dev(&pd_chan->chan), | 452 | dev_err(chan2dev(&pd_chan->chan), |
427 | "failed to alloc desc\n"); | 453 | "failed to alloc desc\n"); |
@@ -435,10 +461,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan, | |||
435 | struct pch_dma_desc *desc) | 461 | struct pch_dma_desc *desc) |
436 | { | 462 | { |
437 | if (desc) { | 463 | if (desc) { |
438 | spin_lock_bh(&pd_chan->lock); | 464 | spin_lock(&pd_chan->lock); |
439 | list_splice_init(&desc->tx_list, &pd_chan->free_list); | 465 | list_splice_init(&desc->tx_list, &pd_chan->free_list); |
440 | list_add(&desc->desc_node, &pd_chan->free_list); | 466 | list_add(&desc->desc_node, &pd_chan->free_list); |
441 | spin_unlock_bh(&pd_chan->lock); | 467 | spin_unlock(&pd_chan->lock); |
442 | } | 468 | } |
443 | } | 469 | } |
444 | 470 | ||
@@ -476,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) | |||
476 | spin_unlock_bh(&pd_chan->lock); | 502 | spin_unlock_bh(&pd_chan->lock); |
477 | 503 | ||
478 | pdc_enable_irq(chan, 1); | 504 | pdc_enable_irq(chan, 1); |
479 | pdc_set_dir(chan); | ||
480 | 505 | ||
481 | return pd_chan->descs_allocated; | 506 | return pd_chan->descs_allocated; |
482 | } | 507 | } |
@@ -528,9 +553,9 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
528 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 553 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
529 | 554 | ||
530 | if (pdc_is_idle(pd_chan)) { | 555 | if (pdc_is_idle(pd_chan)) { |
531 | spin_lock_bh(&pd_chan->lock); | 556 | spin_lock(&pd_chan->lock); |
532 | pdc_advance_work(pd_chan); | 557 | pdc_advance_work(pd_chan); |
533 | spin_unlock_bh(&pd_chan->lock); | 558 | spin_unlock(&pd_chan->lock); |
534 | } | 559 | } |
535 | } | 560 | } |
536 | 561 | ||
@@ -559,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
559 | else | 584 | else |
560 | return NULL; | 585 | return NULL; |
561 | 586 | ||
587 | pd_chan->dir = direction; | ||
588 | pdc_set_dir(chan); | ||
589 | |||
562 | for_each_sg(sgl, sg, sg_len, i) { | 590 | for_each_sg(sgl, sg, sg_len, i) { |
563 | desc = pdc_desc_get(pd_chan); | 591 | desc = pdc_desc_get(pd_chan); |
564 | 592 | ||
@@ -590,7 +618,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
590 | goto err_desc_get; | 618 | goto err_desc_get; |
591 | } | 619 | } |
592 | 620 | ||
593 | |||
594 | if (!first) { | 621 | if (!first) { |
595 | first = desc; | 622 | first = desc; |
596 | } else { | 623 | } else { |
@@ -639,13 +666,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
639 | 666 | ||
640 | spin_unlock_bh(&pd_chan->lock); | 667 | spin_unlock_bh(&pd_chan->lock); |
641 | 668 | ||
642 | |||
643 | return 0; | 669 | return 0; |
644 | } | 670 | } |
645 | 671 | ||
646 | static void pdc_tasklet(unsigned long data) | 672 | static void pdc_tasklet(unsigned long data) |
647 | { | 673 | { |
648 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; | 674 | struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; |
675 | unsigned long flags; | ||
649 | 676 | ||
650 | if (!pdc_is_idle(pd_chan)) { | 677 | if (!pdc_is_idle(pd_chan)) { |
651 | dev_err(chan2dev(&pd_chan->chan), | 678 | dev_err(chan2dev(&pd_chan->chan), |
@@ -653,12 +680,12 @@ static void pdc_tasklet(unsigned long data) | |||
653 | return; | 680 | return; |
654 | } | 681 | } |
655 | 682 | ||
656 | spin_lock_bh(&pd_chan->lock); | 683 | spin_lock_irqsave(&pd_chan->lock, flags); |
657 | if (test_and_clear_bit(0, &pd_chan->err_status)) | 684 | if (test_and_clear_bit(0, &pd_chan->err_status)) |
658 | pdc_handle_error(pd_chan); | 685 | pdc_handle_error(pd_chan); |
659 | else | 686 | else |
660 | pdc_advance_work(pd_chan); | 687 | pdc_advance_work(pd_chan); |
661 | spin_unlock_bh(&pd_chan->lock); | 688 | spin_unlock_irqrestore(&pd_chan->lock, flags); |
662 | } | 689 | } |
663 | 690 | ||
664 | static irqreturn_t pd_irq(int irq, void *devid) | 691 | static irqreturn_t pd_irq(int irq, void *devid) |
@@ -692,6 +719,7 @@ static irqreturn_t pd_irq(int irq, void *devid) | |||
692 | return ret; | 719 | return ret; |
693 | } | 720 | } |
694 | 721 | ||
722 | #ifdef CONFIG_PM | ||
695 | static void pch_dma_save_regs(struct pch_dma *pd) | 723 | static void pch_dma_save_regs(struct pch_dma *pd) |
696 | { | 724 | { |
697 | struct pch_dma_chan *pd_chan; | 725 | struct pch_dma_chan *pd_chan; |
@@ -701,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd) | |||
701 | pd->regs.dma_ctl0 = dma_readl(pd, CTL0); | 729 | pd->regs.dma_ctl0 = dma_readl(pd, CTL0); |
702 | pd->regs.dma_ctl1 = dma_readl(pd, CTL1); | 730 | pd->regs.dma_ctl1 = dma_readl(pd, CTL1); |
703 | pd->regs.dma_ctl2 = dma_readl(pd, CTL2); | 731 | pd->regs.dma_ctl2 = dma_readl(pd, CTL2); |
732 | pd->regs.dma_ctl3 = dma_readl(pd, CTL3); | ||
704 | 733 | ||
705 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { | 734 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { |
706 | pd_chan = to_pd_chan(chan); | 735 | pd_chan = to_pd_chan(chan); |
@@ -723,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd) | |||
723 | dma_writel(pd, CTL0, pd->regs.dma_ctl0); | 752 | dma_writel(pd, CTL0, pd->regs.dma_ctl0); |
724 | dma_writel(pd, CTL1, pd->regs.dma_ctl1); | 753 | dma_writel(pd, CTL1, pd->regs.dma_ctl1); |
725 | dma_writel(pd, CTL2, pd->regs.dma_ctl2); | 754 | dma_writel(pd, CTL2, pd->regs.dma_ctl2); |
755 | dma_writel(pd, CTL3, pd->regs.dma_ctl3); | ||
726 | 756 | ||
727 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { | 757 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) { |
728 | pd_chan = to_pd_chan(chan); | 758 | pd_chan = to_pd_chan(chan); |
@@ -769,6 +799,7 @@ static int pch_dma_resume(struct pci_dev *pdev) | |||
769 | 799 | ||
770 | return 0; | 800 | return 0; |
771 | } | 801 | } |
802 | #endif | ||
772 | 803 | ||
773 | static int __devinit pch_dma_probe(struct pci_dev *pdev, | 804 | static int __devinit pch_dma_probe(struct pci_dev *pdev, |
774 | const struct pci_device_id *id) | 805 | const struct pci_device_id *id) |
@@ -847,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev, | |||
847 | 878 | ||
848 | pd_chan->membase = ®s->desc[i]; | 879 | pd_chan->membase = ®s->desc[i]; |
849 | 880 | ||
850 | pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
851 | |||
852 | spin_lock_init(&pd_chan->lock); | 881 | spin_lock_init(&pd_chan->lock); |
853 | 882 | ||
854 | INIT_LIST_HEAD(&pd_chan->active_list); | 883 | INIT_LIST_HEAD(&pd_chan->active_list); |
@@ -920,12 +949,30 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
920 | } | 949 | } |
921 | 950 | ||
922 | /* PCI Device ID of DMA device */ | 951 | /* PCI Device ID of DMA device */ |
923 | #define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 | 952 | #define PCI_VENDOR_ID_ROHM 0x10DB |
924 | #define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 | 953 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 |
925 | 954 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 | |
926 | static const struct pci_device_id pch_dma_id_table[] = { | 955 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 |
927 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, | 956 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B |
928 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, | 957 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 |
958 | #define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 | ||
959 | #define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B | ||
960 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | ||
961 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | ||
962 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | ||
963 | |||
964 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | ||
965 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | ||
966 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, | ||
967 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ | ||
968 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ | ||
969 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ | ||
970 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ | ||
971 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */ | ||
972 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | ||
973 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | ||
974 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | ||
975 | { 0, }, | ||
929 | }; | 976 | }; |
930 | 977 | ||
931 | static struct pci_driver pch_dma_driver = { | 978 | static struct pci_driver pch_dma_driver = { |
@@ -952,6 +999,7 @@ static void __exit pch_dma_exit(void) | |||
952 | module_init(pch_dma_init); | 999 | module_init(pch_dma_init); |
953 | module_exit(pch_dma_exit); | 1000 | module_exit(pch_dma_exit); |
954 | 1001 | ||
955 | MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); | 1002 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " |
1003 | "DMA controller driver"); | ||
956 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1004 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
957 | MODULE_LICENSE("GPL v2"); | 1005 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7c50f6dfd3f4..6abe1ec1f2ce 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) | |||
657 | } | 657 | } |
658 | 658 | ||
659 | static int __devinit | 659 | static int __devinit |
660 | pl330_probe(struct amba_device *adev, struct amba_id *id) | 660 | pl330_probe(struct amba_device *adev, const struct amba_id *id) |
661 | { | 661 | { |
662 | struct dma_pl330_platdata *pdat; | 662 | struct dma_pl330_platdata *pdat; |
663 | struct dma_pl330_dmac *pdmac; | 663 | struct dma_pl330_dmac *pdmac; |
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0d58a4a4487f..fc457a7e8832 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( | |||
2313 | if (unlikely(!len)) | 2313 | if (unlikely(!len)) |
2314 | return NULL; | 2314 | return NULL; |
2315 | 2315 | ||
2316 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | 2316 | BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); |
2317 | 2317 | ||
2318 | spin_lock_bh(&ppc440spe_chan->lock); | 2318 | spin_lock_bh(&ppc440spe_chan->lock); |
2319 | 2319 | ||
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( | |||
2354 | if (unlikely(!len)) | 2354 | if (unlikely(!len)) |
2355 | return NULL; | 2355 | return NULL; |
2356 | 2356 | ||
2357 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | 2357 | BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT); |
2358 | 2358 | ||
2359 | spin_lock_bh(&ppc440spe_chan->lock); | 2359 | spin_lock_bh(&ppc440spe_chan->lock); |
2360 | 2360 | ||
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( | |||
2397 | dma_dest, dma_src, src_cnt)); | 2397 | dma_dest, dma_src, src_cnt)); |
2398 | if (unlikely(!len)) | 2398 | if (unlikely(!len)) |
2399 | return NULL; | 2399 | return NULL; |
2400 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | 2400 | BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); |
2401 | 2401 | ||
2402 | dev_dbg(ppc440spe_chan->device->common.dev, | 2402 | dev_dbg(ppc440spe_chan->device->common.dev, |
2403 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", | 2403 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", |
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq( | |||
2887 | ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, | 2887 | ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, |
2888 | dst, src, src_cnt)); | 2888 | dst, src, src_cnt)); |
2889 | BUG_ON(!len); | 2889 | BUG_ON(!len); |
2890 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | 2890 | BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT); |
2891 | BUG_ON(!src_cnt); | 2891 | BUG_ON(!src_cnt); |
2892 | 2892 | ||
2893 | if (src_cnt == 1 && dst[1] == src[0]) { | 2893 | if (src_cnt == 1 && dst[1] == src[0]) { |
@@ -4393,8 +4393,7 @@ static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev, | |||
4393 | /** | 4393 | /** |
4394 | * ppc440spe_adma_probe - probe the asynch device | 4394 | * ppc440spe_adma_probe - probe the asynch device |
4395 | */ | 4395 | */ |
4396 | static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, | 4396 | static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev) |
4397 | const struct of_device_id *match) | ||
4398 | { | 4397 | { |
4399 | struct device_node *np = ofdev->dev.of_node; | 4398 | struct device_node *np = ofdev->dev.of_node; |
4400 | struct resource res; | 4399 | struct resource res; |
@@ -4449,9 +4448,8 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, | |||
4449 | 4448 | ||
4450 | if (!request_mem_region(res.start, resource_size(&res), | 4449 | if (!request_mem_region(res.start, resource_size(&res), |
4451 | dev_driver_string(&ofdev->dev))) { | 4450 | dev_driver_string(&ofdev->dev))) { |
4452 | dev_err(&ofdev->dev, "failed to request memory region " | 4451 | dev_err(&ofdev->dev, "failed to request memory region %pR\n", |
4453 | "(0x%016llx-0x%016llx)\n", | 4452 | &res); |
4454 | (u64)res.start, (u64)res.end); | ||
4455 | initcode = PPC_ADMA_INIT_MEMREG; | 4453 | initcode = PPC_ADMA_INIT_MEMREG; |
4456 | ret = -EBUSY; | 4454 | ret = -EBUSY; |
4457 | goto out; | 4455 | goto out; |
@@ -4945,7 +4943,7 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = { | |||
4945 | }; | 4943 | }; |
4946 | MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); | 4944 | MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); |
4947 | 4945 | ||
4948 | static struct of_platform_driver ppc440spe_adma_driver = { | 4946 | static struct platform_driver ppc440spe_adma_driver = { |
4949 | .probe = ppc440spe_adma_probe, | 4947 | .probe = ppc440spe_adma_probe, |
4950 | .remove = __devexit_p(ppc440spe_adma_remove), | 4948 | .remove = __devexit_p(ppc440spe_adma_remove), |
4951 | .driver = { | 4949 | .driver = { |
@@ -4963,7 +4961,7 @@ static __init int ppc440spe_adma_init(void) | |||
4963 | if (ret) | 4961 | if (ret) |
4964 | return ret; | 4962 | return ret; |
4965 | 4963 | ||
4966 | ret = of_register_platform_driver(&ppc440spe_adma_driver); | 4964 | ret = platform_driver_register(&ppc440spe_adma_driver); |
4967 | if (ret) { | 4965 | if (ret) { |
4968 | pr_err("%s: failed to register platform driver\n", | 4966 | pr_err("%s: failed to register platform driver\n", |
4969 | __func__); | 4967 | __func__); |
@@ -4997,7 +4995,7 @@ out_dev: | |||
4997 | /* User will not be able to enable h/w RAID-6 */ | 4995 | /* User will not be able to enable h/w RAID-6 */ |
4998 | pr_err("%s: failed to create RAID-6 driver interface\n", | 4996 | pr_err("%s: failed to create RAID-6 driver interface\n", |
4999 | __func__); | 4997 | __func__); |
5000 | of_unregister_platform_driver(&ppc440spe_adma_driver); | 4998 | platform_driver_unregister(&ppc440spe_adma_driver); |
5001 | out_reg: | 4999 | out_reg: |
5002 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); | 5000 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); |
5003 | kfree(ppc440spe_dma_fifo_buf); | 5001 | kfree(ppc440spe_dma_fifo_buf); |
@@ -5012,7 +5010,7 @@ static void __exit ppc440spe_adma_exit(void) | |||
5012 | &driver_attr_enable); | 5010 | &driver_attr_enable); |
5013 | driver_remove_file(&ppc440spe_adma_driver.driver, | 5011 | driver_remove_file(&ppc440spe_adma_driver.driver, |
5014 | &driver_attr_devices); | 5012 | &driver_attr_devices); |
5015 | of_unregister_platform_driver(&ppc440spe_adma_driver); | 5013 | platform_driver_unregister(&ppc440spe_adma_driver); |
5016 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); | 5014 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); |
5017 | kfree(ppc440spe_dma_fifo_buf); | 5015 | kfree(ppc440spe_dma_fifo_buf); |
5018 | } | 5016 | } |
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index eb6b54dbb806..028330044201 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -27,7 +27,10 @@ | |||
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
29 | #include <linux/sh_dma.h> | 29 | #include <linux/sh_dma.h> |
30 | 30 | #include <linux/notifier.h> | |
31 | #include <linux/kdebug.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/rculist.h> | ||
31 | #include "shdma.h" | 34 | #include "shdma.h" |
32 | 35 | ||
33 | /* DMA descriptor control */ | 36 | /* DMA descriptor control */ |
@@ -43,6 +46,13 @@ enum sh_dmae_desc_status { | |||
43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | 46 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
44 | #define LOG2_DEFAULT_XFER_SIZE 2 | 47 | #define LOG2_DEFAULT_XFER_SIZE 2 |
45 | 48 | ||
49 | /* | ||
50 | * Used for write-side mutual exclusion for the global device list, | ||
51 | * read-side synchronization by way of RCU, and per-controller data. | ||
52 | */ | ||
53 | static DEFINE_SPINLOCK(sh_dmae_lock); | ||
54 | static LIST_HEAD(sh_dmae_devices); | ||
55 | |||
46 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ | 56 | /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ |
47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | 57 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; |
48 | 58 | ||
@@ -75,22 +85,35 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | |||
75 | */ | 85 | */ |
76 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) | 86 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) |
77 | { | 87 | { |
78 | unsigned short dmaor = dmaor_read(shdev); | 88 | unsigned short dmaor; |
89 | unsigned long flags; | ||
79 | 90 | ||
91 | spin_lock_irqsave(&sh_dmae_lock, flags); | ||
92 | |||
93 | dmaor = dmaor_read(shdev); | ||
80 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); | 94 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); |
95 | |||
96 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
81 | } | 97 | } |
82 | 98 | ||
83 | static int sh_dmae_rst(struct sh_dmae_device *shdev) | 99 | static int sh_dmae_rst(struct sh_dmae_device *shdev) |
84 | { | 100 | { |
85 | unsigned short dmaor; | 101 | unsigned short dmaor; |
102 | unsigned long flags; | ||
86 | 103 | ||
87 | sh_dmae_ctl_stop(shdev); | 104 | spin_lock_irqsave(&sh_dmae_lock, flags); |
88 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; | ||
89 | 105 | ||
90 | dmaor_write(shdev, dmaor); | 106 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); |
91 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { | 107 | |
92 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); | 108 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); |
93 | return -EINVAL; | 109 | |
110 | dmaor = dmaor_read(shdev); | ||
111 | |||
112 | spin_unlock_irqrestore(&sh_dmae_lock, flags); | ||
113 | |||
114 | if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { | ||
115 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); | ||
116 | return -EIO; | ||
94 | } | 117 | } |
95 | return 0; | 118 | return 0; |
96 | } | 119 | } |
@@ -174,7 +197,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) | |||
174 | 197 | ||
175 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 198 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
176 | { | 199 | { |
177 | /* When DMA was working, can not set data to CHCR */ | 200 | /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ |
178 | if (dmae_is_busy(sh_chan)) | 201 | if (dmae_is_busy(sh_chan)) |
179 | return -EBUSY; | 202 | return -EBUSY; |
180 | 203 | ||
@@ -190,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | |||
190 | struct sh_dmae_device, common); | 213 | struct sh_dmae_device, common); |
191 | struct sh_dmae_pdata *pdata = shdev->pdata; | 214 | struct sh_dmae_pdata *pdata = shdev->pdata; |
192 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | 215 | const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; |
193 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); | 216 | u16 __iomem *addr = shdev->dmars; |
194 | int shift = chan_pdata->dmars_bit; | 217 | int shift = chan_pdata->dmars_bit; |
195 | 218 | ||
196 | if (dmae_is_busy(sh_chan)) | 219 | if (dmae_is_busy(sh_chan)) |
197 | return -EBUSY; | 220 | return -EBUSY; |
198 | 221 | ||
222 | /* in the case of a missing DMARS resource use first memory window */ | ||
223 | if (!addr) | ||
224 | addr = (u16 __iomem *)shdev->chan_reg; | ||
225 | addr += chan_pdata->dmars / sizeof(u16); | ||
226 | |||
199 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | 227 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), |
200 | addr); | 228 | addr); |
201 | 229 | ||
@@ -315,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | |||
315 | 343 | ||
316 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 344 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
317 | dmae_set_chcr(sh_chan, cfg->chcr); | 345 | dmae_set_chcr(sh_chan, cfg->chcr); |
318 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { | 346 | } else { |
319 | dmae_init(sh_chan); | 347 | dmae_init(sh_chan); |
320 | } | 348 | } |
321 | 349 | ||
@@ -364,7 +392,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
364 | LIST_HEAD(list); | 392 | LIST_HEAD(list); |
365 | int descs = sh_chan->descs_allocated; | 393 | int descs = sh_chan->descs_allocated; |
366 | 394 | ||
395 | /* Protect against ISR */ | ||
396 | spin_lock_irq(&sh_chan->desc_lock); | ||
367 | dmae_halt(sh_chan); | 397 | dmae_halt(sh_chan); |
398 | spin_unlock_irq(&sh_chan->desc_lock); | ||
399 | |||
400 | /* Now no new interrupts will occur */ | ||
368 | 401 | ||
369 | /* Prepared and not submitted descriptors can still be on the queue */ | 402 | /* Prepared and not submitted descriptors can still be on the queue */ |
370 | if (!list_empty(&sh_chan->ld_queue)) | 403 | if (!list_empty(&sh_chan->ld_queue)) |
@@ -374,6 +407,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
374 | /* The caller is holding dma_list_mutex */ | 407 | /* The caller is holding dma_list_mutex */ |
375 | struct sh_dmae_slave *param = chan->private; | 408 | struct sh_dmae_slave *param = chan->private; |
376 | clear_bit(param->slave_id, sh_dmae_slave_used); | 409 | clear_bit(param->slave_id, sh_dmae_slave_used); |
410 | chan->private = NULL; | ||
377 | } | 411 | } |
378 | 412 | ||
379 | spin_lock_bh(&sh_chan->desc_lock); | 413 | spin_lock_bh(&sh_chan->desc_lock); |
@@ -553,8 +587,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
553 | if (!chan || !len) | 587 | if (!chan || !len) |
554 | return NULL; | 588 | return NULL; |
555 | 589 | ||
556 | chan->private = NULL; | ||
557 | |||
558 | sh_chan = to_sh_chan(chan); | 590 | sh_chan = to_sh_chan(chan); |
559 | 591 | ||
560 | sg_init_table(&sg, 1); | 592 | sg_init_table(&sg, 1); |
@@ -610,9 +642,9 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
610 | if (!chan) | 642 | if (!chan) |
611 | return -EINVAL; | 643 | return -EINVAL; |
612 | 644 | ||
645 | spin_lock_bh(&sh_chan->desc_lock); | ||
613 | dmae_halt(sh_chan); | 646 | dmae_halt(sh_chan); |
614 | 647 | ||
615 | spin_lock_bh(&sh_chan->desc_lock); | ||
616 | if (!list_empty(&sh_chan->ld_queue)) { | 648 | if (!list_empty(&sh_chan->ld_queue)) { |
617 | /* Record partial transfer */ | 649 | /* Record partial transfer */ |
618 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | 650 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, |
@@ -706,6 +738,14 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all | |||
706 | list_move(&desc->node, &sh_chan->ld_free); | 738 | list_move(&desc->node, &sh_chan->ld_free); |
707 | } | 739 | } |
708 | } | 740 | } |
741 | |||
742 | if (all && !callback) | ||
743 | /* | ||
744 | * Terminating and the loop completed normally: forgive | ||
745 | * uncompleted cookies | ||
746 | */ | ||
747 | sh_chan->completed_cookie = sh_chan->common.cookie; | ||
748 | |||
709 | spin_unlock_bh(&sh_chan->desc_lock); | 749 | spin_unlock_bh(&sh_chan->desc_lock); |
710 | 750 | ||
711 | if (callback) | 751 | if (callback) |
@@ -723,10 +763,6 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | |||
723 | { | 763 | { |
724 | while (__ld_cleanup(sh_chan, all)) | 764 | while (__ld_cleanup(sh_chan, all)) |
725 | ; | 765 | ; |
726 | |||
727 | if (all) | ||
728 | /* Terminating - forgive uncompleted cookies */ | ||
729 | sh_chan->completed_cookie = sh_chan->common.cookie; | ||
730 | } | 766 | } |
731 | 767 | ||
732 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 768 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
@@ -740,7 +776,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | |||
740 | return; | 776 | return; |
741 | } | 777 | } |
742 | 778 | ||
743 | /* Find the first not transferred desciptor */ | 779 | /* Find the first not transferred descriptor */ |
744 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | 780 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
745 | if (desc->mark == DESC_SUBMITTED) { | 781 | if (desc->mark == DESC_SUBMITTED) { |
746 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | 782 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", |
@@ -772,8 +808,10 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | |||
772 | 808 | ||
773 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 809 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
774 | 810 | ||
775 | last_used = chan->cookie; | 811 | /* First read completed cookie to avoid a skew */ |
776 | last_complete = sh_chan->completed_cookie; | 812 | last_complete = sh_chan->completed_cookie; |
813 | rmb(); | ||
814 | last_used = chan->cookie; | ||
777 | BUG_ON(last_complete < 0); | 815 | BUG_ON(last_complete < 0); |
778 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 816 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
779 | 817 | ||
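Editor's note on the sh_dmae_tx_status() hunk above: cookies only ever advance "used first, completed second", so sampling completed_cookie before chan->cookie (with the read barrier in between) can never report a completed value ahead of the used value. A simplified user-space model of that ordering, using C11 atomics in place of the rmb(), is sketched below (build with something like cc -pthread); it is an analogy, not driver code.

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int used, completed;	/* model of the two cookies */

	static void *producer(void *arg)
	{
		for (int i = 0; i < 1000000; i++) {
			/* a cookie is always issued before it can complete */
			atomic_fetch_add_explicit(&used, 1, memory_order_release);
			atomic_fetch_add_explicit(&completed, 1, memory_order_release);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, producer, NULL);
		for (int i = 0; i < 1000000; i++) {
			/* read "completed" first, then "used": mirrors the rmb() */
			int c = atomic_load_explicit(&completed, memory_order_acquire);
			int u = atomic_load_explicit(&used, memory_order_acquire);
			assert(c <= u);	/* the snapshot is never skewed */
		}
		pthread_join(t, NULL);
		puts("no skew observed");
		return 0;
	}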
@@ -803,8 +841,12 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | |||
803 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 841 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) |
804 | { | 842 | { |
805 | irqreturn_t ret = IRQ_NONE; | 843 | irqreturn_t ret = IRQ_NONE; |
806 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 844 | struct sh_dmae_chan *sh_chan = data; |
807 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 845 | u32 chcr; |
846 | |||
847 | spin_lock(&sh_chan->desc_lock); | ||
848 | |||
849 | chcr = sh_dmae_readl(sh_chan, CHCR); | ||
808 | 850 | ||
809 | if (chcr & CHCR_TE) { | 851 | if (chcr & CHCR_TE) { |
810 | /* DMA stop */ | 852 | /* DMA stop */ |
@@ -814,13 +856,15 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) | |||
814 | tasklet_schedule(&sh_chan->tasklet); | 856 | tasklet_schedule(&sh_chan->tasklet); |
815 | } | 857 | } |
816 | 858 | ||
859 | spin_unlock(&sh_chan->desc_lock); | ||
860 | |||
817 | return ret; | 861 | return ret; |
818 | } | 862 | } |
819 | 863 | ||
820 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 864 | /* Called from error IRQ or NMI */ |
821 | static irqreturn_t sh_dmae_err(int irq, void *data) | 865 | static bool sh_dmae_reset(struct sh_dmae_device *shdev) |
822 | { | 866 | { |
823 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 867 | unsigned int handled = 0; |
824 | int i; | 868 | int i; |
825 | 869 | ||
826 | /* halt the dma controller */ | 870 | /* halt the dma controller */ |
@@ -829,25 +873,51 @@ static irqreturn_t sh_dmae_err(int irq, void *data) | |||
829 | /* We cannot detect, which channel caused the error, have to reset all */ | 873 | /* We cannot detect, which channel caused the error, have to reset all */ |
830 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { | 874 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
831 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 875 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
832 | if (sh_chan) { | 876 | struct sh_desc *desc; |
833 | struct sh_desc *desc; | 877 | LIST_HEAD(dl); |
834 | /* Stop the channel */ | 878 | |
835 | dmae_halt(sh_chan); | 879 | if (!sh_chan) |
836 | /* Complete all */ | 880 | continue; |
837 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 881 | |
838 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | 882 | spin_lock(&sh_chan->desc_lock); |
839 | desc->mark = DESC_IDLE; | 883 | |
840 | if (tx->callback) | 884 | /* Stop the channel */ |
841 | tx->callback(tx->callback_param); | 885 | dmae_halt(sh_chan); |
842 | } | 886 | |
843 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); | 887 | list_splice_init(&sh_chan->ld_queue, &dl); |
888 | |||
889 | spin_unlock(&sh_chan->desc_lock); | ||
890 | |||
891 | /* Complete all */ | ||
892 | list_for_each_entry(desc, &dl, node) { | ||
893 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
894 | desc->mark = DESC_IDLE; | ||
895 | if (tx->callback) | ||
896 | tx->callback(tx->callback_param); | ||
844 | } | 897 | } |
898 | |||
899 | spin_lock(&sh_chan->desc_lock); | ||
900 | list_splice(&dl, &sh_chan->ld_free); | ||
901 | spin_unlock(&sh_chan->desc_lock); | ||
902 | |||
903 | handled++; | ||
845 | } | 904 | } |
905 | |||
846 | sh_dmae_rst(shdev); | 906 | sh_dmae_rst(shdev); |
847 | 907 | ||
908 | return !!handled; | ||
909 | } | ||
910 | |||
911 | static irqreturn_t sh_dmae_err(int irq, void *data) | ||
912 | { | ||
913 | struct sh_dmae_device *shdev = data; | ||
914 | |||
915 | if (!(dmaor_read(shdev) & DMAOR_AE)) | ||
916 | return IRQ_NONE; | ||
917 | |||
918 | sh_dmae_reset(data); | ||
848 | return IRQ_HANDLED; | 919 | return IRQ_HANDLED; |
849 | } | 920 | } |
850 | #endif | ||
851 | 921 | ||
852 | static void dmae_do_tasklet(unsigned long data) | 922 | static void dmae_do_tasklet(unsigned long data) |
853 | { | 923 | { |
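Editor's note on the sh_dmae_reset() rework above: the pending list is spliced onto a private list head while desc_lock is held, the callbacks are then run with the lock dropped, and the descriptors are returned to the free list under the lock again. A small user-space sketch of that detach-under-lock / complete-unlocked pattern, with a pthread mutex and a hand-rolled list standing in for the kernel primitives:

	#include <pthread.h>
	#include <stdio.h>

	struct desc {
		struct desc *next;
		void (*callback)(void *arg);
		void *arg;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct desc *pending, *freelist;

	static void reset_channel(void)
	{
		struct desc *dl, *d;

		pthread_mutex_lock(&lock);
		dl = pending;			/* detach everything ... */
		pending = NULL;
		pthread_mutex_unlock(&lock);

		for (d = dl; d; d = d->next)	/* ... complete it unlocked ... */
			if (d->callback)
				d->callback(d->arg);

		pthread_mutex_lock(&lock);	/* ... and recycle the descriptors */
		while (dl) {
			d = dl;
			dl = dl->next;
			d->next = freelist;
			freelist = d;
		}
		pthread_mutex_unlock(&lock);
	}

	static void done(void *arg) { printf("completed %s\n", (char *)arg); }

	int main(void)
	{
		struct desc a = { NULL, done, "a" }, b = { &a, done, "b" };

		pending = &b;
		reset_channel();	/* callbacks run without the lock held */
		return 0;
	}

In this sketch (as in the driver's new code) a callback is free to take the lock itself, for example to resubmit work, without deadlocking.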
@@ -876,6 +946,54 @@ static void dmae_do_tasklet(unsigned long data) | |||
876 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 946 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
877 | } | 947 | } |
878 | 948 | ||
949 | static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) | ||
950 | { | ||
951 | /* Fast path out if NMIF is not asserted for this controller */ | ||
952 | if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) | ||
953 | return false; | ||
954 | |||
955 | return sh_dmae_reset(shdev); | ||
956 | } | ||
957 | |||
958 | static int sh_dmae_nmi_handler(struct notifier_block *self, | ||
959 | unsigned long cmd, void *data) | ||
960 | { | ||
961 | struct sh_dmae_device *shdev; | ||
962 | int ret = NOTIFY_DONE; | ||
963 | bool triggered; | ||
964 | |||
965 | /* | ||
966 | * Only concern ourselves with NMI events. | ||
967 | * | ||
968 | * Normally we would check the die chain value, but as this needs | ||
969 | * to be architecture independent, check for NMI context instead. | ||
970 | */ | ||
971 | if (!in_nmi()) | ||
972 | return NOTIFY_DONE; | ||
973 | |||
974 | rcu_read_lock(); | ||
975 | list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { | ||
976 | /* | ||
977 | * Only stop if one of the controllers has NMIF asserted, | ||
978 | * we do not want to interfere with regular address error | ||
979 | * handling or NMI events that don't concern the DMACs. | ||
980 | */ | ||
981 | triggered = sh_dmae_nmi_notify(shdev); | ||
982 | if (triggered == true) | ||
983 | ret = NOTIFY_OK; | ||
984 | } | ||
985 | rcu_read_unlock(); | ||
986 | |||
987 | return ret; | ||
988 | } | ||
989 | |||
990 | static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { | ||
991 | .notifier_call = sh_dmae_nmi_handler, | ||
992 | |||
993 | /* Run before NMI debug handler and KGDB */ | ||
994 | .priority = 1, | ||
995 | }; | ||
996 | |||
879 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | 997 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
880 | int irq, unsigned long flags) | 998 | int irq, unsigned long flags) |
881 | { | 999 | { |
@@ -904,9 +1022,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | |||
904 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 1022 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, |
905 | (unsigned long)new_sh_chan); | 1023 | (unsigned long)new_sh_chan); |
906 | 1024 | ||
907 | /* Init the channel */ | ||
908 | dmae_init(new_sh_chan); | ||
909 | |||
910 | spin_lock_init(&new_sh_chan->desc_lock); | 1025 | spin_lock_init(&new_sh_chan->desc_lock); |
911 | 1026 | ||
912 | /* Init descripter manage list */ | 1027 | /* Init descripter manage list */ |
@@ -968,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
968 | unsigned long irqflags = IRQF_DISABLED, | 1083 | unsigned long irqflags = IRQF_DISABLED, |
969 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | 1084 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
970 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | 1085 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
971 | int err, i, irq_cnt = 0, irqres = 0; | 1086 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; |
972 | struct sh_dmae_device *shdev; | 1087 | struct sh_dmae_device *shdev; |
973 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 1088 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; |
974 | 1089 | ||
@@ -977,7 +1092,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
977 | return -ENODEV; | 1092 | return -ENODEV; |
978 | 1093 | ||
979 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1094 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
980 | /* DMARS area is optional, if absent, this controller cannot do slave DMA */ | 1095 | /* DMARS area is optional */ |
981 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1096 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
982 | /* | 1097 | /* |
983 | * IRQ resources: | 1098 | * IRQ resources: |
@@ -1029,10 +1144,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1029 | /* platform data */ | 1144 | /* platform data */ |
1030 | shdev->pdata = pdata; | 1145 | shdev->pdata = pdata; |
1031 | 1146 | ||
1147 | platform_set_drvdata(pdev, shdev); | ||
1148 | |||
1032 | pm_runtime_enable(&pdev->dev); | 1149 | pm_runtime_enable(&pdev->dev); |
1033 | pm_runtime_get_sync(&pdev->dev); | 1150 | pm_runtime_get_sync(&pdev->dev); |
1034 | 1151 | ||
1035 | /* reset dma controller */ | 1152 | spin_lock_irq(&sh_dmae_lock); |
1153 | list_add_tail_rcu(&shdev->node, &sh_dmae_devices); | ||
1154 | spin_unlock_irq(&sh_dmae_lock); | ||
1155 | |||
1156 | /* reset dma controller - only needed as a test */ | ||
1036 | err = sh_dmae_rst(shdev); | 1157 | err = sh_dmae_rst(shdev); |
1037 | if (err) | 1158 | if (err) |
1038 | goto rst_err; | 1159 | goto rst_err; |
@@ -1040,7 +1161,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1040 | INIT_LIST_HEAD(&shdev->common.channels); | 1161 | INIT_LIST_HEAD(&shdev->common.channels); |
1041 | 1162 | ||
1042 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 1163 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
1043 | if (dmars) | 1164 | if (pdata->slave && pdata->slave_num) |
1044 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | 1165 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); |
1045 | 1166 | ||
1046 | shdev->common.device_alloc_chan_resources | 1167 | shdev->common.device_alloc_chan_resources |
@@ -1089,12 +1210,22 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1089 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | 1210 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
1090 | /* Special case - all multiplexed */ | 1211 | /* Special case - all multiplexed */ |
1091 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | 1212 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
1092 | chan_irq[irq_cnt] = chanirq_res->start; | 1213 | if (irq_cnt < SH_DMAC_MAX_CHANNELS) { |
1093 | chan_flag[irq_cnt] = IRQF_SHARED; | 1214 | chan_irq[irq_cnt] = chanirq_res->start; |
1215 | chan_flag[irq_cnt] = IRQF_SHARED; | ||
1216 | } else { | ||
1217 | irq_cap = 1; | ||
1218 | break; | ||
1219 | } | ||
1094 | } | 1220 | } |
1095 | } else { | 1221 | } else { |
1096 | do { | 1222 | do { |
1097 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | 1223 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { |
1224 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { | ||
1225 | irq_cap = 1; | ||
1226 | break; | ||
1227 | } | ||
1228 | |||
1098 | if ((errirq_res->flags & IORESOURCE_BITS) == | 1229 | if ((errirq_res->flags & IORESOURCE_BITS) == |
1099 | IORESOURCE_IRQ_SHAREABLE) | 1230 | IORESOURCE_IRQ_SHAREABLE) |
1100 | chan_flag[irq_cnt] = IRQF_SHARED; | 1231 | chan_flag[irq_cnt] = IRQF_SHARED; |
@@ -1105,41 +1236,55 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1105 | i, irq_cnt); | 1236 | i, irq_cnt); |
1106 | chan_irq[irq_cnt++] = i; | 1237 | chan_irq[irq_cnt++] = i; |
1107 | } | 1238 | } |
1239 | |||
1240 | if (irq_cnt >= SH_DMAC_MAX_CHANNELS) | ||
1241 | break; | ||
1242 | |||
1108 | chanirq_res = platform_get_resource(pdev, | 1243 | chanirq_res = platform_get_resource(pdev, |
1109 | IORESOURCE_IRQ, ++irqres); | 1244 | IORESOURCE_IRQ, ++irqres); |
1110 | } while (irq_cnt < pdata->channel_num && chanirq_res); | 1245 | } while (irq_cnt < pdata->channel_num && chanirq_res); |
1111 | } | 1246 | } |
1112 | 1247 | ||
1113 | if (irq_cnt < pdata->channel_num) | ||
1114 | goto eirqres; | ||
1115 | |||
1116 | /* Create DMA Channel */ | 1248 | /* Create DMA Channel */ |
1117 | for (i = 0; i < pdata->channel_num; i++) { | 1249 | for (i = 0; i < irq_cnt; i++) { |
1118 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | 1250 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); |
1119 | if (err) | 1251 | if (err) |
1120 | goto chan_probe_err; | 1252 | goto chan_probe_err; |
1121 | } | 1253 | } |
1122 | 1254 | ||
1255 | if (irq_cap) | ||
1256 | dev_notice(&pdev->dev, "Attempting to register %d DMA " | ||
1257 | "channels when a maximum of %d are supported.\n", | ||
1258 | pdata->channel_num, SH_DMAC_MAX_CHANNELS); | ||
1259 | |||
1123 | pm_runtime_put(&pdev->dev); | 1260 | pm_runtime_put(&pdev->dev); |
1124 | 1261 | ||
1125 | platform_set_drvdata(pdev, shdev); | ||
1126 | dma_async_device_register(&shdev->common); | 1262 | dma_async_device_register(&shdev->common); |
1127 | 1263 | ||
1128 | return err; | 1264 | return err; |
1129 | 1265 | ||
1130 | chan_probe_err: | 1266 | chan_probe_err: |
1131 | sh_dmae_chan_remove(shdev); | 1267 | sh_dmae_chan_remove(shdev); |
1132 | eirqres: | 1268 | |
1133 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 1269 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
1134 | free_irq(errirq, shdev); | 1270 | free_irq(errirq, shdev); |
1135 | eirq_err: | 1271 | eirq_err: |
1136 | #endif | 1272 | #endif |
1137 | rst_err: | 1273 | rst_err: |
1274 | spin_lock_irq(&sh_dmae_lock); | ||
1275 | list_del_rcu(&shdev->node); | ||
1276 | spin_unlock_irq(&sh_dmae_lock); | ||
1277 | |||
1138 | pm_runtime_put(&pdev->dev); | 1278 | pm_runtime_put(&pdev->dev); |
1279 | pm_runtime_disable(&pdev->dev); | ||
1280 | |||
1139 | if (dmars) | 1281 | if (dmars) |
1140 | iounmap(shdev->dmars); | 1282 | iounmap(shdev->dmars); |
1283 | |||
1284 | platform_set_drvdata(pdev, NULL); | ||
1141 | emapdmars: | 1285 | emapdmars: |
1142 | iounmap(shdev->chan_reg); | 1286 | iounmap(shdev->chan_reg); |
1287 | synchronize_rcu(); | ||
1143 | emapchan: | 1288 | emapchan: |
1144 | kfree(shdev); | 1289 | kfree(shdev); |
1145 | ealloc: | 1290 | ealloc: |
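Editor's note on the two probe hunks above: instead of failing when platform data declares more channels than fit in the driver's fixed arrays, IRQ collection now stops at SH_DMAC_MAX_CHANNELS (raised to 20 in shdma.h below), remembers the truncation in irq_cap, and prints a notice. A trivial user-space sketch of that policy; the IRQ numbers and limit here are placeholders.

	#include <stdio.h>

	#define MAX_CHANNELS 20

	int main(void)
	{
		int chan_irq[MAX_CHANNELS];
		int requested = 24;		/* what platform data declares */
		int irq_cnt = 0, irq_cap = 0;

		for (int i = 0; i < requested; i++) {
			if (irq_cnt >= MAX_CHANNELS) {
				irq_cap = 1;	/* remember that we truncated */
				break;
			}
			chan_irq[irq_cnt++] = 100 + i;	/* made-up IRQ numbers */
		}

		if (irq_cap)
			printf("requested %d channels, only %d supported\n",
			       requested, MAX_CHANNELS);
		printf("registered %d channels (first IRQ %d)\n",
		       irq_cnt, chan_irq[0]);
		return 0;
	}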
@@ -1162,6 +1307,10 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
1162 | if (errirq > 0) | 1307 | if (errirq > 0) |
1163 | free_irq(errirq, shdev); | 1308 | free_irq(errirq, shdev); |
1164 | 1309 | ||
1310 | spin_lock_irq(&sh_dmae_lock); | ||
1311 | list_del_rcu(&shdev->node); | ||
1312 | spin_unlock_irq(&sh_dmae_lock); | ||
1313 | |||
1165 | /* channel data remove */ | 1314 | /* channel data remove */ |
1166 | sh_dmae_chan_remove(shdev); | 1315 | sh_dmae_chan_remove(shdev); |
1167 | 1316 | ||
@@ -1171,6 +1320,9 @@ static int __exit sh_dmae_remove(struct platform_device *pdev) | |||
1171 | iounmap(shdev->dmars); | 1320 | iounmap(shdev->dmars); |
1172 | iounmap(shdev->chan_reg); | 1321 | iounmap(shdev->chan_reg); |
1173 | 1322 | ||
1323 | platform_set_drvdata(pdev, NULL); | ||
1324 | |||
1325 | synchronize_rcu(); | ||
1174 | kfree(shdev); | 1326 | kfree(shdev); |
1175 | 1327 | ||
1176 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1328 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -1189,17 +1341,88 @@ static void sh_dmae_shutdown(struct platform_device *pdev) | |||
1189 | sh_dmae_ctl_stop(shdev); | 1341 | sh_dmae_ctl_stop(shdev); |
1190 | } | 1342 | } |
1191 | 1343 | ||
1344 | static int sh_dmae_runtime_suspend(struct device *dev) | ||
1345 | { | ||
1346 | return 0; | ||
1347 | } | ||
1348 | |||
1349 | static int sh_dmae_runtime_resume(struct device *dev) | ||
1350 | { | ||
1351 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1352 | |||
1353 | return sh_dmae_rst(shdev); | ||
1354 | } | ||
1355 | |||
1356 | #ifdef CONFIG_PM | ||
1357 | static int sh_dmae_suspend(struct device *dev) | ||
1358 | { | ||
1359 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1360 | int i; | ||
1361 | |||
1362 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
1363 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1364 | if (sh_chan->descs_allocated) | ||
1365 | sh_chan->pm_error = pm_runtime_put_sync(dev); | ||
1366 | } | ||
1367 | |||
1368 | return 0; | ||
1369 | } | ||
1370 | |||
1371 | static int sh_dmae_resume(struct device *dev) | ||
1372 | { | ||
1373 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1374 | int i; | ||
1375 | |||
1376 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
1377 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1378 | struct sh_dmae_slave *param = sh_chan->common.private; | ||
1379 | |||
1380 | if (!sh_chan->descs_allocated) | ||
1381 | continue; | ||
1382 | |||
1383 | if (!sh_chan->pm_error) | ||
1384 | pm_runtime_get_sync(dev); | ||
1385 | |||
1386 | if (param) { | ||
1387 | const struct sh_dmae_slave_config *cfg = param->config; | ||
1388 | dmae_set_dmars(sh_chan, cfg->mid_rid); | ||
1389 | dmae_set_chcr(sh_chan, cfg->chcr); | ||
1390 | } else { | ||
1391 | dmae_init(sh_chan); | ||
1392 | } | ||
1393 | } | ||
1394 | |||
1395 | return 0; | ||
1396 | } | ||
1397 | #else | ||
1398 | #define sh_dmae_suspend NULL | ||
1399 | #define sh_dmae_resume NULL | ||
1400 | #endif | ||
1401 | |||
1402 | const struct dev_pm_ops sh_dmae_pm = { | ||
1403 | .suspend = sh_dmae_suspend, | ||
1404 | .resume = sh_dmae_resume, | ||
1405 | .runtime_suspend = sh_dmae_runtime_suspend, | ||
1406 | .runtime_resume = sh_dmae_runtime_resume, | ||
1407 | }; | ||
1408 | |||
1192 | static struct platform_driver sh_dmae_driver = { | 1409 | static struct platform_driver sh_dmae_driver = { |
1193 | .remove = __exit_p(sh_dmae_remove), | 1410 | .remove = __exit_p(sh_dmae_remove), |
1194 | .shutdown = sh_dmae_shutdown, | 1411 | .shutdown = sh_dmae_shutdown, |
1195 | .driver = { | 1412 | .driver = { |
1196 | .owner = THIS_MODULE, | 1413 | .owner = THIS_MODULE, |
1197 | .name = "sh-dma-engine", | 1414 | .name = "sh-dma-engine", |
1415 | .pm = &sh_dmae_pm, | ||
1198 | }, | 1416 | }, |
1199 | }; | 1417 | }; |
1200 | 1418 | ||
1201 | static int __init sh_dmae_init(void) | 1419 | static int __init sh_dmae_init(void) |
1202 | { | 1420 | { |
1421 | /* Wire up NMI handling */ | ||
1422 | int err = register_die_notifier(&sh_dmae_nmi_notifier); | ||
1423 | if (err) | ||
1424 | return err; | ||
1425 | |||
1203 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | 1426 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); |
1204 | } | 1427 | } |
1205 | module_init(sh_dmae_init); | 1428 | module_init(sh_dmae_init); |
@@ -1207,9 +1430,12 @@ module_init(sh_dmae_init); | |||
1207 | static void __exit sh_dmae_exit(void) | 1430 | static void __exit sh_dmae_exit(void) |
1208 | { | 1431 | { |
1209 | platform_driver_unregister(&sh_dmae_driver); | 1432 | platform_driver_unregister(&sh_dmae_driver); |
1433 | |||
1434 | unregister_die_notifier(&sh_dmae_nmi_notifier); | ||
1210 | } | 1435 | } |
1211 | module_exit(sh_dmae_exit); | 1436 | module_exit(sh_dmae_exit); |
1212 | 1437 | ||
1213 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | 1438 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); |
1214 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | 1439 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); |
1215 | MODULE_LICENSE("GPL"); | 1440 | MODULE_LICENSE("GPL"); |
1441 | MODULE_ALIAS("platform:sh-dma-engine"); | ||
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 4021275a0a43..5ae9fc512180 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | 19 | ||
20 | #define SH_DMAC_MAX_CHANNELS 6 | 20 | #define SH_DMAC_MAX_CHANNELS 20 |
21 | #define SH_DMA_SLAVE_NUMBER 256 | 21 | #define SH_DMA_SLAVE_NUMBER 256 |
22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ | 22 | #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ |
23 | 23 | ||
@@ -37,12 +37,14 @@ struct sh_dmae_chan { | |||
37 | int id; /* Raw id of this channel */ | 37 | int id; /* Raw id of this channel */ |
38 | u32 __iomem *base; | 38 | u32 __iomem *base; |
39 | char dev_id[16]; /* unique name per DMAC of channel */ | 39 | char dev_id[16]; /* unique name per DMAC of channel */ |
40 | int pm_error; | ||
40 | }; | 41 | }; |
41 | 42 | ||
42 | struct sh_dmae_device { | 43 | struct sh_dmae_device { |
43 | struct dma_device common; | 44 | struct dma_device common; |
44 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; | 45 | struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; |
45 | struct sh_dmae_pdata *pdata; | 46 | struct sh_dmae_pdata *pdata; |
47 | struct list_head node; | ||
46 | u32 __iomem *chan_reg; | 48 | u32 __iomem *chan_reg; |
47 | u16 __iomem *dmars; | 49 | u16 __iomem *dmars; |
48 | }; | 50 | }; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 17e2600a00cf..8f222d4db7de 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * driver/dma/ste_dma40.c | 2 | * Copyright (C) Ericsson AB 2007-2008 |
3 | * | 3 | * Copyright (C) ST-Ericsson SA 2008-2010 |
4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
5 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | ||
5 | * License terms: GNU General Public License (GPL) version 2 | 6 | * License terms: GNU General Public License (GPL) version 2 |
6 | * Author: Per Friden <per.friden@stericsson.com> | ||
7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
8 | * | ||
9 | */ | 7 | */ |
10 | 8 | ||
11 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
@@ -14,6 +12,7 @@ | |||
14 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/err.h> | ||
17 | 16 | ||
18 | #include <plat/ste_dma40.h> | 17 | #include <plat/ste_dma40.h> |
19 | 18 | ||
@@ -32,6 +31,11 @@ | |||
32 | 31 | ||
33 | /* Hardware requirement on LCLA alignment */ | 32 | /* Hardware requirement on LCLA alignment */ |
34 | #define LCLA_ALIGNMENT 0x40000 | 33 | #define LCLA_ALIGNMENT 0x40000 |
34 | |||
35 | /* Max number of links per event group */ | ||
36 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 | ||
37 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP | ||
38 | |||
35 | /* Attempts before giving up to trying to get pages that are aligned */ | 39 | /* Attempts before giving up to trying to get pages that are aligned */ |
36 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | 40 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 |
37 | 41 | ||
@@ -41,7 +45,7 @@ | |||
41 | #define D40_ALLOC_LOG_FREE 0 | 45 | #define D40_ALLOC_LOG_FREE 0 |
42 | 46 | ||
43 | /* Hardware designer of the block */ | 47 | /* Hardware designer of the block */ |
44 | #define D40_PERIPHID2_DESIGNER 0x8 | 48 | #define D40_HW_DESIGNER 0x8 |
45 | 49 | ||
46 | /** | 50 | /** |
47 | * enum 40_command - The different commands and/or statuses. | 51 | * enum 40_command - The different commands and/or statuses. |
@@ -64,6 +68,7 @@ enum d40_command { | |||
64 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 68 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
65 | * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if | 69 | * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if |
66 | * pre_alloc_lli is used. | 70 | * pre_alloc_lli is used. |
71 | * @dma_addr: DMA address, if mapped | ||
67 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | 72 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. |
68 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | 73 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, |
69 | * one buffer to one buffer. | 74 | * one buffer to one buffer. |
@@ -71,6 +76,7 @@ enum d40_command { | |||
71 | struct d40_lli_pool { | 76 | struct d40_lli_pool { |
72 | void *base; | 77 | void *base; |
73 | int size; | 78 | int size; |
79 | dma_addr_t dma_addr; | ||
74 | /* Space for dst and src, plus an extra for padding */ | 80 | /* Space for dst and src, plus an extra for padding */ |
75 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 81 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
76 | }; | 82 | }; |
@@ -84,18 +90,16 @@ struct d40_lli_pool { | |||
84 | * @lli_log: Same as above but for logical channels. | 90 | * @lli_log: Same as above but for logical channels. |
85 | * @lli_pool: The pool with two entries pre-allocated. | 91 | * @lli_pool: The pool with two entries pre-allocated. |
86 | * @lli_len: Number of llis of current descriptor. | 92 | * @lli_len: Number of llis of current descriptor. |
87 | * @lli_count: Number of transfered llis. | 93 | * @lli_current: Number of transferred llis. |
88 | * @lli_tx_len: Max number of LLIs per transfer, there can be | 94 | * @lcla_alloc: Number of LCLA entries allocated. |
89 | * many transfer for one descriptor. | ||
90 | * @txd: DMA engine struct. Used for among other things for communication | 95 | * @txd: DMA engine struct. Used for among other things for communication |
91 | * during a transfer. | 96 | * during a transfer. |
92 | * @node: List entry. | 97 | * @node: List entry. |
93 | * @dir: The transfer direction of this job. | ||
94 | * @is_in_client_list: true if the client owns this descriptor. | 98 | * @is_in_client_list: true if the client owns this descriptor. |
99 | * the previous one. | ||
95 | * | 100 | * |
96 | * This descriptor is used for both logical and physical transfers. | 101 | * This descriptor is used for both logical and physical transfers. |
97 | */ | 102 | */ |
98 | |||
99 | struct d40_desc { | 103 | struct d40_desc { |
100 | /* LLI physical */ | 104 | /* LLI physical */ |
101 | struct d40_phy_lli_bidir lli_phy; | 105 | struct d40_phy_lli_bidir lli_phy; |
@@ -104,14 +108,14 @@ struct d40_desc { | |||
104 | 108 | ||
105 | struct d40_lli_pool lli_pool; | 109 | struct d40_lli_pool lli_pool; |
106 | int lli_len; | 110 | int lli_len; |
107 | int lli_count; | 111 | int lli_current; |
108 | u32 lli_tx_len; | 112 | int lcla_alloc; |
109 | 113 | ||
110 | struct dma_async_tx_descriptor txd; | 114 | struct dma_async_tx_descriptor txd; |
111 | struct list_head node; | 115 | struct list_head node; |
112 | 116 | ||
113 | enum dma_data_direction dir; | ||
114 | bool is_in_client_list; | 117 | bool is_in_client_list; |
118 | bool cyclic; | ||
115 | }; | 119 | }; |
116 | 120 | ||
117 | /** | 121 | /** |
@@ -123,17 +127,15 @@ struct d40_desc { | |||
123 | * @pages: The number of pages needed for all physical channels. | 127 | * @pages: The number of pages needed for all physical channels. |
124 | * Only used later for clean-up on error | 128 | * Only used later for clean-up on error |
125 | * @lock: Lock to protect the content in this struct. | 129 | * @lock: Lock to protect the content in this struct. |
126 | * @alloc_map: Bitmap mapping between physical channel and LCLA entries. | 130 | * @alloc_map: big map over which LCLA entry is own by which job. |
127 | * @num_blocks: The number of entries of alloc_map. Equals to the | ||
128 | * number of physical channels. | ||
129 | */ | 131 | */ |
130 | struct d40_lcla_pool { | 132 | struct d40_lcla_pool { |
131 | void *base; | 133 | void *base; |
134 | dma_addr_t dma_addr; | ||
132 | void *base_unaligned; | 135 | void *base_unaligned; |
133 | int pages; | 136 | int pages; |
134 | spinlock_t lock; | 137 | spinlock_t lock; |
135 | u32 *alloc_map; | 138 | struct d40_desc **alloc_map; |
136 | int num_blocks; | ||
137 | }; | 139 | }; |
138 | 140 | ||
139 | /** | 141 | /** |
@@ -146,9 +148,7 @@ struct d40_lcla_pool { | |||
146 | * this physical channel. Can also be free or physically allocated. | 148 | * this physical channel. Can also be free or physically allocated. |
147 | * @allocated_dst: Same as for src but is dst. | 149 | * @allocated_dst: Same as for src but is dst. |
148 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as | 150 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as |
149 | * event line number. Both allocated_src and allocated_dst can not be | 151 | * event line number. |
150 | * allocated to a physical channel, since the interrupt handler has then | ||
151 | * no way of figure out which one the interrupt belongs to. | ||
152 | */ | 152 | */ |
153 | struct d40_phy_res { | 153 | struct d40_phy_res { |
154 | spinlock_t lock; | 154 | spinlock_t lock; |
@@ -178,6 +178,7 @@ struct d40_base; | |||
178 | * @active: Active descriptor. | 178 | * @active: Active descriptor. |
179 | * @queue: Queued jobs. | 179 | * @queue: Queued jobs. |
180 | * @dma_cfg: The client configuration of this dma channel. | 180 | * @dma_cfg: The client configuration of this dma channel. |
181 | * @configured: whether the dma_cfg configuration is valid | ||
181 | * @base: Pointer to the device instance struct. | 182 | * @base: Pointer to the device instance struct. |
182 | * @src_def_cfg: Default cfg register setting for src. | 183 | * @src_def_cfg: Default cfg register setting for src. |
183 | * @dst_def_cfg: Default cfg register setting for dst. | 184 | * @dst_def_cfg: Default cfg register setting for dst. |
@@ -201,12 +202,12 @@ struct d40_chan { | |||
201 | struct list_head active; | 202 | struct list_head active; |
202 | struct list_head queue; | 203 | struct list_head queue; |
203 | struct stedma40_chan_cfg dma_cfg; | 204 | struct stedma40_chan_cfg dma_cfg; |
205 | bool configured; | ||
204 | struct d40_base *base; | 206 | struct d40_base *base; |
205 | /* Default register configurations */ | 207 | /* Default register configurations */ |
206 | u32 src_def_cfg; | 208 | u32 src_def_cfg; |
207 | u32 dst_def_cfg; | 209 | u32 dst_def_cfg; |
208 | struct d40_def_lcsp log_def; | 210 | struct d40_def_lcsp log_def; |
209 | struct d40_lcla_elem lcla; | ||
210 | struct d40_log_lli_full *lcpa; | 211 | struct d40_log_lli_full *lcpa; |
211 | /* Runtime reconfiguration */ | 212 | /* Runtime reconfiguration */ |
212 | dma_addr_t runtime_addr; | 213 | dma_addr_t runtime_addr; |
@@ -234,7 +235,6 @@ struct d40_chan { | |||
234 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 235 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
235 | * @dma_slave: dma_device channels that can do only do slave transfers. | 236 | * @dma_slave: dma_device channels that can do only do slave transfers. |
236 | * @dma_memcpy: dma_device channels that can do only do memcpy transfers. | 237 | * @dma_memcpy: dma_device channels that can do only do memcpy transfers. |
237 | * @phy_chans: Room for all possible physical channels in system. | ||
238 | * @log_chans: Room for all possible logical channels in system. | 238 | * @log_chans: Room for all possible logical channels in system. |
239 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 239 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
240 | * to log_chans entries. | 240 | * to log_chans entries. |
@@ -305,9 +305,37 @@ struct d40_reg_val { | |||
305 | unsigned int val; | 305 | unsigned int val; |
306 | }; | 306 | }; |
307 | 307 | ||
308 | static int d40_pool_lli_alloc(struct d40_desc *d40d, | 308 | static struct device *chan2dev(struct d40_chan *d40c) |
309 | int lli_len, bool is_log) | 309 | { |
310 | return &d40c->chan.dev->device; | ||
311 | } | ||
312 | |||
313 | static bool chan_is_physical(struct d40_chan *chan) | ||
314 | { | ||
315 | return chan->log_num == D40_PHY_CHAN; | ||
316 | } | ||
317 | |||
318 | static bool chan_is_logical(struct d40_chan *chan) | ||
319 | { | ||
320 | return !chan_is_physical(chan); | ||
321 | } | ||
322 | |||
323 | static void __iomem *chan_base(struct d40_chan *chan) | ||
324 | { | ||
325 | return chan->base->virtbase + D40_DREG_PCBASE + | ||
326 | chan->phy_chan->num * D40_DREG_PCDELTA; | ||
327 | } | ||
328 | |||
329 | #define d40_err(dev, format, arg...) \ | ||
330 | dev_err(dev, "[%s] " format, __func__, ## arg) | ||
331 | |||
332 | #define chan_err(d40c, format, arg...) \ | ||
333 | d40_err(chan2dev(d40c), format, ## arg) | ||
334 | |||
335 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, | ||
336 | int lli_len) | ||
310 | { | 337 | { |
338 | bool is_log = chan_is_logical(d40c); | ||
311 | u32 align; | 339 | u32 align; |
312 | void *base; | 340 | void *base; |
313 | 341 | ||
@@ -321,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
321 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | 349 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); |
322 | d40d->lli_pool.base = NULL; | 350 | d40d->lli_pool.base = NULL; |
323 | } else { | 351 | } else { |
324 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); | 352 | d40d->lli_pool.size = lli_len * 2 * align; |
325 | 353 | ||
326 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | 354 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); |
327 | d40d->lli_pool.base = base; | 355 | d40d->lli_pool.base = base; |
@@ -331,25 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
331 | } | 359 | } |
332 | 360 | ||
333 | if (is_log) { | 361 | if (is_log) { |
334 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, | 362 | d40d->lli_log.src = PTR_ALIGN(base, align); |
335 | align); | 363 | d40d->lli_log.dst = d40d->lli_log.src + lli_len; |
336 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, | ||
337 | align); | ||
338 | } else { | ||
339 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, | ||
340 | align); | ||
341 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | ||
342 | align); | ||
343 | 364 | ||
344 | d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); | 365 | d40d->lli_pool.dma_addr = 0; |
345 | d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); | 366 | } else { |
367 | d40d->lli_phy.src = PTR_ALIGN(base, align); | ||
368 | d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; | ||
369 | |||
370 | d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, | ||
371 | d40d->lli_phy.src, | ||
372 | d40d->lli_pool.size, | ||
373 | DMA_TO_DEVICE); | ||
374 | |||
375 | if (dma_mapping_error(d40c->base->dev, | ||
376 | d40d->lli_pool.dma_addr)) { | ||
377 | kfree(d40d->lli_pool.base); | ||
378 | d40d->lli_pool.base = NULL; | ||
379 | d40d->lli_pool.dma_addr = 0; | ||
380 | return -ENOMEM; | ||
381 | } | ||
346 | } | 382 | } |
347 | 383 | ||
348 | return 0; | 384 | return 0; |
349 | } | 385 | } |
350 | 386 | ||
351 | static void d40_pool_lli_free(struct d40_desc *d40d) | 387 | static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) |
352 | { | 388 | { |
389 | if (d40d->lli_pool.dma_addr) | ||
390 | dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, | ||
391 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
392 | |||
353 | kfree(d40d->lli_pool.base); | 393 | kfree(d40d->lli_pool.base); |
354 | d40d->lli_pool.base = NULL; | 394 | d40d->lli_pool.base = NULL; |
355 | d40d->lli_pool.size = 0; | 395 | d40d->lli_pool.size = 0; |
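Editor's note on the d40_pool_lli_alloc()/d40_pool_lli_free() hunks above: the physical LLI pool is now streaming-mapped with dma_map_single(), a mapping failure unwinds the allocation completely, and the free path unmaps only when dma_addr is non-zero. A user-space sketch of that lifecycle; fake_map()/fake_unmap() are stand-ins for the DMA API, not real calls.

	#include <stdio.h>
	#include <stdlib.h>

	struct pool {
		void *base;
		unsigned long dma_addr;	/* 0 means "not mapped" */
		size_t size;
	};

	static unsigned long fake_map(void *cpu_addr, size_t size)
	{
		(void)size;
		return (unsigned long)cpu_addr;	/* pretend mapping succeeds */
	}

	static void fake_unmap(unsigned long dma_addr, size_t size)
	{
		printf("unmapped %#lx (%zu bytes)\n", dma_addr, size);
	}

	static int pool_alloc(struct pool *p, size_t size)
	{
		p->base = malloc(size);
		if (!p->base)
			return -1;
		p->size = size;

		p->dma_addr = fake_map(p->base, size);
		if (!p->dma_addr) {		/* mapping failed: unwind fully */
			free(p->base);
			p->base = NULL;
			p->dma_addr = 0;
			return -1;
		}
		return 0;
	}

	static void pool_free(struct pool *p)
	{
		if (p->dma_addr)		/* only unmap what was mapped */
			fake_unmap(p->dma_addr, p->size);
		free(p->base);
		p->base = NULL;
		p->dma_addr = 0;
		p->size = 0;
	}

	int main(void)
	{
		struct pool p;

		if (!pool_alloc(&p, 256))
			pool_free(&p);
		return 0;
	}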
@@ -357,22 +397,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d) | |||
357 | d40d->lli_log.dst = NULL; | 397 | d40d->lli_log.dst = NULL; |
358 | d40d->lli_phy.src = NULL; | 398 | d40d->lli_phy.src = NULL; |
359 | d40d->lli_phy.dst = NULL; | 399 | d40d->lli_phy.dst = NULL; |
360 | d40d->lli_phy.src_addr = 0; | ||
361 | d40d->lli_phy.dst_addr = 0; | ||
362 | } | 400 | } |
363 | 401 | ||
364 | static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, | 402 | static int d40_lcla_alloc_one(struct d40_chan *d40c, |
365 | struct d40_desc *desc) | 403 | struct d40_desc *d40d) |
366 | { | 404 | { |
367 | dma_cookie_t cookie = d40c->chan.cookie; | 405 | unsigned long flags; |
406 | int i; | ||
407 | int ret = -EINVAL; | ||
408 | int p; | ||
409 | |||
410 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
368 | 411 | ||
369 | if (++cookie < 0) | 412 | p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; |
370 | cookie = 1; | ||
371 | 413 | ||
372 | d40c->chan.cookie = cookie; | 414 | /* |
373 | desc->txd.cookie = cookie; | 415 | * Allocate both src and dst at the same time, therefore the half |
416 | * start on 1 since 0 can't be used since zero is used as end marker. | ||
417 | */ | ||
418 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | ||
419 | if (!d40c->base->lcla_pool.alloc_map[p + i]) { | ||
420 | d40c->base->lcla_pool.alloc_map[p + i] = d40d; | ||
421 | d40d->lcla_alloc++; | ||
422 | ret = i; | ||
423 | break; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
428 | |||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | static int d40_lcla_free_all(struct d40_chan *d40c, | ||
433 | struct d40_desc *d40d) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | int i; | ||
437 | int ret = -EINVAL; | ||
438 | |||
439 | if (chan_is_physical(d40c)) | ||
440 | return 0; | ||
441 | |||
442 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
443 | |||
444 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | ||
445 | if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | ||
446 | D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { | ||
447 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | ||
448 | D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; | ||
449 | d40d->lcla_alloc--; | ||
450 | if (d40d->lcla_alloc == 0) { | ||
451 | ret = 0; | ||
452 | break; | ||
453 | } | ||
454 | } | ||
455 | } | ||
456 | |||
457 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
458 | |||
459 | return ret; | ||
374 | 460 | ||
375 | return cookie; | ||
376 | } | 461 | } |
377 | 462 | ||
378 | static void d40_desc_remove(struct d40_desc *d40d) | 463 | static void d40_desc_remove(struct d40_desc *d40d) |
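Editor's note on d40_lcla_alloc_one()/d40_lcla_free_all() above: the new alloc_map is one flat array in which each physical channel owns a window of D40_LCLA_LINK_PER_EVENT_GRP slots; slot 0 is never handed out because 0 doubles as the end-of-list link value, only half the window is scanned because each allocation implicitly covers a src/dst pair, and each occupied slot records the owning descriptor so a job can release everything it holds. A user-space model of that bookkeeping (NUM_PHY is an arbitrary value for the demo):

	#include <stdio.h>

	#define LINKS_PER_GRP	128
	#define NUM_PHY		8

	static void *alloc_map[NUM_PHY * LINKS_PER_GRP];

	static int lcla_alloc_one(int phy_num, void *owner)
	{
		int p = phy_num * LINKS_PER_GRP;

		/* slot 0 is reserved: 0 is the "end of chain" link value */
		for (int i = 1; i < LINKS_PER_GRP / 2; i++) {
			if (!alloc_map[p + i]) {
				alloc_map[p + i] = owner;	/* record the owning job */
				return i;
			}
		}
		return -1;	/* window exhausted */
	}

	static void lcla_free_all(int phy_num, void *owner)
	{
		int p = phy_num * LINKS_PER_GRP;

		for (int i = 1; i < LINKS_PER_GRP / 2; i++)
			if (alloc_map[p + i] == owner)
				alloc_map[p + i] = NULL;
	}

	int main(void)
	{
		int job = 42;

		printf("first slot for job: %d\n", lcla_alloc_one(3, &job));
		printf("second slot for job: %d\n", lcla_alloc_one(3, &job));
		lcla_free_all(3, &job);
		printf("after free, slot reused: %d\n", lcla_alloc_one(3, &job));
		return 0;
	}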
@@ -382,28 +467,36 @@ static void d40_desc_remove(struct d40_desc *d40d) | |||
382 | 467 | ||
383 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | 468 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) |
384 | { | 469 | { |
385 | struct d40_desc *d; | 470 | struct d40_desc *desc = NULL; |
386 | struct d40_desc *_d; | ||
387 | 471 | ||
388 | if (!list_empty(&d40c->client)) { | 472 | if (!list_empty(&d40c->client)) { |
473 | struct d40_desc *d; | ||
474 | struct d40_desc *_d; | ||
475 | |||
389 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 476 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
390 | if (async_tx_test_ack(&d->txd)) { | 477 | if (async_tx_test_ack(&d->txd)) { |
391 | d40_pool_lli_free(d); | 478 | d40_pool_lli_free(d40c, d); |
392 | d40_desc_remove(d); | 479 | d40_desc_remove(d); |
480 | desc = d; | ||
481 | memset(desc, 0, sizeof(*desc)); | ||
393 | break; | 482 | break; |
394 | } | 483 | } |
395 | } else { | ||
396 | d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); | ||
397 | if (d != NULL) { | ||
398 | memset(d, 0, sizeof(struct d40_desc)); | ||
399 | INIT_LIST_HEAD(&d->node); | ||
400 | } | ||
401 | } | 484 | } |
402 | return d; | 485 | |
486 | if (!desc) | ||
487 | desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); | ||
488 | |||
489 | if (desc) | ||
490 | INIT_LIST_HEAD(&desc->node); | ||
491 | |||
492 | return desc; | ||
403 | } | 493 | } |
404 | 494 | ||
405 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 495 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
406 | { | 496 | { |
497 | |||
498 | d40_pool_lli_free(d40c, d40d); | ||
499 | d40_lcla_free_all(d40c, d40d); | ||
407 | kmem_cache_free(d40c->base->desc_slab, d40d); | 500 | kmem_cache_free(d40c->base->desc_slab, d40d); |
408 | } | 501 | } |
409 | 502 | ||
@@ -412,6 +505,130 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | |||
412 | list_add_tail(&desc->node, &d40c->active); | 505 | list_add_tail(&desc->node, &d40c->active); |
413 | } | 506 | } |
414 | 507 | ||
508 | static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) | ||
509 | { | ||
510 | struct d40_phy_lli *lli_dst = desc->lli_phy.dst; | ||
511 | struct d40_phy_lli *lli_src = desc->lli_phy.src; | ||
512 | void __iomem *base = chan_base(chan); | ||
513 | |||
514 | writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); | ||
515 | writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); | ||
516 | writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); | ||
517 | writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); | ||
518 | |||
519 | writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); | ||
520 | writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); | ||
521 | writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); | ||
522 | writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); | ||
523 | } | ||
524 | |||
525 | static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | ||
526 | { | ||
527 | struct d40_lcla_pool *pool = &chan->base->lcla_pool; | ||
528 | struct d40_log_lli_bidir *lli = &desc->lli_log; | ||
529 | int lli_current = desc->lli_current; | ||
530 | int lli_len = desc->lli_len; | ||
531 | bool cyclic = desc->cyclic; | ||
532 | int curr_lcla = -EINVAL; | ||
533 | int first_lcla = 0; | ||
534 | bool linkback; | ||
535 | |||
536 | /* | ||
537 | * We may have partially running cyclic transfers, in case we did't get | ||
538 | * enough LCLA entries. | ||
539 | */ | ||
540 | linkback = cyclic && lli_current == 0; | ||
541 | |||
542 | /* | ||
543 | * For linkback, we need one LCLA even with only one link, because we | ||
544 | * can't link back to the one in LCPA space | ||
545 | */ | ||
546 | if (linkback || (lli_len - lli_current > 1)) { | ||
547 | curr_lcla = d40_lcla_alloc_one(chan, desc); | ||
548 | first_lcla = curr_lcla; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * For linkback, we normally load the LCPA in the loop since we need to | ||
553 | * link it to the second LCLA and not the first. However, if we | ||
554 | * couldn't even get a first LCLA, then we have to run in LCPA and | ||
555 | * reload manually. | ||
556 | */ | ||
557 | if (!linkback || curr_lcla == -EINVAL) { | ||
558 | unsigned int flags = 0; | ||
559 | |||
560 | if (curr_lcla == -EINVAL) | ||
561 | flags |= LLI_TERM_INT; | ||
562 | |||
563 | d40_log_lli_lcpa_write(chan->lcpa, | ||
564 | &lli->dst[lli_current], | ||
565 | &lli->src[lli_current], | ||
566 | curr_lcla, | ||
567 | flags); | ||
568 | lli_current++; | ||
569 | } | ||
570 | |||
571 | if (curr_lcla < 0) | ||
572 | goto out; | ||
573 | |||
574 | for (; lli_current < lli_len; lli_current++) { | ||
575 | unsigned int lcla_offset = chan->phy_chan->num * 1024 + | ||
576 | 8 * curr_lcla * 2; | ||
577 | struct d40_log_lli *lcla = pool->base + lcla_offset; | ||
578 | unsigned int flags = 0; | ||
579 | int next_lcla; | ||
580 | |||
581 | if (lli_current + 1 < lli_len) | ||
582 | next_lcla = d40_lcla_alloc_one(chan, desc); | ||
583 | else | ||
584 | next_lcla = linkback ? first_lcla : -EINVAL; | ||
585 | |||
586 | if (cyclic || next_lcla == -EINVAL) | ||
587 | flags |= LLI_TERM_INT; | ||
588 | |||
589 | if (linkback && curr_lcla == first_lcla) { | ||
590 | /* First link goes in both LCPA and LCLA */ | ||
591 | d40_log_lli_lcpa_write(chan->lcpa, | ||
592 | &lli->dst[lli_current], | ||
593 | &lli->src[lli_current], | ||
594 | next_lcla, flags); | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * One unused LCLA in the cyclic case if the very first | ||
599 | * next_lcla fails... | ||
600 | */ | ||
601 | d40_log_lli_lcla_write(lcla, | ||
602 | &lli->dst[lli_current], | ||
603 | &lli->src[lli_current], | ||
604 | next_lcla, flags); | ||
605 | |||
606 | dma_sync_single_range_for_device(chan->base->dev, | ||
607 | pool->dma_addr, lcla_offset, | ||
608 | 2 * sizeof(struct d40_log_lli), | ||
609 | DMA_TO_DEVICE); | ||
610 | |||
611 | curr_lcla = next_lcla; | ||
612 | |||
613 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | ||
614 | lli_current++; | ||
615 | break; | ||
616 | } | ||
617 | } | ||
618 | |||
619 | out: | ||
620 | desc->lli_current = lli_current; | ||
621 | } | ||
622 | |||
623 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | ||
624 | { | ||
625 | if (chan_is_physical(d40c)) { | ||
626 | d40_phy_lli_load(d40c, d40d); | ||
627 | d40d->lli_current = d40d->lli_len; | ||
628 | } else | ||
629 | d40_log_lli_to_lcxa(d40c, d40d); | ||
630 | } | ||
631 | |||
415 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 632 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
416 | { | 633 | { |
417 | struct d40_desc *d; | 634 | struct d40_desc *d; |
@@ -443,68 +660,72 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
443 | return d; | 660 | return d; |
444 | } | 661 | } |
445 | 662 | ||
446 | /* Support functions for logical channels */ | 663 | static int d40_psize_2_burst_size(bool is_log, int psize) |
447 | |||
448 | static int d40_lcla_id_get(struct d40_chan *d40c) | ||
449 | { | 664 | { |
450 | int src_id = 0; | 665 | if (is_log) { |
451 | int dst_id = 0; | 666 | if (psize == STEDMA40_PSIZE_LOG_1) |
452 | struct d40_log_lli *lcla_lidx_base = | 667 | return 1; |
453 | d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; | 668 | } else { |
454 | int i; | 669 | if (psize == STEDMA40_PSIZE_PHY_1) |
455 | int lli_per_log = d40c->base->plat_data->llis_per_log; | 670 | return 1; |
456 | unsigned long flags; | 671 | } |
457 | 672 | ||
458 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | 673 | return 2 << psize; |
459 | return 0; | 674 | } |
460 | 675 | ||
461 | if (d40c->base->lcla_pool.num_blocks > 32) | 676 | /* |
462 | return -EINVAL; | 677 | * The dma only supports transmitting packages up to |
678 | * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of | ||
679 | * dma elements required to send the entire sg list | ||
680 | */ | ||
681 | static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) | ||
682 | { | ||
683 | int dmalen; | ||
684 | u32 max_w = max(data_width1, data_width2); | ||
685 | u32 min_w = min(data_width1, data_width2); | ||
686 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
463 | 687 | ||
464 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | 688 | if (seg_max > STEDMA40_MAX_SEG_SIZE) |
689 | seg_max -= (1 << max_w); | ||
465 | 690 | ||
466 | for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { | 691 | if (!IS_ALIGNED(size, 1 << max_w)) |
467 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | 692 | return -EINVAL; |
468 | (0x1 << i))) { | ||
469 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
470 | (0x1 << i); | ||
471 | break; | ||
472 | } | ||
473 | } | ||
474 | src_id = i; | ||
475 | if (src_id >= d40c->base->lcla_pool.num_blocks) | ||
476 | goto err; | ||
477 | 693 | ||
478 | for (; i < d40c->base->lcla_pool.num_blocks; i++) { | 694 | if (size <= seg_max) |
479 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | 695 | dmalen = 1; |
480 | (0x1 << i))) { | 696 | else { |
481 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | 697 | dmalen = size / seg_max; |
482 | (0x1 << i); | 698 | if (dmalen * seg_max < size) |
483 | break; | 699 | dmalen++; |
484 | } | ||
485 | } | 700 | } |
701 | return dmalen; | ||
702 | } | ||
486 | 703 | ||
487 | dst_id = i; | 704 | static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, |
488 | if (dst_id == src_id) | 705 | u32 data_width1, u32 data_width2) |
489 | goto err; | 706 | { |
490 | 707 | struct scatterlist *sg; | |
491 | d40c->lcla.src_id = src_id; | 708 | int i; |
492 | d40c->lcla.dst_id = dst_id; | 709 | int len = 0; |
493 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | 710 | int ret; |
494 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | ||
495 | 711 | ||
496 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | 712 | for_each_sg(sgl, sg, sg_len, i) { |
497 | return 0; | 713 | ret = d40_size_2_dmalen(sg_dma_len(sg), |
498 | err: | 714 | data_width1, data_width2); |
499 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | 715 | if (ret < 0) |
500 | return -EINVAL; | 716 | return ret; |
717 | len += ret; | ||
718 | } | ||
719 | return len; | ||
501 | } | 720 | } |
502 | 721 | ||
722 | /* Support functions for logical channels */ | ||
503 | 723 | ||
504 | static int d40_channel_execute_command(struct d40_chan *d40c, | 724 | static int d40_channel_execute_command(struct d40_chan *d40c, |
505 | enum d40_command command) | 725 | enum d40_command command) |
506 | { | 726 | { |
507 | int status, i; | 727 | u32 status; |
728 | int i; | ||
508 | void __iomem *active_reg; | 729 | void __iomem *active_reg; |
509 | int ret = 0; | 730 | int ret = 0; |
510 | unsigned long flags; | 731 | unsigned long flags; |
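Editor's note on d40_size_2_dmalen()/d40_sg_2_dmalen() above: the hardware moves at most STEDMA40_MAX_SEG_SIZE elements per link, an element being 1 << data_width bytes, so a transfer is split into ceil(size / seg_max) links after shrinking seg_max to a multiple of the wider of the two element sizes. A standalone model mirroring the arithmetic shown in the hunk; MAX_SEG_SIZE here is a placeholder for STEDMA40_MAX_SEG_SIZE, whose real value is not shown in this hunk.

	#include <stdio.h>

	#define MAX_SEG_SIZE 0xFFFF	/* elements per link (placeholder value) */

	#define ALIGN_UP(x, a)	 (((x) + (a) - 1) & ~((a) - 1))
	#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

	/* widths act as shift counts, i.e. log2 of the element size in bytes */
	static int size_2_dmalen(int size, unsigned int width1, unsigned int width2)
	{
		unsigned int max_w = width1 > width2 ? width1 : width2;
		unsigned int min_w = width1 < width2 ? width1 : width2;
		int seg_max = ALIGN_UP(MAX_SEG_SIZE << min_w, 1 << max_w);
		int dmalen;

		/* rounding up may exceed the limit: back off by one element */
		if (seg_max > MAX_SEG_SIZE)
			seg_max -= 1 << max_w;

		if (!IS_ALIGNED(size, 1 << max_w))
			return -1;	/* not a whole number of elements */

		if (size <= seg_max)
			return 1;

		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
		return dmalen;
	}

	int main(void)
	{
		printf("%d\n", size_2_dmalen(4096, 2, 2));	/* 1 link */
		printf("%d\n", size_2_dmalen(300000, 0, 0));	/* 5 links */
		printf("%d\n", size_2_dmalen(65536, 0, 2));	/* 2 links */
		return 0;
	}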
@@ -550,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c, | |||
550 | } | 771 | } |
551 | 772 | ||
552 | if (i == D40_SUSPEND_MAX_IT) { | 773 | if (i == D40_SUSPEND_MAX_IT) { |
553 | dev_err(&d40c->chan.dev->device, | 774 | chan_err(d40c, |
554 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", | 775 | "unable to suspend the chl %d (log: %d) status %x\n", |
555 | __func__, d40c->phy_chan->num, d40c->log_num, | 776 | d40c->phy_chan->num, d40c->log_num, |
556 | status); | 777 | status); |
557 | dump_stack(); | 778 | dump_stack(); |
558 | ret = -EBUSY; | 779 | ret = -EBUSY; |
@@ -567,51 +788,63 @@ done: | |||
567 | static void d40_term_all(struct d40_chan *d40c) | 788 | static void d40_term_all(struct d40_chan *d40c) |
568 | { | 789 | { |
569 | struct d40_desc *d40d; | 790 | struct d40_desc *d40d; |
570 | unsigned long flags; | ||
571 | 791 | ||
572 | /* Release active descriptors */ | 792 | /* Release active descriptors */ |
573 | while ((d40d = d40_first_active_get(d40c))) { | 793 | while ((d40d = d40_first_active_get(d40c))) { |
574 | d40_desc_remove(d40d); | 794 | d40_desc_remove(d40d); |
575 | |||
576 | /* Return desc to free-list */ | ||
577 | d40_desc_free(d40c, d40d); | 795 | d40_desc_free(d40c, d40d); |
578 | } | 796 | } |
579 | 797 | ||
580 | /* Release queued descriptors waiting for transfer */ | 798 | /* Release queued descriptors waiting for transfer */ |
581 | while ((d40d = d40_first_queued(d40c))) { | 799 | while ((d40d = d40_first_queued(d40c))) { |
582 | d40_desc_remove(d40d); | 800 | d40_desc_remove(d40d); |
583 | |||
584 | /* Return desc to free-list */ | ||
585 | d40_desc_free(d40c, d40d); | 801 | d40_desc_free(d40c, d40d); |
586 | } | 802 | } |
587 | 803 | ||
588 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
589 | 804 | ||
590 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | 805 | d40c->pending_tx = 0; |
591 | (~(0x1 << d40c->lcla.dst_id)); | 806 | d40c->busy = false; |
592 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | 807 | } |
593 | (~(0x1 << d40c->lcla.src_id)); | ||
594 | 808 | ||
595 | d40c->lcla.src_id = -1; | 809 | static void __d40_config_set_event(struct d40_chan *d40c, bool enable, |
596 | d40c->lcla.dst_id = -1; | 810 | u32 event, int reg) |
811 | { | ||
812 | void __iomem *addr = chan_base(d40c) + reg; | ||
813 | int tries; | ||
597 | 814 | ||
598 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | 815 | if (!enable) { |
816 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
817 | | ~D40_EVENTLINE_MASK(event), addr); | ||
818 | return; | ||
819 | } | ||
599 | 820 | ||
600 | d40c->pending_tx = 0; | 821 | /* |
601 | d40c->busy = false; | 822 | * The hardware sometimes doesn't register the enable when src and dst |
823 | * event lines are active on the same logical channel. Retry to ensure | ||
824 | * it does. Usually only one retry is sufficient. | ||
825 | */ | ||
826 | tries = 100; | ||
827 | while (--tries) { | ||
828 | writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ||
829 | | ~D40_EVENTLINE_MASK(event), addr); | ||
830 | |||
831 | if (readl(addr) & D40_EVENTLINE_MASK(event)) | ||
832 | break; | ||
833 | } | ||
834 | |||
835 | if (tries != 99) | ||
836 | dev_dbg(chan2dev(d40c), | ||
837 | "[%s] workaround enable S%cLNK (%d tries)\n", | ||
838 | __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', | ||
839 | 100 - tries); | ||
840 | |||
841 | WARN_ON(!tries); | ||
602 | } | 842 | } |
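Editor's note on the __d40_config_set_event() hunk above: because the hardware sometimes drops the event-line enable write, the driver now rewrites the activate value and reads the register back until the event bit sticks, bounded by a retry budget, and logs when more than one write was needed. A user-space sketch of that write/readback retry pattern; reg_write() below simulates hardware that loses the first write and is purely illustrative.

	#include <stdio.h>

	#define EVENT_MASK 0x1

	static unsigned int reg;
	static int drops = 1;		/* simulate: the first write is lost */

	static void reg_write(unsigned int val)
	{
		if (drops-- > 0)
			return;		/* hardware missed this write */
		reg = val;
	}

	static int enable_event(void)
	{
		int tries = 100;

		while (--tries) {
			reg_write(EVENT_MASK);
			if (reg & EVENT_MASK)	/* readback confirms it stuck */
				break;
		}

		if (tries != 99)
			printf("workaround: needed %d writes\n", 100 - tries);

		return tries ? 0 : -1;	/* -1: the bit never latched */
	}

	int main(void)
	{
		return enable_event();
	}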
603 | 843 | ||
604 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | 844 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) |
605 | { | 845 | { |
606 | u32 val; | ||
607 | unsigned long flags; | 846 | unsigned long flags; |
608 | 847 | ||
609 | /* Notice, that disable requires the physical channel to be stopped */ | ||
610 | if (do_enable) | ||
611 | val = D40_ACTIVATE_EVENTLINE; | ||
612 | else | ||
613 | val = D40_DEACTIVATE_EVENTLINE; | ||
614 | |||
615 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 848 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); |
616 | 849 | ||
617 | /* Enable event line connected to device (or memcpy) */ | 850 | /* Enable event line connected to device (or memcpy) */ |
@@ -619,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
619 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 852 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
620 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 853 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
621 | 854 | ||
622 | writel((val << D40_EVENTLINE_POS(event)) | | 855 | __d40_config_set_event(d40c, do_enable, event, |
623 | ~D40_EVENTLINE_MASK(event), | 856 | D40_CHAN_REG_SSLNK); |
624 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
625 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
626 | D40_CHAN_REG_SSLNK); | ||
627 | } | 857 | } |
858 | |||
628 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 859 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
629 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 860 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
630 | 861 | ||
631 | writel((val << D40_EVENTLINE_POS(event)) | | 862 | __d40_config_set_event(d40c, do_enable, event, |
632 | ~D40_EVENTLINE_MASK(event), | 863 | D40_CHAN_REG_SDLNK); |
633 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
634 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
635 | D40_CHAN_REG_SDLNK); | ||
636 | } | 864 | } |
637 | 865 | ||
638 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | 866 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); |
@@ -640,105 +868,171 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
640 | 868 | ||
641 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 869 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
642 | { | 870 | { |
643 | u32 val = 0; | 871 | void __iomem *chanbase = chan_base(d40c); |
872 | u32 val; | ||
873 | |||
874 | val = readl(chanbase + D40_CHAN_REG_SSLNK); | ||
875 | val |= readl(chanbase + D40_CHAN_REG_SDLNK); | ||
644 | 876 | ||
645 | /* If SSLNK or SDLNK is zero all events are disabled */ | ||
646 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | ||
647 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
648 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
649 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
650 | D40_CHAN_REG_SSLNK); | ||
651 | |||
652 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) | ||
653 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
654 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
655 | D40_CHAN_REG_SDLNK); | ||
656 | return val; | 877 | return val; |
657 | } | 878 | } |
658 | 879 | ||
659 | static void d40_config_enable_lidx(struct d40_chan *d40c) | 880 | static u32 d40_get_prmo(struct d40_chan *d40c) |
660 | { | 881 | { |
661 | /* Set LIDX for lcla */ | 882 | static const unsigned int phy_map[] = { |
662 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 883 | [STEDMA40_PCHAN_BASIC_MODE] |
663 | D40_SREG_ELEM_LOG_LIDX_MASK, | 884 | = D40_DREG_PRMO_PCHAN_BASIC, |
664 | d40c->base->virtbase + D40_DREG_PCBASE + | 885 | [STEDMA40_PCHAN_MODULO_MODE] |
665 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | 886 | = D40_DREG_PRMO_PCHAN_MODULO, |
887 | [STEDMA40_PCHAN_DOUBLE_DST_MODE] | ||
888 | = D40_DREG_PRMO_PCHAN_DOUBLE_DST, | ||
889 | }; | ||
890 | static const unsigned int log_map[] = { | ||
891 | [STEDMA40_LCHAN_SRC_PHY_DST_LOG] | ||
892 | = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, | ||
893 | [STEDMA40_LCHAN_SRC_LOG_DST_PHY] | ||
894 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, | ||
895 | [STEDMA40_LCHAN_SRC_LOG_DST_LOG] | ||
896 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, | ||
897 | }; | ||
666 | 898 | ||
667 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 899 | if (chan_is_physical(d40c)) |
668 | D40_SREG_ELEM_LOG_LIDX_MASK, | 900 | return phy_map[d40c->dma_cfg.mode_opt]; |
669 | d40c->base->virtbase + D40_DREG_PCBASE + | 901 | else |
670 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | 902 | return log_map[d40c->dma_cfg.mode_opt]; |
671 | } | 903 | } |
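
d40_get_prmo() above replaces open-coded bit math with two designated-initializer arrays indexed by the channel's mode_opt. A small sketch of that lookup pattern follows; the enum values and register encodings here are hypothetical, only the structure mirrors the patch.

/* Sketch: enum-indexed lookup table built with designated initializers. */
enum example_mode_opt { MODE_BASIC, MODE_MODULO, MODE_DOUBLE_DST };

static unsigned int example_mode_to_prmo(enum example_mode_opt opt)
{
	static const unsigned int map[] = {
		[MODE_BASIC]      = 0x0,	/* hypothetical encodings */
		[MODE_MODULO]     = 0x1,
		[MODE_DOUBLE_DST] = 0x2,
	};

	return map[opt];
}
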
672 | 904 | ||
673 | static int d40_config_write(struct d40_chan *d40c) | 905 | static void d40_config_write(struct d40_chan *d40c) |
674 | { | 906 | { |
675 | u32 addr_base; | 907 | u32 addr_base; |
676 | u32 var; | 908 | u32 var; |
677 | int res; | ||
678 | |||
679 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
680 | if (res) | ||
681 | return res; | ||
682 | 909 | ||
683 | /* Odd addresses are even addresses + 4 */ | 910 | /* Odd addresses are even addresses + 4 */ |
684 | addr_base = (d40c->phy_chan->num % 2) * 4; | 911 | addr_base = (d40c->phy_chan->num % 2) * 4; |
685 | /* Setup channel mode to logical or physical */ | 912 | /* Setup channel mode to logical or physical */ |
686 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << | 913 | var = ((u32)(chan_is_logical(d40c)) + 1) << |
687 | D40_CHAN_POS(d40c->phy_chan->num); | 914 | D40_CHAN_POS(d40c->phy_chan->num); |
688 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 915 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); |
689 | 916 | ||
690 | /* Setup operational mode option register */ | 917 | /* Setup operational mode option register */ |
691 | var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & | 918 | var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); |
692 | 0x3) << D40_CHAN_POS(d40c->phy_chan->num); | ||
693 | 919 | ||
694 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 920 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); |
695 | 921 | ||
696 | if (d40c->log_num != D40_PHY_CHAN) { | 922 | if (chan_is_logical(d40c)) { |
923 | int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) | ||
924 | & D40_SREG_ELEM_LOG_LIDX_MASK; | ||
925 | void __iomem *chanbase = chan_base(d40c); | ||
926 | |||
697 | /* Set default config for CFG reg */ | 927 | /* Set default config for CFG reg */ |
698 | writel(d40c->src_def_cfg, | 928 | writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); |
699 | d40c->base->virtbase + D40_DREG_PCBASE + | 929 | writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); |
700 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
701 | D40_CHAN_REG_SSCFG); | ||
702 | writel(d40c->dst_def_cfg, | ||
703 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
704 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
705 | D40_CHAN_REG_SDCFG); | ||
706 | 930 | ||
707 | d40_config_enable_lidx(d40c); | 931 | /* Set LIDX for lcla */ |
932 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); | ||
933 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); | ||
934 | } | ||
935 | } | ||
936 | |||
937 | static u32 d40_residue(struct d40_chan *d40c) | ||
938 | { | ||
939 | u32 num_elt; | ||
940 | |||
941 | if (chan_is_logical(d40c)) | ||
942 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | ||
943 | >> D40_MEM_LCSP2_ECNT_POS; | ||
944 | else { | ||
945 | u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); | ||
946 | num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) | ||
947 | >> D40_SREG_ELEM_PHY_ECNT_POS; | ||
948 | } | ||
949 | |||
950 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | ||
951 | } | ||
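
d40_residue() above computes the bytes left as the remaining element count times the element size, where data_width is stored as a log2 value. A worked sketch, with an illustrative helper name: ECNT = 16 with data_width = 2 means 4-byte elements, so 16 * 4 = 64 bytes remain.

#include <linux/types.h>

/* Sketch: residue in bytes = remaining elements * (1 << log2 element size). */
static u32 example_residue_bytes(u32 ecnt, u32 data_width_log2)
{
	return ecnt * (1u << data_width_log2);
}
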
952 | |||
953 | static bool d40_tx_is_linked(struct d40_chan *d40c) | ||
954 | { | ||
955 | bool is_link; | ||
956 | |||
957 | if (chan_is_logical(d40c)) | ||
958 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | ||
959 | else | ||
960 | is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) | ||
961 | & D40_SREG_LNK_PHYS_LNK_MASK; | ||
962 | |||
963 | return is_link; | ||
964 | } | ||
965 | |||
966 | static int d40_pause(struct d40_chan *d40c) | ||
967 | { | ||
968 | int res = 0; | ||
969 | unsigned long flags; | ||
970 | |||
971 | if (!d40c->busy) | ||
972 | return 0; | ||
973 | |||
974 | spin_lock_irqsave(&d40c->lock, flags); | ||
975 | |||
976 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
977 | if (res == 0) { | ||
978 | if (chan_is_logical(d40c)) { | ||
979 | d40_config_set_event(d40c, false); | ||
980 | /* Resume the other logical channels if any */ | ||
981 | if (d40_chan_has_events(d40c)) | ||
982 | res = d40_channel_execute_command(d40c, | ||
983 | D40_DMA_RUN); | ||
984 | } | ||
708 | } | 985 | } |
986 | |||
987 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
709 | return res; | 988 | return res; |
710 | } | 989 | } |
711 | 990 | ||
712 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 991 | static int d40_resume(struct d40_chan *d40c) |
713 | { | 992 | { |
714 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { | 993 | int res = 0; |
715 | d40_phy_lli_write(d40c->base->virtbase, | 994 | unsigned long flags; |
716 | d40c->phy_chan->num, | 995 | |
717 | d40d->lli_phy.dst, | 996 | if (!d40c->busy) |
718 | d40d->lli_phy.src); | 997 | return 0; |
719 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { | 998 | |
720 | struct d40_log_lli *src = d40d->lli_log.src; | 999 | spin_lock_irqsave(&d40c->lock, flags); |
721 | struct d40_log_lli *dst = d40d->lli_log.dst; | 1000 | |
722 | int s; | 1001 | if (d40c->base->rev == 0) |
723 | 1002 | if (chan_is_logical(d40c)) { | |
724 | src += d40d->lli_count; | 1003 | res = d40_channel_execute_command(d40c, |
725 | dst += d40d->lli_count; | 1004 | D40_DMA_SUSPEND_REQ); |
726 | s = d40_log_lli_write(d40c->lcpa, | 1005 | goto no_suspend; |
727 | d40c->lcla.src, d40c->lcla.dst, | ||
728 | dst, src, | ||
729 | d40c->base->plat_data->llis_per_log); | ||
730 | |||
731 | /* If s equals to zero, the job is not linked */ | ||
732 | if (s > 0) { | ||
733 | (void) dma_map_single(d40c->base->dev, d40c->lcla.src, | ||
734 | s * sizeof(struct d40_log_lli), | ||
735 | DMA_TO_DEVICE); | ||
736 | (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, | ||
737 | s * sizeof(struct d40_log_lli), | ||
738 | DMA_TO_DEVICE); | ||
739 | } | 1006 | } |
1007 | |||
1008 | /* If there are bytes left to transfer or a linked tx, resume the job */ | ||
1009 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | ||
1010 | |||
1011 | if (chan_is_logical(d40c)) | ||
1012 | d40_config_set_event(d40c, true); | ||
1013 | |||
1014 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | ||
740 | } | 1015 | } |
741 | d40d->lli_count += d40d->lli_tx_len; | 1016 | |
1017 | no_suspend: | ||
1018 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1019 | return res; | ||
1020 | } | ||
1021 | |||
1022 | static int d40_terminate_all(struct d40_chan *chan) | ||
1023 | { | ||
1024 | unsigned long flags; | ||
1025 | int ret = 0; | ||
1026 | |||
1027 | ret = d40_pause(chan); | ||
1028 | if (!ret && chan_is_physical(chan)) | ||
1029 | ret = d40_channel_execute_command(chan, D40_DMA_STOP); | ||
1030 | |||
1031 | spin_lock_irqsave(&chan->lock, flags); | ||
1032 | d40_term_all(chan); | ||
1033 | spin_unlock_irqrestore(&chan->lock, flags); | ||
1034 | |||
1035 | return ret; | ||
742 | } | 1036 | } |
743 | 1037 | ||
744 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 1038 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
@@ -751,7 +1045,12 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
751 | 1045 | ||
752 | spin_lock_irqsave(&d40c->lock, flags); | 1046 | spin_lock_irqsave(&d40c->lock, flags); |
753 | 1047 | ||
754 | tx->cookie = d40_assign_cookie(d40c, d40d); | 1048 | d40c->chan.cookie++; |
1049 | |||
1050 | if (d40c->chan.cookie < 0) | ||
1051 | d40c->chan.cookie = 1; | ||
1052 | |||
1053 | d40d->txd.cookie = d40c->chan.cookie; | ||
755 | 1054 | ||
756 | d40_desc_queue(d40c, d40d); | 1055 | d40_desc_queue(d40c, d40d); |
757 | 1056 | ||
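
The d40_tx_submit() change above inlines the cookie bookkeeping: the channel cookie increases monotonically and wraps back to 1 if the signed counter goes negative. A sketch of that pattern, using the generic dmaengine structures of this kernel era; the helper name is illustrative.

#include <linux/dmaengine.h>

/* Sketch: assign the next cookie to a descriptor under the channel lock. */
static dma_cookie_t example_assign_cookie(struct dma_chan *chan,
					  struct dma_async_tx_descriptor *txd)
{
	if (++chan->cookie < 0)		/* wrap on signed overflow */
		chan->cookie = 1;

	txd->cookie = chan->cookie;
	return txd->cookie;
}
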
@@ -765,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c) | |||
765 | if (d40c->base->rev == 0) { | 1064 | if (d40c->base->rev == 0) { |
766 | int err; | 1065 | int err; |
767 | 1066 | ||
768 | if (d40c->log_num != D40_PHY_CHAN) { | 1067 | if (chan_is_logical(d40c)) { |
769 | err = d40_channel_execute_command(d40c, | 1068 | err = d40_channel_execute_command(d40c, |
770 | D40_DMA_SUSPEND_REQ); | 1069 | D40_DMA_SUSPEND_REQ); |
771 | if (err) | 1070 | if (err) |
@@ -773,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c) | |||
773 | } | 1072 | } |
774 | } | 1073 | } |
775 | 1074 | ||
776 | if (d40c->log_num != D40_PHY_CHAN) | 1075 | if (chan_is_logical(d40c)) |
777 | d40_config_set_event(d40c, true); | 1076 | d40_config_set_event(d40c, true); |
778 | 1077 | ||
779 | return d40_channel_execute_command(d40c, D40_DMA_RUN); | 1078 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
@@ -814,25 +1113,42 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
814 | { | 1113 | { |
815 | struct d40_desc *d40d; | 1114 | struct d40_desc *d40d; |
816 | 1115 | ||
817 | if (!d40c->phy_chan) | ||
818 | return; | ||
819 | |||
820 | /* Get first active entry from list */ | 1116 | /* Get first active entry from list */ |
821 | d40d = d40_first_active_get(d40c); | 1117 | d40d = d40_first_active_get(d40c); |
822 | 1118 | ||
823 | if (d40d == NULL) | 1119 | if (d40d == NULL) |
824 | return; | 1120 | return; |
825 | 1121 | ||
826 | if (d40d->lli_count < d40d->lli_len) { | 1122 | if (d40d->cyclic) { |
1123 | /* | ||
1124 | * If this was a paritially loaded list, we need to reloaded | ||
1125 | * it, and only when the list is completed. We need to check | ||
1126 | * for done because the interrupt will hit for every link, and | ||
1127 | * not just the last one. | ||
1128 | */ | ||
1129 | if (d40d->lli_current < d40d->lli_len | ||
1130 | && !d40_tx_is_linked(d40c) | ||
1131 | && !d40_residue(d40c)) { | ||
1132 | d40_lcla_free_all(d40c, d40d); | ||
1133 | d40_desc_load(d40c, d40d); | ||
1134 | (void) d40_start(d40c); | ||
1135 | |||
1136 | if (d40d->lli_current == d40d->lli_len) | ||
1137 | d40d->lli_current = 0; | ||
1138 | } | ||
1139 | } else { | ||
1140 | d40_lcla_free_all(d40c, d40d); | ||
827 | 1141 | ||
828 | d40_desc_load(d40c, d40d); | 1142 | if (d40d->lli_current < d40d->lli_len) { |
829 | /* Start dma job */ | 1143 | d40_desc_load(d40c, d40d); |
830 | (void) d40_start(d40c); | 1144 | /* Start dma job */ |
831 | return; | 1145 | (void) d40_start(d40c); |
832 | } | 1146 | return; |
1147 | } | ||
833 | 1148 | ||
834 | if (d40_queue_start(d40c) == NULL) | 1149 | if (d40_queue_start(d40c) == NULL) |
835 | d40c->busy = false; | 1150 | d40c->busy = false; |
1151 | } | ||
836 | 1152 | ||
837 | d40c->pending_tx++; | 1153 | d40c->pending_tx++; |
838 | tasklet_schedule(&d40c->tasklet); | 1154 | tasklet_schedule(&d40c->tasklet); |
@@ -842,7 +1158,7 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
842 | static void dma_tasklet(unsigned long data) | 1158 | static void dma_tasklet(unsigned long data) |
843 | { | 1159 | { |
844 | struct d40_chan *d40c = (struct d40_chan *) data; | 1160 | struct d40_chan *d40c = (struct d40_chan *) data; |
845 | struct d40_desc *d40d_fin; | 1161 | struct d40_desc *d40d; |
846 | unsigned long flags; | 1162 | unsigned long flags; |
847 | dma_async_tx_callback callback; | 1163 | dma_async_tx_callback callback; |
848 | void *callback_param; | 1164 | void *callback_param; |
@@ -850,12 +1166,12 @@ static void dma_tasklet(unsigned long data) | |||
850 | spin_lock_irqsave(&d40c->lock, flags); | 1166 | spin_lock_irqsave(&d40c->lock, flags); |
851 | 1167 | ||
852 | /* Get first active entry from list */ | 1168 | /* Get first active entry from list */ |
853 | d40d_fin = d40_first_active_get(d40c); | 1169 | d40d = d40_first_active_get(d40c); |
854 | 1170 | if (d40d == NULL) | |
855 | if (d40d_fin == NULL) | ||
856 | goto err; | 1171 | goto err; |
857 | 1172 | ||
858 | d40c->completed = d40d_fin->txd.cookie; | 1173 | if (!d40d->cyclic) |
1174 | d40c->completed = d40d->txd.cookie; | ||
859 | 1175 | ||
860 | /* | 1176 | /* |
861 | * If terminating a channel pending_tx is set to zero. | 1177 | * If terminating a channel pending_tx is set to zero. |
@@ -867,19 +1183,21 @@ static void dma_tasklet(unsigned long data) | |||
867 | } | 1183 | } |
868 | 1184 | ||
869 | /* Callback to client */ | 1185 | /* Callback to client */ |
870 | callback = d40d_fin->txd.callback; | 1186 | callback = d40d->txd.callback; |
871 | callback_param = d40d_fin->txd.callback_param; | 1187 | callback_param = d40d->txd.callback_param; |
872 | 1188 | ||
873 | if (async_tx_test_ack(&d40d_fin->txd)) { | 1189 | if (!d40d->cyclic) { |
874 | d40_pool_lli_free(d40d_fin); | 1190 | if (async_tx_test_ack(&d40d->txd)) { |
875 | d40_desc_remove(d40d_fin); | 1191 | d40_pool_lli_free(d40c, d40d); |
876 | /* Return desc to free-list */ | 1192 | d40_desc_remove(d40d); |
877 | d40_desc_free(d40c, d40d_fin); | 1193 | d40_desc_free(d40c, d40d); |
878 | } else { | 1194 | } else { |
879 | if (!d40d_fin->is_in_client_list) { | 1195 | if (!d40d->is_in_client_list) { |
880 | d40_desc_remove(d40d_fin); | 1196 | d40_desc_remove(d40d); |
881 | list_add_tail(&d40d_fin->node, &d40c->client); | 1197 | d40_lcla_free_all(d40c, d40d); |
882 | d40d_fin->is_in_client_list = true; | 1198 | list_add_tail(&d40d->node, &d40c->client); |
1199 | d40d->is_in_client_list = true; | ||
1200 | } | ||
883 | } | 1201 | } |
884 | } | 1202 | } |
885 | 1203 | ||
@@ -890,13 +1208,13 @@ static void dma_tasklet(unsigned long data) | |||
890 | 1208 | ||
891 | spin_unlock_irqrestore(&d40c->lock, flags); | 1209 | spin_unlock_irqrestore(&d40c->lock, flags); |
892 | 1210 | ||
893 | if (callback) | 1211 | if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) |
894 | callback(callback_param); | 1212 | callback(callback_param); |
895 | 1213 | ||
896 | return; | 1214 | return; |
897 | 1215 | ||
898 | err: | 1216 | err: |
899 | /* Rescue manouver if receiving double interrupts */ | 1217 | /* Rescue manoeuvre if receiving double interrupts */ |
900 | if (d40c->pending_tx > 0) | 1218 | if (d40c->pending_tx > 0) |
901 | d40c->pending_tx--; | 1219 | d40c->pending_tx--; |
902 | spin_unlock_irqrestore(&d40c->lock, flags); | 1220 | spin_unlock_irqrestore(&d40c->lock, flags); |
@@ -919,7 +1237,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
919 | 1237 | ||
920 | int i; | 1238 | int i; |
921 | u32 regs[ARRAY_SIZE(il)]; | 1239 | u32 regs[ARRAY_SIZE(il)]; |
922 | u32 tmp; | ||
923 | u32 idx; | 1240 | u32 idx; |
924 | u32 row; | 1241 | u32 row; |
925 | long chan = -1; | 1242 | long chan = -1; |
@@ -946,9 +1263,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
946 | idx = chan & (BITS_PER_LONG - 1); | 1263 | idx = chan & (BITS_PER_LONG - 1); |
947 | 1264 | ||
948 | /* ACK interrupt */ | 1265 | /* ACK interrupt */ |
949 | tmp = readl(base->virtbase + il[row].clr); | 1266 | writel(1 << idx, base->virtbase + il[row].clr); |
950 | tmp |= 1 << idx; | ||
951 | writel(tmp, base->virtbase + il[row].clr); | ||
952 | 1267 | ||
953 | if (il[row].offset == D40_PHY_CHAN) | 1268 | if (il[row].offset == D40_PHY_CHAN) |
954 | d40c = base->lookup_phy_chans[idx]; | 1269 | d40c = base->lookup_phy_chans[idx]; |
@@ -959,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
959 | if (!il[row].is_error) | 1274 | if (!il[row].is_error) |
960 | dma_tc_handle(d40c); | 1275 | dma_tc_handle(d40c); |
961 | else | 1276 | else |
962 | dev_err(base->dev, | 1277 | d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", |
963 | "[%s] IRQ chan: %ld offset %d idx %d\n", | 1278 | chan, il[row].offset, idx); |
964 | __func__, chan, il[row].offset, idx); | ||
965 | 1279 | ||
966 | spin_unlock(&d40c->lock); | 1280 | spin_unlock(&d40c->lock); |
967 | } | 1281 | } |
@@ -971,41 +1285,57 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
971 | return IRQ_HANDLED; | 1285 | return IRQ_HANDLED; |
972 | } | 1286 | } |
973 | 1287 | ||
974 | |||
975 | static int d40_validate_conf(struct d40_chan *d40c, | 1288 | static int d40_validate_conf(struct d40_chan *d40c, |
976 | struct stedma40_chan_cfg *conf) | 1289 | struct stedma40_chan_cfg *conf) |
977 | { | 1290 | { |
978 | int res = 0; | 1291 | int res = 0; |
979 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | 1292 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); |
980 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | 1293 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); |
981 | bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 1294 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
982 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 1295 | |
1296 | if (!conf->dir) { | ||
1297 | chan_err(d40c, "Invalid direction.\n"); | ||
1298 | res = -EINVAL; | ||
1299 | } | ||
1300 | |||
1301 | if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && | ||
1302 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | ||
1303 | d40c->runtime_addr == 0) { | ||
1304 | |||
1305 | chan_err(d40c, "Invalid TX channel address (%d)\n", | ||
1306 | conf->dst_dev_type); | ||
1307 | res = -EINVAL; | ||
1308 | } | ||
1309 | |||
1310 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | ||
1311 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | ||
1312 | d40c->runtime_addr == 0) { | ||
1313 | chan_err(d40c, "Invalid RX channel address (%d)\n", | ||
1314 | conf->src_dev_type); | ||
1315 | res = -EINVAL; | ||
1316 | } | ||
983 | 1317 | ||
984 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && | 1318 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
985 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 1319 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
986 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | 1320 | chan_err(d40c, "Invalid dst\n"); |
987 | __func__); | ||
988 | res = -EINVAL; | 1321 | res = -EINVAL; |
989 | } | 1322 | } |
990 | 1323 | ||
991 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && | 1324 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
992 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 1325 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
993 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | 1326 | chan_err(d40c, "Invalid src\n"); |
994 | __func__); | ||
995 | res = -EINVAL; | 1327 | res = -EINVAL; |
996 | } | 1328 | } |
997 | 1329 | ||
998 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | 1330 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && |
999 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | 1331 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { |
1000 | dev_err(&d40c->chan.dev->device, | 1332 | chan_err(d40c, "No event line\n"); |
1001 | "[%s] No event line\n", __func__); | ||
1002 | res = -EINVAL; | 1333 | res = -EINVAL; |
1003 | } | 1334 | } |
1004 | 1335 | ||
1005 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | 1336 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && |
1006 | (src_event_group != dst_event_group)) { | 1337 | (src_event_group != dst_event_group)) { |
1007 | dev_err(&d40c->chan.dev->device, | 1338 | chan_err(d40c, "Invalid event group\n"); |
1008 | "[%s] Invalid event group\n", __func__); | ||
1009 | res = -EINVAL; | 1339 | res = -EINVAL; |
1010 | } | 1340 | } |
1011 | 1341 | ||
@@ -1014,9 +1344,20 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1014 | * DMAC HW supports it. Will be added to this driver, | 1344 | * DMAC HW supports it. Will be added to this driver, |
1015 | * in case any dma client requires it. | 1345 | * in case any dma client requires it. |
1016 | */ | 1346 | */ |
1017 | dev_err(&d40c->chan.dev->device, | 1347 | chan_err(d40c, "periph to periph not supported\n"); |
1018 | "[%s] periph to periph not supported\n", | 1348 | res = -EINVAL; |
1019 | __func__); | 1349 | } |
1350 | |||
1351 | if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * | ||
1352 | (1 << conf->src_info.data_width) != | ||
1353 | d40_psize_2_burst_size(is_log, conf->dst_info.psize) * | ||
1354 | (1 << conf->dst_info.data_width)) { | ||
1355 | /* | ||
1356 | * The DMAC hardware only supports | ||
1357 | * src (burst x width) == dst (burst x width) | ||
1358 | */ | ||
1359 | |||
1360 | chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); | ||
1020 | res = -EINVAL; | 1361 | res = -EINVAL; |
1021 | } | 1362 | } |
1022 | 1363 | ||
@@ -1082,7 +1423,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | |||
1082 | 1423 | ||
1083 | spin_lock_irqsave(&phy->lock, flags); | 1424 | spin_lock_irqsave(&phy->lock, flags); |
1084 | if (!log_event_line) { | 1425 | if (!log_event_line) { |
1085 | /* Physical interrupts are masked per physical full channel */ | ||
1086 | phy->allocated_dst = D40_ALLOC_FREE; | 1426 | phy->allocated_dst = D40_ALLOC_FREE; |
1087 | phy->allocated_src = D40_ALLOC_FREE; | 1427 | phy->allocated_src = D40_ALLOC_FREE; |
1088 | is_free = true; | 1428 | is_free = true; |
@@ -1119,10 +1459,7 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1119 | int j; | 1459 | int j; |
1120 | int log_num; | 1460 | int log_num; |
1121 | bool is_src; | 1461 | bool is_src; |
1122 | bool is_log = (d40c->dma_cfg.channel_type & | 1462 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
1123 | STEDMA40_CHANNEL_IN_OPER_MODE) | ||
1124 | == STEDMA40_CHANNEL_IN_LOG_MODE; | ||
1125 | |||
1126 | 1463 | ||
1127 | phys = d40c->base->phy_res; | 1464 | phys = d40c->base->phy_res; |
1128 | 1465 | ||
@@ -1223,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c) | |||
1223 | dma_has_cap(DMA_SLAVE, cap)) { | 1560 | dma_has_cap(DMA_SLAVE, cap)) { |
1224 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 1561 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; |
1225 | } else { | 1562 | } else { |
1226 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", | 1563 | chan_err(d40c, "No memcpy\n"); |
1227 | __func__); | ||
1228 | return -EINVAL; | 1564 | return -EINVAL; |
1229 | } | 1565 | } |
1230 | 1566 | ||
@@ -1249,22 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1249 | /* Release client owned descriptors */ | 1585 | /* Release client owned descriptors */ |
1250 | if (!list_empty(&d40c->client)) | 1586 | if (!list_empty(&d40c->client)) |
1251 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 1587 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
1252 | d40_pool_lli_free(d); | 1588 | d40_pool_lli_free(d40c, d); |
1253 | d40_desc_remove(d); | 1589 | d40_desc_remove(d); |
1254 | /* Return desc to free-list */ | ||
1255 | d40_desc_free(d40c, d); | 1590 | d40_desc_free(d40c, d); |
1256 | } | 1591 | } |
1257 | 1592 | ||
1258 | if (phy == NULL) { | 1593 | if (phy == NULL) { |
1259 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | 1594 | chan_err(d40c, "phy == null\n"); |
1260 | __func__); | ||
1261 | return -EINVAL; | 1595 | return -EINVAL; |
1262 | } | 1596 | } |
1263 | 1597 | ||
1264 | if (phy->allocated_src == D40_ALLOC_FREE && | 1598 | if (phy->allocated_src == D40_ALLOC_FREE && |
1265 | phy->allocated_dst == D40_ALLOC_FREE) { | 1599 | phy->allocated_dst == D40_ALLOC_FREE) { |
1266 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", | 1600 | chan_err(d40c, "channel already free\n"); |
1267 | __func__); | ||
1268 | return -EINVAL; | 1601 | return -EINVAL; |
1269 | } | 1602 | } |
1270 | 1603 | ||
@@ -1276,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1276 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1609 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1277 | is_src = true; | 1610 | is_src = true; |
1278 | } else { | 1611 | } else { |
1279 | dev_err(&d40c->chan.dev->device, | 1612 | chan_err(d40c, "Unknown direction\n"); |
1280 | "[%s] Unknown direction\n", __func__); | ||
1281 | return -EINVAL; | 1613 | return -EINVAL; |
1282 | } | 1614 | } |
1283 | 1615 | ||
1284 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1616 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1285 | if (res) { | 1617 | if (res) { |
1286 | dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", | 1618 | chan_err(d40c, "suspend failed\n"); |
1287 | __func__); | ||
1288 | return res; | 1619 | return res; |
1289 | } | 1620 | } |
1290 | 1621 | ||
1291 | if (d40c->log_num != D40_PHY_CHAN) { | 1622 | if (chan_is_logical(d40c)) { |
1292 | /* Release logical channel, deactivate the event line */ | 1623 | /* Release logical channel, deactivate the event line */ |
1293 | 1624 | ||
1294 | d40_config_set_event(d40c, false); | 1625 | d40_config_set_event(d40c, false); |
@@ -1304,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1304 | res = d40_channel_execute_command(d40c, | 1635 | res = d40_channel_execute_command(d40c, |
1305 | D40_DMA_RUN); | 1636 | D40_DMA_RUN); |
1306 | if (res) { | 1637 | if (res) { |
1307 | dev_err(&d40c->chan.dev->device, | 1638 | chan_err(d40c, |
1308 | "[%s] Executing RUN command\n", | 1639 | "Executing RUN command\n"); |
1309 | __func__); | ||
1310 | return res; | 1640 | return res; |
1311 | } | 1641 | } |
1312 | } | 1642 | } |
@@ -1319,44 +1649,19 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1319 | /* Release physical channel */ | 1649 | /* Release physical channel */ |
1320 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1650 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1321 | if (res) { | 1651 | if (res) { |
1322 | dev_err(&d40c->chan.dev->device, | 1652 | chan_err(d40c, "Failed to stop channel\n"); |
1323 | "[%s] Failed to stop channel\n", __func__); | ||
1324 | return res; | 1653 | return res; |
1325 | } | 1654 | } |
1326 | d40c->phy_chan = NULL; | 1655 | d40c->phy_chan = NULL; |
1327 | /* Invalidate channel type */ | 1656 | d40c->configured = false; |
1328 | d40c->dma_cfg.channel_type = 0; | ||
1329 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1657 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1330 | 1658 | ||
1331 | return 0; | 1659 | return 0; |
1332 | } | 1660 | } |
1333 | 1661 | ||
1334 | static int d40_pause(struct dma_chan *chan) | ||
1335 | { | ||
1336 | struct d40_chan *d40c = | ||
1337 | container_of(chan, struct d40_chan, chan); | ||
1338 | int res; | ||
1339 | unsigned long flags; | ||
1340 | |||
1341 | spin_lock_irqsave(&d40c->lock, flags); | ||
1342 | |||
1343 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
1344 | if (res == 0) { | ||
1345 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1346 | d40_config_set_event(d40c, false); | ||
1347 | /* Resume the other logical channels if any */ | ||
1348 | if (d40_chan_has_events(d40c)) | ||
1349 | res = d40_channel_execute_command(d40c, | ||
1350 | D40_DMA_RUN); | ||
1351 | } | ||
1352 | } | ||
1353 | |||
1354 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1355 | return res; | ||
1356 | } | ||
1357 | |||
1358 | static bool d40_is_paused(struct d40_chan *d40c) | 1662 | static bool d40_is_paused(struct d40_chan *d40c) |
1359 | { | 1663 | { |
1664 | void __iomem *chanbase = chan_base(d40c); | ||
1360 | bool is_paused = false; | 1665 | bool is_paused = false; |
1361 | unsigned long flags; | 1666 | unsigned long flags; |
1362 | void __iomem *active_reg; | 1667 | void __iomem *active_reg; |
@@ -1365,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1365 | 1670 | ||
1366 | spin_lock_irqsave(&d40c->lock, flags); | 1671 | spin_lock_irqsave(&d40c->lock, flags); |
1367 | 1672 | ||
1368 | if (d40c->log_num == D40_PHY_CHAN) { | 1673 | if (chan_is_physical(d40c)) { |
1369 | if (d40c->phy_chan->num % 2 == 0) | 1674 | if (d40c->phy_chan->num % 2 == 0) |
1370 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 1675 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1371 | else | 1676 | else |
@@ -1381,16 +1686,17 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
1381 | } | 1686 | } |
1382 | 1687 | ||
1383 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1688 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1384 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | 1689 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1385 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1690 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1386 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 1691 | status = readl(chanbase + D40_CHAN_REG_SDLNK); |
1692 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | ||
1387 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1693 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1388 | else { | 1694 | status = readl(chanbase + D40_CHAN_REG_SSLNK); |
1389 | dev_err(&d40c->chan.dev->device, | 1695 | } else { |
1390 | "[%s] Unknown direction\n", __func__); | 1696 | chan_err(d40c, "Unknown direction\n"); |
1391 | goto _exit; | 1697 | goto _exit; |
1392 | } | 1698 | } |
1393 | status = d40_chan_has_events(d40c); | 1699 | |
1394 | status = (status & D40_EVENTLINE_MASK(event)) >> | 1700 | status = (status & D40_EVENTLINE_MASK(event)) >> |
1395 | D40_EVENTLINE_POS(event); | 1701 | D40_EVENTLINE_POS(event); |
1396 | 1702 | ||
@@ -1403,240 +1709,198 @@ _exit: | |||
1403 | } | 1709 | } |
1404 | 1710 | ||
1405 | 1711 | ||
1406 | static bool d40_tx_is_linked(struct d40_chan *d40c) | 1712 | static u32 stedma40_residue(struct dma_chan *chan) |
1407 | { | ||
1408 | bool is_link; | ||
1409 | |||
1410 | if (d40c->log_num != D40_PHY_CHAN) | ||
1411 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | ||
1412 | else | ||
1413 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
1414 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1415 | D40_CHAN_REG_SDLNK) & | ||
1416 | D40_SREG_LNK_PHYS_LNK_MASK; | ||
1417 | return is_link; | ||
1418 | } | ||
1419 | |||
1420 | static u32 d40_residue(struct d40_chan *d40c) | ||
1421 | { | ||
1422 | u32 num_elt; | ||
1423 | |||
1424 | if (d40c->log_num != D40_PHY_CHAN) | ||
1425 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | ||
1426 | >> D40_MEM_LCSP2_ECNT_POS; | ||
1427 | else | ||
1428 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
1429 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
1430 | D40_CHAN_REG_SDELT) & | ||
1431 | D40_SREG_ELEM_PHY_ECNT_MASK) >> | ||
1432 | D40_SREG_ELEM_PHY_ECNT_POS; | ||
1433 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | ||
1434 | } | ||
1435 | |||
1436 | static int d40_resume(struct dma_chan *chan) | ||
1437 | { | 1713 | { |
1438 | struct d40_chan *d40c = | 1714 | struct d40_chan *d40c = |
1439 | container_of(chan, struct d40_chan, chan); | 1715 | container_of(chan, struct d40_chan, chan); |
1440 | int res = 0; | 1716 | u32 bytes_left; |
1441 | unsigned long flags; | 1717 | unsigned long flags; |
1442 | 1718 | ||
1443 | spin_lock_irqsave(&d40c->lock, flags); | 1719 | spin_lock_irqsave(&d40c->lock, flags); |
1720 | bytes_left = d40_residue(d40c); | ||
1721 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1444 | 1722 | ||
1445 | if (d40c->base->rev == 0) | 1723 | return bytes_left; |
1446 | if (d40c->log_num != D40_PHY_CHAN) { | 1724 | } |
1447 | res = d40_channel_execute_command(d40c, | ||
1448 | D40_DMA_SUSPEND_REQ); | ||
1449 | goto no_suspend; | ||
1450 | } | ||
1451 | 1725 | ||
1452 | /* If bytes left to transfer or linked tx resume job */ | 1726 | static int |
1453 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1727 | d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, |
1454 | if (d40c->log_num != D40_PHY_CHAN) | 1728 | struct scatterlist *sg_src, struct scatterlist *sg_dst, |
1455 | d40_config_set_event(d40c, true); | 1729 | unsigned int sg_len, dma_addr_t src_dev_addr, |
1456 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1730 | dma_addr_t dst_dev_addr) |
1457 | } | 1731 | { |
1732 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1733 | struct stedma40_half_channel_info *src_info = &cfg->src_info; | ||
1734 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; | ||
1735 | int ret; | ||
1458 | 1736 | ||
1459 | no_suspend: | 1737 | ret = d40_log_sg_to_lli(sg_src, sg_len, |
1460 | spin_unlock_irqrestore(&d40c->lock, flags); | 1738 | src_dev_addr, |
1461 | return res; | 1739 | desc->lli_log.src, |
1740 | chan->log_def.lcsp1, | ||
1741 | src_info->data_width, | ||
1742 | dst_info->data_width); | ||
1743 | |||
1744 | ret = d40_log_sg_to_lli(sg_dst, sg_len, | ||
1745 | dst_dev_addr, | ||
1746 | desc->lli_log.dst, | ||
1747 | chan->log_def.lcsp3, | ||
1748 | dst_info->data_width, | ||
1749 | src_info->data_width); | ||
1750 | |||
1751 | return ret < 0 ? ret : 0; | ||
1462 | } | 1752 | } |
1463 | 1753 | ||
1464 | static u32 stedma40_residue(struct dma_chan *chan) | 1754 | static int |
1755 | d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, | ||
1756 | struct scatterlist *sg_src, struct scatterlist *sg_dst, | ||
1757 | unsigned int sg_len, dma_addr_t src_dev_addr, | ||
1758 | dma_addr_t dst_dev_addr) | ||
1465 | { | 1759 | { |
1466 | struct d40_chan *d40c = | 1760 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1467 | container_of(chan, struct d40_chan, chan); | 1761 | struct stedma40_half_channel_info *src_info = &cfg->src_info; |
1468 | u32 bytes_left; | 1762 | struct stedma40_half_channel_info *dst_info = &cfg->dst_info; |
1469 | unsigned long flags; | 1763 | unsigned long flags = 0; |
1764 | int ret; | ||
1470 | 1765 | ||
1471 | spin_lock_irqsave(&d40c->lock, flags); | 1766 | if (desc->cyclic) |
1472 | bytes_left = d40_residue(d40c); | 1767 | flags |= LLI_CYCLIC | LLI_TERM_INT; |
1473 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1474 | 1768 | ||
1475 | return bytes_left; | 1769 | ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, |
1770 | desc->lli_phy.src, | ||
1771 | virt_to_phys(desc->lli_phy.src), | ||
1772 | chan->src_def_cfg, | ||
1773 | src_info, dst_info, flags); | ||
1774 | |||
1775 | ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, | ||
1776 | desc->lli_phy.dst, | ||
1777 | virt_to_phys(desc->lli_phy.dst), | ||
1778 | chan->dst_def_cfg, | ||
1779 | dst_info, src_info, flags); | ||
1780 | |||
1781 | dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, | ||
1782 | desc->lli_pool.size, DMA_TO_DEVICE); | ||
1783 | |||
1784 | return ret < 0 ? ret : 0; | ||
1476 | } | 1785 | } |
1477 | 1786 | ||
1478 | /* Public DMA functions in addition to the DMA engine framework */ | ||
1479 | 1787 | ||
1480 | int stedma40_set_psize(struct dma_chan *chan, | 1788 | static struct d40_desc * |
1481 | int src_psize, | 1789 | d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, |
1482 | int dst_psize) | 1790 | unsigned int sg_len, unsigned long dma_flags) |
1483 | { | 1791 | { |
1484 | struct d40_chan *d40c = | 1792 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
1485 | container_of(chan, struct d40_chan, chan); | 1793 | struct d40_desc *desc; |
1486 | unsigned long flags; | 1794 | int ret; |
1487 | 1795 | ||
1488 | spin_lock_irqsave(&d40c->lock, flags); | 1796 | desc = d40_desc_get(chan); |
1797 | if (!desc) | ||
1798 | return NULL; | ||
1489 | 1799 | ||
1490 | if (d40c->log_num != D40_PHY_CHAN) { | 1800 | desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, |
1491 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1801 | cfg->dst_info.data_width); |
1492 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1802 | if (desc->lli_len < 0) { |
1493 | d40c->log_def.lcsp1 |= src_psize << | 1803 | chan_err(chan, "Unaligned size\n"); |
1494 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1804 | goto err; |
1495 | d40c->log_def.lcsp3 |= dst_psize << | ||
1496 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | ||
1497 | goto out; | ||
1498 | } | 1805 | } |
1499 | 1806 | ||
1500 | if (src_psize == STEDMA40_PSIZE_PHY_1) | 1807 | ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); |
1501 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | 1808 | if (ret < 0) { |
1502 | else { | 1809 | chan_err(chan, "Could not allocate lli\n"); |
1503 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | 1810 | goto err; |
1504 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | ||
1505 | D40_SREG_CFG_PSIZE_POS); | ||
1506 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; | ||
1507 | } | 1811 | } |
1508 | 1812 | ||
1509 | if (dst_psize == STEDMA40_PSIZE_PHY_1) | ||
1510 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | ||
1511 | else { | ||
1512 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | ||
1513 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | ||
1514 | D40_SREG_CFG_PSIZE_POS); | ||
1515 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; | ||
1516 | } | ||
1517 | out: | ||
1518 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1519 | return 0; | ||
1520 | } | ||
1521 | EXPORT_SYMBOL(stedma40_set_psize); | ||
1522 | 1813 | ||
1523 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 1814 | desc->lli_current = 0; |
1524 | struct scatterlist *sgl_dst, | 1815 | desc->txd.flags = dma_flags; |
1525 | struct scatterlist *sgl_src, | 1816 | desc->txd.tx_submit = d40_tx_submit; |
1526 | unsigned int sgl_len, | ||
1527 | unsigned long dma_flags) | ||
1528 | { | ||
1529 | int res; | ||
1530 | struct d40_desc *d40d; | ||
1531 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | ||
1532 | chan); | ||
1533 | unsigned long flags; | ||
1534 | 1817 | ||
1535 | if (d40c->phy_chan == NULL) { | 1818 | dma_async_tx_descriptor_init(&desc->txd, &chan->chan); |
1536 | dev_err(&d40c->chan.dev->device, | ||
1537 | "[%s] Unallocated channel.\n", __func__); | ||
1538 | return ERR_PTR(-EINVAL); | ||
1539 | } | ||
1540 | 1819 | ||
1541 | spin_lock_irqsave(&d40c->lock, flags); | 1820 | return desc; |
1542 | d40d = d40_desc_get(d40c); | ||
1543 | 1821 | ||
1544 | if (d40d == NULL) | 1822 | err: |
1545 | goto err; | 1823 | d40_desc_free(chan, desc); |
1824 | return NULL; | ||
1825 | } | ||
1546 | 1826 | ||
1547 | d40d->lli_len = sgl_len; | 1827 | static dma_addr_t |
1548 | d40d->lli_tx_len = d40d->lli_len; | 1828 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) |
1549 | d40d->txd.flags = dma_flags; | 1829 | { |
1830 | struct stedma40_platform_data *plat = chan->base->plat_data; | ||
1831 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | ||
1832 | dma_addr_t addr = 0; | ||
1550 | 1833 | ||
1551 | if (d40c->log_num != D40_PHY_CHAN) { | 1834 | if (chan->runtime_addr) |
1552 | if (d40d->lli_len > d40c->base->plat_data->llis_per_log) | 1835 | return chan->runtime_addr; |
1553 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
1554 | 1836 | ||
1555 | if (sgl_len > 1) | 1837 | if (direction == DMA_FROM_DEVICE) |
1556 | /* | 1838 | addr = plat->dev_rx[cfg->src_dev_type]; |
1557 | * Check if there is space available in lcla. If not, | 1839 | else if (direction == DMA_TO_DEVICE) |
1558 | * split list into 1-length and run only in lcpa | 1840 | addr = plat->dev_tx[cfg->dst_dev_type]; |
1559 | * space. | ||
1560 | */ | ||
1561 | if (d40_lcla_id_get(d40c) != 0) | ||
1562 | d40d->lli_tx_len = 1; | ||
1563 | 1841 | ||
1564 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1842 | return addr; |
1565 | dev_err(&d40c->chan.dev->device, | 1843 | } |
1566 | "[%s] Out of memory\n", __func__); | ||
1567 | goto err; | ||
1568 | } | ||
1569 | 1844 | ||
1570 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, | 1845 | static struct dma_async_tx_descriptor * |
1571 | sgl_src, | 1846 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
1572 | sgl_len, | 1847 | struct scatterlist *sg_dst, unsigned int sg_len, |
1573 | d40d->lli_log.src, | 1848 | enum dma_data_direction direction, unsigned long dma_flags) |
1574 | d40c->log_def.lcsp1, | 1849 | { |
1575 | d40c->dma_cfg.src_info.data_width, | 1850 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
1576 | dma_flags & DMA_PREP_INTERRUPT, | 1851 | dma_addr_t src_dev_addr = 0; |
1577 | d40d->lli_tx_len, | 1852 | dma_addr_t dst_dev_addr = 0; |
1578 | d40c->base->plat_data->llis_per_log); | 1853 | struct d40_desc *desc; |
1579 | 1854 | unsigned long flags; | |
1580 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | 1855 | int ret; |
1581 | sgl_dst, | ||
1582 | sgl_len, | ||
1583 | d40d->lli_log.dst, | ||
1584 | d40c->log_def.lcsp3, | ||
1585 | d40c->dma_cfg.dst_info.data_width, | ||
1586 | dma_flags & DMA_PREP_INTERRUPT, | ||
1587 | d40d->lli_tx_len, | ||
1588 | d40c->base->plat_data->llis_per_log); | ||
1589 | 1856 | ||
1857 | if (!chan->phy_chan) { | ||
1858 | chan_err(chan, "Cannot prepare unallocated channel\n"); | ||
1859 | return NULL; | ||
1860 | } | ||
1590 | 1861 | ||
1591 | } else { | ||
1592 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | ||
1593 | dev_err(&d40c->chan.dev->device, | ||
1594 | "[%s] Out of memory\n", __func__); | ||
1595 | goto err; | ||
1596 | } | ||
1597 | 1862 | ||
1598 | res = d40_phy_sg_to_lli(sgl_src, | 1863 | spin_lock_irqsave(&chan->lock, flags); |
1599 | sgl_len, | ||
1600 | 0, | ||
1601 | d40d->lli_phy.src, | ||
1602 | d40d->lli_phy.src_addr, | ||
1603 | d40c->src_def_cfg, | ||
1604 | d40c->dma_cfg.src_info.data_width, | ||
1605 | d40c->dma_cfg.src_info.psize, | ||
1606 | true); | ||
1607 | 1864 | ||
1608 | if (res < 0) | 1865 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
1609 | goto err; | 1866 | if (desc == NULL) |
1867 | goto err; | ||
1610 | 1868 | ||
1611 | res = d40_phy_sg_to_lli(sgl_dst, | 1869 | if (sg_next(&sg_src[sg_len - 1]) == sg_src) |
1612 | sgl_len, | 1870 | desc->cyclic = true; |
1613 | 0, | ||
1614 | d40d->lli_phy.dst, | ||
1615 | d40d->lli_phy.dst_addr, | ||
1616 | d40c->dst_def_cfg, | ||
1617 | d40c->dma_cfg.dst_info.data_width, | ||
1618 | d40c->dma_cfg.dst_info.psize, | ||
1619 | true); | ||
1620 | 1871 | ||
1621 | if (res < 0) | 1872 | if (direction != DMA_NONE) { |
1622 | goto err; | 1873 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
1623 | 1874 | ||
1624 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1875 | if (direction == DMA_FROM_DEVICE) |
1625 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1876 | src_dev_addr = dev_addr; |
1877 | else if (direction == DMA_TO_DEVICE) | ||
1878 | dst_dev_addr = dev_addr; | ||
1626 | } | 1879 | } |
1627 | 1880 | ||
1628 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 1881 | if (chan_is_logical(chan)) |
1882 | ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, | ||
1883 | sg_len, src_dev_addr, dst_dev_addr); | ||
1884 | else | ||
1885 | ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, | ||
1886 | sg_len, src_dev_addr, dst_dev_addr); | ||
1629 | 1887 | ||
1630 | d40d->txd.tx_submit = d40_tx_submit; | 1888 | if (ret) { |
1889 | chan_err(chan, "Failed to prepare %s sg job: %d\n", | ||
1890 | chan_is_logical(chan) ? "log" : "phy", ret); | ||
1891 | goto err; | ||
1892 | } | ||
1631 | 1893 | ||
1632 | spin_unlock_irqrestore(&d40c->lock, flags); | 1894 | spin_unlock_irqrestore(&chan->lock, flags); |
1895 | |||
1896 | return &desc->txd; | ||
1633 | 1897 | ||
1634 | return &d40d->txd; | ||
1635 | err: | 1898 | err: |
1636 | spin_unlock_irqrestore(&d40c->lock, flags); | 1899 | if (desc) |
1900 | d40_desc_free(chan, desc); | ||
1901 | spin_unlock_irqrestore(&chan->lock, flags); | ||
1637 | return NULL; | 1902 | return NULL; |
1638 | } | 1903 | } |
1639 | EXPORT_SYMBOL(stedma40_memcpy_sg); | ||
1640 | 1904 | ||
1641 | bool stedma40_filter(struct dma_chan *chan, void *data) | 1905 | bool stedma40_filter(struct dma_chan *chan, void *data) |
1642 | { | 1906 | { |
@@ -1652,10 +1916,45 @@ bool stedma40_filter(struct dma_chan *chan, void *data) | |||
1652 | } else | 1916 | } else |
1653 | err = d40_config_memcpy(d40c); | 1917 | err = d40_config_memcpy(d40c); |
1654 | 1918 | ||
1919 | if (!err) | ||
1920 | d40c->configured = true; | ||
1921 | |||
1655 | return err == 0; | 1922 | return err == 0; |
1656 | } | 1923 | } |
1657 | EXPORT_SYMBOL(stedma40_filter); | 1924 | EXPORT_SYMBOL(stedma40_filter); |
1658 | 1925 | ||
1926 | static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) | ||
1927 | { | ||
1928 | bool realtime = d40c->dma_cfg.realtime; | ||
1929 | bool highprio = d40c->dma_cfg.high_priority; | ||
1930 | u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; | ||
1931 | u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; | ||
1932 | u32 event = D40_TYPE_TO_EVENT(dev_type); | ||
1933 | u32 group = D40_TYPE_TO_GROUP(dev_type); | ||
1934 | u32 bit = 1 << event; | ||
1935 | |||
1936 | /* Destination event lines are stored in the upper halfword */ | ||
1937 | if (!src) | ||
1938 | bit <<= 16; | ||
1939 | |||
1940 | writel(bit, d40c->base->virtbase + prioreg + group * 4); | ||
1941 | writel(bit, d40c->base->virtbase + rtreg + group * 4); | ||
1942 | } | ||
1943 | |||
1944 | static void d40_set_prio_realtime(struct d40_chan *d40c) | ||
1945 | { | ||
1946 | if (d40c->base->rev < 3) | ||
1947 | return; | ||
1948 | |||
1949 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | ||
1950 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1951 | __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); | ||
1952 | |||
1953 | if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || | ||
1954 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | ||
1955 | __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); | ||
1956 | } | ||
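
__d40_set_prio_rt() above places source event bits in the low halfword of the per-group priority/realtime registers and destination event bits in the high halfword. A minimal sketch of that bit placement; the helper name is an assumption, not driver API.

#include <linux/types.h>

/* Sketch: source events use bits 0..15, destination events bits 16..31. */
static u32 example_event_bit(u32 event, bool is_src)
{
	u32 bit = 1u << event;

	return is_src ? bit : bit << 16;
}
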
1957 | |||
1659 | /* DMA ENGINE functions */ | 1958 | /* DMA ENGINE functions */ |
1660 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 1959 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
1661 | { | 1960 | { |
@@ -1668,16 +1967,11 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1668 | 1967 | ||
1669 | d40c->completed = chan->cookie = 1; | 1968 | d40c->completed = chan->cookie = 1; |
1670 | 1969 | ||
1671 | /* | 1970 | /* If no dma configuration is set use default configuration (memcpy) */ |
1672 | * If no dma configuration is set (channel_type == 0) | 1971 | if (!d40c->configured) { |
1673 | * use default configuration (memcpy) | ||
1674 | */ | ||
1675 | if (d40c->dma_cfg.channel_type == 0) { | ||
1676 | err = d40_config_memcpy(d40c); | 1972 | err = d40_config_memcpy(d40c); |
1677 | if (err) { | 1973 | if (err) { |
1678 | dev_err(&d40c->chan.dev->device, | 1974 | chan_err(d40c, "Failed to configure memcpy channel\n"); |
1679 | "[%s] Failed to configure memcpy channel\n", | ||
1680 | __func__); | ||
1681 | goto fail; | 1975 | goto fail; |
1682 | } | 1976 | } |
1683 | } | 1977 | } |
@@ -1685,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1685 | 1979 | ||
1686 | err = d40_allocate_channel(d40c); | 1980 | err = d40_allocate_channel(d40c); |
1687 | if (err) { | 1981 | if (err) { |
1688 | dev_err(&d40c->chan.dev->device, | 1982 | chan_err(d40c, "Failed to allocate channel\n"); |
1689 | "[%s] Failed to allocate channel\n", __func__); | ||
1690 | goto fail; | 1983 | goto fail; |
1691 | } | 1984 | } |
1692 | 1985 | ||
1693 | /* Fill in basic CFG register values */ | 1986 | /* Fill in basic CFG register values */ |
1694 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 1987 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
1695 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | 1988 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
1989 | |||
1990 | d40_set_prio_realtime(d40c); | ||
1696 | 1991 | ||
1697 | if (d40c->log_num != D40_PHY_CHAN) { | 1992 | if (chan_is_logical(d40c)) { |
1698 | d40_log_cfg(&d40c->dma_cfg, | 1993 | d40_log_cfg(&d40c->dma_cfg, |
1699 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 1994 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
1700 | 1995 | ||
@@ -1712,14 +2007,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
1712 | * resource is free. In case of multiple logical channels | 2007 | * resource is free. In case of multiple logical channels |
1713 | * on the same physical resource, only the first write is necessary. | 2008 | * on the same physical resource, only the first write is necessary. |
1714 | */ | 2009 | */ |
1715 | if (is_free_phy) { | 2010 | if (is_free_phy) |
1716 | err = d40_config_write(d40c); | 2011 | d40_config_write(d40c); |
1717 | if (err) { | ||
1718 | dev_err(&d40c->chan.dev->device, | ||
1719 | "[%s] Failed to configure channel\n", | ||
1720 | __func__); | ||
1721 | } | ||
1722 | } | ||
1723 | fail: | 2012 | fail: |
1724 | spin_unlock_irqrestore(&d40c->lock, flags); | 2013 | spin_unlock_irqrestore(&d40c->lock, flags); |
1725 | return err; | 2014 | return err; |
@@ -1733,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1733 | unsigned long flags; | 2022 | unsigned long flags; |
1734 | 2023 | ||
1735 | if (d40c->phy_chan == NULL) { | 2024 | if (d40c->phy_chan == NULL) { |
1736 | dev_err(&d40c->chan.dev->device, | 2025 | chan_err(d40c, "Cannot free unallocated channel\n"); |
1737 | "[%s] Cannot free unallocated channel\n", __func__); | ||
1738 | return; | 2026 | return; |
1739 | } | 2027 | } |
1740 | 2028 | ||
@@ -1744,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan) | |||
1744 | err = d40_free_dma(d40c); | 2032 | err = d40_free_dma(d40c); |
1745 | 2033 | ||
1746 | if (err) | 2034 | if (err) |
1747 | dev_err(&d40c->chan.dev->device, | 2035 | chan_err(d40c, "Failed to free channel\n"); |
1748 | "[%s] Failed to free channel\n", __func__); | ||
1749 | spin_unlock_irqrestore(&d40c->lock, flags); | 2036 | spin_unlock_irqrestore(&d40c->lock, flags); |
1750 | } | 2037 | } |
1751 | 2038 | ||
@@ -1755,232 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1755 | size_t size, | 2042 | size_t size, |
1756 | unsigned long dma_flags) | 2043 | unsigned long dma_flags) |
1757 | { | 2044 | { |
1758 | struct d40_desc *d40d; | 2045 | struct scatterlist dst_sg; |
1759 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2046 | struct scatterlist src_sg; |
1760 | chan); | ||
1761 | unsigned long flags; | ||
1762 | int err = 0; | ||
1763 | |||
1764 | if (d40c->phy_chan == NULL) { | ||
1765 | dev_err(&d40c->chan.dev->device, | ||
1766 | "[%s] Channel is not allocated.\n", __func__); | ||
1767 | return ERR_PTR(-EINVAL); | ||
1768 | } | ||
1769 | |||
1770 | spin_lock_irqsave(&d40c->lock, flags); | ||
1771 | d40d = d40_desc_get(d40c); | ||
1772 | |||
1773 | if (d40d == NULL) { | ||
1774 | dev_err(&d40c->chan.dev->device, | ||
1775 | "[%s] Descriptor is NULL\n", __func__); | ||
1776 | goto err; | ||
1777 | } | ||
1778 | |||
1779 | d40d->txd.flags = dma_flags; | ||
1780 | |||
1781 | dma_async_tx_descriptor_init(&d40d->txd, chan); | ||
1782 | |||
1783 | d40d->txd.tx_submit = d40_tx_submit; | ||
1784 | |||
1785 | if (d40c->log_num != D40_PHY_CHAN) { | ||
1786 | |||
1787 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | ||
1788 | dev_err(&d40c->chan.dev->device, | ||
1789 | "[%s] Out of memory\n", __func__); | ||
1790 | goto err; | ||
1791 | } | ||
1792 | d40d->lli_len = 1; | ||
1793 | d40d->lli_tx_len = 1; | ||
1794 | |||
1795 | d40_log_fill_lli(d40d->lli_log.src, | ||
1796 | src, | ||
1797 | size, | ||
1798 | 0, | ||
1799 | d40c->log_def.lcsp1, | ||
1800 | d40c->dma_cfg.src_info.data_width, | ||
1801 | false, true); | ||
1802 | |||
1803 | d40_log_fill_lli(d40d->lli_log.dst, | ||
1804 | dst, | ||
1805 | size, | ||
1806 | 0, | ||
1807 | d40c->log_def.lcsp3, | ||
1808 | d40c->dma_cfg.dst_info.data_width, | ||
1809 | true, true); | ||
1810 | |||
1811 | } else { | ||
1812 | |||
1813 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | ||
1814 | dev_err(&d40c->chan.dev->device, | ||
1815 | "[%s] Out of memory\n", __func__); | ||
1816 | goto err; | ||
1817 | } | ||
1818 | |||
1819 | err = d40_phy_fill_lli(d40d->lli_phy.src, | ||
1820 | src, | ||
1821 | size, | ||
1822 | d40c->dma_cfg.src_info.psize, | ||
1823 | 0, | ||
1824 | d40c->src_def_cfg, | ||
1825 | true, | ||
1826 | d40c->dma_cfg.src_info.data_width, | ||
1827 | false); | ||
1828 | if (err) | ||
1829 | goto err_fill_lli; | ||
1830 | |||
1831 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | ||
1832 | dst, | ||
1833 | size, | ||
1834 | d40c->dma_cfg.dst_info.psize, | ||
1835 | 0, | ||
1836 | d40c->dst_def_cfg, | ||
1837 | true, | ||
1838 | d40c->dma_cfg.dst_info.data_width, | ||
1839 | false); | ||
1840 | |||
1841 | if (err) | ||
1842 | goto err_fill_lli; | ||
1843 | |||
1844 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | ||
1845 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
1846 | } | ||
1847 | |||
1848 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1849 | return &d40d->txd; | ||
1850 | |||
1851 | err_fill_lli: | ||
1852 | dev_err(&d40c->chan.dev->device, | ||
1853 | "[%s] Failed filling in PHY LLI\n", __func__); | ||
1854 | d40_pool_lli_free(d40d); | ||
1855 | err: | ||
1856 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
1857 | return NULL; | ||
1858 | } | ||
1859 | |||
1860 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | ||
1861 | struct d40_chan *d40c, | ||
1862 | struct scatterlist *sgl, | ||
1863 | unsigned int sg_len, | ||
1864 | enum dma_data_direction direction, | ||
1865 | unsigned long dma_flags) | ||
1866 | { | ||
1867 | dma_addr_t dev_addr = 0; | ||
1868 | int total_size; | ||
1869 | |||
1870 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | ||
1871 | dev_err(&d40c->chan.dev->device, | ||
1872 | "[%s] Out of memory\n", __func__); | ||
1873 | return -ENOMEM; | ||
1874 | } | ||
1875 | |||
1876 | d40d->lli_len = sg_len; | ||
1877 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) | ||
1878 | d40d->lli_tx_len = d40d->lli_len; | ||
1879 | else | ||
1880 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
1881 | 2047 | ||
1882 | if (sg_len > 1) | 2048 | sg_init_table(&dst_sg, 1); |
1883 | /* | 2049 | sg_init_table(&src_sg, 1); |
1884 | * Check if there is space available in lcla. | ||
1885 | * If not, split list into 1-length and run only | ||
1886 | * in lcpa space. | ||
1887 | */ | ||
1888 | if (d40_lcla_id_get(d40c) != 0) | ||
1889 | d40d->lli_tx_len = 1; | ||
1890 | |||
1891 | if (direction == DMA_FROM_DEVICE) | ||
1892 | if (d40c->runtime_addr) | ||
1893 | dev_addr = d40c->runtime_addr; | ||
1894 | else | ||
1895 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
1896 | else if (direction == DMA_TO_DEVICE) | ||
1897 | if (d40c->runtime_addr) | ||
1898 | dev_addr = d40c->runtime_addr; | ||
1899 | else | ||
1900 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
1901 | 2050 | ||
1902 | else | 2051 | sg_dma_address(&dst_sg) = dst; |
1903 | return -EINVAL; | 2052 | sg_dma_address(&src_sg) = src; |
1904 | 2053 | ||
1905 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 2054 | sg_dma_len(&dst_sg) = size; |
1906 | sgl, sg_len, | 2055 | sg_dma_len(&src_sg) = size; |
1907 | &d40d->lli_log, | ||
1908 | &d40c->log_def, | ||
1909 | d40c->dma_cfg.src_info.data_width, | ||
1910 | d40c->dma_cfg.dst_info.data_width, | ||
1911 | direction, | ||
1912 | dma_flags & DMA_PREP_INTERRUPT, | ||
1913 | dev_addr, d40d->lli_tx_len, | ||
1914 | d40c->base->plat_data->llis_per_log); | ||
1915 | |||
1916 | if (total_size < 0) | ||
1917 | return -EINVAL; | ||
1918 | 2056 | ||
1919 | return 0; | 2057 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); |
1920 | } | 2058 | } |
1921 | 2059 | ||
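The memcpy path above no longer fills physical or logical LLIs by hand: it wraps the raw source and destination addresses in single-entry scatterlists and hands them to the common d40_prep_sg() helper, as does the new d40_prep_memcpy_sg() below it. A minimal client-side sketch of driving this op through the generic dmaengine interface, illustrative only and not part of this patch; chan, dst_dma, src_dma and len are assumed to be set up elsewhere.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch, not part of this patch: submit one memcpy through
 * the device_prep_dma_memcpy op that d40_ops_init() wires up below.
 */
static int example_issue_memcpy(struct dma_chan *chan, dma_addr_t dst_dma,
                                dma_addr_t src_dma, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
                                                   len, DMA_PREP_INTERRUPT);
        if (!tx)
                return -EBUSY;

        cookie = tx->tx_submit(tx);             /* queue the descriptor */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* start the channel */
        return 0;
}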
1922 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | 2060 | static struct dma_async_tx_descriptor * |
1923 | struct d40_chan *d40c, | 2061 | d40_prep_memcpy_sg(struct dma_chan *chan, |
1924 | struct scatterlist *sgl, | 2062 | struct scatterlist *dst_sg, unsigned int dst_nents, |
1925 | unsigned int sgl_len, | 2063 | struct scatterlist *src_sg, unsigned int src_nents, |
1926 | enum dma_data_direction direction, | 2064 | unsigned long dma_flags) |
1927 | unsigned long dma_flags) | ||
1928 | { | 2065 | { |
1929 | dma_addr_t src_dev_addr; | 2066 | if (dst_nents != src_nents) |
1930 | dma_addr_t dst_dev_addr; | 2067 | return NULL; |
1931 | int res; | ||
1932 | |||
1933 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | ||
1934 | dev_err(&d40c->chan.dev->device, | ||
1935 | "[%s] Out of memory\n", __func__); | ||
1936 | return -ENOMEM; | ||
1937 | } | ||
1938 | |||
1939 | d40d->lli_len = sgl_len; | ||
1940 | d40d->lli_tx_len = sgl_len; | ||
1941 | |||
1942 | if (direction == DMA_FROM_DEVICE) { | ||
1943 | dst_dev_addr = 0; | ||
1944 | if (d40c->runtime_addr) | ||
1945 | src_dev_addr = d40c->runtime_addr; | ||
1946 | else | ||
1947 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | ||
1948 | } else if (direction == DMA_TO_DEVICE) { | ||
1949 | if (d40c->runtime_addr) | ||
1950 | dst_dev_addr = d40c->runtime_addr; | ||
1951 | else | ||
1952 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | ||
1953 | src_dev_addr = 0; | ||
1954 | } else | ||
1955 | return -EINVAL; | ||
1956 | |||
1957 | res = d40_phy_sg_to_lli(sgl, | ||
1958 | sgl_len, | ||
1959 | src_dev_addr, | ||
1960 | d40d->lli_phy.src, | ||
1961 | d40d->lli_phy.src_addr, | ||
1962 | d40c->src_def_cfg, | ||
1963 | d40c->dma_cfg.src_info.data_width, | ||
1964 | d40c->dma_cfg.src_info.psize, | ||
1965 | true); | ||
1966 | if (res < 0) | ||
1967 | return res; | ||
1968 | |||
1969 | res = d40_phy_sg_to_lli(sgl, | ||
1970 | sgl_len, | ||
1971 | dst_dev_addr, | ||
1972 | d40d->lli_phy.dst, | ||
1973 | d40d->lli_phy.dst_addr, | ||
1974 | d40c->dst_def_cfg, | ||
1975 | d40c->dma_cfg.dst_info.data_width, | ||
1976 | d40c->dma_cfg.dst_info.psize, | ||
1977 | true); | ||
1978 | if (res < 0) | ||
1979 | return res; | ||
1980 | 2068 | ||
1981 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 2069 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); |
1982 | d40d->lli_pool.size, DMA_TO_DEVICE); | ||
1983 | return 0; | ||
1984 | } | 2070 | } |
1985 | 2071 | ||
1986 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2072 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
@@ -1989,51 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
1989 | enum dma_data_direction direction, | 2075 | enum dma_data_direction direction, |
1990 | unsigned long dma_flags) | 2076 | unsigned long dma_flags) |
1991 | { | 2077 | { |
1992 | struct d40_desc *d40d; | 2078 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) |
1993 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 2079 | return NULL; |
1994 | chan); | ||
1995 | unsigned long flags; | ||
1996 | int err; | ||
1997 | |||
1998 | if (d40c->phy_chan == NULL) { | ||
1999 | dev_err(&d40c->chan.dev->device, | ||
2000 | "[%s] Cannot prepare unallocated channel\n", __func__); | ||
2001 | return ERR_PTR(-EINVAL); | ||
2002 | } | ||
2003 | |||
2004 | if (d40c->dma_cfg.pre_transfer) | ||
2005 | d40c->dma_cfg.pre_transfer(chan, | ||
2006 | d40c->dma_cfg.pre_transfer_data, | ||
2007 | sg_dma_len(sgl)); | ||
2008 | 2080 | ||
2009 | spin_lock_irqsave(&d40c->lock, flags); | 2081 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
2010 | d40d = d40_desc_get(d40c); | 2082 | } |
2011 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2012 | 2083 | ||
2013 | if (d40d == NULL) | 2084 | static struct dma_async_tx_descriptor * |
2014 | return NULL; | 2085 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2086 | size_t buf_len, size_t period_len, | ||
2087 | enum dma_data_direction direction) | ||
2088 | { | ||
2089 | unsigned int periods = buf_len / period_len; | ||
2090 | struct dma_async_tx_descriptor *txd; | ||
2091 | struct scatterlist *sg; | ||
2092 | int i; | ||
2015 | 2093 | ||
2016 | if (d40c->log_num != D40_PHY_CHAN) | 2094 | sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); |
2017 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2095 | for (i = 0; i < periods; i++) { |
2018 | direction, dma_flags); | 2096 | sg_dma_address(&sg[i]) = dma_addr; |
2019 | else | 2097 | sg_dma_len(&sg[i]) = period_len; |
2020 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 2098 | dma_addr += period_len; |
2021 | direction, dma_flags); | ||
2022 | if (err) { | ||
2023 | dev_err(&d40c->chan.dev->device, | ||
2024 | "[%s] Failed to prepare %s slave sg job: %d\n", | ||
2025 | __func__, | ||
2026 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | ||
2027 | return NULL; | ||
2028 | } | 2099 | } |
2029 | 2100 | ||
2030 | d40d->txd.flags = dma_flags; | 2101 | sg[periods].offset = 0; |
2102 | sg[periods].length = 0; | ||
2103 | sg[periods].page_link = | ||
2104 | ((unsigned long)sg | 0x01) & ~0x02; | ||
2031 | 2105 | ||
2032 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2106 | txd = d40_prep_sg(chan, sg, sg, periods, direction, |
2107 | DMA_PREP_INTERRUPT); | ||
2033 | 2108 | ||
2034 | d40d->txd.tx_submit = d40_tx_submit; | 2109 | kfree(sg); |
2035 | 2110 | ||
2036 | return &d40d->txd; | 2111 | return txd; |
2037 | } | 2112 | } |
2038 | 2113 | ||
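dma40_prep_dma_cyclic() above emulates a cyclic transfer with a temporary scatterlist: one entry per period plus a terminator entry whose page_link is pointed back at the first entry (bit 0 marks it as a chain entry), so d40_prep_sg() effectively sees a ring. A hedged client-side sketch, for example a double-buffered audio stream, illustrative only; chan, buf_dma, buf_len, period_len and the callback are placeholders.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch, not part of this patch: start a cyclic transfer
 * over a ring buffer via the device_prep_dma_cyclic op added above.
 */
static void example_start_cyclic(struct dma_chan *chan, dma_addr_t buf_dma,
                                 size_t buf_len, size_t period_len,
                                 dma_async_tx_callback period_done, void *arg)
{
        struct dma_async_tx_descriptor *txd;

        txd = chan->device->device_prep_dma_cyclic(chan, buf_dma, buf_len,
                                                   period_len, DMA_TO_DEVICE);
        if (!txd)
                return;

        txd->callback = period_done;    /* by convention, runs once per period */
        txd->callback_param = arg;

        txd->tx_submit(txd);
        dma_async_issue_pending(chan);
}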
2039 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 2114 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
@@ -2046,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2046 | int ret; | 2121 | int ret; |
2047 | 2122 | ||
2048 | if (d40c->phy_chan == NULL) { | 2123 | if (d40c->phy_chan == NULL) { |
2049 | dev_err(&d40c->chan.dev->device, | 2124 | chan_err(d40c, "Cannot read status of unallocated channel\n"); |
2050 | "[%s] Cannot read status of unallocated channel\n", | ||
2051 | __func__); | ||
2052 | return -EINVAL; | 2125 | return -EINVAL; |
2053 | } | 2126 | } |
2054 | 2127 | ||
@@ -2072,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan) | |||
2072 | unsigned long flags; | 2145 | unsigned long flags; |
2073 | 2146 | ||
2074 | if (d40c->phy_chan == NULL) { | 2147 | if (d40c->phy_chan == NULL) { |
2075 | dev_err(&d40c->chan.dev->device, | 2148 | chan_err(d40c, "Channel is not allocated!\n"); |
2076 | "[%s] Channel is not allocated!\n", __func__); | ||
2077 | return; | 2149 | return; |
2078 | } | 2150 | } |
2079 | 2151 | ||
@@ -2166,25 +2238,45 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2166 | return; | 2238 | return; |
2167 | } | 2239 | } |
2168 | 2240 | ||
2169 | if (config_maxburst >= 16) | 2241 | if (chan_is_logical(d40c)) { |
2170 | psize = STEDMA40_PSIZE_LOG_16; | 2242 | if (config_maxburst >= 16) |
2171 | else if (config_maxburst >= 8) | 2243 | psize = STEDMA40_PSIZE_LOG_16; |
2172 | psize = STEDMA40_PSIZE_LOG_8; | 2244 | else if (config_maxburst >= 8) |
2173 | else if (config_maxburst >= 4) | 2245 | psize = STEDMA40_PSIZE_LOG_8; |
2174 | psize = STEDMA40_PSIZE_LOG_4; | 2246 | else if (config_maxburst >= 4) |
2175 | else | 2247 | psize = STEDMA40_PSIZE_LOG_4; |
2176 | psize = STEDMA40_PSIZE_LOG_1; | 2248 | else |
2249 | psize = STEDMA40_PSIZE_LOG_1; | ||
2250 | } else { | ||
2251 | if (config_maxburst >= 16) | ||
2252 | psize = STEDMA40_PSIZE_PHY_16; | ||
2253 | else if (config_maxburst >= 8) | ||
2254 | psize = STEDMA40_PSIZE_PHY_8; | ||
2255 | else if (config_maxburst >= 4) | ||
2256 | psize = STEDMA40_PSIZE_PHY_4; | ||
2257 | else if (config_maxburst >= 2) | ||
2258 | psize = STEDMA40_PSIZE_PHY_2; | ||
2259 | else | ||
2260 | psize = STEDMA40_PSIZE_PHY_1; | ||
2261 | } | ||
2177 | 2262 | ||
2178 | /* Set up all the endpoint configs */ | 2263 | /* Set up all the endpoint configs */ |
2179 | cfg->src_info.data_width = addr_width; | 2264 | cfg->src_info.data_width = addr_width; |
2180 | cfg->src_info.psize = psize; | 2265 | cfg->src_info.psize = psize; |
2181 | cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; | 2266 | cfg->src_info.big_endian = false; |
2182 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2267 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2183 | cfg->dst_info.data_width = addr_width; | 2268 | cfg->dst_info.data_width = addr_width; |
2184 | cfg->dst_info.psize = psize; | 2269 | cfg->dst_info.psize = psize; |
2185 | cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; | 2270 | cfg->dst_info.big_endian = false; |
2186 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2271 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
2187 | 2272 | ||
2273 | /* Fill in register values */ | ||
2274 | if (chan_is_logical(d40c)) | ||
2275 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
2276 | else | ||
2277 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | ||
2278 | &d40c->dst_def_cfg, false); | ||
2279 | |||
2188 | /* These settings will take precedence later */ | 2280 | /* These settings will take precedence later */ |
2189 | d40c->runtime_addr = config_addr; | 2281 | d40c->runtime_addr = config_addr; |
2190 | d40c->runtime_direction = config->direction; | 2282 | d40c->runtime_direction = config->direction; |
@@ -2200,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2200 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 2292 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2201 | unsigned long arg) | 2293 | unsigned long arg) |
2202 | { | 2294 | { |
2203 | unsigned long flags; | ||
2204 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2295 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2205 | 2296 | ||
2206 | if (d40c->phy_chan == NULL) { | 2297 | if (d40c->phy_chan == NULL) { |
2207 | dev_err(&d40c->chan.dev->device, | 2298 | chan_err(d40c, "Channel is not allocated!\n"); |
2208 | "[%s] Channel is not allocated!\n", __func__); | ||
2209 | return -EINVAL; | 2299 | return -EINVAL; |
2210 | } | 2300 | } |
2211 | 2301 | ||
2212 | switch (cmd) { | 2302 | switch (cmd) { |
2213 | case DMA_TERMINATE_ALL: | 2303 | case DMA_TERMINATE_ALL: |
2214 | spin_lock_irqsave(&d40c->lock, flags); | 2304 | return d40_terminate_all(d40c); |
2215 | d40_term_all(d40c); | ||
2216 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
2217 | return 0; | ||
2218 | case DMA_PAUSE: | 2305 | case DMA_PAUSE: |
2219 | return d40_pause(chan); | 2306 | return d40_pause(d40c); |
2220 | case DMA_RESUME: | 2307 | case DMA_RESUME: |
2221 | return d40_resume(chan); | 2308 | return d40_resume(d40c); |
2222 | case DMA_SLAVE_CONFIG: | 2309 | case DMA_SLAVE_CONFIG: |
2223 | d40_set_runtime_config(chan, | 2310 | d40_set_runtime_config(chan, |
2224 | (struct dma_slave_config *) arg); | 2311 | (struct dma_slave_config *) arg); |
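The reworked d40_set_runtime_config() above now chooses the burst setting per channel mode (STEDMA40_PSIZE_LOG_* for logical channels, STEDMA40_PSIZE_PHY_* for physical ones) and immediately fills in the register templates through d40_log_cfg()/d40_phy_cfg(). Its input is the ordinary struct dma_slave_config that a peripheral driver passes in via the DMA_SLAVE_CONFIG command handled here; a minimal sketch of that client side, illustrative only, with an invented FIFO address and burst length.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch, not part of this patch: configure a channel for
 * memory-to-peripheral transfers before preparing slave sg jobs.
 */
static int example_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_TO_DEVICE,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,    /* maps to PSIZE_LOG_8 or PSIZE_PHY_8 above */
        };

        return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                            (unsigned long)&cfg);
}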
@@ -2247,10 +2334,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2247 | d40c->base = base; | 2334 | d40c->base = base; |
2248 | d40c->chan.device = dma; | 2335 | d40c->chan.device = dma; |
2249 | 2336 | ||
2250 | /* Invalidate lcla element */ | ||
2251 | d40c->lcla.src_id = -1; | ||
2252 | d40c->lcla.dst_id = -1; | ||
2253 | |||
2254 | spin_lock_init(&d40c->lock); | 2337 | spin_lock_init(&d40c->lock); |
2255 | 2338 | ||
2256 | d40c->log_num = D40_PHY_CHAN; | 2339 | d40c->log_num = D40_PHY_CHAN; |
@@ -2267,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
2267 | } | 2350 | } |
2268 | } | 2351 | } |
2269 | 2352 | ||
2353 | static void d40_ops_init(struct d40_base *base, struct dma_device *dev) | ||
2354 | { | ||
2355 | if (dma_has_cap(DMA_SLAVE, dev->cap_mask)) | ||
2356 | dev->device_prep_slave_sg = d40_prep_slave_sg; | ||
2357 | |||
2358 | if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { | ||
2359 | dev->device_prep_dma_memcpy = d40_prep_memcpy; | ||
2360 | |||
2361 | /* | ||
2362 | * This controller can only access addresses at even | ||
2363 | * 32-bit boundaries, i.e. 2^2 | ||
2364 | */ | ||
2365 | dev->copy_align = 2; | ||
2366 | } | ||
2367 | |||
2368 | if (dma_has_cap(DMA_SG, dev->cap_mask)) | ||
2369 | dev->device_prep_dma_sg = d40_prep_memcpy_sg; | ||
2370 | |||
2371 | if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) | ||
2372 | dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; | ||
2373 | |||
2374 | dev->device_alloc_chan_resources = d40_alloc_chan_resources; | ||
2375 | dev->device_free_chan_resources = d40_free_chan_resources; | ||
2376 | dev->device_issue_pending = d40_issue_pending; | ||
2377 | dev->device_tx_status = d40_tx_status; | ||
2378 | dev->device_control = d40_control; | ||
2379 | dev->dev = base->dev; | ||
2380 | } | ||
2381 | |||
2270 | static int __init d40_dmaengine_init(struct d40_base *base, | 2382 | static int __init d40_dmaengine_init(struct d40_base *base, |
2271 | int num_reserved_chans) | 2383 | int num_reserved_chans) |
2272 | { | 2384 | { |
@@ -2277,22 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2277 | 2389 | ||
2278 | dma_cap_zero(base->dma_slave.cap_mask); | 2390 | dma_cap_zero(base->dma_slave.cap_mask); |
2279 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 2391 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2392 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | ||
2280 | 2393 | ||
2281 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 2394 | d40_ops_init(base, &base->dma_slave); |
2282 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | ||
2283 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2284 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | ||
2285 | base->dma_slave.device_tx_status = d40_tx_status; | ||
2286 | base->dma_slave.device_issue_pending = d40_issue_pending; | ||
2287 | base->dma_slave.device_control = d40_control; | ||
2288 | base->dma_slave.dev = base->dev; | ||
2289 | 2395 | ||
2290 | err = dma_async_device_register(&base->dma_slave); | 2396 | err = dma_async_device_register(&base->dma_slave); |
2291 | 2397 | ||
2292 | if (err) { | 2398 | if (err) { |
2293 | dev_err(base->dev, | 2399 | d40_err(base->dev, "Failed to register slave channels\n"); |
2294 | "[%s] Failed to register slave channels\n", | ||
2295 | __func__); | ||
2296 | goto failure1; | 2400 | goto failure1; |
2297 | } | 2401 | } |
2298 | 2402 | ||
@@ -2301,27 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2301 | 2405 | ||
2302 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2406 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2303 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2407 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2408 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); | ||
2304 | 2409 | ||
2305 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 2410 | d40_ops_init(base, &base->dma_memcpy); |
2306 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | ||
2307 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2308 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | ||
2309 | base->dma_memcpy.device_tx_status = d40_tx_status; | ||
2310 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | ||
2311 | base->dma_memcpy.device_control = d40_control; | ||
2312 | base->dma_memcpy.dev = base->dev; | ||
2313 | /* | ||
2314 | * This controller can only access address at even | ||
2315 | * 32bit boundaries, i.e. 2^2 | ||
2316 | */ | ||
2317 | base->dma_memcpy.copy_align = 2; | ||
2318 | 2411 | ||
2319 | err = dma_async_device_register(&base->dma_memcpy); | 2412 | err = dma_async_device_register(&base->dma_memcpy); |
2320 | 2413 | ||
2321 | if (err) { | 2414 | if (err) { |
2322 | dev_err(base->dev, | 2415 | d40_err(base->dev, |
2323 | "[%s] Failed to register memcpy only channels\n", | 2416 | "Failed to register memcpy only channels\n"); |
2324 | __func__); | ||
2325 | goto failure2; | 2417 | goto failure2; |
2326 | } | 2418 | } |
2327 | 2419 | ||
@@ -2331,22 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
2331 | dma_cap_zero(base->dma_both.cap_mask); | 2423 | dma_cap_zero(base->dma_both.cap_mask); |
2332 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2424 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2333 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2425 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2426 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); | ||
2427 | dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); | ||
2334 | 2428 | ||
2335 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 2429 | d40_ops_init(base, &base->dma_both); |
2336 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | ||
2337 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | ||
2338 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | ||
2339 | base->dma_both.device_tx_status = d40_tx_status; | ||
2340 | base->dma_both.device_issue_pending = d40_issue_pending; | ||
2341 | base->dma_both.device_control = d40_control; | ||
2342 | base->dma_both.dev = base->dev; | ||
2343 | base->dma_both.copy_align = 2; | ||
2344 | err = dma_async_device_register(&base->dma_both); | 2430 | err = dma_async_device_register(&base->dma_both); |
2345 | 2431 | ||
2346 | if (err) { | 2432 | if (err) { |
2347 | dev_err(base->dev, | 2433 | d40_err(base->dev, |
2348 | "[%s] Failed to register logical and physical capable channels\n", | 2434 | "Failed to register logical and physical capable channels\n"); |
2349 | __func__); | ||
2350 | goto failure3; | 2435 | goto failure3; |
2351 | } | 2436 | } |
2352 | return 0; | 2437 | return 0; |
@@ -2387,9 +2472,11 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2387 | 2472 | ||
2388 | /* Mark disabled channels as occupied */ | 2473 | /* Mark disabled channels as occupied */ |
2389 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | 2474 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { |
2390 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2475 | int chan = base->plat_data->disabled_channels[i]; |
2391 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2476 | |
2392 | num_phy_chans_avail--; | 2477 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
2478 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | ||
2479 | num_phy_chans_avail--; | ||
2393 | } | 2480 | } |
2394 | 2481 | ||
2395 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 2482 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
@@ -2420,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2420 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | 2507 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, |
2421 | /* | 2508 | /* |
2422 | * D40_DREG_PERIPHID2 Depends on HW revision: | 2509 | * D40_DREG_PERIPHID2 Depends on HW revision: |
2423 | * MOP500/HREF ED has 0x0008, | 2510 | * DB8500ed has 0x0008, |
2424 | * ? has 0x0018, | 2511 | * ? has 0x0018, |
2425 | * HREF V1 has 0x0028 | 2512 | * DB8500v1 has 0x0028 |
2513 | * DB8500v2 has 0x0038 | ||
2426 | */ | 2514 | */ |
2427 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | 2515 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, |
2428 | 2516 | ||
@@ -2441,12 +2529,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2441 | int num_phy_chans; | 2529 | int num_phy_chans; |
2442 | int i; | 2530 | int i; |
2443 | u32 val; | 2531 | u32 val; |
2532 | u32 rev; | ||
2444 | 2533 | ||
2445 | clk = clk_get(&pdev->dev, NULL); | 2534 | clk = clk_get(&pdev->dev, NULL); |
2446 | 2535 | ||
2447 | if (IS_ERR(clk)) { | 2536 | if (IS_ERR(clk)) { |
2448 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | 2537 | d40_err(&pdev->dev, "No matching clock found\n"); |
2449 | __func__); | ||
2450 | goto failure; | 2538 | goto failure; |
2451 | } | 2539 | } |
2452 | 2540 | ||
@@ -2469,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2469 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2557 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { |
2470 | if (dma_id_regs[i].val != | 2558 | if (dma_id_regs[i].val != |
2471 | readl(virtbase + dma_id_regs[i].reg)) { | 2559 | readl(virtbase + dma_id_regs[i].reg)) { |
2472 | dev_err(&pdev->dev, | 2560 | d40_err(&pdev->dev, |
2473 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2561 | "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", |
2474 | __func__, | ||
2475 | dma_id_regs[i].val, | 2562 | dma_id_regs[i].val, |
2476 | dma_id_regs[i].reg, | 2563 | dma_id_regs[i].reg, |
2477 | readl(virtbase + dma_id_regs[i].reg)); | 2564 | readl(virtbase + dma_id_regs[i].reg)); |
@@ -2479,21 +2566,25 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2479 | } | 2566 | } |
2480 | } | 2567 | } |
2481 | 2568 | ||
2482 | /* Get silicon revision */ | 2569 | /* Get silicon revision and designer */ |
2483 | val = readl(virtbase + D40_DREG_PERIPHID2); | 2570 | val = readl(virtbase + D40_DREG_PERIPHID2); |
2484 | 2571 | ||
2485 | if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { | 2572 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != |
2486 | dev_err(&pdev->dev, | 2573 | D40_HW_DESIGNER) { |
2487 | "[%s] Unknown designer! Got %x wanted %x\n", | 2574 | d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", |
2488 | __func__, val & 0xf, D40_PERIPHID2_DESIGNER); | 2575 | val & D40_DREG_PERIPHID2_DESIGNER_MASK, |
2576 | D40_HW_DESIGNER); | ||
2489 | goto failure; | 2577 | goto failure; |
2490 | } | 2578 | } |
2491 | 2579 | ||
2580 | rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> | ||
2581 | D40_DREG_PERIPHID2_REV_POS; | ||
2582 | |||
2492 | /* The number of physical channels on this HW */ | 2583 | /* The number of physical channels on this HW */ |
2493 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2584 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
2494 | 2585 | ||
2495 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2586 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
2496 | (val >> 4) & 0xf, res->start); | 2587 | rev, res->start); |
2497 | 2588 | ||
2498 | plat_data = pdev->dev.platform_data; | 2589 | plat_data = pdev->dev.platform_data; |
2499 | 2590 | ||
@@ -2511,11 +2602,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2511 | sizeof(struct d40_chan), GFP_KERNEL); | 2602 | sizeof(struct d40_chan), GFP_KERNEL); |
2512 | 2603 | ||
2513 | if (base == NULL) { | 2604 | if (base == NULL) { |
2514 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | 2605 | d40_err(&pdev->dev, "Out of memory\n"); |
2515 | goto failure; | 2606 | goto failure; |
2516 | } | 2607 | } |
2517 | 2608 | ||
2518 | base->rev = (val >> 4) & 0xf; | 2609 | base->rev = rev; |
2519 | base->clk = clk; | 2610 | base->clk = clk; |
2520 | base->num_phy_chans = num_phy_chans; | 2611 | base->num_phy_chans = num_phy_chans; |
2521 | base->num_log_chans = num_log_chans; | 2612 | base->num_log_chans = num_log_chans; |
@@ -2549,7 +2640,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2549 | if (!base->lookup_log_chans) | 2640 | if (!base->lookup_log_chans) |
2550 | goto failure; | 2641 | goto failure; |
2551 | } | 2642 | } |
2552 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), | 2643 | |
2644 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * | ||
2645 | sizeof(struct d40_desc *) * | ||
2646 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
2553 | GFP_KERNEL); | 2647 | GFP_KERNEL); |
2554 | if (!base->lcla_pool.alloc_map) | 2648 | if (!base->lcla_pool.alloc_map) |
2555 | goto failure; | 2649 | goto failure; |
@@ -2563,7 +2657,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2563 | return base; | 2657 | return base; |
2564 | 2658 | ||
2565 | failure: | 2659 | failure: |
2566 | if (clk) { | 2660 | if (!IS_ERR(clk)) { |
2567 | clk_disable(clk); | 2661 | clk_disable(clk); |
2568 | clk_put(clk); | 2662 | clk_put(clk); |
2569 | } | 2663 | } |
@@ -2655,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base) | |||
2655 | 2749 | ||
2656 | static int __init d40_lcla_allocate(struct d40_base *base) | 2750 | static int __init d40_lcla_allocate(struct d40_base *base) |
2657 | { | 2751 | { |
2752 | struct d40_lcla_pool *pool = &base->lcla_pool; | ||
2658 | unsigned long *page_list; | 2753 | unsigned long *page_list; |
2659 | int i, j; | 2754 | int i, j; |
2660 | int ret = 0; | 2755 | int ret = 0; |
@@ -2680,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2680 | base->lcla_pool.pages); | 2775 | base->lcla_pool.pages); |
2681 | if (!page_list[i]) { | 2776 | if (!page_list[i]) { |
2682 | 2777 | ||
2683 | dev_err(base->dev, | 2778 | d40_err(base->dev, "Failed to allocate %d pages.\n", |
2684 | "[%s] Failed to allocate %d pages.\n", | 2779 | base->lcla_pool.pages); |
2685 | __func__, base->lcla_pool.pages); | ||
2686 | 2780 | ||
2687 | for (j = 0; j < i; j++) | 2781 | for (j = 0; j < i; j++) |
2688 | free_pages(page_list[j], base->lcla_pool.pages); | 2782 | free_pages(page_list[j], base->lcla_pool.pages); |
@@ -2700,8 +2794,10 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2700 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | 2794 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { |
2701 | base->lcla_pool.base = (void *)page_list[i]; | 2795 | base->lcla_pool.base = (void *)page_list[i]; |
2702 | } else { | 2796 | } else { |
2703 | /* After many attempts, no success with finding the correct | 2797 | /* |
2704 | * alignment try with allocating a big buffer */ | 2798 | * After many attempts and no success with finding the correct |
2799 | * alignment, try with allocating a big buffer. | ||
2800 | */ | ||
2705 | dev_warn(base->dev, | 2801 | dev_warn(base->dev, |
2706 | "[%s] Failed to get %d pages @ 18 bit align.\n", | 2802 | "[%s] Failed to get %d pages @ 18 bit align.\n", |
2707 | __func__, base->lcla_pool.pages); | 2803 | __func__, base->lcla_pool.pages); |
@@ -2718,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
2718 | LCLA_ALIGNMENT); | 2814 | LCLA_ALIGNMENT); |
2719 | } | 2815 | } |
2720 | 2816 | ||
2817 | pool->dma_addr = dma_map_single(base->dev, pool->base, | ||
2818 | SZ_1K * base->num_phy_chans, | ||
2819 | DMA_TO_DEVICE); | ||
2820 | if (dma_mapping_error(base->dev, pool->dma_addr)) { | ||
2821 | pool->dma_addr = 0; | ||
2822 | ret = -ENOMEM; | ||
2823 | goto failure; | ||
2824 | } | ||
2825 | |||
2721 | writel(virt_to_phys(base->lcla_pool.base), | 2826 | writel(virt_to_phys(base->lcla_pool.base), |
2722 | base->virtbase + D40_DREG_LCLA); | 2827 | base->virtbase + D40_DREG_LCLA); |
2723 | failure: | 2828 | failure: |
@@ -2750,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2750 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 2855 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); |
2751 | if (!res) { | 2856 | if (!res) { |
2752 | ret = -ENOENT; | 2857 | ret = -ENOENT; |
2753 | dev_err(&pdev->dev, | 2858 | d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); |
2754 | "[%s] No \"lcpa\" memory resource\n", | ||
2755 | __func__); | ||
2756 | goto failure; | 2859 | goto failure; |
2757 | } | 2860 | } |
2758 | base->lcpa_size = resource_size(res); | 2861 | base->lcpa_size = resource_size(res); |
@@ -2761,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2761 | if (request_mem_region(res->start, resource_size(res), | 2864 | if (request_mem_region(res->start, resource_size(res), |
2762 | D40_NAME " I/O lcpa") == NULL) { | 2865 | D40_NAME " I/O lcpa") == NULL) { |
2763 | ret = -EBUSY; | 2866 | ret = -EBUSY; |
2764 | dev_err(&pdev->dev, | 2867 | d40_err(&pdev->dev, |
2765 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | 2868 | "Failed to request LCPA region 0x%x-0x%x\n", |
2766 | __func__, res->start, res->end); | 2869 | res->start, res->end); |
2767 | goto failure; | 2870 | goto failure; |
2768 | } | 2871 | } |
2769 | 2872 | ||
@@ -2779,29 +2882,23 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2779 | base->lcpa_base = ioremap(res->start, resource_size(res)); | 2882 | base->lcpa_base = ioremap(res->start, resource_size(res)); |
2780 | if (!base->lcpa_base) { | 2883 | if (!base->lcpa_base) { |
2781 | ret = -ENOMEM; | 2884 | ret = -ENOMEM; |
2782 | dev_err(&pdev->dev, | 2885 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
2783 | "[%s] Failed to ioremap LCPA region\n", | ||
2784 | __func__); | ||
2785 | goto failure; | 2886 | goto failure; |
2786 | } | 2887 | } |
2787 | 2888 | ||
2788 | ret = d40_lcla_allocate(base); | 2889 | ret = d40_lcla_allocate(base); |
2789 | if (ret) { | 2890 | if (ret) { |
2790 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | 2891 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
2791 | __func__); | ||
2792 | goto failure; | 2892 | goto failure; |
2793 | } | 2893 | } |
2794 | 2894 | ||
2795 | spin_lock_init(&base->lcla_pool.lock); | 2895 | spin_lock_init(&base->lcla_pool.lock); |
2796 | 2896 | ||
2797 | base->lcla_pool.num_blocks = base->num_phy_chans; | ||
2798 | |||
2799 | base->irq = platform_get_irq(pdev, 0); | 2897 | base->irq = platform_get_irq(pdev, 0); |
2800 | 2898 | ||
2801 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 2899 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); |
2802 | |||
2803 | if (ret) { | 2900 | if (ret) { |
2804 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); | 2901 | d40_err(&pdev->dev, "No IRQ defined\n"); |
2805 | goto failure; | 2902 | goto failure; |
2806 | } | 2903 | } |
2807 | 2904 | ||
@@ -2820,11 +2917,18 @@ failure: | |||
2820 | kmem_cache_destroy(base->desc_slab); | 2917 | kmem_cache_destroy(base->desc_slab); |
2821 | if (base->virtbase) | 2918 | if (base->virtbase) |
2822 | iounmap(base->virtbase); | 2919 | iounmap(base->virtbase); |
2920 | |||
2921 | if (base->lcla_pool.dma_addr) | ||
2922 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | ||
2923 | SZ_1K * base->num_phy_chans, | ||
2924 | DMA_TO_DEVICE); | ||
2925 | |||
2823 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | 2926 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
2824 | free_pages((unsigned long)base->lcla_pool.base, | 2927 | free_pages((unsigned long)base->lcla_pool.base, |
2825 | base->lcla_pool.pages); | 2928 | base->lcla_pool.pages); |
2826 | if (base->lcla_pool.base_unaligned) | 2929 | |
2827 | kfree(base->lcla_pool.base_unaligned); | 2930 | kfree(base->lcla_pool.base_unaligned); |
2931 | |||
2828 | if (base->phy_lcpa) | 2932 | if (base->phy_lcpa) |
2829 | release_mem_region(base->phy_lcpa, | 2933 | release_mem_region(base->phy_lcpa, |
2830 | base->lcpa_size); | 2934 | base->lcpa_size); |
@@ -2843,7 +2947,7 @@ failure: | |||
2843 | kfree(base); | 2947 | kfree(base); |
2844 | } | 2948 | } |
2845 | 2949 | ||
2846 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | 2950 | d40_err(&pdev->dev, "probe failed\n"); |
2847 | return ret; | 2951 | return ret; |
2848 | } | 2952 | } |
2849 | 2953 | ||
@@ -2854,8 +2958,8 @@ static struct platform_driver d40_driver = { | |||
2854 | }, | 2958 | }, |
2855 | }; | 2959 | }; |
2856 | 2960 | ||
2857 | int __init stedma40_init(void) | 2961 | static int __init stedma40_init(void) |
2858 | { | 2962 | { |
2859 | return platform_driver_probe(&d40_driver, d40_probe); | 2963 | return platform_driver_probe(&d40_driver, d40_probe); |
2860 | } | 2964 | } |
2861 | arch_initcall(stedma40_init); | 2965 | subsys_initcall(stedma40_init); |
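With the three dma_device instances registered by d40_dmaengine_init() (slave-only, memcpy-only, and the combined set), clients obtain channels through the normal dmaengine allocator. A minimal sketch for a memcpy-capable channel, illustrative only; slave channels would additionally pass the driver's stedma40_filter function and a platform-specific stedma40_chan_cfg, omitted here.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch, not part of this patch: request any channel that
 * advertises the DMA_MEMCPY capability set in d40_ops_init().
 */
static struct dma_chan *example_get_memcpy_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* NULL filter and parameter: take the first channel matching the mask */
        return dma_request_channel(mask, NULL, NULL);
}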
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index d937f76d6e2e..cad9e1daedff 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * driver/dma/ste_dma40_ll.c | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
3 | * | 3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | * Author: Per Friden <per.friden@stericsson.com> | ||
7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
8 | */ | 6 | */ |
9 | 7 | ||
10 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
@@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg, | |||
39 | cfg->dir == STEDMA40_PERIPH_TO_PERIPH) | 37 | cfg->dir == STEDMA40_PERIPH_TO_PERIPH) |
40 | l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; | 38 | l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; |
41 | 39 | ||
42 | l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS; | ||
43 | l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; | 40 | l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; |
44 | l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; | 41 | l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; |
45 | l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; | 42 | l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; |
46 | l3 |= 1 << D40_MEM_LCSP3_DTCP_POS; | ||
47 | 43 | ||
48 | l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; | 44 | l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; |
49 | l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 45 | l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; |
50 | l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; | 46 | l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; |
51 | l1 |= 1 << D40_MEM_LCSP1_STCP_POS; | ||
52 | 47 | ||
53 | *lcsp1 = l1; | 48 | *lcsp1 = l1; |
54 | *lcsp3 = l3; | 49 | *lcsp3 = l3; |
@@ -113,28 +108,32 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
113 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 108 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
114 | } | 109 | } |
115 | 110 | ||
116 | if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) { | 111 | if (cfg->high_priority) { |
117 | src |= 1 << D40_SREG_CFG_PRI_POS; | 112 | src |= 1 << D40_SREG_CFG_PRI_POS; |
118 | dst |= 1 << D40_SREG_CFG_PRI_POS; | 113 | dst |= 1 << D40_SREG_CFG_PRI_POS; |
119 | } | 114 | } |
120 | 115 | ||
121 | src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS; | 116 | if (cfg->src_info.big_endian) |
122 | dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS; | 117 | src |= 1 << D40_SREG_CFG_LBE_POS; |
118 | if (cfg->dst_info.big_endian) | ||
119 | dst |= 1 << D40_SREG_CFG_LBE_POS; | ||
123 | 120 | ||
124 | *src_cfg = src; | 121 | *src_cfg = src; |
125 | *dst_cfg = dst; | 122 | *dst_cfg = dst; |
126 | } | 123 | } |
127 | 124 | ||
128 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | 125 | static int d40_phy_fill_lli(struct d40_phy_lli *lli, |
129 | dma_addr_t data, | 126 | dma_addr_t data, |
130 | u32 data_size, | 127 | u32 data_size, |
131 | int psize, | 128 | dma_addr_t next_lli, |
132 | dma_addr_t next_lli, | 129 | u32 reg_cfg, |
133 | u32 reg_cfg, | 130 | struct stedma40_half_channel_info *info, |
134 | bool term_int, | 131 | unsigned int flags) |
135 | u32 data_width, | ||
136 | bool is_device) | ||
137 | { | 132 | { |
133 | bool addr_inc = flags & LLI_ADDR_INC; | ||
134 | bool term_int = flags & LLI_TERM_INT; | ||
135 | unsigned int data_width = info->data_width; | ||
136 | int psize = info->psize; | ||
138 | int num_elems; | 137 | int num_elems; |
139 | 138 | ||
140 | if (psize == STEDMA40_PSIZE_PHY_1) | 139 | if (psize == STEDMA40_PSIZE_PHY_1) |
@@ -142,13 +141,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
142 | else | 141 | else |
143 | num_elems = 2 << psize; | 142 | num_elems = 2 << psize; |
144 | 143 | ||
145 | /* | ||
146 | * Size is 16bit. data_width is 8, 16, 32 or 64 bit | ||
147 | * Block large than 64 KiB must be split. | ||
148 | */ | ||
149 | if (data_size > (0xffff << data_width)) | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* Must be aligned */ | 144 | /* Must be aligned */ |
153 | if (!IS_ALIGNED(data, 0x1 << data_width)) | 145 | if (!IS_ALIGNED(data, 0x1 << data_width)) |
154 | return -EINVAL; | 146 | return -EINVAL; |
@@ -164,7 +156,7 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
164 | * Distance to next element sized entry. | 156 | * Distance to next element sized entry. |
165 | * Usually the size of the element unless you want gaps. | 157 | * Usually the size of the element unless you want gaps. |
166 | */ | 158 | */ |
167 | if (!is_device) | 159 | if (addr_inc) |
168 | lli->reg_elt |= (0x1 << data_width) << | 160 | lli->reg_elt |= (0x1 << data_width) << |
169 | D40_SREG_ELEM_PHY_EIDX_POS; | 161 | D40_SREG_ELEM_PHY_EIDX_POS; |
170 | 162 | ||
@@ -190,98 +182,190 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
190 | return 0; | 182 | return 0; |
191 | } | 183 | } |
192 | 184 | ||
185 | static int d40_seg_size(int size, int data_width1, int data_width2) | ||
186 | { | ||
187 | u32 max_w = max(data_width1, data_width2); | ||
188 | u32 min_w = min(data_width1, data_width2); | ||
189 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
190 | |||
191 | if (seg_max > STEDMA40_MAX_SEG_SIZE) | ||
192 | seg_max -= (1 << max_w); | ||
193 | |||
194 | if (size <= seg_max) | ||
195 | return size; | ||
196 | |||
197 | if (size <= 2 * seg_max) | ||
198 | return ALIGN(size / 2, 1 << max_w); | ||
199 | |||
200 | return seg_max; | ||
201 | } | ||
202 | |||
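d40_seg_size() above bounds each LLI chunk so that neither endpoint's element counter overflows (the hardware counts at most STEDMA40_MAX_SEG_SIZE elements per link, see the BUG_ON in d40_log_fill_lli() below) and so that the chunk is a whole number of elements for both the source and the destination width; a buffer that needs exactly two chunks is split roughly in half rather than leaving a tiny tail. A standalone restatement of the rule, illustrative only and assuming STEDMA40_MAX_SEG_SIZE is 0xffff elements, written as ordinary user-space C so the resulting split can be printed.

#include <stdio.h>

#define MAX_SEG_ELEMS   0xffffu /* assumed value of STEDMA40_MAX_SEG_SIZE */

static unsigned int align_up(unsigned int x, unsigned int a)
{
        return (x + a - 1) & ~(a - 1);
}

/* Mirror of the driver's rule; widths are log2 of the element size in bytes. */
static unsigned int seg_size(unsigned int size, unsigned int w1, unsigned int w2)
{
        unsigned int max_w = w1 > w2 ? w1 : w2;
        unsigned int min_w = w1 < w2 ? w1 : w2;
        unsigned int seg_max = align_up(MAX_SEG_ELEMS << min_w, 1u << max_w);

        if (seg_max > MAX_SEG_ELEMS)
                seg_max -= 1u << max_w;
        if (size <= seg_max)
                return size;
        if (size <= 2 * seg_max)
                return align_up(size / 2, 1u << max_w);
        return seg_max;
}

int main(void)
{
        unsigned int rest = 200000;             /* example buffer, in bytes */
        unsigned int w_src = 2, w_dst = 0;      /* 32-bit source, 8-bit destination */

        while (rest) {
                unsigned int seg = seg_size(rest, w_src, w_dst);

                printf("segment of %u bytes\n", seg);   /* 65532, 65532, 34468, 34468 */
                rest -= seg;
        }
        return 0;
}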
203 | static struct d40_phy_lli * | ||
204 | d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, | ||
205 | dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg, | ||
206 | struct stedma40_half_channel_info *info, | ||
207 | struct stedma40_half_channel_info *otherinfo, | ||
208 | unsigned long flags) | ||
209 | { | ||
210 | bool lastlink = flags & LLI_LAST_LINK; | ||
211 | bool addr_inc = flags & LLI_ADDR_INC; | ||
212 | bool term_int = flags & LLI_TERM_INT; | ||
213 | bool cyclic = flags & LLI_CYCLIC; | ||
214 | int err; | ||
215 | dma_addr_t next = lli_phys; | ||
216 | int size_rest = size; | ||
217 | int size_seg = 0; | ||
218 | |||
219 | /* | ||
220 | * This piece may be split up based on d40_seg_size(); we only want the | ||
221 | * term int on the last part. | ||
222 | */ | ||
223 | if (term_int) | ||
224 | flags &= ~LLI_TERM_INT; | ||
225 | |||
226 | do { | ||
227 | size_seg = d40_seg_size(size_rest, info->data_width, | ||
228 | otherinfo->data_width); | ||
229 | size_rest -= size_seg; | ||
230 | |||
231 | if (size_rest == 0 && term_int) | ||
232 | flags |= LLI_TERM_INT; | ||
233 | |||
234 | if (size_rest == 0 && lastlink) | ||
235 | next = cyclic ? first_phys : 0; | ||
236 | else | ||
237 | next = ALIGN(next + sizeof(struct d40_phy_lli), | ||
238 | D40_LLI_ALIGN); | ||
239 | |||
240 | err = d40_phy_fill_lli(lli, addr, size_seg, next, | ||
241 | reg_cfg, info, flags); | ||
242 | |||
243 | if (err) | ||
244 | goto err; | ||
245 | |||
246 | lli++; | ||
247 | if (addr_inc) | ||
248 | addr += size_seg; | ||
249 | } while (size_rest); | ||
250 | |||
251 | return lli; | ||
252 | |||
253 | err: | ||
254 | return NULL; | ||
255 | } | ||
256 | |||
193 | int d40_phy_sg_to_lli(struct scatterlist *sg, | 257 | int d40_phy_sg_to_lli(struct scatterlist *sg, |
194 | int sg_len, | 258 | int sg_len, |
195 | dma_addr_t target, | 259 | dma_addr_t target, |
196 | struct d40_phy_lli *lli, | 260 | struct d40_phy_lli *lli_sg, |
197 | dma_addr_t lli_phys, | 261 | dma_addr_t lli_phys, |
198 | u32 reg_cfg, | 262 | u32 reg_cfg, |
199 | u32 data_width, | 263 | struct stedma40_half_channel_info *info, |
200 | int psize, | 264 | struct stedma40_half_channel_info *otherinfo, |
201 | bool term_int) | 265 | unsigned long flags) |
202 | { | 266 | { |
203 | int total_size = 0; | 267 | int total_size = 0; |
204 | int i; | 268 | int i; |
205 | struct scatterlist *current_sg = sg; | 269 | struct scatterlist *current_sg = sg; |
206 | dma_addr_t next_lli_phys; | 270 | struct d40_phy_lli *lli = lli_sg; |
207 | dma_addr_t dst; | 271 | dma_addr_t l_phys = lli_phys; |
208 | int err = 0; | 272 | |
273 | if (!target) | ||
274 | flags |= LLI_ADDR_INC; | ||
209 | 275 | ||
210 | for_each_sg(sg, current_sg, sg_len, i) { | 276 | for_each_sg(sg, current_sg, sg_len, i) { |
277 | dma_addr_t sg_addr = sg_dma_address(current_sg); | ||
278 | unsigned int len = sg_dma_len(current_sg); | ||
279 | dma_addr_t dst = target ?: sg_addr; | ||
211 | 280 | ||
212 | total_size += sg_dma_len(current_sg); | 281 | total_size += sg_dma_len(current_sg); |
213 | 282 | ||
214 | /* If this scatter list entry is the last one, no next link */ | 283 | if (i == sg_len - 1) |
215 | if (sg_len - 1 == i) | 284 | flags |= LLI_TERM_INT | LLI_LAST_LINK; |
216 | next_lli_phys = 0; | ||
217 | else | ||
218 | next_lli_phys = ALIGN(lli_phys + (i + 1) * | ||
219 | sizeof(struct d40_phy_lli), | ||
220 | D40_LLI_ALIGN); | ||
221 | 285 | ||
222 | if (target) | 286 | l_phys = ALIGN(lli_phys + (lli - lli_sg) * |
223 | dst = target; | 287 | sizeof(struct d40_phy_lli), D40_LLI_ALIGN); |
224 | else | 288 | |
225 | dst = sg_phys(current_sg); | 289 | lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys, |
226 | 290 | reg_cfg, info, otherinfo, flags); | |
227 | err = d40_phy_fill_lli(&lli[i], | 291 | |
228 | dst, | 292 | if (lli == NULL) |
229 | sg_dma_len(current_sg), | 293 | return -EINVAL; |
230 | psize, | ||
231 | next_lli_phys, | ||
232 | reg_cfg, | ||
233 | !next_lli_phys, | ||
234 | data_width, | ||
235 | target == dst); | ||
236 | if (err) | ||
237 | goto err; | ||
238 | } | 294 | } |
239 | 295 | ||
240 | return total_size; | 296 | return total_size; |
241 | err: | ||
242 | return err; | ||
243 | } | 297 | } |
244 | 298 | ||
245 | 299 | ||
246 | void d40_phy_lli_write(void __iomem *virtbase, | 300 | /* DMA logical lli operations */ |
247 | u32 phy_chan_num, | 301 | |
248 | struct d40_phy_lli *lli_dst, | 302 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, |
249 | struct d40_phy_lli *lli_src) | 303 | struct d40_log_lli *lli_src, |
304 | int next, unsigned int flags) | ||
250 | { | 305 | { |
306 | bool interrupt = flags & LLI_TERM_INT; | ||
307 | u32 slos = 0; | ||
308 | u32 dlos = 0; | ||
309 | |||
310 | if (next != -EINVAL) { | ||
311 | slos = next * 2; | ||
312 | dlos = next * 2 + 1; | ||
313 | } | ||
314 | |||
315 | if (interrupt) { | ||
316 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
317 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | ||
318 | } | ||
251 | 319 | ||
252 | writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + | 320 | lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) | |
253 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); | 321 | (slos << D40_MEM_LCSP1_SLOS_POS); |
254 | writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + | ||
255 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | ||
256 | writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
257 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); | ||
258 | writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
259 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); | ||
260 | |||
261 | writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + | ||
262 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); | ||
263 | writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + | ||
264 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | ||
265 | writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + | ||
266 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); | ||
267 | writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + | ||
268 | phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); | ||
269 | 322 | ||
323 | lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) | | ||
324 | (dlos << D40_MEM_LCSP1_SLOS_POS); | ||
270 | } | 325 | } |
271 | 326 | ||
272 | /* DMA logical lli operations */ | 327 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
328 | struct d40_log_lli *lli_dst, | ||
329 | struct d40_log_lli *lli_src, | ||
330 | int next, unsigned int flags) | ||
331 | { | ||
332 | d40_log_lli_link(lli_dst, lli_src, next, flags); | ||
333 | |||
334 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | ||
335 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | ||
336 | writel(lli_dst->lcsp02, &lcpa[0].lcsp2); | ||
337 | writel(lli_dst->lcsp13, &lcpa[0].lcsp3); | ||
338 | } | ||
339 | |||
340 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | ||
341 | struct d40_log_lli *lli_dst, | ||
342 | struct d40_log_lli *lli_src, | ||
343 | int next, unsigned int flags) | ||
344 | { | ||
345 | d40_log_lli_link(lli_dst, lli_src, next, flags); | ||
273 | 346 | ||
274 | void d40_log_fill_lli(struct d40_log_lli *lli, | 347 | writel(lli_src->lcsp02, &lcla[0].lcsp02); |
275 | dma_addr_t data, u32 data_size, | 348 | writel(lli_src->lcsp13, &lcla[0].lcsp13); |
276 | u32 lli_next_off, u32 reg_cfg, | 349 | writel(lli_dst->lcsp02, &lcla[1].lcsp02); |
277 | u32 data_width, | 350 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); |
278 | bool term_int, bool addr_inc) | 351 | } |
352 | |||
353 | static void d40_log_fill_lli(struct d40_log_lli *lli, | ||
354 | dma_addr_t data, u32 data_size, | ||
355 | u32 reg_cfg, | ||
356 | u32 data_width, | ||
357 | unsigned int flags) | ||
279 | { | 358 | { |
359 | bool addr_inc = flags & LLI_ADDR_INC; | ||
360 | |||
280 | lli->lcsp13 = reg_cfg; | 361 | lli->lcsp13 = reg_cfg; |
281 | 362 | ||
282 | /* The number of elements to transfer */ | 363 | /* The number of elements to transfer */ |
283 | lli->lcsp02 = ((data_size >> data_width) << | 364 | lli->lcsp02 = ((data_size >> data_width) << |
284 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; | 365 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; |
366 | |||
367 | BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); | ||
368 | |||
285 | /* 16 LSBs address of the current element */ | 369 | /* 16 LSBs address of the current element */ |
286 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; | 370 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; |
287 | /* 16 MSBs address of the current element */ | 371 | /* 16 MSBs address of the current element */ |
@@ -290,165 +374,67 @@ void d40_log_fill_lli(struct d40_log_lli *lli, | |||
290 | if (addr_inc) | 374 | if (addr_inc) |
291 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; | 375 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; |
292 | 376 | ||
293 | lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | ||
294 | /* If this scatter list entry is the last one, no next link */ | ||
295 | lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) & | ||
296 | D40_MEM_LCSP1_SLOS_MASK; | ||
297 | |||
298 | if (term_int) | ||
299 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
300 | else | ||
301 | lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
302 | } | 377 | } |
303 | 378 | ||
304 | int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, | 379 | static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, |
305 | struct scatterlist *sg, | 380 | dma_addr_t addr, |
306 | int sg_len, | 381 | int size, |
307 | struct d40_log_lli_bidir *lli, | 382 | u32 lcsp13, /* src or dst*/ |
308 | struct d40_def_lcsp *lcsp, | 383 | u32 data_width1, |
309 | u32 src_data_width, | 384 | u32 data_width2, |
310 | u32 dst_data_width, | 385 | unsigned int flags) |
311 | enum dma_data_direction direction, | ||
312 | bool term_int, dma_addr_t dev_addr, int max_len, | ||
313 | int llis_per_log) | ||
314 | { | 386 | { |
315 | int total_size = 0; | 387 | bool addr_inc = flags & LLI_ADDR_INC; |
316 | struct scatterlist *current_sg = sg; | 388 | struct d40_log_lli *lli = lli_sg; |
317 | int i; | 389 | int size_rest = size; |
318 | u32 next_lli_off_dst = 0; | 390 | int size_seg = 0; |
319 | u32 next_lli_off_src = 0; | 391 | |
320 | 392 | do { | |
321 | for_each_sg(sg, current_sg, sg_len, i) { | 393 | size_seg = d40_seg_size(size_rest, data_width1, data_width2); |
322 | total_size += sg_dma_len(current_sg); | 394 | size_rest -= size_seg; |
323 | 395 | ||
324 | /* | 396 | d40_log_fill_lli(lli, |
325 | * If this scatter list entry is the last one or | 397 | addr, |
326 | * max length, terminate link. | 398 | size_seg, |
327 | */ | 399 | lcsp13, data_width1, |
328 | if (sg_len - 1 == i || ((i+1) % max_len == 0)) { | 400 | flags); |
329 | next_lli_off_src = 0; | 401 | if (addr_inc) |
330 | next_lli_off_dst = 0; | 402 | addr += size_seg; |
331 | } else { | 403 | lli++; |
332 | if (next_lli_off_dst == 0 && | 404 | } while (size_rest); |
333 | next_lli_off_src == 0) { | 405 | |
334 | /* The first lli will be at next_lli_off */ | 406 | return lli; |
335 | next_lli_off_dst = (lcla->dst_id * | ||
336 | llis_per_log + 1); | ||
337 | next_lli_off_src = (lcla->src_id * | ||
338 | llis_per_log + 1); | ||
339 | } else { | ||
340 | next_lli_off_dst++; | ||
341 | next_lli_off_src++; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | if (direction == DMA_TO_DEVICE) { | ||
346 | d40_log_fill_lli(&lli->src[i], | ||
347 | sg_phys(current_sg), | ||
348 | sg_dma_len(current_sg), | ||
349 | next_lli_off_src, | ||
350 | lcsp->lcsp1, src_data_width, | ||
351 | false, | ||
352 | true); | ||
353 | d40_log_fill_lli(&lli->dst[i], | ||
354 | dev_addr, | ||
355 | sg_dma_len(current_sg), | ||
356 | next_lli_off_dst, | ||
357 | lcsp->lcsp3, dst_data_width, | ||
358 | /* No next == terminal interrupt */ | ||
359 | term_int && !next_lli_off_dst, | ||
360 | false); | ||
361 | } else { | ||
362 | d40_log_fill_lli(&lli->dst[i], | ||
363 | sg_phys(current_sg), | ||
364 | sg_dma_len(current_sg), | ||
365 | next_lli_off_dst, | ||
366 | lcsp->lcsp3, dst_data_width, | ||
367 | /* No next == terminal interrupt */ | ||
368 | term_int && !next_lli_off_dst, | ||
369 | true); | ||
370 | d40_log_fill_lli(&lli->src[i], | ||
371 | dev_addr, | ||
372 | sg_dma_len(current_sg), | ||
373 | next_lli_off_src, | ||
374 | lcsp->lcsp1, src_data_width, | ||
375 | false, | ||
376 | false); | ||
377 | } | ||
378 | } | ||
379 | return total_size; | ||
380 | } | 407 | } |
381 | 408 | ||
382 | int d40_log_sg_to_lli(int lcla_id, | 409 | int d40_log_sg_to_lli(struct scatterlist *sg, |
383 | struct scatterlist *sg, | ||
384 | int sg_len, | 410 | int sg_len, |
411 | dma_addr_t dev_addr, | ||
385 | struct d40_log_lli *lli_sg, | 412 | struct d40_log_lli *lli_sg, |
386 | u32 lcsp13, /* src or dst*/ | 413 | u32 lcsp13, /* src or dst*/ |
387 | u32 data_width, | 414 | u32 data_width1, u32 data_width2) |
388 | bool term_int, int max_len, int llis_per_log) | ||
389 | { | 415 | { |
390 | int total_size = 0; | 416 | int total_size = 0; |
391 | struct scatterlist *current_sg = sg; | 417 | struct scatterlist *current_sg = sg; |
392 | int i; | 418 | int i; |
393 | u32 next_lli_off = 0; | 419 | struct d40_log_lli *lli = lli_sg; |
394 | 420 | unsigned long flags = 0; | |
395 | for_each_sg(sg, current_sg, sg_len, i) { | ||
396 | total_size += sg_dma_len(current_sg); | ||
397 | |||
398 | /* | ||
399 | * If this scatter list entry is the last one or | ||
400 | * max length, terminate link. | ||
401 | */ | ||
402 | if (sg_len - 1 == i || ((i+1) % max_len == 0)) | ||
403 | next_lli_off = 0; | ||
404 | else { | ||
405 | if (next_lli_off == 0) | ||
406 | /* The first lli will be at next_lli_off */ | ||
407 | next_lli_off = lcla_id * llis_per_log + 1; | ||
408 | else | ||
409 | next_lli_off++; | ||
410 | } | ||
411 | |||
412 | d40_log_fill_lli(&lli_sg[i], | ||
413 | sg_phys(current_sg), | ||
414 | sg_dma_len(current_sg), | ||
415 | next_lli_off, | ||
416 | lcsp13, data_width, | ||
417 | term_int && !next_lli_off, | ||
418 | true); | ||
419 | } | ||
420 | return total_size; | ||
421 | } | ||
422 | |||
423 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, | ||
424 | struct d40_log_lli *lcla_src, | ||
425 | struct d40_log_lli *lcla_dst, | ||
426 | struct d40_log_lli *lli_dst, | ||
427 | struct d40_log_lli *lli_src, | ||
428 | int llis_per_log) | ||
429 | { | ||
430 | u32 slos; | ||
431 | u32 dlos; | ||
432 | int i; | ||
433 | 421 | ||
434 | writel(lli_src->lcsp02, &lcpa->lcsp0); | 422 | if (!dev_addr) |
435 | writel(lli_src->lcsp13, &lcpa->lcsp1); | 423 | flags |= LLI_ADDR_INC; |
436 | writel(lli_dst->lcsp02, &lcpa->lcsp2); | ||
437 | writel(lli_dst->lcsp13, &lcpa->lcsp3); | ||
438 | 424 | ||
439 | slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK; | 425 | for_each_sg(sg, current_sg, sg_len, i) { |
440 | dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK; | 426 | dma_addr_t sg_addr = sg_dma_address(current_sg); |
427 | unsigned int len = sg_dma_len(current_sg); | ||
428 | dma_addr_t addr = dev_addr ?: sg_addr; | ||
441 | 429 | ||
442 | for (i = 0; (i < llis_per_log) && slos && dlos; i++) { | 430 | total_size += sg_dma_len(current_sg); |
443 | writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02); | ||
444 | writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13); | ||
445 | writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02); | ||
446 | writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13); | ||
447 | 431 | ||
448 | slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; | 432 | lli = d40_log_buf_to_lli(lli, addr, len, |
449 | dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; | 433 | lcsp13, |
434 | data_width1, | ||
435 | data_width2, | ||
436 | flags); | ||
450 | } | 437 | } |
451 | 438 | ||
452 | return i; | 439 | return total_size; |
453 | |||
454 | } | 440 | } |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9c0fa2f5fe57..195ee65ee7f3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * driver/dma/ste_dma40_ll.h | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
3 | * | 3 | * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA |
4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | * Author: Per Friden <per.friden@stericsson.com> | ||
7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
8 | */ | 6 | */ |
9 | #ifndef STE_DMA40_LL_H | 7 | #ifndef STE_DMA40_LL_H |
10 | #define STE_DMA40_LL_H | 8 | #define STE_DMA40_LL_H |
@@ -132,6 +130,13 @@ | |||
132 | #define D40_DREG_PRMSO 0x014 | 130 | #define D40_DREG_PRMSO 0x014 |
133 | #define D40_DREG_PRMOE 0x018 | 131 | #define D40_DREG_PRMOE 0x018 |
134 | #define D40_DREG_PRMOO 0x01C | 132 | #define D40_DREG_PRMOO 0x01C |
133 | #define D40_DREG_PRMO_PCHAN_BASIC 0x1 | ||
134 | #define D40_DREG_PRMO_PCHAN_MODULO 0x2 | ||
135 | #define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3 | ||
136 | #define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1 | ||
137 | #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2 | ||
138 | #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3 | ||
139 | |||
135 | #define D40_DREG_LCPA 0x020 | 140 | #define D40_DREG_LCPA 0x020 |
136 | #define D40_DREG_LCLA 0x024 | 141 | #define D40_DREG_LCLA 0x024 |
137 | #define D40_DREG_ACTIVE 0x050 | 142 | #define D40_DREG_ACTIVE 0x050 |
@@ -158,11 +163,30 @@ | |||
158 | #define D40_DREG_LCEIS1 0x0B4 | 163 | #define D40_DREG_LCEIS1 0x0B4 |
159 | #define D40_DREG_LCEIS2 0x0B8 | 164 | #define D40_DREG_LCEIS2 0x0B8 |
160 | #define D40_DREG_LCEIS3 0x0BC | 165 | #define D40_DREG_LCEIS3 0x0BC |
166 | #define D40_DREG_PSEG1 0x110 | ||
167 | #define D40_DREG_PSEG2 0x114 | ||
168 | #define D40_DREG_PSEG3 0x118 | ||
169 | #define D40_DREG_PSEG4 0x11C | ||
170 | #define D40_DREG_PCEG1 0x120 | ||
171 | #define D40_DREG_PCEG2 0x124 | ||
172 | #define D40_DREG_PCEG3 0x128 | ||
173 | #define D40_DREG_PCEG4 0x12C | ||
174 | #define D40_DREG_RSEG1 0x130 | ||
175 | #define D40_DREG_RSEG2 0x134 | ||
176 | #define D40_DREG_RSEG3 0x138 | ||
177 | #define D40_DREG_RSEG4 0x13C | ||
178 | #define D40_DREG_RCEG1 0x140 | ||
179 | #define D40_DREG_RCEG2 0x144 | ||
180 | #define D40_DREG_RCEG3 0x148 | ||
181 | #define D40_DREG_RCEG4 0x14C | ||
161 | #define D40_DREG_STFU 0xFC8 | 182 | #define D40_DREG_STFU 0xFC8 |
162 | #define D40_DREG_ICFG 0xFCC | 183 | #define D40_DREG_ICFG 0xFCC |
163 | #define D40_DREG_PERIPHID0 0xFE0 | 184 | #define D40_DREG_PERIPHID0 0xFE0 |
164 | #define D40_DREG_PERIPHID1 0xFE4 | 185 | #define D40_DREG_PERIPHID1 0xFE4 |
165 | #define D40_DREG_PERIPHID2 0xFE8 | 186 | #define D40_DREG_PERIPHID2 0xFE8 |
187 | #define D40_DREG_PERIPHID2_REV_POS 4 | ||
188 | #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) | ||
189 | #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf | ||
166 | #define D40_DREG_PERIPHID3 0xFEC | 190 | #define D40_DREG_PERIPHID3 0xFEC |
167 | #define D40_DREG_CELLID0 0xFF0 | 191 | #define D40_DREG_CELLID0 0xFF0 |
168 | #define D40_DREG_CELLID1 0xFF4 | 192 | #define D40_DREG_CELLID1 0xFF4 |
@@ -199,8 +223,6 @@ struct d40_phy_lli { | |||
199 | * | 223 | * |
200 | * @src: Register settings for src channel. | 224 | * @src: Register settings for src channel. |
201 | * @dst: Register settings for dst channel. | 225 | * @dst: Register settings for dst channel. |
202 | * @dst_addr: Physical destination address. | ||
203 | * @src_addr: Physical source address. | ||
204 | * | 226 | * |
205 | * All DMA transfers have a source and a destination. | 227 | * All DMA transfers have a source and a destination. |
206 | */ | 228 | */ |
@@ -208,8 +230,6 @@ struct d40_phy_lli { | |||
208 | struct d40_phy_lli_bidir { | 230 | struct d40_phy_lli_bidir { |
209 | struct d40_phy_lli *src; | 231 | struct d40_phy_lli *src; |
210 | struct d40_phy_lli *dst; | 232 | struct d40_phy_lli *dst; |
211 | dma_addr_t dst_addr; | ||
212 | dma_addr_t src_addr; | ||
213 | }; | 233 | }; |
214 | 234 | ||
215 | 235 | ||
@@ -271,29 +291,23 @@ struct d40_def_lcsp { | |||
271 | u32 lcsp1; | 291 | u32 lcsp1; |
272 | }; | 292 | }; |
273 | 293 | ||
274 | /** | ||
275 | * struct d40_lcla_elem - Info for one LCA element. | ||
276 | * | ||
277 | * @src_id: logical channel src id | ||
278 | * @dst_id: logical channel dst id | ||
279 | * @src: LCPA formated src parameters | ||
280 | * @dst: LCPA formated dst parameters | ||
281 | * | ||
282 | */ | ||
283 | struct d40_lcla_elem { | ||
284 | int src_id; | ||
285 | int dst_id; | ||
286 | struct d40_log_lli *src; | ||
287 | struct d40_log_lli *dst; | ||
288 | }; | ||
289 | |||
290 | /* Physical channels */ | 294 | /* Physical channels */ |
291 | 295 | ||
296 | enum d40_lli_flags { | ||
297 | LLI_ADDR_INC = 1 << 0, | ||
298 | LLI_TERM_INT = 1 << 1, | ||
299 | LLI_CYCLIC = 1 << 2, | ||
300 | LLI_LAST_LINK = 1 << 3, | ||
301 | }; | ||
302 | |||
292 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | 303 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, |
293 | u32 *src_cfg, u32 *dst_cfg, bool is_log); | 304 | u32 *src_cfg, |
305 | u32 *dst_cfg, | ||
306 | bool is_log); | ||
294 | 307 | ||
295 | void d40_log_cfg(struct stedma40_chan_cfg *cfg, | 308 | void d40_log_cfg(struct stedma40_chan_cfg *cfg, |
296 | u32 *lcsp1, u32 *lcsp2); | 309 | u32 *lcsp1, |
310 | u32 *lcsp2); | ||
297 | 311 | ||
298 | int d40_phy_sg_to_lli(struct scatterlist *sg, | 312 | int d40_phy_sg_to_lli(struct scatterlist *sg, |
299 | int sg_len, | 313 | int sg_len, |
@@ -301,57 +315,27 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
301 | struct d40_phy_lli *lli, | 315 | struct d40_phy_lli *lli, |
302 | dma_addr_t lli_phys, | 316 | dma_addr_t lli_phys, |
303 | u32 reg_cfg, | 317 | u32 reg_cfg, |
304 | u32 data_width, | 318 | struct stedma40_half_channel_info *info, |
305 | int psize, | 319 | struct stedma40_half_channel_info *otherinfo, |
306 | bool term_int); | 320 | unsigned long flags); |
307 | |||
308 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | ||
309 | dma_addr_t data, | ||
310 | u32 data_size, | ||
311 | int psize, | ||
312 | dma_addr_t next_lli, | ||
313 | u32 reg_cfg, | ||
314 | bool term_int, | ||
315 | u32 data_width, | ||
316 | bool is_device); | ||
317 | |||
318 | void d40_phy_lli_write(void __iomem *virtbase, | ||
319 | u32 phy_chan_num, | ||
320 | struct d40_phy_lli *lli_dst, | ||
321 | struct d40_phy_lli *lli_src); | ||
322 | 321 | ||
323 | /* Logical channels */ | 322 | /* Logical channels */ |
324 | 323 | ||
325 | void d40_log_fill_lli(struct d40_log_lli *lli, | 324 | int d40_log_sg_to_lli(struct scatterlist *sg, |
326 | dma_addr_t data, u32 data_size, | ||
327 | u32 lli_next_off, u32 reg_cfg, | ||
328 | u32 data_width, | ||
329 | bool term_int, bool addr_inc); | ||
330 | |||
331 | int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, | ||
332 | struct scatterlist *sg, | ||
333 | int sg_len, | ||
334 | struct d40_log_lli_bidir *lli, | ||
335 | struct d40_def_lcsp *lcsp, | ||
336 | u32 src_data_width, | ||
337 | u32 dst_data_width, | ||
338 | enum dma_data_direction direction, | ||
339 | bool term_int, dma_addr_t dev_addr, int max_len, | ||
340 | int llis_per_log); | ||
341 | |||
342 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, | ||
343 | struct d40_log_lli *lcla_src, | ||
344 | struct d40_log_lli *lcla_dst, | ||
345 | struct d40_log_lli *lli_dst, | ||
346 | struct d40_log_lli *lli_src, | ||
347 | int llis_per_log); | ||
348 | |||
349 | int d40_log_sg_to_lli(int lcla_id, | ||
350 | struct scatterlist *sg, | ||
351 | int sg_len, | 325 | int sg_len, |
326 | dma_addr_t dev_addr, | ||
352 | struct d40_log_lli *lli_sg, | 327 | struct d40_log_lli *lli_sg, |
353 | u32 lcsp13, /* src or dst*/ | 328 | u32 lcsp13, /* src or dst*/ |
354 | u32 data_width, | 329 | u32 data_width1, u32 data_width2); |
355 | bool term_int, int max_len, int llis_per_log); | 330 | |
331 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | ||
332 | struct d40_log_lli *lli_dst, | ||
333 | struct d40_log_lli *lli_src, | ||
334 | int next, unsigned int flags); | ||
335 | |||
336 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | ||
337 | struct d40_log_lli *lli_dst, | ||
338 | struct d40_log_lli *lli_src, | ||
339 | int next, unsigned int flags); | ||
356 | 340 | ||
357 | #endif /* STE_DMA40_LLI_H */ | 341 | #endif /* STE_DMA40_LLI_H */ |
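With the old per-LCLA-element helpers removed, the exported logical-channel interface boils down to three steps: derive lcsp1/lcsp3 with d40_log_cfg(), convert each scatterlist into logical LLIs with d40_log_sg_to_lli(), and write the first src/dst links with d40_log_lli_lcpa_write() (or d40_log_lli_lcla_write()). A minimal caller sketch; the d40_desc/d40_chan field names (lli_log, log_def, dma_cfg, lcpa) follow the ste_dma40.c driver and are assumptions here, not part of this header.

/* Sketch only: struct layout assumed from ste_dma40.c, mem-to-mem case. */
static int d40_prep_log(struct d40_desc *d40d, struct d40_chan *d40c,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst,
			int sg_len)
{
	/* Source side: lcsp1 carries the src half of the configuration. */
	d40_log_sg_to_lli(sg_src, sg_len, 0, d40d->lli_log.src,
			  d40c->log_def.lcsp1,
			  d40c->dma_cfg.src_info.data_width,
			  d40c->dma_cfg.dst_info.data_width);

	/* Destination side: lcsp3, with the data widths swapped. */
	d40_log_sg_to_lli(sg_dst, sg_len, 0, d40d->lli_log.dst,
			  d40c->log_def.lcsp3,
			  d40c->dma_cfg.dst_info.data_width,
			  d40c->dma_cfg.src_info.data_width);

	/* Point the channel's LCPA entry at the first src/dst links. */
	d40_log_lli_lcpa_write(d40c->lcpa,
			       &d40d->lli_log.dst[0],
			       &d40d->lli_log.src[0],
			       0 /* next */, 0 /* flags */);
	return 0;
}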
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 2ec1ed56f204..f69f90a61873 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -629,7 +629,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
629 | desc_node) | 629 | desc_node) |
630 | list_move(&td_desc->desc_node, &td_chan->free_list); | 630 | list_move(&td_desc->desc_node, &td_chan->free_list); |
631 | 631 | ||
632 | /* now tear down the runnning */ | 632 | /* now tear down the running */ |
633 | __td_finish(td_chan); | 633 | __td_finish(td_chan); |
634 | spin_unlock_bh(&td_chan->lock); | 634 | spin_unlock_bh(&td_chan->lock); |
635 | 635 | ||
@@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
759 | pdata->channels + i; | 759 | pdata->channels + i; |
760 | 760 | ||
761 | /* even channels are RX, odd are TX */ | 761 | /* even channels are RX, odd are TX */ |
762 | if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { | 762 | if ((i % 2) == pchan->rx) { |
763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); | 763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); |
764 | err = -EINVAL; | 764 | err = -EINVAL; |
765 | goto err_tasklet_kill; | 765 | goto err_tasklet_kill; |