author	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-17 13:54:41 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-17 13:54:41 -0500
commit	e1288cd72f54e7fc16ae9ebb4d0647537ef848d4 (patch)
tree	b4fd87b9307d8041fb680cb9b8fbf787ec968df7 /drivers/dma
parent	e78bf5e6cbe837daa6ab628a5f679548742994d3 (diff)
parent	94ae85220a07d357d4937086c490854f63344de4 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (63 commits)
ARM: PL08x: cleanup comments
Update CONFIG_MD_RAID6_PQ to CONFIG_RAID6_PQ in drivers/dma/iop-adma.c
ARM: PL08x: fix a warning
Fix dmaengine_submit() return type
dmaengine: at_hdmac: fix race while monitoring channel status
dmaengine: at_hdmac: flags located in first descriptor
dmaengine: at_hdmac: use subsys_initcall instead of module_init
dmaengine: at_hdmac: no need set ACK in new descriptor
dmaengine: at_hdmac: trivial add precision to unmapping comment
dmaengine: at_hdmac: use dma_address to program DMA hardware
pch_dma: support new device ML7213 IOH
ARM: PL08x: prevent dma_set_runtime_config() reconfiguring memcpy channels
ARM: PL08x: allow dma_set_runtime_config() to return errors
ARM: PL08x: fix locking between prepare function and submit function
ARM: PL08x: introduce 'phychan_hold' to hold on to physical channels
ARM: PL08x: put txd's on the pending list in pl08x_tx_submit()
ARM: PL08x: rename 'desc_list' as 'pend_list'
ARM: PL08x: implement unmapping of memcpy buffers
ARM: PL08x: store prep_* flags in async_tx structure
ARM: PL08x: shrink srcbus/dstbus in txd structure
...
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig          |    9
-rw-r--r--	drivers/dma/amba-pl08x.c     | 1168
-rw-r--r--	drivers/dma/at_hdmac.c       |   19
-rw-r--r--	drivers/dma/fsldma.c         |    4
-rw-r--r--	drivers/dma/intel_mid_dma.c  |   33
-rw-r--r--	drivers/dma/iop-adma.c       |    4
-rw-r--r--	drivers/dma/pch_dma.c        |   19
-rw-r--r--	drivers/dma/ste_dma40.c      |  191
-rw-r--r--	drivers/dma/ste_dma40_ll.c   |  246
-rw-r--r--	drivers/dma/ste_dma40_ll.h   |   36
10 files changed, 935 insertions(+), 794 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef138731c0ea..1c28816152fa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,11 +200,16 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Topcliff (Intel EG20T) PCH DMA support"
+	tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
-	  Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
+	  Output Hub) which is for IVI(In-Vehicle Infotainment) use.
+	  ML7213 is companion chip for Intel Atom E6xx series.
+	  ML7213 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b605cc9ac3a2..297f48b0cba9 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -19,14 +19,14 @@
  * this program; if not, write to the Free Software Foundation, Inc., 59
  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * The full GNU General Public License is iin this distribution in the
- * file called COPYING.
+ * The full GNU General Public License is in this distribution in the file
+ * called COPYING.
  *
  * Documentation: ARM DDI 0196G == PL080
  * Documentation: ARM DDI 0218E == PL081
  *
- * PL080 & PL081 both have 16 sets of DMA signals that can be routed to
- * any channel.
+ * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
+ * channel.
  *
  * The PL080 has 8 channels available for simultaneous use, and the PL081
  * has only two channels. So on these DMA controllers the number of channels
@@ -53,7 +53,23 @@
  *
  * ASSUMES default (little) endianness for DMA transfers
  *
- * Only DMAC flow control is implemented
+ * The PL08x has two flow control settings:
+ *  - DMAC flow control: the transfer size defines the number of transfers
+ *    which occur for the current LLI entry, and the DMAC raises TC at the
+ *    end of every LLI entry. Observed behaviour shows the DMAC listening
+ *    to both the BREQ and SREQ signals (contrary to documented),
+ *    transferring data if either is active. The LBREQ and LSREQ signals
+ *    are ignored.
+ *
+ *  - Peripheral flow control: the transfer size is ignored (and should be
+ *    zero). The data is transferred from the current LLI entry, until
+ *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
+ *    will then move to the next LLI entry.
+ *
+ * Only the former works sanely with scatter lists, so we only implement
+ * the DMAC flow control method. However, peripherals which use the LBREQ
+ * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
+ * these hardware restrictions prevents them from using scatter DMA.
  *
  * Global TODO:
  * - Break out common code from arch/arm/mach-s3c64xx and share
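The flow control method described in the comment above is selected through the flow-control field of the per-channel configuration register. As a rough sketch (not part of this patch, and assuming the PL080_FLOW_* and shift definitions from asm/hardware/pl080.h), the two settings for a memory-to-peripheral transfer would be encoded like this:

    u32 ccfg = 0;

    /* DMAC flow control, memory to peripheral - what this driver implements */
    ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;

    /* Peripheral flow control variant - deliberately not implemented here */
    /* ccfg |= PL080_FLOW_MEM2PER_PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; */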
@@ -61,50 +77,39 @@ | |||
61 | #include <linux/device.h> | 77 | #include <linux/device.h> |
62 | #include <linux/init.h> | 78 | #include <linux/init.h> |
63 | #include <linux/module.h> | 79 | #include <linux/module.h> |
64 | #include <linux/pci.h> | ||
65 | #include <linux/interrupt.h> | 80 | #include <linux/interrupt.h> |
66 | #include <linux/slab.h> | 81 | #include <linux/slab.h> |
67 | #include <linux/dmapool.h> | 82 | #include <linux/dmapool.h> |
68 | #include <linux/amba/bus.h> | ||
69 | #include <linux/dmaengine.h> | 83 | #include <linux/dmaengine.h> |
84 | #include <linux/amba/bus.h> | ||
70 | #include <linux/amba/pl08x.h> | 85 | #include <linux/amba/pl08x.h> |
71 | #include <linux/debugfs.h> | 86 | #include <linux/debugfs.h> |
72 | #include <linux/seq_file.h> | 87 | #include <linux/seq_file.h> |
73 | 88 | ||
74 | #include <asm/hardware/pl080.h> | 89 | #include <asm/hardware/pl080.h> |
75 | #include <asm/dma.h> | ||
76 | #include <asm/mach/dma.h> | ||
77 | #include <asm/atomic.h> | ||
78 | #include <asm/processor.h> | ||
79 | #include <asm/cacheflush.h> | ||
80 | 90 | ||
81 | #define DRIVER_NAME "pl08xdmac" | 91 | #define DRIVER_NAME "pl08xdmac" |
82 | 92 | ||
83 | /** | 93 | /** |
84 | * struct vendor_data - vendor-specific config parameters | 94 | * struct vendor_data - vendor-specific config parameters for PL08x derivatives |
85 | * for PL08x derivates | ||
86 | * @name: the name of this specific variant | ||
87 | * @channels: the number of channels available in this variant | 95 | * @channels: the number of channels available in this variant |
88 | * @dualmaster: whether this version supports dual AHB masters | 96 | * @dualmaster: whether this version supports dual AHB masters or not. |
89 | * or not. | ||
90 | */ | 97 | */ |
91 | struct vendor_data { | 98 | struct vendor_data { |
92 | char *name; | ||
93 | u8 channels; | 99 | u8 channels; |
94 | bool dualmaster; | 100 | bool dualmaster; |
95 | }; | 101 | }; |
96 | 102 | ||
97 | /* | 103 | /* |
98 | * PL08X private data structures | 104 | * PL08X private data structures |
99 | * An LLI struct - see pl08x TRM | 105 | * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, |
100 | * Note that next uses bit[0] as a bus bit, | 106 | * start & end do not - their bus bit info is in cctl. Also note that these |
101 | * start & end do not - their bus bit info | 107 | * are fixed 32-bit quantities. |
102 | * is in cctl | ||
103 | */ | 108 | */ |
104 | struct lli { | 109 | struct pl08x_lli { |
105 | dma_addr_t src; | 110 | u32 src; |
106 | dma_addr_t dst; | 111 | u32 dst; |
107 | dma_addr_t next; | 112 | u32 lli; |
108 | u32 cctl; | 113 | u32 cctl; |
109 | }; | 114 | }; |
110 | 115 | ||
@@ -119,6 +124,8 @@ struct lli {
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
  * @pool_ctr: counter of LLIs in the pool
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @mem_buses: set to indicate memory transfers on AHB2.
  * @lock: a spinlock for this struct
  */
 struct pl08x_driver_data {
@@ -126,11 +133,13 @@ struct pl08x_driver_data {
 	struct dma_device memcpy;
 	void __iomem *base;
 	struct amba_device *adev;
-	struct vendor_data *vd;
+	const struct vendor_data *vd;
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
 	int pool_ctr;
+	u8 lli_buses;
+	u8 mem_buses;
 	spinlock_t lock;
 };
 
@@ -152,9 +161,9 @@ struct pl08x_driver_data {
 /* Size (bytes) of each LLI buffer allocated for one transfer */
 # define PL08X_LLI_TSFR_SIZE	0x2000
 
-/* Maximimum times we call dma_pool_alloc on this pool without freeing */
+/* Maximum times we call dma_pool_alloc on this pool without freeing */
 #define PL08X_MAX_ALLOCS	0x40
-#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
+#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
 #define PL08X_ALIGN		8
 
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
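The rename does not change the arithmetic behind MAX_NUM_TSFR_LLIS: a pl08x_lli is four u32 words (16 bytes), so the 0x2000-byte per-transfer buffer holds 512 entries. Spelled out as an illustrative sketch (not part of the patch):

    #include <linux/types.h>

    struct pl08x_lli { u32 src, dst, lli, cctl; };    /* 4 * 4 = 16 bytes */

    #define PL08X_LLI_TSFR_SIZE    0x2000
    /* 0x2000 / 16 == 512 LLI entries available per transfer */
    #define MAX_NUM_TSFR_LLIS    (PL08X_LLI_TSFR_SIZE / sizeof(struct pl08x_lli))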
@@ -162,6 +171,11 @@ static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 	return container_of(chan, struct pl08x_dma_chan, chan);
 }
 
+static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
+{
+	return container_of(tx, struct pl08x_txd, tx);
+}
+
 /*
  * Physical channel handling
  */
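The new to_pl08x_txd() helper is the usual container_of() idiom: given a pointer to a member embedded inside a larger structure, recover the enclosing structure. A generic illustration with hypothetical types (not from this patch):

    struct outer {
        int other_state;
        struct inner member;    /* embedded by value, not a pointer */
    };

    static inline struct outer *to_outer(struct inner *p)
    {
        /* container_of() subtracts offsetof(struct outer, member) from p */
        return container_of(p, struct outer, member);
    }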
@@ -177,88 +191,47 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 
 /*
  * Set the initial DMA register values i.e. those for the first LLI
- * The next lli pointer and the configuration interrupt bit have
- * been set when the LLIs were constructed
+ * The next LLI pointer and the configuration interrupt bit have
+ * been set when the LLIs were constructed. Poke them into the hardware
+ * and start the transfer.
  */
-static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
-			    struct pl08x_phy_chan *ch)
-{
-	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		;
-
-	dev_vdbg(&pl08x->adev->dev,
-		"WRITE channel %d: csrc=%08x, cdst=%08x, "
-		"cctl=%08x, clli=%08x, ccfg=%08x\n",
-		ch->id,
-		ch->csrc,
-		ch->cdst,
-		ch->cctl,
-		ch->clli,
-		ch->ccfg);
-
-	writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
-	writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
-	writel(ch->clli, ch->base + PL080_CH_LLI);
-	writel(ch->cctl, ch->base + PL080_CH_CONTROL);
-	writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
-}
-
-static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
+static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
+	struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = plchan->cd;
+	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_txd *txd = plchan->at;
-
-	/* Copy the basic control register calculated at transfer config */
-	phychan->csrc = txd->csrc;
-	phychan->cdst = txd->cdst;
-	phychan->clli = txd->clli;
-	phychan->cctl = txd->cctl;
-
-	/* Assign the signal to the proper control registers */
-	phychan->ccfg = cd->ccfg;
-	phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
-	phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
-	/* If it wasn't set from AMBA, ignore it */
-	if (txd->direction == DMA_TO_DEVICE)
-		/* Select signal as destination */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
-	else if (txd->direction == DMA_FROM_DEVICE)
-		/* Select signal as source */
-		phychan->ccfg |=
-			(phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
-	/* Always enable error interrupts */
-	phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
-	/* Always enable terminal interrupts */
-	phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
-}
-
-/*
- * Enable the DMA channel
- * Assumes all other configuration bits have been set
- * as desired before this code is called
- */
-static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
-				  struct pl08x_phy_chan *ch)
-{
+	struct pl08x_lli *lli = &txd->llis_va[0];
 	u32 val;
 
-	/*
-	 * Do not access config register until channel shows as disabled
-	 */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
-		;
+	plchan->at = txd;
 
-	/*
-	 * Do not access config register until channel shows as inactive
-	 */
-	val = readl(ch->base + PL080_CH_CONFIG);
+	/* Wait for channel inactive */
+	while (pl08x_phy_channel_busy(phychan))
+		cpu_relax();
+
+	dev_vdbg(&pl08x->adev->dev,
+		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
+		txd->ccfg);
+
+	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
+	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
+	writel(lli->lli, phychan->base + PL080_CH_LLI);
+	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
+	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+
+	/* Enable the DMA channel */
+	/* Do not access config register until channel shows as disabled */
+	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+		cpu_relax();
+
+	/* Do not access config register until channel shows as inactive */
+	val = readl(phychan->base + PL080_CH_CONFIG);
 	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-		val = readl(ch->base + PL080_CH_CONFIG);
+		val = readl(phychan->base + PL080_CH_CONFIG);
 
-	writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
+	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
 }
 
 /*
@@ -266,10 +239,8 @@ static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
  *
  * Disabling individual channels could lose data.
  *
- * Disable the peripheral DMA after disabling the DMAC
- * in order to allow the DMAC FIFO to drain, and
- * hence allow the channel to show inactive
- *
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
@@ -282,7 +253,7 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(ch))
-		;
+		cpu_relax();
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
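Replacing the bare empty statement with cpu_relax() keeps the busy-wait semantics, but adds a compiler barrier and, on architectures that support it, a pipeline/power hint (e.g. PAUSE on x86). The canonical kernel polling pattern, sketched with this driver's own register definitions (illustrative, not from the patch):

    /* poll until the channel no longer reports itself active */
    while (readl(ch->base + PL080_CH_CONFIG) & PL080_CONFIG_ACTIVE)
        cpu_relax();    /* barrier + arch-specific relax hint */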
@@ -333,54 +304,56 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
 static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
-	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *txd;
 	unsigned long flags;
-	u32 bytes = 0;
+	size_t bytes = 0;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
 	ch = plchan->phychan;
 	txd = plchan->at;
 
 	/*
-	 * Next follow the LLIs to get the number of pending bytes in the
-	 * currently active transaction.
+	 * Follow the LLIs to get the number of remaining
+	 * bytes in the currently active transaction.
 	 */
 	if (ch && txd) {
-		struct lli *llis_va = txd->llis_va;
-		struct lli *llis_bus = (struct lli *) txd->llis_bus;
-		u32 clli = readl(ch->base + PL080_CH_LLI);
+		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
 
-		/* First get the bytes in the current active LLI */
+		/* First get the remaining bytes in the active transfer */
 		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
 
 		if (clli) {
-			int i = 0;
+			struct pl08x_lli *llis_va = txd->llis_va;
+			dma_addr_t llis_bus = txd->llis_bus;
+			int index;
+
+			BUG_ON(clli < llis_bus || clli >= llis_bus +
+				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+
+			/*
+			 * Locate the next LLI - as this is an array,
+			 * it's simple maths to find.
+			 */
+			index = (clli - llis_bus) / sizeof(struct pl08x_lli);
 
-			/* Forward to the LLI pointed to by clli */
-			while ((clli != (u32) &(llis_bus[i])) &&
-			       (i < MAX_NUM_TSFR_LLIS))
-				i++;
+			for (; index < MAX_NUM_TSFR_LLIS; index++) {
+				bytes += get_bytes_in_cctl(llis_va[index].cctl);
 
-			while (clli) {
-				bytes += get_bytes_in_cctl(llis_va[i].cctl);
 				/*
-				 * A clli of 0x00000000 will terminate the
-				 * LLI list
+				 * A LLI pointer of 0 terminates the LLI list
 				 */
-				clli = llis_va[i].next;
-				i++;
+				if (!llis_va[index].lli)
+					break;
 			}
 		}
 	}
 
 	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->desc_list)) {
-		list_for_each_entry(txdi, &plchan->desc_list, node) {
+	if (!list_empty(&plchan->pend_list)) {
+		struct pl08x_txd *txdi;
+		list_for_each_entry(txdi, &plchan->pend_list, node) {
 			bytes += txdi->len;
 		}
-
 	}
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
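Because all LLIs of a transfer live in one contiguous dma_pool allocation, the hardware's current LLI pointer converts to an array index with plain arithmetic, replacing the old linear search. A worked sketch with hypothetical addresses (illustrative only):

    u32 clli = 0x40001040;               /* hypothetical value read from PL080_CH_LLI */
    dma_addr_t llis_bus = 0x40001000;    /* hypothetical bus address of the LLI array */

    /* sizeof(struct pl08x_lli) == 16 (four u32 fields) */
    int index = (clli - llis_bus) / sizeof(struct pl08x_lli);
    /* (0x40001040 - 0x40001000) / 16 == 4, i.e. the fifth LLI entry */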
@@ -390,6 +363,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 
 /*
  * Allocate a physical channel for a virtual channel
+ *
+ * Try to locate a physical channel to be used for this transfer. If all
+ * are taken return NULL and the requester will have to cope by using
+ * some fallback PIO mode or retrying later.
  */
 static struct pl08x_phy_chan *
 pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
@@ -399,12 +376,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 	unsigned long flags;
 	int i;
 
-	/*
-	 * Try to locate a physical channel to be used for
-	 * this transfer. If all are taken return NULL and
-	 * the requester will have to cope by using some fallback
-	 * PIO mode or retrying later.
-	 */
 	for (i = 0; i < pl08x->vd->channels; i++) {
 		ch = &pl08x->phy_chans[i];
 
@@ -465,11 +436,11 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
 }
 
 static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
-				  u32 tsize)
+				  size_t tsize)
 {
 	u32 retbits = cctl;
 
-	/* Remove all src, dst and transfersize bits */
+	/* Remove all src, dst and transfer size bits */
 	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
 	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
@@ -509,95 +480,87 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
 	return retbits;
 }
 
+struct pl08x_lli_build_data {
+	struct pl08x_txd *txd;
+	struct pl08x_driver_data *pl08x;
+	struct pl08x_bus_data srcbus;
+	struct pl08x_bus_data dstbus;
+	size_t remainder;
+};
+
 /*
- * Autoselect a master bus to use for the transfer
- * this prefers the destination bus if both available
- * if fixed address on one bus the other will be chosen
+ * Autoselect a master bus to use for the transfer this prefers the
+ * destination bus if both available if fixed address on one bus the
+ * other will be chosen
  */
-void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
-	struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
-	struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
+	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
 {
 	if (!(cctl & PL080_CONTROL_DST_INCR)) {
-		*mbus = src_bus;
-		*sbus = dst_bus;
+		*mbus = &bd->srcbus;
+		*sbus = &bd->dstbus;
 	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
-		*mbus = dst_bus;
-		*sbus = src_bus;
+		*mbus = &bd->dstbus;
+		*sbus = &bd->srcbus;
 	} else {
-		if (dst_bus->buswidth == 4) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 4) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
-		} else if (dst_bus->buswidth == 2) {
-			*mbus = dst_bus;
-			*sbus = src_bus;
-		} else if (src_bus->buswidth == 2) {
-			*mbus = src_bus;
-			*sbus = dst_bus;
+		if (bd->dstbus.buswidth == 4) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 4) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
+		} else if (bd->dstbus.buswidth == 2) {
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
+		} else if (bd->srcbus.buswidth == 2) {
+			*mbus = &bd->srcbus;
+			*sbus = &bd->dstbus;
 		} else {
-			/* src_bus->buswidth == 1 */
-			*mbus = dst_bus;
-			*sbus = src_bus;
+			/* bd->srcbus.buswidth == 1 */
+			*mbus = &bd->dstbus;
+			*sbus = &bd->srcbus;
 		}
 	}
 }
 
 /*
- * Fills in one LLI for a certain transfer descriptor
- * and advance the counter
+ * Fills in one LLI for a certain transfer descriptor and advance the counter
 */
-int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
-	struct pl08x_txd *txd, int num_llis, int len,
-	u32 cctl, u32 *remainder)
+static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
+	int num_llis, int len, u32 cctl)
 {
-	struct lli *llis_va = txd->llis_va;
-	struct lli *llis_bus = (struct lli *) txd->llis_bus;
+	struct pl08x_lli *llis_va = bd->txd->llis_va;
+	dma_addr_t llis_bus = bd->txd->llis_bus;
 
 	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
 
 	llis_va[num_llis].cctl = cctl;
-	llis_va[num_llis].src = txd->srcbus.addr;
-	llis_va[num_llis].dst = txd->dstbus.addr;
-
-	/*
-	 * On versions with dual masters, you can optionally AND on
-	 * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read
-	 * in new LLIs with that controller, but we always try to
-	 * choose AHB1 to point into memory. The idea is to have AHB2
-	 * fixed on the peripheral and AHB1 messing around in the
-	 * memory. So we don't manipulate this bit currently.
-	 */
-
-	llis_va[num_llis].next =
-		(dma_addr_t)((u32) &(llis_bus[num_llis + 1]));
+	llis_va[num_llis].src = bd->srcbus.addr;
+	llis_va[num_llis].dst = bd->dstbus.addr;
+	llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
+	if (bd->pl08x->lli_buses & PL08X_AHB2)
+		llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
 
 	if (cctl & PL080_CONTROL_SRC_INCR)
-		txd->srcbus.addr += len;
+		bd->srcbus.addr += len;
 	if (cctl & PL080_CONTROL_DST_INCR)
-		txd->dstbus.addr += len;
+		bd->dstbus.addr += len;
 
-	*remainder -= len;
+	BUG_ON(bd->remainder < len);
 
-	return num_llis + 1;
+	bd->remainder -= len;
 }
 
 /*
- * Return number of bytes to fill to boundary, or len
+ * Return number of bytes to fill to boundary, or len.
+ * This calculation works for any value of addr.
 */
-static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
+static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
 {
-	u32 boundary;
-
-	boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
-			<< PL08X_BOUNDARY_SHIFT;
+	size_t boundary_len = PL08X_BOUNDARY_SIZE -
+		(addr & (PL08X_BOUNDARY_SIZE - 1));
 
-	if (boundary < addr + len)
-		return boundary - addr;
-	else
-		return len;
+	return min(boundary_len, len);
 }
 
 /*
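The rewritten pl08x_pre_boundary() computes the distance to the next transfer boundary with a single mask rather than shifting up and back down. A worked example, assuming the driver's 1KB boundary (PL08X_BOUNDARY_SHIFT == 10, so PL08X_BOUNDARY_SIZE == 0x400):

    u32 addr = 0x12345ff8;    /* 8 bytes below the next 1KB boundary */
    size_t len = 64;

    /* 0x400 - (0x12345ff8 & 0x3ff) = 0x400 - 0x3f8 = 8 */
    size_t boundary_len = 0x400 - (addr & (0x400 - 1));

    /* min(8, 64) == 8: only 8 bytes fit before the boundary is crossed */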
@@ -608,20 +571,13 @@ static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
 static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 				    struct pl08x_txd *txd)
 {
-	struct pl08x_channel_data *cd = txd->cd;
 	struct pl08x_bus_data *mbus, *sbus;
-	u32 remainder;
+	struct pl08x_lli_build_data bd;
 	int num_llis = 0;
 	u32 cctl;
-	int max_bytes_per_lli;
-	int total_bytes = 0;
-	struct lli *llis_va;
-	struct lli *llis_bus;
-
-	if (!txd) {
-		dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
-		return 0;
-	}
+	size_t max_bytes_per_lli;
+	size_t total_bytes = 0;
+	struct pl08x_lli *llis_va;
 
 	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
 				      &txd->llis_bus);
@@ -632,121 +588,79 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	pl08x->pool_ctr++;
 
-	/*
-	 * Initialize bus values for this transfer
-	 * from the passed optimal values
-	 */
-	if (!cd) {
-		dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
-		return 0;
-	}
+	/* Get the default CCTL */
+	cctl = txd->cctl;
 
-	/* Get the default CCTL from the platform data */
-	cctl = cd->cctl;
-
-	/*
-	 * On the PL080 we have two bus masters and we
-	 * should select one for source and one for
-	 * destination. We try to use AHB2 for the
-	 * bus which does not increment (typically the
-	 * peripheral) else we just choose something.
-	 */
-	cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-	if (pl08x->vd->dualmaster) {
-		if (cctl & PL080_CONTROL_SRC_INCR)
-			/* Source increments, use AHB2 for destination */
-			cctl |= PL080_CONTROL_DST_AHB2;
-		else if (cctl & PL080_CONTROL_DST_INCR)
-			/* Destination increments, use AHB2 for source */
-			cctl |= PL080_CONTROL_SRC_AHB2;
-		else
-			/* Just pick something, source AHB1 dest AHB2 */
-			cctl |= PL080_CONTROL_DST_AHB2;
-	}
+	bd.txd = txd;
+	bd.pl08x = pl08x;
+	bd.srcbus.addr = txd->src_addr;
+	bd.dstbus.addr = txd->dst_addr;
 
 	/* Find maximum width of the source bus */
-	txd->srcbus.maxwidth =
+	bd.srcbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
 				       PL080_CONTROL_SWIDTH_SHIFT);
 
 	/* Find maximum width of the destination bus */
-	txd->dstbus.maxwidth =
+	bd.dstbus.maxwidth =
 		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
 				       PL080_CONTROL_DWIDTH_SHIFT);
 
 	/* Set up the bus widths to the maximum */
-	txd->srcbus.buswidth = txd->srcbus.maxwidth;
-	txd->dstbus.buswidth = txd->dstbus.maxwidth;
+	bd.srcbus.buswidth = bd.srcbus.maxwidth;
+	bd.dstbus.buswidth = bd.dstbus.maxwidth;
 	dev_vdbg(&pl08x->adev->dev,
 		 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
-		 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);
+		 __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
 
 
 	/*
 	 * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
 	 */
-	max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
+	max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
 		PL080_CONTROL_TRANSFER_SIZE_MASK;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s max bytes per lli = %d\n",
+		 "%s max bytes per lli = %zu\n",
 		 __func__, max_bytes_per_lli);
 
 	/* We need to count this down to zero */
-	remainder = txd->len;
+	bd.remainder = txd->len;
 	dev_vdbg(&pl08x->adev->dev,
-		 "%s remainder = %d\n",
-		 __func__, remainder);
+		 "%s remainder = %zu\n",
+		 __func__, bd.remainder);
 
 	/*
 	 * Choose bus to align to
 	 * - prefers destination bus if both available
 	 * - if fixed address on one bus chooses other
-	 * - modifies cctl to choose an apropriate master
-	 */
-	pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
-				&mbus, &sbus, cctl);
-
-
-	/*
-	 * The lowest bit of the LLI register
-	 * is also used to indicate which master to
-	 * use for reading the LLIs.
 	 */
+	pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
 
 	if (txd->len < mbus->buswidth) {
-		/*
-		 * Less than a bus width available
-		 * - send as single bytes
-		 */
-		while (remainder) {
+		/* Less than a bus width available - send as single bytes */
+		while (bd.remainder) {
 			dev_vdbg(&pl08x->adev->dev,
 				 "%s single byte LLIs for a transfer of "
-				 "less than a bus width (remain %08x)\n",
-				 __func__, remainder);
+				 "less than a bus width (remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis =
-				pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
-					cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	} else {
-		/*
-		 * Make one byte LLIs until master bus is aligned
-		 * - slave will then be aligned also
-		 */
+		/* Make one byte LLIs until master bus is aligned */
 		while ((mbus->addr) % (mbus->buswidth)) {
 			dev_vdbg(&pl08x->adev->dev,
 				"%s adjustment lli for less than bus width "
-				 "(remain %08x)\n",
-				 __func__, remainder);
+				 "(remain 0x%08x)\n",
+				 __func__, bd.remainder);
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
-			num_llis = pl08x_fill_lli_for_desc
-				(pl08x, txd, num_llis, 1, cctl, &remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 
 		/*
 		 * Master now aligned
 		 * - if slave is not then we must set its width down
 		 */
 		if (sbus->addr % sbus->buswidth) {
@@ -761,63 +675,51 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		 * Make largest possible LLIs until less than one bus
 		 * width left
 		 */
-		while (remainder > (mbus->buswidth - 1)) {
-			int lli_len, target_len;
-			int tsize;
-			int odd_bytes;
+		while (bd.remainder > (mbus->buswidth - 1)) {
+			size_t lli_len, target_len, tsize, odd_bytes;
 
 			/*
 			 * If enough left try to send max possible,
 			 * otherwise try to send the remainder
 			 */
-			target_len = remainder;
-			if (remainder > max_bytes_per_lli)
-				target_len = max_bytes_per_lli;
+			target_len = min(bd.remainder, max_bytes_per_lli);
 
 			/*
-			 * Set bus lengths for incrementing busses
-			 * to number of bytes which fill to next memory
-			 * boundary
+			 * Set bus lengths for incrementing buses to the
+			 * number of bytes which fill to next memory boundary,
+			 * limiting on the target length calculated above.
 			 */
 			if (cctl & PL080_CONTROL_SRC_INCR)
-				txd->srcbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->srcbus.addr,
-						remainder);
+				bd.srcbus.fill_bytes =
+					pl08x_pre_boundary(bd.srcbus.addr,
+						target_len);
 			else
-				txd->srcbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.srcbus.fill_bytes = target_len;
 
 			if (cctl & PL080_CONTROL_DST_INCR)
-				txd->dstbus.fill_bytes =
-					pl08x_pre_boundary(
-						txd->dstbus.addr,
-						remainder);
+				bd.dstbus.fill_bytes =
+					pl08x_pre_boundary(bd.dstbus.addr,
+						target_len);
 			else
-				txd->dstbus.fill_bytes =
-					max_bytes_per_lli;
+				bd.dstbus.fill_bytes = target_len;
 
-			/*
-			 * Find the nearest
-			 */
-			lli_len	= min(txd->srcbus.fill_bytes,
-				txd->dstbus.fill_bytes);
+			/* Find the nearest */
+			lli_len	= min(bd.srcbus.fill_bytes,
+				   bd.dstbus.fill_bytes);
 
-			BUG_ON(lli_len > remainder);
+			BUG_ON(lli_len > bd.remainder);
 
 			if (lli_len <= 0) {
 				dev_err(&pl08x->adev->dev,
-					"%s lli_len is %d, <= 0\n",
+					"%s lli_len is %zu, <= 0\n",
 					__func__, lli_len);
 				return 0;
 			}
 
 			if (lli_len == target_len) {
 				/*
-				 * Can send what we wanted
-				 */
-				/*
-				 * Maintain alignment
+				 * Can send what we wanted.
+				 * Maintain alignment
 				 */
 				lli_len	= (lli_len/mbus->buswidth) *
 					mbus->buswidth;
@@ -825,17 +727,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			} else {
 				/*
 				 * So now we know how many bytes to transfer
-				 * to get to the nearest boundary
-				 * The next lli will past the boundary
-				 * - however we may be working to a boundary
-				 * on the slave bus
-				 * We need to ensure the master stays aligned
+				 * to get to the nearest boundary. The next
+				 * LLI will past the boundary. However, we
+				 * may be working to a boundary on the slave
+				 * bus. We need to ensure the master stays
+				 * aligned, and that we are working in
+				 * multiples of the bus widths.
 				 */
 				odd_bytes = lli_len % mbus->buswidth;
-				/*
-				 * - and that we are working in multiples
-				 * of the bus widths
-				 */
 				lli_len -= odd_bytes;
 
 			}
@@ -855,41 +754,38 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 			if (target_len != lli_len) {
 				dev_vdbg(&pl08x->adev->dev,
-				"%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
+				"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 				__func__, target_len, lli_len, txd->len);
 			}
 
 			cctl = pl08x_cctl_bits(cctl,
-					       txd->srcbus.buswidth,
-					       txd->dstbus.buswidth,
+					       bd.srcbus.buswidth,
+					       bd.dstbus.buswidth,
 					       tsize);
 
 			dev_vdbg(&pl08x->adev->dev,
-				"%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
-				__func__, lli_len, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
-					num_llis, lli_len, cctl,
-					&remainder);
+				"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+				__func__, lli_len, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++,
+					lli_len, cctl);
 			total_bytes += lli_len;
 		}
 
 
 		if (odd_bytes) {
 			/*
-			 * Creep past the boundary,
-			 * maintaining master alignment
+			 * Creep past the boundary, maintaining
+			 * master alignment
 			 */
 			int j;
 			for (j = 0; (j < mbus->buswidth)
-					&& (remainder); j++) {
+					&& (bd.remainder); j++) {
 				cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 				dev_vdbg(&pl08x->adev->dev,
-					"%s align with boundardy, single byte (remain %08x)\n",
-					__func__, remainder);
-				num_llis =
-					pl08x_fill_lli_for_desc(pl08x,
-						txd, num_llis, 1,
-						cctl, &remainder);
+					"%s align with boundary, single byte (remain 0x%08zx)\n",
+					__func__, bd.remainder);
+				pl08x_fill_lli_for_desc(&bd,
+					num_llis++, 1, cctl);
 				total_bytes++;
 			}
 		}
@@ -898,25 +794,18 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		/*
 		 * Send any odd bytes
 		 */
-		if (remainder < 0) {
-			dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
-				__func__, remainder);
-			return 0;
-		}
-
-		while (remainder) {
+		while (bd.remainder) {
 			cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 			dev_vdbg(&pl08x->adev->dev,
-				 "%s align with boundardy, single odd byte (remain %d)\n",
-				 __func__, remainder);
-			num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
-					1, cctl, &remainder);
+				 "%s align with boundary, single odd byte (remain %zu)\n",
+				 __func__, bd.remainder);
+			pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
 			total_bytes++;
 		}
 	}
 	if (total_bytes != txd->len) {
 		dev_err(&pl08x->adev->dev,
-			"%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n",
+			"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
 			__func__, total_bytes, txd->len);
 		return 0;
 	}
@@ -927,41 +816,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 			__func__, (u32) MAX_NUM_TSFR_LLIS);
 		return 0;
 	}
-	/*
-	 * Decide whether this is a loop or a terminated transfer
-	 */
-	llis_va = txd->llis_va;
-	llis_bus = (struct lli *) txd->llis_bus;
 
-	if (cd->circular_buffer) {
-		/*
-		 * Loop the circular buffer so that the next element
-		 * points back to the beginning of the LLI.
-		 */
-		llis_va[num_llis - 1].next =
-			(dma_addr_t)((unsigned int)&(llis_bus[0]));
-	} else {
-		/*
-		 * On non-circular buffers, the final LLI terminates
-		 * the LLI.
-		 */
-		llis_va[num_llis - 1].next = 0;
-		/*
-		 * The final LLI element shall also fire an interrupt
-		 */
-		llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
-	}
-
-	/* Now store the channel register values */
-	txd->csrc = llis_va[0].src;
-	txd->cdst = llis_va[0].dst;
-	if (num_llis > 1)
-		txd->clli = llis_va[0].next;
-	else
-		txd->clli = 0;
-
-	txd->cctl = llis_va[0].cctl;
-	/* ccfg will be set at physical channel allocation time */
+	llis_va = txd->llis_va;
+	/* The final LLI terminates the LLI. */
+	llis_va[num_llis - 1].lli = 0;
+	/* The final LLI element shall also fire an interrupt. */
+	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
 
 #ifdef VERBOSE_DEBUG
 	{
@@ -969,13 +829,13 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 		for (i = 0; i < num_llis; i++) {
 			dev_vdbg(&pl08x->adev->dev,
-				 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
+				 "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
 				 i,
 				 &llis_va[i],
 				 llis_va[i].src,
 				 llis_va[i].dst,
 				 llis_va[i].cctl,
-				 llis_va[i].next
+				 llis_va[i].lli
 				 );
 		}
 	}
@@ -988,14 +848,8 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 			   struct pl08x_txd *txd)
 {
-	if (!txd)
-		dev_err(&pl08x->adev->dev,
-			"%s no descriptor to free\n",
-			__func__);
-
 	/* Free the LLI */
-	dma_pool_free(pl08x->pool, txd->llis_va,
-		      txd->llis_bus);
+	dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
 
 	pl08x->pool_ctr--;
 
@@ -1008,13 +862,12 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txdi = NULL;
 	struct pl08x_txd *next;
 
-	if (!list_empty(&plchan->desc_list)) {
+	if (!list_empty(&plchan->pend_list)) {
 		list_for_each_entry_safe(txdi,
-					 next, &plchan->desc_list, node) {
+					 next, &plchan->pend_list, node) {
 			list_del(&txdi->node);
 			pl08x_free_txd(pl08x, txdi);
 		}
-
 	}
 }
 
@@ -1069,6 +922,12 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
+
+		/* Assign the flow control signal to this channel */
+		if (txd->direction == DMA_TO_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+		else if (txd->direction == DMA_FROM_DEVICE)
+			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
@@ -1076,19 +935,54 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 		 ch->signal,
 		 plchan->name);
 
+	plchan->phychan_hold++;
 	plchan->phychan = ch;
 
 	return 0;
 }
 
+static void release_phy_channel(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
+		pl08x->pd->put_signal(plchan);
+		plchan->phychan->signal = -1;
+	}
+	pl08x_put_phy_channel(pl08x, plchan->phychan);
+	plchan->phychan = NULL;
+}
+
 static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
+	struct pl08x_txd *txd = to_pl08x_txd(tx);
+	unsigned long flags;
 
-	atomic_inc(&plchan->last_issued);
-	tx->cookie = atomic_read(&plchan->last_issued);
-	/* This unlock follows the lock in the prep() function */
-	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
+	spin_lock_irqsave(&plchan->lock, flags);
+
+	plchan->chan.cookie += 1;
+	if (plchan->chan.cookie < 0)
+		plchan->chan.cookie = 1;
+	tx->cookie = plchan->chan.cookie;
+
+	/* Put this onto the pending list */
+	list_add_tail(&txd->node, &plchan->pend_list);
+
+	/*
+	 * If there was no physical channel available for this memcpy,
+	 * stack the request up and indicate that the channel is waiting
+	 * for a free physical channel.
+	 */
+	if (!plchan->slave && !plchan->phychan) {
+		/* Do this memcpy whenever there is a channel ready */
+		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting = txd;
+	} else {
+		plchan->phychan_hold--;
+	}
+
+	spin_unlock_irqrestore(&plchan->lock, flags);
 
 	return tx->cookie;
 }
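The cookie handling in the new pl08x_tx_submit() follows the dmaengine convention that cookies are positive, monotonically increasing s32 values, with zero and negatives reserved; on wrap-around the counter restarts at 1. The scheme in isolation (a sketch only; the signed wrap relies on the kernel being built with -fno-strict-overflow semantics):

    static dma_cookie_t next_cookie(dma_cookie_t last)
    {
        last += 1;
        if (last < 0)     /* wrapped past the maximum positive value */
            last = 1;     /* 0 is reserved, so restart at 1 */
        return last;
    }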
@@ -1102,10 +996,9 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
 }
 
 /*
- * Code accessing dma_async_is_complete() in a tight loop
- * may give problems - could schedule where indicated.
- * If slaves are relying on interrupts to signal completion this
- * function must not be called with interrupts disabled
+ * Code accessing dma_async_is_complete() in a tight loop may give problems.
+ * If slaves are relying on interrupts to signal completion this function
+ * must not be called with interrupts disabled.
 */
 static enum dma_status
 pl08x_dma_tx_status(struct dma_chan *chan,
@@ -1118,7 +1011,7 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 	u32 bytesleft = 0;
 
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1128,13 +1021,9 @@ pl08x_dma_tx_status(struct dma_chan *chan,
 	}
 
 	/*
-	 * schedule(); could be inserted here
-	 */
-
-	/*
 	 * This cookie not complete yet
 	 */
-	last_used = atomic_read(&plchan->last_issued);
+	last_used = plchan->chan.cookie;
 	last_complete = plchan->lc;
 
 	/* Get number of bytes left in the active transactions and queue */
@@ -1199,37 +1088,35 @@ static const struct burst_table burst_sizes[] = {
 	},
 };
 
-static void dma_set_runtime_config(struct dma_chan *chan,
-				   struct dma_slave_config *config)
+static int dma_set_runtime_config(struct dma_chan *chan,
+				  struct dma_slave_config *config)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_channel_data *cd = plchan->cd;
 	enum dma_slave_buswidth addr_width;
+	dma_addr_t addr;
 	u32 maxburst;
 	u32 cctl = 0;
-	/* Mask out all except src and dst channel */
-	u32 ccfg = cd->ccfg & 0x000003DEU;
-	int i = 0;
+	int i;
+
+	if (!plchan->slave)
+		return -EINVAL;
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
 	if (config->direction == DMA_TO_DEVICE) {
-		plchan->runtime_addr = config->dst_addr;
-		cctl |= PL080_CONTROL_SRC_INCR;
-		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
 	} else if (config->direction == DMA_FROM_DEVICE) {
-		plchan->runtime_addr = config->src_addr;
-		cctl |= PL080_CONTROL_DST_INCR;
-		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
 		dev_err(&pl08x->adev->dev,
 			"bad runtime_config: alien transfer direction\n");
-		return;
+		return -EINVAL;
 	}
 
 	switch (addr_width) {
@@ -1248,42 +1135,40 @@ static void dma_set_runtime_config(struct dma_chan *chan, | |||
1248 | default: | 1135 | default: |
1249 | dev_err(&pl08x->adev->dev, | 1136 | dev_err(&pl08x->adev->dev, |
1250 | "bad runtime_config: alien address width\n"); | 1137 | "bad runtime_config: alien address width\n"); |
1251 | return; | 1138 | return -EINVAL; |
1252 | } | 1139 | } |
1253 | 1140 | ||
1254 | /* | 1141 | /* |
1255 | * Now decide on a maxburst: | 1142 | * Now decide on a maxburst: |
1256 | * If this channel will only request single transfers, set | 1143 | * If this channel will only request single transfers, set this |
1257 | * this down to ONE element. | 1144 | * down to ONE element. Also select one element if no maxburst |
1145 | * is specified. | ||
1258 | */ | 1146 | */ |
1259 | if (plchan->cd->single) { | 1147 | if (plchan->cd->single || maxburst == 0) { |
1260 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | 1148 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | |
1261 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | 1149 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); |
1262 | } else { | 1150 | } else { |
1263 | while (i < ARRAY_SIZE(burst_sizes)) { | 1151 | for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) |
1264 | if (burst_sizes[i].burstwords <= maxburst) | 1152 | if (burst_sizes[i].burstwords <= maxburst) |
1265 | break; | 1153 | break; |
1266 | i++; | ||
1267 | } | ||
1268 | cctl |= burst_sizes[i].reg; | 1154 | cctl |= burst_sizes[i].reg; |
1269 | } | 1155 | } |
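The selection loop above assumes burst_sizes[] is ordered from largest to smallest burst, so the first entry whose burstwords does not exceed maxburst is the best fit. A self-contained sketch with hypothetical table values (the real entries carry PL080_BSIZE_* register encodings):

        #include <stdio.h>

        /* Hypothetical stand-in for the driver's burst_sizes[] table,
         * largest burst first, ending in a 1-word entry. */
        static const struct { unsigned int burstwords, reg; } bursts[] = {
                { 256, 9 }, { 128, 8 }, { 64, 7 }, { 32, 6 },
                { 16, 5 }, { 8, 4 }, { 4, 3 }, { 1, 0 },
        };

        /* Largest burst not exceeding maxburst, like the driver loop;
         * any maxburst >= 1 matches the final entry. */
        static unsigned int pick_burst(unsigned int maxburst)
        {
                unsigned int i;

                for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]) - 1; i++)
                        if (bursts[i].burstwords <= maxburst)
                                break;
                return bursts[i].burstwords;
        }

        int main(void)
        {
                printf("maxburst 20 -> %u-word burst\n", pick_burst(20)); /* 16 */
                printf("maxburst 4  -> %u-word burst\n", pick_burst(4));  /* 4 */
                return 0;
        }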
1270 | 1156 | ||
1271 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | 1157 | plchan->runtime_addr = addr; |
1272 | cctl &= ~PL080_CONTROL_PROT_MASK; | ||
1273 | cctl |= PL080_CONTROL_PROT_SYS; | ||
1274 | 1158 | ||
1275 | /* Modify the default channel data to fit PrimeCell request */ | 1159 | /* Modify the default channel data to fit PrimeCell request */ |
1276 | cd->cctl = cctl; | 1160 | cd->cctl = cctl; |
1277 | cd->ccfg = ccfg; | ||
1278 | 1161 | ||
1279 | dev_dbg(&pl08x->adev->dev, | 1162 | dev_dbg(&pl08x->adev->dev, |
1280 | "configured channel %s (%s) for %s, data width %d, " | 1163 | "configured channel %s (%s) for %s, data width %d, " |
1281 | "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", | 1164 | "maxburst %d words, LE, CCTL=0x%08x\n", |
1282 | dma_chan_name(chan), plchan->name, | 1165 | dma_chan_name(chan), plchan->name, |
1283 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 1166 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", |
1284 | addr_width, | 1167 | addr_width, |
1285 | maxburst, | 1168 | maxburst, |
1286 | cctl, ccfg); | 1169 | cctl); |
1170 | |||
1171 | return 0; | ||
1287 | } | 1172 | } |
1288 | 1173 | ||
1289 | /* | 1174 | /* |
@@ -1293,35 +1178,26 @@ static void dma_set_runtime_config(struct dma_chan *chan, | |||
1293 | static void pl08x_issue_pending(struct dma_chan *chan) | 1178 | static void pl08x_issue_pending(struct dma_chan *chan) |
1294 | { | 1179 | { |
1295 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1180 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1296 | struct pl08x_driver_data *pl08x = plchan->host; | ||
1297 | unsigned long flags; | 1181 | unsigned long flags; |
1298 | 1182 | ||
1299 | spin_lock_irqsave(&plchan->lock, flags); | 1183 | spin_lock_irqsave(&plchan->lock, flags); |
1300 | /* Something is already active */ | 1184 | /* Something is already active, or we're waiting for a channel... */ |
1301 | if (plchan->at) { | 1185 | if (plchan->at || plchan->state == PL08X_CHAN_WAITING) { |
1302 | spin_unlock_irqrestore(&plchan->lock, flags); | 1186 | spin_unlock_irqrestore(&plchan->lock, flags); |
1303 | return; | ||
1304 | } | ||
1305 | |||
1306 | /* Didn't get a physical channel so waiting for it ... */ | ||
1307 | if (plchan->state == PL08X_CHAN_WAITING) | ||
1308 | return; | 1187 | return; |
1188 | } | ||
1309 | 1189 | ||
1310 | /* Take the first element in the queue and execute it */ | 1190 | /* Take the first element in the queue and execute it */ |
1311 | if (!list_empty(&plchan->desc_list)) { | 1191 | if (!list_empty(&plchan->pend_list)) { |
1312 | struct pl08x_txd *next; | 1192 | struct pl08x_txd *next; |
1313 | 1193 | ||
1314 | next = list_first_entry(&plchan->desc_list, | 1194 | next = list_first_entry(&plchan->pend_list, |
1315 | struct pl08x_txd, | 1195 | struct pl08x_txd, |
1316 | node); | 1196 | node); |
1317 | list_del(&next->node); | 1197 | list_del(&next->node); |
1318 | plchan->at = next; | ||
1319 | plchan->state = PL08X_CHAN_RUNNING; | 1198 | plchan->state = PL08X_CHAN_RUNNING; |
1320 | 1199 | ||
1321 | /* Configure the physical channel for the active txd */ | 1200 | pl08x_start_txd(plchan, next); |
1322 | pl08x_config_phychan_for_txd(plchan); | ||
1323 | pl08x_set_cregs(pl08x, plchan->phychan); | ||
1324 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | ||
1325 | } | 1201 | } |
1326 | 1202 | ||
1327 | spin_unlock_irqrestore(&plchan->lock, flags); | 1203 | spin_unlock_irqrestore(&plchan->lock, flags); |
@@ -1330,30 +1206,17 @@ static void pl08x_issue_pending(struct dma_chan *chan) | |||
1330 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | 1206 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, |
1331 | struct pl08x_txd *txd) | 1207 | struct pl08x_txd *txd) |
1332 | { | 1208 | { |
1333 | int num_llis; | ||
1334 | struct pl08x_driver_data *pl08x = plchan->host; | 1209 | struct pl08x_driver_data *pl08x = plchan->host; |
1335 | int ret; | 1210 | unsigned long flags; |
1211 | int num_llis, ret; | ||
1336 | 1212 | ||
1337 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | 1213 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); |
1338 | 1214 | if (!num_llis) { | |
1339 | if (!num_llis) | 1215 | kfree(txd); |
1340 | return -EINVAL; | 1216 | return -EINVAL; |
1217 | } | ||
1341 | 1218 | ||
1342 | spin_lock_irqsave(&plchan->lock, plchan->lockflags); | 1219 | spin_lock_irqsave(&plchan->lock, flags); |
1343 | |||
1344 | /* | ||
1345 | * If this device is not using a circular buffer then | ||
1346 | * queue this new descriptor for transfer. | ||
1347 | * The descriptor for a circular buffer continues | ||
1348 | * to be used until the channel is freed. | ||
1349 | */ | ||
1350 | if (txd->cd->circular_buffer) | ||
1351 | dev_err(&pl08x->adev->dev, | ||
1352 | "%s attempting to queue a circular buffer\n", | ||
1353 | __func__); | ||
1354 | else | ||
1355 | list_add_tail(&txd->node, | ||
1356 | &plchan->desc_list); | ||
1357 | 1220 | ||
1358 | /* | 1221 | /* |
1359 | * See if we already have a physical channel allocated, | 1222 | * See if we already have a physical channel allocated, |
@@ -1362,45 +1225,74 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | |||
1362 | ret = prep_phy_channel(plchan, txd); | 1225 | ret = prep_phy_channel(plchan, txd); |
1363 | if (ret) { | 1226 | if (ret) { |
1364 | /* | 1227 | /* |
1365 | * No physical channel available, we will | 1228 | * No physical channel was available. |
1366 | * stack up the memcpy channels until there is a channel | 1229 | * |
1367 | * available to handle it whereas slave transfers may | 1230 | * memcpy transfers can be sorted out at submission time. |
1368 | * have been denied due to platform channel muxing restrictions | 1231 | * |
1369 | * and since there is no guarantee that this will ever be | 1232 | * Slave transfers may have been denied due to platform |
1370 | * resolved, and since the signal must be aquired AFTER | 1233 | * channel muxing restrictions. Since there is no guarantee |
1371 | * aquiring the physical channel, we will let them be NACK:ed | 1234 | * that this will ever be resolved, and the signal must be |
1372 | * with -EBUSY here. The drivers can alway retry the prep() | 1235 | * acquired AFTER acquiring the physical channel, we will let |
1373 | * call if they are eager on doing this using DMA. | 1236 | * them be NACK:ed with -EBUSY here. The drivers can retry |
1237 | * the prep() call if they are eager on doing this using DMA. | ||
1374 | */ | 1238 | */ |
1375 | if (plchan->slave) { | 1239 | if (plchan->slave) { |
1376 | pl08x_free_txd_list(pl08x, plchan); | 1240 | pl08x_free_txd_list(pl08x, plchan); |
1377 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | 1241 | pl08x_free_txd(pl08x, txd); |
1242 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1378 | return -EBUSY; | 1243 | return -EBUSY; |
1379 | } | 1244 | } |
1380 | /* Do this memcpy whenever there is a channel ready */ | ||
1381 | plchan->state = PL08X_CHAN_WAITING; | ||
1382 | plchan->waiting = txd; | ||
1383 | } else | 1245 | } else |
1384 | /* | 1246 | /* |
1385 | * Else we're all set, paused and ready to roll, | 1247 | * Else we're all set, paused and ready to roll, status |
1386 | * status will switch to PL08X_CHAN_RUNNING when | 1248 | * will switch to PL08X_CHAN_RUNNING when we call |
1387 | * we call issue_pending(). If there is something | 1249 | * issue_pending(). If there is something running on the |
1388 | * running on the channel already we don't change | 1250 | * channel already we don't change its state. |
1389 | * its state. | ||
1390 | */ | 1251 | */ |
1391 | if (plchan->state == PL08X_CHAN_IDLE) | 1252 | if (plchan->state == PL08X_CHAN_IDLE) |
1392 | plchan->state = PL08X_CHAN_PAUSED; | 1253 | plchan->state = PL08X_CHAN_PAUSED; |
1393 | 1254 | ||
1394 | /* | 1255 | spin_unlock_irqrestore(&plchan->lock, flags); |
1395 | * Notice that we leave plchan->lock locked on purpose: | ||
1396 | * it will be unlocked in the subsequent tx_submit() | ||
1397 | * call. This is a consequence of the current API. | ||
1398 | */ | ||
1399 | 1256 | ||
1400 | return 0; | 1257 | return 0; |
1401 | } | 1258 | } |
1402 | 1259 | ||
1403 | /* | 1260 | /* |
1261 | * Given the source and destination available bus masks, select which | ||
1262 | * will be routed to each port. We try to have source and destination | ||
1263 | * on separate ports, but always respect the allowable settings. | ||
1264 | */ | ||
1265 | static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) | ||
1266 | { | ||
1267 | u32 cctl = 0; | ||
1268 | |||
1269 | if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) | ||
1270 | cctl |= PL080_CONTROL_DST_AHB2; | ||
1271 | if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) | ||
1272 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
1273 | |||
1274 | return cctl; | ||
1275 | } | ||
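The comment above describes the intent; the bit tests themselves are easiest to verify on a worked case. A userspace mirror of the same logic, with stand-in constants for the PL08X_AHB* masks and the PL080_CONTROL_*_AHB2 bits:

        #include <stdio.h>

        #define AHB1     0x1   /* stand-in for PL08X_AHB1 */
        #define AHB2     0x2   /* stand-in for PL08X_AHB2 */
        #define DST_AHB2 0x1   /* models PL080_CONTROL_DST_AHB2 */
        #define SRC_AHB2 0x2   /* models PL080_CONTROL_SRC_AHB2 */

        /* Same tests as pl08x_select_bus(): place src and dst on
         * different masters whenever the capability masks allow it. */
        static unsigned int select_bus(unsigned int src, unsigned int dst)
        {
                unsigned int cctl = 0;

                if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
                        cctl |= DST_AHB2;
                if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
                        cctl |= SRC_AHB2;
                return cctl;
        }

        int main(void)
        {
                /* both sides dual-master: src keeps AHB1, dst moves to AHB2 */
                printf("%#x\n", select_bus(AHB1 | AHB2, AHB1 | AHB2)); /* 0x1 */
                /* dst limited to AHB1: src is pushed to AHB2 instead */
                printf("%#x\n", select_bus(AHB1 | AHB2, AHB1));        /* 0x2 */
                return 0;
        }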
1276 | |||
1277 | static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, | ||
1278 | unsigned long flags) | ||
1279 | { | ||
1280 | struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
1281 | |||
1282 | if (txd) { | ||
1283 | dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); | ||
1284 | txd->tx.flags = flags; | ||
1285 | txd->tx.tx_submit = pl08x_tx_submit; | ||
1286 | INIT_LIST_HEAD(&txd->node); | ||
1287 | |||
1288 | /* Always enable error and terminal interrupts */ | ||
1289 | txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | | ||
1290 | PL080_CONFIG_TC_IRQ_MASK; | ||
1291 | } | ||
1292 | return txd; | ||
1293 | } | ||
1294 | |||
1295 | /* | ||
1404 | * Initialize a descriptor to be used by memcpy submit | 1296 | * Initialize a descriptor to be used by memcpy submit |
1405 | */ | 1297 | */ |
1406 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | 1298 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( |
@@ -1412,40 +1304,38 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | |||
1412 | struct pl08x_txd *txd; | 1304 | struct pl08x_txd *txd; |
1413 | int ret; | 1305 | int ret; |
1414 | 1306 | ||
1415 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1307 | txd = pl08x_get_txd(plchan, flags); |
1416 | if (!txd) { | 1308 | if (!txd) { |
1417 | dev_err(&pl08x->adev->dev, | 1309 | dev_err(&pl08x->adev->dev, |
1418 | "%s no memory for descriptor\n", __func__); | 1310 | "%s no memory for descriptor\n", __func__); |
1419 | return NULL; | 1311 | return NULL; |
1420 | } | 1312 | } |
1421 | 1313 | ||
1422 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1423 | txd->direction = DMA_NONE; | 1314 | txd->direction = DMA_NONE; |
1424 | txd->srcbus.addr = src; | 1315 | txd->src_addr = src; |
1425 | txd->dstbus.addr = dest; | 1316 | txd->dst_addr = dest; |
1317 | txd->len = len; | ||
1426 | 1318 | ||
1427 | /* Set platform data for m2m */ | 1319 | /* Set platform data for m2m */ |
1428 | txd->cd = &pl08x->pd->memcpy_channel; | 1320 | txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1321 | txd->cctl = pl08x->pd->memcpy_channel.cctl & | ||
1322 | ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
1323 | |||
1429 | /* Both to be incremented or the code will break */ | 1324 | /* Both to be incremented or the code will break */ |
1430 | txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | 1325 | txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; |
1431 | txd->tx.tx_submit = pl08x_tx_submit; | 1326 | |
1432 | txd->tx.callback = NULL; | 1327 | if (pl08x->vd->dualmaster) |
1433 | txd->tx.callback_param = NULL; | 1328 | txd->cctl |= pl08x_select_bus(pl08x, |
1434 | txd->len = len; | 1329 | pl08x->mem_buses, pl08x->mem_buses); |
1435 | 1330 | ||
1436 | INIT_LIST_HEAD(&txd->node); | ||
1437 | ret = pl08x_prep_channel_resources(plchan, txd); | 1331 | ret = pl08x_prep_channel_resources(plchan, txd); |
1438 | if (ret) | 1332 | if (ret) |
1439 | return NULL; | 1333 | return NULL; |
1440 | /* | ||
1441 | * NB: the channel lock is held at this point so tx_submit() | ||
1442 | * must be called in direct succession. | ||
1443 | */ | ||
1444 | 1334 | ||
1445 | return &txd->tx; | 1335 | return &txd->tx; |
1446 | } | 1336 | } |
1447 | 1337 | ||
1448 | struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | 1338 | static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( |
1449 | struct dma_chan *chan, struct scatterlist *sgl, | 1339 | struct dma_chan *chan, struct scatterlist *sgl, |
1450 | unsigned int sg_len, enum dma_data_direction direction, | 1340 | unsigned int sg_len, enum dma_data_direction direction, |
1451 | unsigned long flags) | 1341 | unsigned long flags) |
@@ -1453,6 +1343,7 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1453 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1343 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1454 | struct pl08x_driver_data *pl08x = plchan->host; | 1344 | struct pl08x_driver_data *pl08x = plchan->host; |
1455 | struct pl08x_txd *txd; | 1345 | struct pl08x_txd *txd; |
1346 | u8 src_buses, dst_buses; | ||
1456 | int ret; | 1347 | int ret; |
1457 | 1348 | ||
1458 | /* | 1349 | /* |
@@ -1467,14 +1358,12 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1467 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | 1358 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", |
1468 | __func__, sgl->length, plchan->name); | 1359 | __func__, sgl->length, plchan->name); |
1469 | 1360 | ||
1470 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | 1361 | txd = pl08x_get_txd(plchan, flags); |
1471 | if (!txd) { | 1362 | if (!txd) { |
1472 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | 1363 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); |
1473 | return NULL; | 1364 | return NULL; |
1474 | } | 1365 | } |
1475 | 1366 | ||
1476 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
1477 | |||
1478 | if (direction != plchan->runtime_direction) | 1367 | if (direction != plchan->runtime_direction) |
1479 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | 1368 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " |
1480 | "the direction configured for the PrimeCell\n", | 1369 | "the direction configured for the PrimeCell\n", |
@@ -1486,37 +1375,47 @@ struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1486 | * channel target address dynamically at runtime. | 1375 | * channel target address dynamically at runtime. |
1487 | */ | 1376 | */ |
1488 | txd->direction = direction; | 1377 | txd->direction = direction; |
1378 | txd->len = sgl->length; | ||
1379 | |||
1380 | txd->cctl = plchan->cd->cctl & | ||
1381 | ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | | ||
1382 | PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | | ||
1383 | PL080_CONTROL_PROT_MASK); | ||
1384 | |||
1385 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
1386 | txd->cctl |= PL080_CONTROL_PROT_SYS; | ||
1387 | |||
1489 | if (direction == DMA_TO_DEVICE) { | 1388 | if (direction == DMA_TO_DEVICE) { |
1490 | txd->srcbus.addr = sgl->dma_address; | 1389 | txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; |
1390 | txd->cctl |= PL080_CONTROL_SRC_INCR; | ||
1391 | txd->src_addr = sgl->dma_address; | ||
1491 | if (plchan->runtime_addr) | 1392 | if (plchan->runtime_addr) |
1492 | txd->dstbus.addr = plchan->runtime_addr; | 1393 | txd->dst_addr = plchan->runtime_addr; |
1493 | else | 1394 | else |
1494 | txd->dstbus.addr = plchan->cd->addr; | 1395 | txd->dst_addr = plchan->cd->addr; |
1396 | src_buses = pl08x->mem_buses; | ||
1397 | dst_buses = plchan->cd->periph_buses; | ||
1495 | } else if (direction == DMA_FROM_DEVICE) { | 1398 | } else if (direction == DMA_FROM_DEVICE) { |
1399 | txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
1400 | txd->cctl |= PL080_CONTROL_DST_INCR; | ||
1496 | if (plchan->runtime_addr) | 1401 | if (plchan->runtime_addr) |
1497 | txd->srcbus.addr = plchan->runtime_addr; | 1402 | txd->src_addr = plchan->runtime_addr; |
1498 | else | 1403 | else |
1499 | txd->srcbus.addr = plchan->cd->addr; | 1404 | txd->src_addr = plchan->cd->addr; |
1500 | txd->dstbus.addr = sgl->dma_address; | 1405 | txd->dst_addr = sgl->dma_address; |
1406 | src_buses = plchan->cd->periph_buses; | ||
1407 | dst_buses = pl08x->mem_buses; | ||
1501 | } else { | 1408 | } else { |
1502 | dev_err(&pl08x->adev->dev, | 1409 | dev_err(&pl08x->adev->dev, |
1503 | "%s direction unsupported\n", __func__); | 1410 | "%s direction unsupported\n", __func__); |
1504 | return NULL; | 1411 | return NULL; |
1505 | } | 1412 | } |
1506 | txd->cd = plchan->cd; | 1413 | |
1507 | txd->tx.tx_submit = pl08x_tx_submit; | 1414 | txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); |
1508 | txd->tx.callback = NULL; | ||
1509 | txd->tx.callback_param = NULL; | ||
1510 | txd->len = sgl->length; | ||
1511 | INIT_LIST_HEAD(&txd->node); | ||
1512 | 1415 | ||
1513 | ret = pl08x_prep_channel_resources(plchan, txd); | 1416 | ret = pl08x_prep_channel_resources(plchan, txd); |
1514 | if (ret) | 1417 | if (ret) |
1515 | return NULL; | 1418 | return NULL; |
1516 | /* | ||
1517 | * NB: the channel lock is held at this point so tx_submit() | ||
1518 | * must be called in direct succession. | ||
1519 | */ | ||
1520 | 1419 | ||
1521 | return &txd->tx; | 1420 | return &txd->tx; |
1522 | } | 1421 | } |
@@ -1531,10 +1430,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1531 | 1430 | ||
1532 | /* Controls applicable to inactive channels */ | 1431 | /* Controls applicable to inactive channels */ |
1533 | if (cmd == DMA_SLAVE_CONFIG) { | 1432 | if (cmd == DMA_SLAVE_CONFIG) { |
1534 | dma_set_runtime_config(chan, | 1433 | return dma_set_runtime_config(chan, |
1535 | (struct dma_slave_config *) | 1434 | (struct dma_slave_config *)arg); |
1536 | arg); | ||
1537 | return 0; | ||
1538 | } | 1435 | } |
1539 | 1436 | ||
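Since dma_set_runtime_config() now returns an error code and pl08x_control() propagates it, a slave driver sees configuration problems at setup time rather than silently. A hedged sketch of the caller side (kernel context; the FIFO address and burst count are made up for illustration, and the dmaengine_slave_config() wrapper is assumed to be available):

        #include <linux/dmaengine.h>

        /* Sketch only: this reaches pl08x_control(DMA_SLAVE_CONFIG) and
         * hence dma_set_runtime_config(), whose -EINVAL now propagates. */
        static int setup_tx_channel(struct dma_chan *chan, dma_addr_t fifo)
        {
                struct dma_slave_config cfg = {
                        .direction      = DMA_TO_DEVICE,
                        .dst_addr       = fifo,
                        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                        .dst_maxburst   = 16,   /* illustrative burst size */
                };

                return dmaengine_slave_config(chan, &cfg);
        }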
1540 | /* | 1437 | /* |
@@ -1558,16 +1455,8 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1558 | * Mark physical channel as free and free any slave | 1455 | * Mark physical channel as free and free any slave |
1559 | * signal | 1456 | * signal |
1560 | */ | 1457 | */ |
1561 | if ((plchan->phychan->signal >= 0) && | 1458 | release_phy_channel(plchan); |
1562 | pl08x->pd->put_signal) { | ||
1563 | pl08x->pd->put_signal(plchan); | ||
1564 | plchan->phychan->signal = -1; | ||
1565 | } | ||
1566 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
1567 | plchan->phychan = NULL; | ||
1568 | } | 1459 | } |
1569 | /* Stop any pending tasklet */ | ||
1570 | tasklet_disable(&plchan->tasklet); | ||
1571 | /* Dequeue jobs and free LLIs */ | 1460 | /* Dequeue jobs and free LLIs */ |
1572 | if (plchan->at) { | 1461 | if (plchan->at) { |
1573 | pl08x_free_txd(pl08x, plchan->at); | 1462 | pl08x_free_txd(pl08x, plchan->at); |
@@ -1609,10 +1498,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | |||
1609 | 1498 | ||
1610 | /* | 1499 | /* |
1611 | * Just check that the device is there and active | 1500 | * Just check that the device is there and active |
1612 | * TODO: turn this bit on/off depending on the number of | 1501 | * TODO: turn this bit on/off depending on the number of physical channels |
1613 | * physical channels actually used, if it is zero... well | 1502 | * actually used, if it is zero... well shut it off. That will save some |
1614 | * shut it off. That will save some power. Cut the clock | 1503 | * power. Cut the clock at the same time. |
1615 | * at the same time. | ||
1616 | */ | 1504 | */ |
1617 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | 1505 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) |
1618 | { | 1506 | { |
@@ -1620,78 +1508,66 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | |||
1620 | 1508 | ||
1621 | val = readl(pl08x->base + PL080_CONFIG); | 1509 | val = readl(pl08x->base + PL080_CONFIG); |
1622 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | 1510 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); |
1623 | /* We implictly clear bit 1 and that means little-endian mode */ | 1511 | /* We implicitly clear bit 1 and that means little-endian mode */ |
1624 | val |= PL080_CONFIG_ENABLE; | 1512 | val |= PL080_CONFIG_ENABLE; |
1625 | writel(val, pl08x->base + PL080_CONFIG); | 1513 | writel(val, pl08x->base + PL080_CONFIG); |
1626 | } | 1514 | } |
1627 | 1515 | ||
1516 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
1517 | { | ||
1518 | struct device *dev = txd->tx.chan->device->dev; | ||
1519 | |||
1520 | if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1521 | if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1522 | dma_unmap_single(dev, txd->src_addr, txd->len, | ||
1523 | DMA_TO_DEVICE); | ||
1524 | else | ||
1525 | dma_unmap_page(dev, txd->src_addr, txd->len, | ||
1526 | DMA_TO_DEVICE); | ||
1527 | } | ||
1528 | if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1529 | if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1530 | dma_unmap_single(dev, txd->dst_addr, txd->len, | ||
1531 | DMA_FROM_DEVICE); | ||
1532 | else | ||
1533 | dma_unmap_page(dev, txd->dst_addr, txd->len, | ||
1534 | DMA_FROM_DEVICE); | ||
1535 | } | ||
1536 | } | ||
1537 | |||
1628 | static void pl08x_tasklet(unsigned long data) | 1538 | static void pl08x_tasklet(unsigned long data) |
1629 | { | 1539 | { |
1630 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | 1540 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; |
1631 | struct pl08x_phy_chan *phychan = plchan->phychan; | ||
1632 | struct pl08x_driver_data *pl08x = plchan->host; | 1541 | struct pl08x_driver_data *pl08x = plchan->host; |
1542 | struct pl08x_txd *txd; | ||
1543 | unsigned long flags; | ||
1633 | 1544 | ||
1634 | if (!plchan) | 1545 | spin_lock_irqsave(&plchan->lock, flags); |
1635 | BUG(); | ||
1636 | |||
1637 | spin_lock(&plchan->lock); | ||
1638 | |||
1639 | if (plchan->at) { | ||
1640 | dma_async_tx_callback callback = | ||
1641 | plchan->at->tx.callback; | ||
1642 | void *callback_param = | ||
1643 | plchan->at->tx.callback_param; | ||
1644 | |||
1645 | /* | ||
1646 | * Update last completed | ||
1647 | */ | ||
1648 | plchan->lc = | ||
1649 | (plchan->at->tx.cookie); | ||
1650 | |||
1651 | /* | ||
1652 | * Callback to signal completion | ||
1653 | */ | ||
1654 | if (callback) | ||
1655 | callback(callback_param); | ||
1656 | 1546 | ||
1657 | /* | 1547 | txd = plchan->at; |
1658 | * Device callbacks should NOT clear | 1548 | plchan->at = NULL; |
1659 | * the current transaction on the channel | ||
1660 | * Linus: sometimes they should? | ||
1661 | */ | ||
1662 | if (!plchan->at) | ||
1663 | BUG(); | ||
1664 | 1549 | ||
1665 | /* | 1550 | if (txd) { |
1666 | * Free the descriptor if it's not for a device | 1551 | /* Update last completed */ |
1667 | * using a circular buffer | 1552 | plchan->lc = txd->tx.cookie; |
1668 | */ | ||
1669 | if (!plchan->at->cd->circular_buffer) { | ||
1670 | pl08x_free_txd(pl08x, plchan->at); | ||
1671 | plchan->at = NULL; | ||
1672 | } | ||
1673 | /* | ||
1674 | * else descriptor for circular | ||
1675 | * buffers only freed when | ||
1676 | * client has disabled dma | ||
1677 | */ | ||
1678 | } | 1553 | } |
1679 | /* | 1554 | |
1680 | * If a new descriptor is queued, set it up | 1555 | /* If a new descriptor is queued, set it up - plchan->at is NULL here */ |
1681 | * plchan->at is NULL here | 1556 | if (!list_empty(&plchan->pend_list)) { |
1682 | */ | ||
1683 | if (!list_empty(&plchan->desc_list)) { | ||
1684 | struct pl08x_txd *next; | 1557 | struct pl08x_txd *next; |
1685 | 1558 | ||
1686 | next = list_first_entry(&plchan->desc_list, | 1559 | next = list_first_entry(&plchan->pend_list, |
1687 | struct pl08x_txd, | 1560 | struct pl08x_txd, |
1688 | node); | 1561 | node); |
1689 | list_del(&next->node); | 1562 | list_del(&next->node); |
1690 | plchan->at = next; | 1563 | |
1691 | /* Configure the physical channel for the next txd */ | 1564 | pl08x_start_txd(plchan, next); |
1692 | pl08x_config_phychan_for_txd(plchan); | 1565 | } else if (plchan->phychan_hold) { |
1693 | pl08x_set_cregs(pl08x, plchan->phychan); | 1566 | /* |
1694 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | 1567 | * This channel is still in use - we have a new txd being |
1568 | * prepared and will soon be queued. Don't give up the | ||
1569 | * physical channel. | ||
1570 | */ | ||
1695 | } else { | 1571 | } else { |
1696 | struct pl08x_dma_chan *waiting = NULL; | 1572 | struct pl08x_dma_chan *waiting = NULL; |
1697 | 1573 | ||
@@ -1699,20 +1575,14 @@ static void pl08x_tasklet(unsigned long data) | |||
1699 | * No more jobs, so free up the physical channel | 1575 | * No more jobs, so free up the physical channel |
1700 | * Free any allocated signal on slave transfers too | 1576 | * Free any allocated signal on slave transfers too |
1701 | */ | 1577 | */ |
1702 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | 1578 | release_phy_channel(plchan); |
1703 | pl08x->pd->put_signal(plchan); | ||
1704 | phychan->signal = -1; | ||
1705 | } | ||
1706 | pl08x_put_phy_channel(pl08x, phychan); | ||
1707 | plchan->phychan = NULL; | ||
1708 | plchan->state = PL08X_CHAN_IDLE; | 1579 | plchan->state = PL08X_CHAN_IDLE; |
1709 | 1580 | ||
1710 | /* | 1581 | /* |
1711 | * And NOW before anyone else can grab that free:d | 1582 | * And NOW before anyone else can grab that free:d up |
1712 | * up physical channel, see if there is some memcpy | 1583 | * physical channel, see if there is some memcpy pending |
1713 | * pending that seriously needs to start because of | 1584 | * that seriously needs to start because of being stacked |
1714 | * being stacked up while we were choking the | 1585 | * up while we were choking the physical channels with data. |
1715 | * physical channels with data. | ||
1716 | */ | 1586 | */ |
1717 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | 1587 | list_for_each_entry(waiting, &pl08x->memcpy.channels, |
1718 | chan.device_node) { | 1588 | chan.device_node) { |
@@ -1724,6 +1594,7 @@ static void pl08x_tasklet(unsigned long data) | |||
1724 | ret = prep_phy_channel(waiting, | 1594 | ret = prep_phy_channel(waiting, |
1725 | waiting->waiting); | 1595 | waiting->waiting); |
1726 | BUG_ON(ret); | 1596 | BUG_ON(ret); |
1597 | waiting->phychan_hold--; | ||
1727 | waiting->state = PL08X_CHAN_RUNNING; | 1598 | waiting->state = PL08X_CHAN_RUNNING; |
1728 | waiting->waiting = NULL; | 1599 | waiting->waiting = NULL; |
1729 | pl08x_issue_pending(&waiting->chan); | 1600 | pl08x_issue_pending(&waiting->chan); |
@@ -1732,7 +1603,25 @@ static void pl08x_tasklet(unsigned long data) | |||
1732 | } | 1603 | } |
1733 | } | 1604 | } |
1734 | 1605 | ||
1735 | spin_unlock(&plchan->lock); | 1606 | spin_unlock_irqrestore(&plchan->lock, flags); |
1607 | |||
1608 | if (txd) { | ||
1609 | dma_async_tx_callback callback = txd->tx.callback; | ||
1610 | void *callback_param = txd->tx.callback_param; | ||
1611 | |||
1612 | /* Don't try to unmap buffers on slave channels */ | ||
1613 | if (!plchan->slave) | ||
1614 | pl08x_unmap_buffers(txd); | ||
1615 | |||
1616 | /* Free the descriptor */ | ||
1617 | spin_lock_irqsave(&plchan->lock, flags); | ||
1618 | pl08x_free_txd(pl08x, txd); | ||
1619 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
1620 | |||
1621 | /* Callback to signal completion */ | ||
1622 | if (callback) | ||
1623 | callback(callback_param); | ||
1624 | } | ||
1736 | } | 1625 | } |
1737 | 1626 | ||
1738 | static irqreturn_t pl08x_irq(int irq, void *dev) | 1627 | static irqreturn_t pl08x_irq(int irq, void *dev) |
@@ -1744,9 +1633,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1744 | 1633 | ||
1745 | val = readl(pl08x->base + PL080_ERR_STATUS); | 1634 | val = readl(pl08x->base + PL080_ERR_STATUS); |
1746 | if (val) { | 1635 | if (val) { |
1747 | /* | 1636 | /* An error interrupt (on one or more channels) */ |
1748 | * An error interrupt (on one or more channels) | ||
1749 | */ | ||
1750 | dev_err(&pl08x->adev->dev, | 1637 | dev_err(&pl08x->adev->dev, |
1751 | "%s error interrupt, register value 0x%08x\n", | 1638 | "%s error interrupt, register value 0x%08x\n", |
1752 | __func__, val); | 1639 | __func__, val); |
@@ -1770,9 +1657,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) | |||
1770 | mask |= (1 << i); | 1657 | mask |= (1 << i); |
1771 | } | 1658 | } |
1772 | } | 1659 | } |
1773 | /* | 1660 | /* Clear only the terminal interrupts on channels we processed */ |
1774 | * Clear only the terminal interrupts on channels we processed | ||
1775 | */ | ||
1776 | writel(mask, pl08x->base + PL080_TC_CLEAR); | 1661 | writel(mask, pl08x->base + PL080_TC_CLEAR); |
1777 | 1662 | ||
1778 | return mask ? IRQ_HANDLED : IRQ_NONE; | 1663 | return mask ? IRQ_HANDLED : IRQ_NONE; |
@@ -1791,6 +1676,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1791 | int i; | 1676 | int i; |
1792 | 1677 | ||
1793 | INIT_LIST_HEAD(&dmadev->channels); | 1678 | INIT_LIST_HEAD(&dmadev->channels); |
1679 | |||
1794 | /* | 1680 | /* |
1795 | * Register as many memcpy channels as we have physical channels, | 1681 | ||
1796 | * we won't always be able to use all, but the code will have | 1682 | ||
@@ -1819,16 +1705,23 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |||
1819 | return -ENOMEM; | 1705 | return -ENOMEM; |
1820 | } | 1706 | } |
1821 | } | 1707 | } |
1708 | if (chan->cd->circular_buffer) { | ||
1709 | dev_err(&pl08x->adev->dev, | ||
1710 | "channel %s: circular buffers not supported\n", | ||
1711 | chan->name); | ||
1712 | kfree(chan); | ||
1713 | continue; | ||
1714 | } | ||
1822 | dev_info(&pl08x->adev->dev, | 1715 | dev_info(&pl08x->adev->dev, |
1823 | "initialize virtual channel \"%s\"\n", | 1716 | "initialize virtual channel \"%s\"\n", |
1824 | chan->name); | 1717 | chan->name); |
1825 | 1718 | ||
1826 | chan->chan.device = dmadev; | 1719 | chan->chan.device = dmadev; |
1827 | atomic_set(&chan->last_issued, 0); | 1720 | chan->chan.cookie = 0; |
1828 | chan->lc = atomic_read(&chan->last_issued); | 1721 | chan->lc = 0; |
1829 | 1722 | ||
1830 | spin_lock_init(&chan->lock); | 1723 | spin_lock_init(&chan->lock); |
1831 | INIT_LIST_HEAD(&chan->desc_list); | 1724 | INIT_LIST_HEAD(&chan->pend_list); |
1832 | tasklet_init(&chan->tasklet, pl08x_tasklet, | 1725 | tasklet_init(&chan->tasklet, pl08x_tasklet, |
1833 | (unsigned long) chan); | 1726 | (unsigned long) chan); |
1834 | 1727 | ||
@@ -1898,7 +1791,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1898 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1791 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1899 | seq_printf(s, "--------\t------\n"); | 1792 | seq_printf(s, "--------\t------\n"); |
1900 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | 1793 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { |
1901 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1794 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1902 | pl08x_state_str(chan->state)); | 1795 | pl08x_state_str(chan->state)); |
1903 | } | 1796 | } |
1904 | 1797 | ||
@@ -1906,7 +1799,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) | |||
1906 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | 1799 | seq_printf(s, "CHANNEL:\tSTATE:\n"); |
1907 | seq_printf(s, "--------\t------\n"); | 1800 | seq_printf(s, "--------\t------\n"); |
1908 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | 1801 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { |
1909 | seq_printf(s, "%s\t\t\%s\n", chan->name, | 1802 | seq_printf(s, "%s\t\t%s\n", chan->name, |
1910 | pl08x_state_str(chan->state)); | 1803 | pl08x_state_str(chan->state)); |
1911 | } | 1804 | } |
1912 | 1805 | ||
@@ -1942,7 +1835,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |||
1942 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | 1835 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) |
1943 | { | 1836 | { |
1944 | struct pl08x_driver_data *pl08x; | 1837 | struct pl08x_driver_data *pl08x; |
1945 | struct vendor_data *vd = id->data; | 1838 | const struct vendor_data *vd = id->data; |
1946 | int ret = 0; | 1839 | int ret = 0; |
1947 | int i; | 1840 | int i; |
1948 | 1841 | ||
@@ -1990,6 +1883,14 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
1990 | pl08x->adev = adev; | 1883 | pl08x->adev = adev; |
1991 | pl08x->vd = vd; | 1884 | pl08x->vd = vd; |
1992 | 1885 | ||
1886 | /* By default, AHB1 only. If dualmaster, from platform */ | ||
1887 | pl08x->lli_buses = PL08X_AHB1; | ||
1888 | pl08x->mem_buses = PL08X_AHB1; | ||
1889 | if (pl08x->vd->dualmaster) { | ||
1890 | pl08x->lli_buses = pl08x->pd->lli_buses; | ||
1891 | pl08x->mem_buses = pl08x->pd->mem_buses; | ||
1892 | } | ||
1893 | |||
1993 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ | 1894 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ |
1994 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | 1895 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, |
1995 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | 1896 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); |
@@ -2009,14 +1910,12 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2009 | /* Turn on the PL08x */ | 1910 | /* Turn on the PL08x */ |
2010 | pl08x_ensure_on(pl08x); | 1911 | pl08x_ensure_on(pl08x); |
2011 | 1912 | ||
2012 | /* | 1913 | /* Attach the interrupt handler */ |
2013 | * Attach the interrupt handler | ||
2014 | */ | ||
2015 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | 1914 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); |
2016 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | 1915 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); |
2017 | 1916 | ||
2018 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | 1917 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, |
2019 | vd->name, pl08x); | 1918 | DRIVER_NAME, pl08x); |
2020 | if (ret) { | 1919 | if (ret) { |
2021 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | 1920 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", |
2022 | __func__, adev->irq[0]); | 1921 | __func__, adev->irq[0]); |
@@ -2087,8 +1986,9 @@ static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |||
2087 | 1986 | ||
2088 | amba_set_drvdata(adev, pl08x); | 1987 | amba_set_drvdata(adev, pl08x); |
2089 | init_pl08x_debugfs(pl08x); | 1988 | init_pl08x_debugfs(pl08x); |
2090 | dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", | 1989 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2091 | vd->name, adev->res.start); | 1990 | amba_part(adev), amba_rev(adev), |
1991 | (unsigned long long)adev->res.start, adev->irq[0]); | ||
2092 | return 0; | 1992 | return 0; |
2093 | 1993 | ||
2094 | out_no_slave_reg: | 1994 | out_no_slave_reg: |
@@ -2115,13 +2015,11 @@ out_no_pl08x: | |||
2115 | 2015 | ||
2116 | /* PL080 has 8 channels and the PL081 has just 2 */ | 2016 | ||
2117 | static struct vendor_data vendor_pl080 = { | 2017 | static struct vendor_data vendor_pl080 = { |
2118 | .name = "PL080", | ||
2119 | .channels = 8, | 2018 | .channels = 8, |
2120 | .dualmaster = true, | 2019 | .dualmaster = true, |
2121 | }; | 2020 | }; |
2122 | 2021 | ||
2123 | static struct vendor_data vendor_pl081 = { | 2022 | static struct vendor_data vendor_pl081 = { |
2124 | .name = "PL081", | ||
2125 | .channels = 2, | 2023 | .channels = 2, |
2126 | .dualmaster = false, | 2024 | .dualmaster = false, |
2127 | }; | 2025 | }; |
@@ -2160,7 +2058,7 @@ static int __init pl08x_init(void) | |||
2160 | retval = amba_driver_register(&pl08x_amba_driver); | 2058 | retval = amba_driver_register(&pl08x_amba_driver); |
2161 | if (retval) | 2059 | if (retval) |
2162 | printk(KERN_WARNING DRIVER_NAME | 2060 | printk(KERN_WARNING DRIVER_NAME |
2163 | "failed to register as an amba device (%d)\n", | 2061 | "failed to register as an AMBA device (%d)\n", |
2164 | retval); | 2062 | retval); |
2165 | return retval; | 2063 | return retval; |
2166 | } | 2064 | } |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index ea0ee81cff53..3d7d705f026f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
253 | /* move myself to free_list */ | 253 | /* move myself to free_list */ |
254 | list_move(&desc->desc_node, &atchan->free_list); | 254 | list_move(&desc->desc_node, &atchan->free_list); |
255 | 255 | ||
256 | /* unmap dma addresses */ | 256 | /* unmap dma addresses (not on slave channels) */ |
257 | if (!atchan->chan_common.private) { | 257 | if (!atchan->chan_common.private) { |
258 | struct device *parent = chan2parent(&atchan->chan_common); | 258 | struct device *parent = chan2parent(&atchan->chan_common); |
259 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 259 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
583 | desc->lli.ctrlb = ctrlb; | 583 | desc->lli.ctrlb = ctrlb; |
584 | 584 | ||
585 | desc->txd.cookie = 0; | 585 | desc->txd.cookie = 0; |
586 | async_tx_ack(&desc->txd); | ||
587 | 586 | ||
588 | if (!first) { | 587 | if (!first) { |
589 | first = desc; | 588 | first = desc; |
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
604 | /* set end-of-link to the last link descriptor of list*/ | 603 | /* set end-of-link to the last link descriptor of list*/ |
605 | set_desc_eol(desc); | 604 | set_desc_eol(desc); |
606 | 605 | ||
607 | desc->txd.flags = flags; /* client is in control of this ack */ | 606 | first->txd.flags = flags; /* client is in control of this ack */ |
608 | 607 | ||
609 | return &first->txd; | 608 | return &first->txd; |
610 | 609 | ||
@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
670 | if (!desc) | 669 | if (!desc) |
671 | goto err_desc_get; | 670 | goto err_desc_get; |
672 | 671 | ||
673 | mem = sg_phys(sg); | 672 | mem = sg_dma_address(sg); |
674 | len = sg_dma_len(sg); | 673 | len = sg_dma_len(sg); |
675 | mem_width = 2; | 674 | mem_width = 2; |
676 | if (unlikely(mem & 3 || len & 3)) | 675 | if (unlikely(mem & 3 || len & 3)) |
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
712 | if (!desc) | 711 | if (!desc) |
713 | goto err_desc_get; | 712 | goto err_desc_get; |
714 | 713 | ||
715 | mem = sg_phys(sg); | 714 | mem = sg_dma_address(sg); |
716 | len = sg_dma_len(sg); | 715 | len = sg_dma_len(sg); |
717 | mem_width = 2; | 716 | mem_width = 2; |
718 | if (unlikely(mem & 3 || len & 3)) | 717 | if (unlikely(mem & 3 || len & 3)) |
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
749 | first->txd.cookie = -EBUSY; | 748 | first->txd.cookie = -EBUSY; |
750 | first->len = total_len; | 749 | first->len = total_len; |
751 | 750 | ||
752 | /* last link descriptor of list is responsible of flags */ | 751 | /* first link descriptor of list is responsible of flags */ |
753 | prev->txd.flags = flags; /* client is in control of this ack */ | 752 | first->txd.flags = flags; /* client is in control of this ack */ |
754 | 753 | ||
755 | return &first->txd; | 754 | return &first->txd; |
756 | 755 | ||
@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
854 | 853 | ||
855 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 854 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
856 | 855 | ||
856 | spin_lock_bh(&atchan->lock); | ||
857 | if (!atc_chan_is_enabled(atchan)) { | 857 | if (!atc_chan_is_enabled(atchan)) { |
858 | spin_lock_bh(&atchan->lock); | ||
859 | atc_advance_work(atchan); | 858 | atc_advance_work(atchan); |
860 | spin_unlock_bh(&atchan->lock); | ||
861 | } | 859 | } |
860 | spin_unlock_bh(&atchan->lock); | ||
862 | } | 861 | } |
863 | 862 | ||
864 | /** | 863 | /** |
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void) | |||
1210 | { | 1209 | { |
1211 | return platform_driver_probe(&at_dma_driver, at_dma_probe); | 1210 | return platform_driver_probe(&at_dma_driver, at_dma_probe); |
1212 | } | 1211 | } |
1213 | module_init(at_dma_init); | 1212 | subsys_initcall(at_dma_init); |
1214 | 1213 | ||
1215 | static void __exit at_dma_exit(void) | 1214 | static void __exit at_dma_exit(void) |
1216 | { | 1215 | { |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e5e172d21692..4de947a450fc 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
1324 | fdev->common.device_control = fsl_dma_device_control; | 1324 | fdev->common.device_control = fsl_dma_device_control; |
1325 | fdev->common.dev = &op->dev; | 1325 | fdev->common.dev = &op->dev; |
1326 | 1326 | ||
1327 | dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); | ||
1328 | |||
1327 | dev_set_drvdata(&op->dev, fdev); | 1329 | dev_set_drvdata(&op->dev, fdev); |
1328 | 1330 | ||
1329 | /* | 1331 | /* |
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 78266382797e..798f46a4590d 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
664 | /*calculate CTL_LO*/ | 664 | /*calculate CTL_LO*/ |
665 | ctl_lo.ctl_lo = 0; | 665 | ctl_lo.ctl_lo = 0; |
666 | ctl_lo.ctlx.int_en = 1; | 666 | ctl_lo.ctlx.int_en = 1; |
667 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; | ||
668 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; | ||
669 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; | 667 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; |
670 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; | 668 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; |
671 | 669 | ||
670 | /* | ||
671 | * Here we need some translation from "enum dma_slave_buswidth" | ||
672 | * to the format for our dma controller | ||
673 | * standard intel_mid_dmac's format | ||
674 | * 1 Byte 0b000 | ||
675 | * 2 Bytes 0b001 | ||
676 | * 4 Bytes 0b010 | ||
677 | */ | ||
678 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; | ||
679 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; | ||
680 | |||
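The divide by two works because enum dma_slave_buswidth gives the width in bytes (1, 2 or 4) while the controller field wants the codes listed in the comment (0, 1, 2). A trivial check of that mapping:

        #include <stdio.h>

        /* dma_slave_buswidth values are byte counts; width / 2 happens to
         * produce the 0b000/0b001/0b010 codes for 1, 2 and 4 bytes. */
        static unsigned int buswidth_to_field(unsigned int bytes)
        {
                return bytes / 2;
        }

        int main(void)
        {
                unsigned int w;

                for (w = 1; w <= 4; w <<= 1)
                        printf("%u byte(s) -> field %u\n", w, buswidth_to_field(w));
                return 0;
        }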
672 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | 681 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { |
673 | ctl_lo.ctlx.tt_fc = 0; | 682 | ctl_lo.ctlx.tt_fc = 0; |
674 | ctl_lo.ctlx.sinc = 0; | 683 | ctl_lo.ctlx.sinc = 0; |
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
746 | BUG_ON(!mids); | 755 | BUG_ON(!mids); |
747 | 756 | ||
748 | if (!midc->dma->pimr_mask) { | 757 | if (!midc->dma->pimr_mask) { |
749 | pr_debug("MDMA: SG list is not supported by this controller\n"); | 758 | /* We can still handle sg list with only one item */ |
750 | return NULL; | 759 | if (sg_len == 1) { |
760 | txd = intel_mid_dma_prep_memcpy(chan, | ||
761 | mids->dma_slave.dst_addr, | ||
762 | mids->dma_slave.src_addr, | ||
763 | sgl->length, | ||
764 | flags); | ||
765 | return txd; | ||
766 | } else { | ||
767 | pr_warn("MDMA: SG list is not supported by this controller\n"); | ||
768 | return NULL; | ||
769 | } | ||
751 | } | 770 | } |
752 | 771 | ||
753 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | 772 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", |
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | |||
758 | pr_err("MDMA: Prep memcpy failed\n"); | 777 | pr_err("MDMA: Prep memcpy failed\n"); |
759 | return NULL; | 778 | return NULL; |
760 | } | 779 | } |
780 | |||
761 | desc = to_intel_mid_dma_desc(txd); | 781 | desc = to_intel_mid_dma_desc(txd); |
762 | desc->dirn = direction; | 782 | desc->dirn = direction; |
763 | ctl_lo.ctl_lo = desc->ctl_lo; | 783 | ctl_lo.ctl_lo = desc->ctl_lo; |
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
1021 | 1041 | ||
1022 | /*DMA Interrupt*/ | 1042 | /*DMA Interrupt*/ |
1023 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | 1043 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); |
1024 | if (!mid) { | ||
1025 | pr_err("ERR_MDMA:null pointer mid\n"); | ||
1026 | return -EINVAL; | ||
1027 | } | ||
1028 | |||
1029 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); | 1044 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); |
1030 | tfr_status &= mid->intr_mask; | 1045 | tfr_status &= mid->intr_mask; |
1031 | if (tfr_status) { | 1046 | if (tfr_status) { |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 161c452923b8..c6b01f535b29 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -1261,7 +1261,7 @@ out: | |||
1261 | return err; | 1261 | return err; |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | #ifdef CONFIG_MD_RAID6_PQ | 1264 | #ifdef CONFIG_RAID6_PQ |
1265 | static int __devinit | 1265 | static int __devinit |
1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | 1266 | iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) |
1267 | { | 1267 | { |
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1584 | 1584 | ||
1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && | 1585 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && |
1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { | 1586 | dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { |
1587 | #ifdef CONFIG_MD_RAID6_PQ | 1587 | #ifdef CONFIG_RAID6_PQ |
1588 | ret = iop_adma_pq_zero_sum_self_test(adev); | 1588 | ret = iop_adma_pq_zero_sum_self_test(adev); |
1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); | 1589 | dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); |
1590 | #else | 1590 | #else |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index c064c89420d0..1c38418ae61f 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
921 | } | 922 | } |
922 | 923 | ||
923 | /* PCI Device ID of DMA device */ | 924 | /* PCI Device ID of DMA device */ |
924 | #define PCI_DEVICE_ID_PCH_DMA_8CH 0x8810 | 925 | #define PCI_VENDOR_ID_ROHM 0x10DB |
925 | #define PCI_DEVICE_ID_PCH_DMA_4CH 0x8815 | 926 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 |
927 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 | ||
928 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 | ||
929 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B | ||
930 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 | ||
926 | 931 | ||
927 | static const struct pci_device_id pch_dma_id_table[] = { | 932 | static const struct pci_device_id pch_dma_id_table[] = { |
928 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 }, | 933 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
929 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 }, | 934 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, |
935 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ | ||
936 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ | ||
937 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ | ||
930 | { 0, }, | 938 | { 0, }, |
931 | }; | 939 | }; |
932 | 940 | ||
@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void) | |||
954 | module_init(pch_dma_init); | 962 | module_init(pch_dma_init); |
955 | module_exit(pch_dma_exit); | 963 | module_exit(pch_dma_exit); |
956 | 964 | ||
957 | MODULE_DESCRIPTION("Topcliff PCH DMA controller driver"); | 965 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " |
966 | "DMA controller driver"); | ||
958 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 967 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
959 | MODULE_LICENSE("GPL v2"); | 968 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index fab68a553205..6e1d46a65d0e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) ST-Ericsson SA 2007-2010 | 2 | * Copyright (C) Ericsson AB 2007-2008 |
3 | * Copyright (C) ST-Ericsson SA 2008-2010 | ||
3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson | 4 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | 5 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 6 | * License terms: GNU General Public License (GPL) version 2 |
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c) | |||
554 | return d; | 555 | return d; |
555 | } | 556 | } |
556 | 557 | ||
557 | /* Support functions for logical channels */ | 558 | static int d40_psize_2_burst_size(bool is_log, int psize) |
559 | { | ||
560 | if (is_log) { | ||
561 | if (psize == STEDMA40_PSIZE_LOG_1) | ||
562 | return 1; | ||
563 | } else { | ||
564 | if (psize == STEDMA40_PSIZE_PHY_1) | ||
565 | return 1; | ||
566 | } | ||
567 | |||
568 | return 2 << psize; | ||
569 | } | ||
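So apart from the special single-transfer encodings, the burst size is simply 2 << psize. A sketch with hypothetical encodings (the real STEDMA40_PSIZE_* constants live in the platform headers):

        #include <stdio.h>

        #define PSIZE_1  0x0   /* hypothetical stand-ins for STEDMA40_PSIZE_* */
        #define PSIZE_4  0x1
        #define PSIZE_8  0x2
        #define PSIZE_16 0x3

        /* Mirrors d40_psize_2_burst_size(): the *_1 encoding is special-
         * cased because 2 << psize can never evaluate to 1. */
        static int psize_to_burst(int psize)
        {
                if (psize == PSIZE_1)
                        return 1;
                return 2 << psize;
        }

        int main(void)
        {
                int p;

                for (p = PSIZE_1; p <= PSIZE_16; p++)
                        printf("psize %#x -> burst %d\n", p, psize_to_burst(p));
                return 0;
        }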
570 | |||
571 | /* | ||
572 | * The DMA only supports transmitting packets up to | ||
573 | * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of | ||
574 | * dma elements required to send the entire sg list | ||
575 | */ | ||
576 | static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) | ||
577 | { | ||
578 | int dmalen; | ||
579 | u32 max_w = max(data_width1, data_width2); | ||
580 | u32 min_w = min(data_width1, data_width2); | ||
581 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
582 | |||
583 | if (seg_max > STEDMA40_MAX_SEG_SIZE) | ||
584 | seg_max -= (1 << max_w); | ||
585 | |||
586 | if (!IS_ALIGNED(size, 1 << max_w)) | ||
587 | return -EINVAL; | ||
588 | |||
589 | if (size <= seg_max) | ||
590 | dmalen = 1; | ||
591 | else { | ||
592 | dmalen = size / seg_max; | ||
593 | if (dmalen * seg_max < size) | ||
594 | dmalen++; | ||
595 | } | ||
596 | return dmalen; | ||
597 | } | ||
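A worked example of the segmentation, assuming STEDMA40_MAX_SEG_SIZE is 0xffff and that the data widths are given as log2 of the element size (as the shifts above suggest):

        #include <stdio.h>

        #define MAX_SEG 0xffffu                 /* assumed STEDMA40_MAX_SEG_SIZE */
        #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

        /* Userspace mirror of d40_size_2_dmalen() */
        static int size_to_dmalen(unsigned int size, unsigned int w1,
                                  unsigned int w2)
        {
                unsigned int max_w = w1 > w2 ? w1 : w2;
                unsigned int min_w = w1 < w2 ? w1 : w2;
                unsigned int seg_max = ALIGN_UP(MAX_SEG << min_w, 1u << max_w);

                if (seg_max > MAX_SEG)
                        seg_max -= 1u << max_w; /* stay within the hw limit */
                if (size & ((1u << max_w) - 1))
                        return -1;              /* -EINVAL in the driver */
                return (size + seg_max - 1) / seg_max;
        }

        int main(void)
        {
                /* 1 MiB with byte-wide (w=0) and word-wide (w=2) sides:
                 * seg_max becomes 0xfffc, so 17 elements are needed */
                printf("dmalen = %d\n", size_to_dmalen(1u << 20, 0, 2));
                return 0;
        }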
598 | |||
599 | static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | ||
600 | u32 data_width1, u32 data_width2) | ||
601 | { | ||
602 | struct scatterlist *sg; | ||
603 | int i; | ||
604 | int len = 0; | ||
605 | int ret; | ||
606 | |||
607 | for_each_sg(sgl, sg, sg_len, i) { | ||
608 | ret = d40_size_2_dmalen(sg_dma_len(sg), | ||
609 | data_width1, data_width2); | ||
610 | if (ret < 0) | ||
611 | return ret; | ||
612 | len += ret; | ||
613 | } | ||
614 | return len; | ||
615 | } | ||
558 | 616 | ||
617 | /* Support functions for logical channels */ | ||
559 | 618 | ||
560 | static int d40_channel_execute_command(struct d40_chan *d40c, | 619 | static int d40_channel_execute_command(struct d40_chan *d40c, |
561 | enum d40_command command) | 620 | enum d40_command command) |
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1241 | res = -EINVAL; | 1300 | res = -EINVAL; |
1242 | } | 1301 | } |
1243 | 1302 | ||
1303 | if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * | ||
1304 | (1 << conf->src_info.data_width) != | ||
1305 | d40_psize_2_burst_size(is_log, conf->dst_info.psize) * | ||
1306 | (1 << conf->dst_info.data_width)) { | ||
1307 | /* | ||
1308 | * The DMAC hardware only supports | ||
1309 | * src (burst x width) == dst (burst x width) | ||
1310 | */ | ||
1311 | |||
1312 | dev_err(&d40c->chan.dev->device, | ||
1313 | "[%s] src (burst x width) != dst (burst x width)\n", | ||
1314 | __func__); | ||
1315 | res = -EINVAL; | ||
1316 | } | ||
1317 | |||
1244 | return res; | 1318 | return res; |
1245 | } | 1319 | } |
1246 | 1320 | ||
@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1638 | if (d40d == NULL) | 1712 | if (d40d == NULL) |
1639 | goto err; | 1713 | goto err; |
1640 | 1714 | ||
1641 | d40d->lli_len = sgl_len; | 1715 | d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, |
1716 | d40c->dma_cfg.src_info.data_width, | ||
1717 | d40c->dma_cfg.dst_info.data_width); | ||
1718 | if (d40d->lli_len < 0) { | ||
1719 | dev_err(&d40c->chan.dev->device, | ||
1720 | "[%s] Unaligned size\n", __func__); | ||
1721 | goto err; | ||
1722 | } | ||
1723 | |||
1642 | d40d->lli_current = 0; | 1724 | d40d->lli_current = 0; |
1643 | d40d->txd.flags = dma_flags; | 1725 | d40d->txd.flags = dma_flags; |
1644 | 1726 | ||
1645 | if (d40c->log_num != D40_PHY_CHAN) { | 1727 | if (d40c->log_num != D40_PHY_CHAN) { |
1646 | 1728 | ||
1647 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1729 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { |
1648 | dev_err(&d40c->chan.dev->device, | 1730 | dev_err(&d40c->chan.dev->device, |
1649 | "[%s] Out of memory\n", __func__); | 1731 | "[%s] Out of memory\n", __func__); |
1650 | goto err; | 1732 | goto err; |
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1654 | sgl_len, | 1736 | sgl_len, |
1655 | d40d->lli_log.src, | 1737 | d40d->lli_log.src, |
1656 | d40c->log_def.lcsp1, | 1738 | d40c->log_def.lcsp1, |
1657 | d40c->dma_cfg.src_info.data_width); | 1739 | d40c->dma_cfg.src_info.data_width, |
1740 | d40c->dma_cfg.dst_info.data_width); | ||
1658 | 1741 | ||
1659 | (void) d40_log_sg_to_lli(sgl_dst, | 1742 | (void) d40_log_sg_to_lli(sgl_dst, |
1660 | sgl_len, | 1743 | sgl_len, |
1661 | d40d->lli_log.dst, | 1744 | d40d->lli_log.dst, |
1662 | d40c->log_def.lcsp3, | 1745 | d40c->log_def.lcsp3, |
1663 | d40c->dma_cfg.dst_info.data_width); | 1746 | d40c->dma_cfg.dst_info.data_width, |
1747 | d40c->dma_cfg.src_info.data_width); | ||
1664 | } else { | 1748 | } else { |
1665 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 1749 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { |
1666 | dev_err(&d40c->chan.dev->device, | 1750 | dev_err(&d40c->chan.dev->device, |
1667 | "[%s] Out of memory\n", __func__); | 1751 | "[%s] Out of memory\n", __func__); |
1668 | goto err; | 1752 | goto err; |
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1675 | virt_to_phys(d40d->lli_phy.src), | 1759 | virt_to_phys(d40d->lli_phy.src), |
1676 | d40c->src_def_cfg, | 1760 | d40c->src_def_cfg, |
1677 | d40c->dma_cfg.src_info.data_width, | 1761 | d40c->dma_cfg.src_info.data_width, |
1762 | d40c->dma_cfg.dst_info.data_width, | ||
1678 | d40c->dma_cfg.src_info.psize); | 1763 | d40c->dma_cfg.src_info.psize); |
1679 | 1764 | ||
1680 | if (res < 0) | 1765 | if (res < 0) |
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
1687 | virt_to_phys(d40d->lli_phy.dst), | 1772 | virt_to_phys(d40d->lli_phy.dst), |
1688 | d40c->dst_def_cfg, | 1773 | d40c->dst_def_cfg, |
1689 | d40c->dma_cfg.dst_info.data_width, | 1774 | d40c->dma_cfg.dst_info.data_width, |
1775 | d40c->dma_cfg.src_info.data_width, | ||
1690 | d40c->dma_cfg.dst_info.psize); | 1776 | d40c->dma_cfg.dst_info.psize); |
1691 | 1777 | ||
1692 | if (res < 0) | 1778 | if (res < 0) |
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1826 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1912 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1827 | chan); | 1913 | chan); |
1828 | unsigned long flags; | 1914 | unsigned long flags; |
1829 | int err = 0; | ||
1830 | 1915 | ||
1831 | if (d40c->phy_chan == NULL) { | 1916 | if (d40c->phy_chan == NULL) { |
1832 | dev_err(&d40c->chan.dev->device, | 1917 | dev_err(&d40c->chan.dev->device, |
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1844 | } | 1929 | } |
1845 | 1930 | ||
1846 | d40d->txd.flags = dma_flags; | 1931 | d40d->txd.flags = dma_flags; |
1932 | d40d->lli_len = d40_size_2_dmalen(size, | ||
1933 | d40c->dma_cfg.src_info.data_width, | ||
1934 | d40c->dma_cfg.dst_info.data_width); | ||
1935 | if (d40d->lli_len < 0) { | ||
1936 | dev_err(&d40c->chan.dev->device, | ||
1937 | "[%s] Unaligned size\n", __func__); | ||
1938 | goto err; | ||
1939 | } | ||
1940 | |||
1847 | 1941 | ||
1848 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 1942 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
1849 | 1943 | ||
@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1851 | 1945 | ||
1852 | if (d40c->log_num != D40_PHY_CHAN) { | 1946 | if (d40c->log_num != D40_PHY_CHAN) { |
1853 | 1947 | ||
1854 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | 1948 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { |
1855 | dev_err(&d40c->chan.dev->device, | 1949 | dev_err(&d40c->chan.dev->device, |
1856 | "[%s] Out of memory\n", __func__); | 1950 | "[%s] Out of memory\n", __func__); |
1857 | goto err; | 1951 | goto err; |
1858 | } | 1952 | } |
1859 | d40d->lli_len = 1; | ||
1860 | d40d->lli_current = 0; | 1953 | d40d->lli_current = 0; |
1861 | 1954 | ||
1862 | d40_log_fill_lli(d40d->lli_log.src, | 1955 | if (d40_log_buf_to_lli(d40d->lli_log.src, |
1863 | src, | 1956 | src, |
1864 | size, | 1957 | size, |
1865 | d40c->log_def.lcsp1, | 1958 | d40c->log_def.lcsp1, |
1866 | d40c->dma_cfg.src_info.data_width, | 1959 | d40c->dma_cfg.src_info.data_width, |
1867 | true); | 1960 | d40c->dma_cfg.dst_info.data_width, |
1961 | true) == NULL) | ||
1962 | goto err; | ||
1868 | 1963 | ||
1869 | d40_log_fill_lli(d40d->lli_log.dst, | 1964 | if (d40_log_buf_to_lli(d40d->lli_log.dst, |
1870 | dst, | 1965 | dst, |
1871 | size, | 1966 | size, |
1872 | d40c->log_def.lcsp3, | 1967 | d40c->log_def.lcsp3, |
1873 | d40c->dma_cfg.dst_info.data_width, | 1968 | d40c->dma_cfg.dst_info.data_width, |
1874 | true); | 1969 | d40c->dma_cfg.src_info.data_width, |
1970 | true) == NULL) | ||
1971 | goto err; | ||
1875 | 1972 | ||
1876 | } else { | 1973 | } else { |
1877 | 1974 | ||
1878 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | 1975 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { |
1879 | dev_err(&d40c->chan.dev->device, | 1976 | dev_err(&d40c->chan.dev->device, |
1880 | "[%s] Out of memory\n", __func__); | 1977 | "[%s] Out of memory\n", __func__); |
1881 | goto err; | 1978 | goto err; |
1882 | } | 1979 | } |
1883 | 1980 | ||
1884 | err = d40_phy_fill_lli(d40d->lli_phy.src, | 1981 | if (d40_phy_buf_to_lli(d40d->lli_phy.src, |
1885 | src, | 1982 | src, |
1886 | size, | 1983 | size, |
1887 | d40c->dma_cfg.src_info.psize, | 1984 | d40c->dma_cfg.src_info.psize, |
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1889 | d40c->src_def_cfg, | 1986 | d40c->src_def_cfg, |
1890 | true, | 1987 | true, |
1891 | d40c->dma_cfg.src_info.data_width, | 1988 | d40c->dma_cfg.src_info.data_width, |
1892 | false); | 1989 | d40c->dma_cfg.dst_info.data_width, |
1893 | if (err) | 1990 | false) == NULL) |
1894 | goto err_fill_lli; | 1991 | goto err; |
1895 | 1992 | ||
1896 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | 1993 | if (d40_phy_buf_to_lli(d40d->lli_phy.dst, |
1897 | dst, | 1994 | dst, |
1898 | size, | 1995 | size, |
1899 | d40c->dma_cfg.dst_info.psize, | 1996 | d40c->dma_cfg.dst_info.psize, |
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1901 | d40c->dst_def_cfg, | 1998 | d40c->dst_def_cfg, |
1902 | true, | 1999 | true, |
1903 | d40c->dma_cfg.dst_info.data_width, | 2000 | d40c->dma_cfg.dst_info.data_width, |
1904 | false); | 2001 | d40c->dma_cfg.src_info.data_width, |
1905 | 2002 | false) == NULL) | |
1906 | if (err) | 2003 | goto err; |
1907 | goto err_fill_lli; | ||
1908 | 2004 | ||
1909 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 2005 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, |
1910 | d40d->lli_pool.size, DMA_TO_DEVICE); | 2006 | d40d->lli_pool.size, DMA_TO_DEVICE); |
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
1913 | spin_unlock_irqrestore(&d40c->lock, flags); | 2009 | spin_unlock_irqrestore(&d40c->lock, flags); |
1914 | return &d40d->txd; | 2010 | return &d40d->txd; |
1915 | 2011 | ||
1916 | err_fill_lli: | ||
1917 | dev_err(&d40c->chan.dev->device, | ||
1918 | "[%s] Failed filling in PHY LLI\n", __func__); | ||
1919 | err: | 2012 | err: |
1920 | if (d40d) | 2013 | if (d40d) |
1921 | d40_desc_free(d40c, d40d); | 2014 | d40_desc_free(d40c, d40d); |
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
1945 | dma_addr_t dev_addr = 0; | 2038 | dma_addr_t dev_addr = 0; |
1946 | int total_size; | 2039 | int total_size; |
1947 | 2040 | ||
1948 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | 2041 | d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, |
2042 | d40c->dma_cfg.src_info.data_width, | ||
2043 | d40c->dma_cfg.dst_info.data_width); | ||
2044 | if (d40d->lli_len < 0) { | ||
2045 | dev_err(&d40c->chan.dev->device, | ||
2046 | "[%s] Unaligned size\n", __func__); | ||
2047 | return -EINVAL; | ||
2048 | } | ||
2049 | |||
2050 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { | ||
1949 | dev_err(&d40c->chan.dev->device, | 2051 | dev_err(&d40c->chan.dev->device, |
1950 | "[%s] Out of memory\n", __func__); | 2052 | "[%s] Out of memory\n", __func__); |
1951 | return -ENOMEM; | 2053 | return -ENOMEM; |
1952 | } | 2054 | } |
1953 | 2055 | ||
1954 | d40d->lli_len = sg_len; | ||
1955 | d40d->lli_current = 0; | 2056 | d40d->lli_current = 0; |
1956 | 2057 | ||
1957 | if (direction == DMA_FROM_DEVICE) | 2058 | if (direction == DMA_FROM_DEVICE) |
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
1993 | dma_addr_t dst_dev_addr; | 2094 | dma_addr_t dst_dev_addr; |
1994 | int res; | 2095 | int res; |
1995 | 2096 | ||
1996 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 2097 | d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, |
2098 | d40c->dma_cfg.src_info.data_width, | ||
2099 | d40c->dma_cfg.dst_info.data_width); | ||
2100 | if (d40d->lli_len < 0) { | ||
2101 | dev_err(&d40c->chan.dev->device, | ||
2102 | "[%s] Unaligned size\n", __func__); | ||
2103 | return -EINVAL; | ||
2104 | } | ||
2105 | |||
2106 | if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { | ||
1997 | dev_err(&d40c->chan.dev->device, | 2107 | dev_err(&d40c->chan.dev->device, |
1998 | "[%s] Out of memory\n", __func__); | 2108 | "[%s] Out of memory\n", __func__); |
1999 | return -ENOMEM; | 2109 | return -ENOMEM; |
2000 | } | 2110 | } |
2001 | 2111 | ||
2002 | d40d->lli_len = sgl_len; | ||
2003 | d40d->lli_current = 0; | 2112 | d40d->lli_current = 0; |
2004 | 2113 | ||
2005 | if (direction == DMA_FROM_DEVICE) { | 2114 | if (direction == DMA_FROM_DEVICE) { |
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
2024 | virt_to_phys(d40d->lli_phy.src), | 2133 | virt_to_phys(d40d->lli_phy.src), |
2025 | d40c->src_def_cfg, | 2134 | d40c->src_def_cfg, |
2026 | d40c->dma_cfg.src_info.data_width, | 2135 | d40c->dma_cfg.src_info.data_width, |
2136 | d40c->dma_cfg.dst_info.data_width, | ||
2027 | d40c->dma_cfg.src_info.psize); | 2137 | d40c->dma_cfg.src_info.psize); |
2028 | if (res < 0) | 2138 | if (res < 0) |
2029 | return res; | 2139 | return res; |
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
2035 | virt_to_phys(d40d->lli_phy.dst), | 2145 | virt_to_phys(d40d->lli_phy.dst), |
2036 | d40c->dst_def_cfg, | 2146 | d40c->dst_def_cfg, |
2037 | d40c->dma_cfg.dst_info.data_width, | 2147 | d40c->dma_cfg.dst_info.data_width, |
2148 | d40c->dma_cfg.src_info.data_width, | ||
2038 | d40c->dma_cfg.dst_info.psize); | 2149 | d40c->dma_cfg.dst_info.psize); |
2039 | if (res < 0) | 2150 | if (res < 0) |
2040 | return res; | 2151 | return res; |
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
2244 | psize = STEDMA40_PSIZE_PHY_8; | 2355 | psize = STEDMA40_PSIZE_PHY_8; |
2245 | else if (config_maxburst >= 4) | 2356 | else if (config_maxburst >= 4) |
2246 | psize = STEDMA40_PSIZE_PHY_4; | 2357 | psize = STEDMA40_PSIZE_PHY_4; |
2358 | else if (config_maxburst >= 2) | ||
2359 | psize = STEDMA40_PSIZE_PHY_2; | ||
2247 | else | 2360 | else |
2248 | psize = STEDMA40_PSIZE_PHY_1; | 2361 | psize = STEDMA40_PSIZE_PHY_1; |
2249 | } | 2362 | } |
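The two added lines close a hole in the maxburst-to-PSIZE mapping for physical channels: a requested maxburst of 2 or 3 previously fell through to PSIZE_PHY_1. A sketch of the mapping's shape after the patch; the enum values here are placeholders (the real STEDMA40_PSIZE_PHY_* constants live in the driver headers), and the >= 8 condition is inferred from the assignment just above the visible hunk:

#include <stdio.h>

/* Placeholder values -- only the shape of the mapping is illustrated. */
enum { PSIZE_PHY_1, PSIZE_PHY_2, PSIZE_PHY_4, PSIZE_PHY_8 };

static int phy_maxburst_to_psize(int maxburst)
{
        if (maxburst >= 8)              /* condition above the visible hunk */
                return PSIZE_PHY_8;
        else if (maxburst >= 4)
                return PSIZE_PHY_4;
        else if (maxburst >= 2)         /* the case this patch adds */
                return PSIZE_PHY_2;
        return PSIZE_PHY_1;
}

int main(void)
{
        /* before the patch, a maxburst of 2 or 3 fell through to PSIZE_PHY_1 */
        printf("maxburst 3 -> psize %d\n", phy_maxburst_to_psize(3));
        return 0;
}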
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index 8557cb88b255..0b096a38322d 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) ST-Ericsson SA 2007-2010 | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
3 | * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson | 3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | */ | 6 | */ |
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
122 | *dst_cfg = dst; | 122 | *dst_cfg = dst; |
123 | } | 123 | } |
124 | 124 | ||
125 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | 125 | static int d40_phy_fill_lli(struct d40_phy_lli *lli, |
126 | dma_addr_t data, | 126 | dma_addr_t data, |
127 | u32 data_size, | 127 | u32 data_size, |
128 | int psize, | 128 | int psize, |
129 | dma_addr_t next_lli, | 129 | dma_addr_t next_lli, |
130 | u32 reg_cfg, | 130 | u32 reg_cfg, |
131 | bool term_int, | 131 | bool term_int, |
132 | u32 data_width, | 132 | u32 data_width, |
133 | bool is_device) | 133 | bool is_device) |
134 | { | 134 | { |
135 | int num_elems; | 135 | int num_elems; |
136 | 136 | ||
@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
139 | else | 139 | else |
140 | num_elems = 2 << psize; | 140 | num_elems = 2 << psize; |
141 | 141 | ||
142 | /* | ||
143 | * Size is 16 bit. data_width is 8, 16, 32 or 64 bit. | ||
144 | * Blocks larger than 64 KiB must be split. | ||
145 | */ | ||
146 | if (data_size > (0xffff << data_width)) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* Must be aligned */ | 142 | /* Must be aligned */ |
150 | if (!IS_ALIGNED(data, 0x1 << data_width)) | 143 | if (!IS_ALIGNED(data, 0x1 << data_width)) |
151 | return -EINVAL; | 144 | return -EINVAL; |
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli, | |||
187 | return 0; | 180 | return 0; |
188 | } | 181 | } |
189 | 182 | ||
183 | static int d40_seg_size(int size, int data_width1, int data_width2) | ||
184 | { | ||
185 | u32 max_w = max(data_width1, data_width2); | ||
186 | u32 min_w = min(data_width1, data_width2); | ||
187 | u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); | ||
188 | |||
189 | if (seg_max > STEDMA40_MAX_SEG_SIZE) | ||
190 | seg_max -= (1 << max_w); | ||
191 | |||
192 | if (size <= seg_max) | ||
193 | return size; | ||
194 | |||
195 | if (size <= 2 * seg_max) | ||
196 | return ALIGN(size / 2, 1 << max_w); | ||
197 | |||
198 | return seg_max; | ||
199 | } | ||
200 | |||
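d40_seg_size() is the per-segment counterpart of d40_size_2_dmalen(): it returns the size of the next segment to emit, and the "size <= 2 * seg_max" branch deliberately splits a two-segment transfer into near-equal aligned halves rather than one maximal and one tiny segment. A user-space sketch under the same assumptions as before (STEDMA40_MAX_SEG_SIZE taken to be 0xFFFF, widths as log2 bytes):

#include <stdio.h>

#define STEDMA40_MAX_SEG_SIZE   0xFFFFu /* assumed element-count limit */
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((unsigned)(a) - 1))
#define MAX(a, b)               ((a) > (b) ? (a) : (b))
#define MIN(a, b)               ((a) < (b) ? (a) : (b))

/* User-space mirror of d40_seg_size() */
static unsigned seg_size(unsigned size, unsigned w1, unsigned w2)
{
        unsigned max_w = MAX(w1, w2), min_w = MIN(w1, w2);
        unsigned seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1u << max_w);

        if (seg_max > STEDMA40_MAX_SEG_SIZE)
                seg_max -= 1u << max_w;
        if (size <= seg_max)
                return size;
        if (size <= 2 * seg_max)
                return ALIGN(size / 2, 1u << max_w);    /* balance both halves */
        return seg_max;
}

int main(void)
{
        /* 16-bit vs 32-bit widths: seg_max is 0x1FFFC, so a 0x30000-byte
         * buffer splits 0x18000 + 0x18000 instead of 0x1FFFC + 0x10004 */
        printf("first segment: 0x%x\n", seg_size(0x30000, 1, 2));
        return 0;
}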
201 | struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, | ||
202 | dma_addr_t addr, | ||
203 | u32 size, | ||
204 | int psize, | ||
205 | dma_addr_t lli_phys, | ||
206 | u32 reg_cfg, | ||
207 | bool term_int, | ||
208 | u32 data_width1, | ||
209 | u32 data_width2, | ||
210 | bool is_device) | ||
211 | { | ||
212 | int err; | ||
213 | dma_addr_t next = lli_phys; | ||
214 | int size_rest = size; | ||
215 | int size_seg = 0; | ||
216 | |||
217 | do { | ||
218 | size_seg = d40_seg_size(size_rest, data_width1, data_width2); | ||
219 | size_rest -= size_seg; | ||
220 | |||
221 | if (term_int && size_rest == 0) | ||
222 | next = 0; | ||
223 | else | ||
224 | next = ALIGN(next + sizeof(struct d40_phy_lli), | ||
225 | D40_LLI_ALIGN); | ||
226 | |||
227 | err = d40_phy_fill_lli(lli, | ||
228 | addr, | ||
229 | size_seg, | ||
230 | psize, | ||
231 | next, | ||
232 | reg_cfg, | ||
233 | !next, | ||
234 | data_width1, | ||
235 | is_device); | ||
236 | |||
237 | if (err) | ||
238 | goto err; | ||
239 | |||
240 | lli++; | ||
241 | if (!is_device) | ||
242 | addr += size_seg; | ||
243 | } while (size_rest); | ||
244 | |||
245 | return lli; | ||
246 | |||
247 | err: | ||
248 | return NULL; | ||
249 | } | ||
250 | |||
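d40_phy_buf_to_lli() emits one hardware LLI per segment and chains them: each entry's link field holds the D40_LLI_ALIGN-aligned physical address of the next entry, and in the terminal-interrupt case the last entry gets a zero link so the DMAC stops there. A minimal sketch of that link arithmetic, assuming D40_LLI_ALIGN is 16 and using a stand-in struct for d40_phy_lli:

#include <stdint.h>
#include <stdio.h>

#define D40_LLI_ALIGN   16u     /* assumed alignment of chained LLIs */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

struct phy_lli { uint32_t reg[4]; };    /* stand-in for struct d40_phy_lli */

int main(void)
{
        uint32_t lli_phys = 0x40001000, next = lli_phys;
        unsigned nseg = 3;

        for (unsigned i = 0; i < nseg; i++) {
                /* the last segment gets a NULL link so the DMAC stops */
                next = (i == nseg - 1) ? 0 :
                       ALIGN(next + sizeof(struct phy_lli), D40_LLI_ALIGN);
                printf("lli[%u] -> 0x%08x\n", i, (unsigned)next);
        }
        return 0;
}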
190 | int d40_phy_sg_to_lli(struct scatterlist *sg, | 251 | int d40_phy_sg_to_lli(struct scatterlist *sg, |
191 | int sg_len, | 252 | int sg_len, |
192 | dma_addr_t target, | 253 | dma_addr_t target, |
193 | struct d40_phy_lli *lli, | 254 | struct d40_phy_lli *lli_sg, |
194 | dma_addr_t lli_phys, | 255 | dma_addr_t lli_phys, |
195 | u32 reg_cfg, | 256 | u32 reg_cfg, |
196 | u32 data_width, | 257 | u32 data_width1, |
258 | u32 data_width2, | ||
197 | int psize) | 259 | int psize) |
198 | { | 260 | { |
199 | int total_size = 0; | 261 | int total_size = 0; |
200 | int i; | 262 | int i; |
201 | struct scatterlist *current_sg = sg; | 263 | struct scatterlist *current_sg = sg; |
202 | dma_addr_t next_lli_phys; | ||
203 | dma_addr_t dst; | 264 | dma_addr_t dst; |
204 | int err = 0; | 265 | struct d40_phy_lli *lli = lli_sg; |
266 | dma_addr_t l_phys = lli_phys; | ||
205 | 267 | ||
206 | for_each_sg(sg, current_sg, sg_len, i) { | 268 | for_each_sg(sg, current_sg, sg_len, i) { |
207 | 269 | ||
208 | total_size += sg_dma_len(current_sg); | 270 | total_size += sg_dma_len(current_sg); |
209 | 271 | ||
210 | /* If this scatter list entry is the last one, no next link */ | ||
211 | if (sg_len - 1 == i) | ||
212 | next_lli_phys = 0; | ||
213 | else | ||
214 | next_lli_phys = ALIGN(lli_phys + (i + 1) * | ||
215 | sizeof(struct d40_phy_lli), | ||
216 | D40_LLI_ALIGN); | ||
217 | |||
218 | if (target) | 272 | if (target) |
219 | dst = target; | 273 | dst = target; |
220 | else | 274 | else |
221 | dst = sg_phys(current_sg); | 275 | dst = sg_phys(current_sg); |
222 | 276 | ||
223 | err = d40_phy_fill_lli(&lli[i], | 277 | l_phys = ALIGN(lli_phys + (lli - lli_sg) * |
224 | dst, | 278 | sizeof(struct d40_phy_lli), D40_LLI_ALIGN); |
225 | sg_dma_len(current_sg), | 279 | |
226 | psize, | 280 | lli = d40_phy_buf_to_lli(lli, |
227 | next_lli_phys, | 281 | dst, |
228 | reg_cfg, | 282 | sg_dma_len(current_sg), |
229 | !next_lli_phys, | 283 | psize, |
230 | data_width, | 284 | l_phys, |
231 | target == dst); | 285 | reg_cfg, |
232 | if (err) | 286 | sg_len - 1 == i, |
233 | goto err; | 287 | data_width1, |
288 | data_width2, | ||
289 | target == dst); | ||
290 | if (lli == NULL) | ||
291 | return -EINVAL; | ||
234 | } | 292 | } |
235 | 293 | ||
236 | return total_size; | 294 | return total_size; |
237 | err: | ||
238 | return err; | ||
239 | } | 295 | } |
240 | 296 | ||
241 | 297 | ||
@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | |||
315 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); | 371 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); |
316 | } | 372 | } |
317 | 373 | ||
318 | void d40_log_fill_lli(struct d40_log_lli *lli, | 374 | static void d40_log_fill_lli(struct d40_log_lli *lli, |
319 | dma_addr_t data, u32 data_size, | 375 | dma_addr_t data, u32 data_size, |
320 | u32 reg_cfg, | 376 | u32 reg_cfg, |
321 | u32 data_width, | 377 | u32 data_width, |
322 | bool addr_inc) | 378 | bool addr_inc) |
323 | { | 379 | { |
324 | lli->lcsp13 = reg_cfg; | 380 | lli->lcsp13 = reg_cfg; |
325 | 381 | ||
326 | /* The number of elements to transfer */ | 382 | /* The number of elements to transfer */ |
327 | lli->lcsp02 = ((data_size >> data_width) << | 383 | lli->lcsp02 = ((data_size >> data_width) << |
328 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; | 384 | D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; |
385 | |||
386 | BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); | ||
387 | |||
329 | /* 16 LSBs address of the current element */ | 388 | /* 16 LSBs address of the current element */ |
330 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; | 389 | lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; |
331 | /* 16 MSBs address of the current element */ | 390 | /* 16 MSBs address of the current element */ |
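The new BUG_ON documents the constraint that motivates all the splitting above: the logical-channel element count lives in a 16-bit field of lcsp02, alongside the 16 address LSBs. A sketch of the packing, with the bit positions assumed from the D40_MEM_LCSP0_* names (element count in the high half, source pointer LSBs in the low half):

#include <stdint.h>
#include <stdio.h>

#define ECNT_POS        16              /* assumed D40_MEM_LCSP0_ECNT_POS */
#define ECNT_MASK       0xFFFF0000u     /* assumed D40_MEM_LCSP0_ECNT_MASK */
#define SPTR_MASK       0x0000FFFFu     /* assumed D40_MEM_LCSP0_SPTR_MASK */

int main(void)
{
        uint32_t addr = 0x20008004, size = 0x200, width = 2;    /* 32-bit */
        uint32_t lcsp02 = (((size >> width) << ECNT_POS) & ECNT_MASK)
                        | (addr & SPTR_MASK);

        /* the count must fit in 16 bits -- hence STEDMA40_MAX_SEG_SIZE */
        printf("ECNT=%u SPTR=0x%04x\n",
               (unsigned)(lcsp02 >> ECNT_POS),
               (unsigned)(lcsp02 & SPTR_MASK));
        return 0;
}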
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg, | |||
348 | int total_size = 0; | 407 | int total_size = 0; |
349 | struct scatterlist *current_sg = sg; | 408 | struct scatterlist *current_sg = sg; |
350 | int i; | 409 | int i; |
410 | struct d40_log_lli *lli_src = lli->src; | ||
411 | struct d40_log_lli *lli_dst = lli->dst; | ||
351 | 412 | ||
352 | for_each_sg(sg, current_sg, sg_len, i) { | 413 | for_each_sg(sg, current_sg, sg_len, i) { |
353 | total_size += sg_dma_len(current_sg); | 414 | total_size += sg_dma_len(current_sg); |
354 | 415 | ||
355 | if (direction == DMA_TO_DEVICE) { | 416 | if (direction == DMA_TO_DEVICE) { |
356 | d40_log_fill_lli(&lli->src[i], | 417 | lli_src = |
357 | sg_phys(current_sg), | 418 | d40_log_buf_to_lli(lli_src, |
358 | sg_dma_len(current_sg), | 419 | sg_phys(current_sg), |
359 | lcsp->lcsp1, src_data_width, | 420 | sg_dma_len(current_sg), |
360 | true); | 421 | lcsp->lcsp1, src_data_width, |
361 | d40_log_fill_lli(&lli->dst[i], | 422 | dst_data_width, |
362 | dev_addr, | 423 | true); |
363 | sg_dma_len(current_sg), | 424 | lli_dst = |
364 | lcsp->lcsp3, dst_data_width, | 425 | d40_log_buf_to_lli(lli_dst, |
365 | false); | 426 | dev_addr, |
427 | sg_dma_len(current_sg), | ||
428 | lcsp->lcsp3, dst_data_width, | ||
429 | src_data_width, | ||
430 | false); | ||
366 | } else { | 431 | } else { |
367 | d40_log_fill_lli(&lli->dst[i], | 432 | lli_dst = |
368 | sg_phys(current_sg), | 433 | d40_log_buf_to_lli(lli_dst, |
369 | sg_dma_len(current_sg), | 434 | sg_phys(current_sg), |
370 | lcsp->lcsp3, dst_data_width, | 435 | sg_dma_len(current_sg), |
371 | true); | 436 | lcsp->lcsp3, dst_data_width, |
372 | d40_log_fill_lli(&lli->src[i], | 437 | src_data_width, |
373 | dev_addr, | 438 | true); |
374 | sg_dma_len(current_sg), | 439 | lli_src = |
375 | lcsp->lcsp1, src_data_width, | 440 | d40_log_buf_to_lli(lli_src, |
376 | false); | 441 | dev_addr, |
442 | sg_dma_len(current_sg), | ||
443 | lcsp->lcsp1, src_data_width, | ||
444 | dst_data_width, | ||
445 | false); | ||
377 | } | 446 | } |
378 | } | 447 | } |
379 | return total_size; | 448 | return total_size; |
380 | } | 449 | } |
381 | 450 | ||
451 | struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, | ||
452 | dma_addr_t addr, | ||
453 | int size, | ||
454 | u32 lcsp13, /* src or dst */ | ||
455 | u32 data_width1, | ||
456 | u32 data_width2, | ||
457 | bool addr_inc) | ||
458 | { | ||
459 | struct d40_log_lli *lli = lli_sg; | ||
460 | int size_rest = size; | ||
461 | int size_seg = 0; | ||
462 | |||
463 | do { | ||
464 | size_seg = d40_seg_size(size_rest, data_width1, data_width2); | ||
465 | size_rest -= size_seg; | ||
466 | |||
467 | d40_log_fill_lli(lli, | ||
468 | addr, | ||
469 | size_seg, | ||
470 | lcsp13, data_width1, | ||
471 | addr_inc); | ||
472 | if (addr_inc) | ||
473 | addr += size_seg; | ||
474 | lli++; | ||
475 | } while (size_rest); | ||
476 | |||
477 | return lli; | ||
478 | } | ||
479 | |||
382 | int d40_log_sg_to_lli(struct scatterlist *sg, | 480 | int d40_log_sg_to_lli(struct scatterlist *sg, |
383 | int sg_len, | 481 | int sg_len, |
384 | struct d40_log_lli *lli_sg, | 482 | struct d40_log_lli *lli_sg, |
385 | u32 lcsp13, /* src or dst */ | 483 | u32 lcsp13, /* src or dst */ |
386 | u32 data_width) | 484 | u32 data_width1, u32 data_width2) |
387 | { | 485 | { |
388 | int total_size = 0; | 486 | int total_size = 0; |
389 | struct scatterlist *current_sg = sg; | 487 | struct scatterlist *current_sg = sg; |
390 | int i; | 488 | int i; |
489 | struct d40_log_lli *lli = lli_sg; | ||
391 | 490 | ||
392 | for_each_sg(sg, current_sg, sg_len, i) { | 491 | for_each_sg(sg, current_sg, sg_len, i) { |
393 | total_size += sg_dma_len(current_sg); | 492 | total_size += sg_dma_len(current_sg); |
394 | 493 | lli = d40_log_buf_to_lli(lli, | |
395 | d40_log_fill_lli(&lli_sg[i], | 494 | sg_phys(current_sg), |
396 | sg_phys(current_sg), | 495 | sg_dma_len(current_sg), |
397 | sg_dma_len(current_sg), | 496 | lcsp13, |
398 | lcsp13, data_width, | 497 | data_width1, data_width2, true); |
399 | true); | ||
400 | } | 498 | } |
401 | return total_size; | 499 | return total_size; |
402 | } | 500 | } |
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9e419b907544..9cc43495bea2 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
292 | struct d40_phy_lli *lli, | 292 | struct d40_phy_lli *lli, |
293 | dma_addr_t lli_phys, | 293 | dma_addr_t lli_phys, |
294 | u32 reg_cfg, | 294 | u32 reg_cfg, |
295 | u32 data_width, | 295 | u32 data_width1, |
296 | u32 data_width2, | ||
296 | int psize); | 297 | int psize); |
297 | 298 | ||
298 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | 299 | struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, |
299 | dma_addr_t data, | 300 | dma_addr_t data, |
300 | u32 data_size, | 301 | u32 data_size, |
301 | int psize, | 302 | int psize, |
302 | dma_addr_t next_lli, | 303 | dma_addr_t next_lli, |
303 | u32 reg_cfg, | 304 | u32 reg_cfg, |
304 | bool term_int, | 305 | bool term_int, |
305 | u32 data_width, | 306 | u32 data_width1, |
306 | bool is_device); | 307 | u32 data_width2, |
308 | bool is_device); | ||
307 | 309 | ||
308 | void d40_phy_lli_write(void __iomem *virtbase, | 310 | void d40_phy_lli_write(void __iomem *virtbase, |
309 | u32 phy_chan_num, | 311 | u32 phy_chan_num, |
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase, | |||
312 | 314 | ||
313 | /* Logical channels */ | 315 | /* Logical channels */ |
314 | 316 | ||
315 | void d40_log_fill_lli(struct d40_log_lli *lli, | 317 | struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, |
316 | dma_addr_t data, | 318 | dma_addr_t addr, |
317 | u32 data_size, | 319 | int size, |
318 | u32 reg_cfg, | 320 | u32 lcsp13, /* src or dst */ |
319 | u32 data_width, | 321 | u32 data_width1, u32 data_width2, |
320 | bool addr_inc); | 322 | bool addr_inc); |
321 | 323 | ||
322 | int d40_log_sg_to_dev(struct scatterlist *sg, | 324 | int d40_log_sg_to_dev(struct scatterlist *sg, |
323 | int sg_len, | 325 | int sg_len, |
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg, | |||
332 | int sg_len, | 334 | int sg_len, |
333 | struct d40_log_lli *lli_sg, | 335 | struct d40_log_lli *lli_sg, |
334 | u32 lcsp13, /* src or dst */ | 336 | u32 lcsp13, /* src or dst */ |
335 | u32 data_width); | 337 | u32 data_width1, u32 data_width2); |
336 | 338 | ||
337 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | 339 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, |
338 | struct d40_log_lli *lli_dst, | 340 | struct d40_log_lli *lli_dst, |