path: root/arch/blackfin/kernel/bfin_dma_5xx.c
Diffstat (limited to 'arch/blackfin/kernel/bfin_dma_5xx.c')
-rw-r--r--	arch/blackfin/kernel/bfin_dma_5xx.c	81
1 files changed, 81 insertions, 0 deletions
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8531693fb48d..704419e4da20 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -232,6 +232,87 @@ void blackfin_dma_resume(void)
 void __init blackfin_dma_early_init(void)
 {
 	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+}
+
+void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	struct dma_register *dst_ch, *src_ch;
+
+	/* We assume that everything is 4 byte aligned, so include
+	 * a basic sanity check
+	 */
+	BUG_ON(dst % 4);
+	BUG_ON(src % 4);
+	BUG_ON(size % 4);
+
+	/* Force a sync in case a previous config reset on this channel
+	 * occurred.  This is needed so subsequent writes to DMA registers
+	 * are not spuriously lost/corrupted.
+	 */
+	__builtin_bfin_ssync();
+
+	src_ch = 0;
+	/* Find an available memDMA channel */
+	while (1) {
+		if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) {
+			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
+		} else {
+			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
+		}
+
+		if (!bfin_read16(&src_ch->cfg)) {
+			break;
+		} else {
+			if (bfin_read16(&src_ch->irq_status) & DMA_DONE)
+				bfin_write16(&src_ch->cfg, 0);
+		}
+
+	}
+
+	/* Destination */
+	bfin_write32(&dst_ch->start_addr, dst);
+	bfin_write16(&dst_ch->x_count, size >> 2);
+	bfin_write16(&dst_ch->x_modify, 1 << 2);
+	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Source */
+	bfin_write32(&src_ch->start_addr, src);
+	bfin_write16(&src_ch->x_count, size >> 2);
+	bfin_write16(&src_ch->x_modify, 1 << 2);
+	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Enable */
+	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
+	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
+
+	/* Since we are atomic now, don't use the workaround ssync */
+	__builtin_bfin_ssync();
+}
+
+void __init early_dma_memcpy_done(void)
+{
+	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
+	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
+		continue;
+
+	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	/*
+	 * Now that DMA is done, we would normally flush cache, but
+	 * i/d cache isn't running this early, so we don't bother,
+	 * and just clear out the DMA channel for next time
+	 */
+	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+	bfin_write_MDMA_D0_CONFIG(0);
+	bfin_write_MDMA_D1_CONFIG(0);
+
+	__builtin_bfin_ssync();
+}
 
 /**
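
For orientation, here is a hedged sketch of how the two helpers added by this patch are meant to be paired by an early-boot caller: queue one or more copies with early_dma_memcpy(), then call early_dma_memcpy_done() once before the copied regions are used. Only early_dma_memcpy() and early_dma_memcpy_done() come from this patch; the relocate_l1_example() wrapper and the linker symbols are hypothetical placeholders, not taken from the kernel sources.

#include <linux/init.h>
#include <linux/types.h>

/* Prototypes for the helpers introduced by this patch; in the tree they
 * would live in an arch header, named here only for the sketch.
 */
extern void early_dma_memcpy(void *pdst, const void *psrc, size_t size);
extern void early_dma_memcpy_done(void);

/* Hypothetical linker-script symbols standing in for an L1 region's
 * run address, load address, and end of the run image.
 */
extern char _l1_text_run[], _l1_text_load[], _l1_text_end[];

static void __init relocate_l1_example(void)
{
	/* Queue the copy; the helper picks whichever MDMA stream is free. */
	early_dma_memcpy(_l1_text_run, _l1_text_load,
			 _l1_text_end - _l1_text_run);

	/* Further early_dma_memcpy() calls could be queued here. */

	/* Spin until both MDMA streams report DMA_DONE, then reset them. */
	early_dma_memcpy_done();
}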