author     Mike Frysinger <vapier.adi@gmail.com>   2009-01-07 10:14:39 -0500
committer  Bryan Wu <cooloney@kernel.org>          2009-01-07 10:14:39 -0500
commit     dd3dd384df7f9f77fba6875a606e5a663510cd1d (patch)
tree       4838d9fd78192fedc2ba8c6d061b8c5bf3b481d9
parent     49946e7329fa38d79aed1a9ef4a64c320ada305d (diff)
Blackfin arch: rewrite dma_memcpy() and dma in/out functions
 - unify all dma in/out functions (takes ~35 lines of code now)
 - unify dma_memcpy with dma in/out functions (1 place that touches MDMA0 registers)
 - add support for 32bit transfers
 - cleanup dma_memcpy code to be much more readable
 - irqs are disabled only while programming MDMA registers rather than the entire transaction

Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
-rw-r--r--  arch/blackfin/include/asm/dma.h        1
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c  519
-rw-r--r--  arch/blackfin/kernel/setup.c           2
3 files changed, 156 insertions, 366 deletions
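
For illustration, a minimal sketch of how callers are expected to use the rewritten helpers; the buffer sizes and the peripheral MMR address below are made up for this example and are not part of the commit:

	#include <linux/kernel.h>
	#include <asm/dma.h>

	static u16 src_buf[64], dst_buf[64];

	static void dma_helpers_example(void)
	{
		/* memory-to-memory copy; the word size (8/16/32-bit) is now
		 * picked automatically from the alignment of dst/src/size */
		dma_memcpy(dst_buf, src_buf, sizeof(src_buf));

		/* stream 16-bit words from memory to a fixed peripheral register
		 * (0xFFC00400 is a hypothetical MMR address) */
		dma_outsw(0xFFC00400, src_buf, ARRAY_SIZE(src_buf));
	}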
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index d059b2de162..ca8252ab7a1 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -178,6 +178,7 @@ void dma_enable_irq(unsigned int channel);
 void clear_dma_irqstat(unsigned int channel);
 void *dma_memcpy(void *dest, const void *src, size_t count);
 void *safe_dma_memcpy(void *dest, const void *src, size_t count);
+void blackfin_dma_early_init(void);
 
 extern int channel2irq(unsigned int channel);
 extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index bafb6aea0bc..dff979bf854 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -1,44 +1,24 @@
 /*
- * File: arch/blackfin/kernel/bfin_dma_5xx.c
- * Based on:
- * Author:
- *
- * Created:
- * Description: This file contains the simple DMA Implementation for Blackfin
- *
- * Modified:
- *             Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * bfin_dma_5xx.c - Blackfin DMA implementation
+ *
+ * Copyright 2004-2006 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/param.h>
 #include <linux/proc_fs.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
+#include <linux/spinlock.h>
 
 #include <asm/blackfin.h>
-#include <asm/dma.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
 
 /**************************************************************************
  * Global Variables
@@ -82,12 +62,11 @@ static int __init blackfin_dma_init(void)
 arch_initcall(blackfin_dma_init);
 
 #ifdef CONFIG_PROC_FS
-
 static int proc_dma_show(struct seq_file *m, void *v)
 {
 	int i;
 
-	for (i = 0 ; i < MAX_DMA_CHANNELS; ++i)
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
 		if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
 			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
 
@@ -438,385 +417,193 @@ void blackfin_dma_resume(void)
 }
 #endif
 
-static void *__dma_memcpy(void *dest, const void *src, size_t size)
+/**
+ * blackfin_dma_early_init - minimal DMA init
+ *
+ * Setup a few DMA registers so we can safely do DMA transfers early on in
+ * the kernel booting process.  Really this just means using dma_memcpy().
+ */
+void __init blackfin_dma_early_init(void)
 {
-	int direction;	/* 1 - address decrease, 0 - address increase */
-	int flag_align;	/* 1 - address aligned, 0 - address unaligned */
-	int flag_2D;	/* 1 - 2D DMA needed, 0 - 1D DMA needed */
-	unsigned long flags;
-
-	if (size <= 0)
-		return NULL;
-
-	local_irq_save(flags);
-
-	if ((unsigned long)src < memory_end)
-		blackfin_dcache_flush_range((unsigned int)src,
-					    (unsigned int)(src + size));
-
-	if ((unsigned long)dest < memory_end)
-		blackfin_dcache_invalidate_range((unsigned int)dest,
-						 (unsigned int)(dest + size));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	if ((unsigned long)src < (unsigned long)dest)
-		direction = 1;
-	else
-		direction = 0;
-
-	if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0)
-	    && ((size % 2) == 0))
-		flag_align = 1;
-	else
-		flag_align = 0;
-
-	if (size > 0x10000)	/* size > 64K */
-		flag_2D = 1;
-	else
-		flag_2D = 0;
-
-	/* Setup destination and source start address */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 2);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 2);
-		} else {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 1);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 1);
-		}
-	} else {
-		bfin_write_MDMA_D0_START_ADDR(dest);
-		bfin_write_MDMA_S0_START_ADDR(src);
-	}
-
-	/* Setup destination and source xcount */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(1024 / 2);
-			bfin_write_MDMA_S0_X_COUNT(1024 / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(1024);
-			bfin_write_MDMA_S0_X_COUNT(1024);
-		}
-		bfin_write_MDMA_D0_Y_COUNT(size >> 10);
-		bfin_write_MDMA_S0_Y_COUNT(size >> 10);
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(size / 2);
-			bfin_write_MDMA_S0_X_COUNT(size / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(size);
-			bfin_write_MDMA_S0_X_COUNT(size);
-		}
-	}
-
-	/* Setup destination and source xmodify and ymodify */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(-2);
-			bfin_write_MDMA_S0_X_MODIFY(-2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-2);
-				bfin_write_MDMA_S0_Y_MODIFY(-2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(-1);
-			bfin_write_MDMA_S0_X_MODIFY(-1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-1);
-				bfin_write_MDMA_S0_Y_MODIFY(-1);
-			}
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(2);
-			bfin_write_MDMA_S0_X_MODIFY(2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(2);
-				bfin_write_MDMA_S0_Y_MODIFY(2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(1);
-			bfin_write_MDMA_S0_X_MODIFY(1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(1);
-				bfin_write_MDMA_S0_Y_MODIFY(1);
-			}
-		}
-	}
-
-	/* Enable source DMA */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D);
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN);
-		}
-	}
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
-		;
-
-	bfin_write_MDMA_D0_IRQ_STATUS(bfin_read_MDMA_D0_IRQ_STATUS() |
-				      (DMA_DONE | DMA_ERR));
-
 	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-
-	local_irq_restore(flags);
-
-	return dest;
 }
 
-void *dma_memcpy(void *dest, const void *src, size_t size)
-{
-	size_t bulk;
-	size_t rest;
-	void * addr;
-
-	bulk = (size >> 16) << 16;
-	rest = size - bulk;
-	if (bulk)
-		__dma_memcpy(dest, src, bulk);
-	__dma_memcpy(dest+bulk, src+bulk, rest);
-	return dest;
-}
-EXPORT_SYMBOL(dma_memcpy);
-
 /**
- * safe_dma_memcpy - DMA memcpy w/argument checking
+ * __dma_memcpy - program the MDMA registers
  *
- * Verify arguments are safe before heading to dma_memcpy().
+ * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
+ * while programming registers so that everything is fully configured.  Wait
+ * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
+ * check will make sure we don't clobber any existing transfer.
  */
-void *safe_dma_memcpy(void *dest, const void *src, size_t size)
-{
-	if (!access_ok(VERIFY_WRITE, dst, size))
-		return NULL;
-	if (!access_ok(VERIFY_READ, src, size))
-		return NULL;
-	return dma_memcpy(dst, src, size);
-}
-EXPORT_SYMBOL(safe_dma_memcpy);
-
-void dma_outsb(unsigned long addr, const void *buf, unsigned short len)
+static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
 {
+	static DEFINE_SPINLOCK(mdma_lock);
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&mdma_lock, flags);
 
-	blackfin_dcache_flush_range((unsigned int)buf,
-				    (unsigned int)(buf) + len);
+	if (bfin_read_MDMA_S0_CONFIG())
+		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+			continue;
+
+	if (conf & DMA2D) {
+		/* For larger bit sizes, we've already divided down cnt so it
+		 * is no longer a multiple of 64k.  So we have to break down
+		 * the limit here so it is a multiple of the incoming size.
+		 * There is no limitation here in terms of total size other
+		 * than the hardware though as the bits lost in the shift are
+		 * made up by MODIFY (== we can hit the whole address space).
+		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
+		 */
+		u32 shift = abs(dmod) >> 1;
+		size_t ycnt = cnt >> (16 - shift);
+		cnt = 1 << (16 - shift);
+		bfin_write_MDMA_D0_Y_COUNT(ycnt);
+		bfin_write_MDMA_S0_Y_COUNT(ycnt);
+		bfin_write_MDMA_D0_Y_MODIFY(dmod);
+		bfin_write_MDMA_S0_Y_MODIFY(smod);
+	}
 
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
+	bfin_write_MDMA_D0_START_ADDR(daddr);
+	bfin_write_MDMA_D0_X_COUNT(cnt);
+	bfin_write_MDMA_D0_X_MODIFY(dmod);
 	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(1);
+	bfin_write_MDMA_S0_START_ADDR(saddr);
+	bfin_write_MDMA_S0_X_COUNT(cnt);
+	bfin_write_MDMA_S0_X_MODIFY(smod);
 	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
+	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
+	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
 
 	SSYNC();
 
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
+	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+		if (bfin_read_MDMA_S0_CONFIG())
+			continue;
+		else
+			return;
 
 	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
 	bfin_write_MDMA_S0_CONFIG(0);
 	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
 }
-EXPORT_SYMBOL(dma_outsb);
 
-
-void dma_insb(unsigned long addr, void *buf, unsigned short len)
+/**
+ * _dma_memcpy - translate C memcpy settings into MDMA settings
+ *
+ * Handle all the high level steps before we touch the MDMA registers.  So
+ * handle caching, tweaking of sizes, and formatting of addresses.
+ */
+static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
 {
-	unsigned long flags;
+	u32 conf, shift;
+	s16 mod;
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
 
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			(unsigned int)(buf) + len);
+	if (size == 0)
+		return NULL;
 
-	local_irq_save(flags);
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(1);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	if (bfin_addr_dcachable(src))
+		blackfin_dcache_flush_range(src, src + size);
 
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	if (bfin_addr_dcachable(dst))
+		blackfin_dcache_invalidate_range(dst, dst + size);
 
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
-
-	SSYNC();
+	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
+		conf = WDSIZE_32;
+		shift = 2;
+	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
+		conf = WDSIZE_16;
+		shift = 1;
+	} else {
+		conf = WDSIZE_8;
+		shift = 0;
+	}
 
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
+	/* If the two memory regions have a chance of overlapping, make
+	 * sure the memcpy still works as expected.  Do this by having the
+	 * copy run backwards instead.
+	 */
+	mod = 1 << shift;
+	if (src < dst) {
+		mod *= -1;
+		dst += size + mod;
+		src += size + mod;
+	}
+	size >>= shift;
 
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	if (size > 0x10000)
+		conf |= DMA2D;
 
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
+	__dma_memcpy(dst, mod, src, mod, size, conf);
 
+	return pdst;
 }
-EXPORT_SYMBOL(dma_insb);
 
-void dma_outsw(unsigned long addr, const void *buf, unsigned short len)
+/**
+ * dma_memcpy - DMA memcpy under mutex lock
+ *
+ * Do not check arguments before starting the DMA memcpy.  Break the transfer
+ * up into two pieces.  The first transfer is in multiples of 64k and the
+ * second transfer is the piece smaller than 64k.
+ */
+void *dma_memcpy(void *dst, const void *src, size_t size)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-			(unsigned int)(buf) + len * sizeof(short));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(2);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	size_t bulk, rest;
+	bulk = size & ~0xffff;
+	rest = size - bulk;
+	if (bulk)
+		_dma_memcpy(dst, src, bulk);
+	_dma_memcpy(dst + bulk, src + bulk, rest);
+	return dst;
 }
-EXPORT_SYMBOL(dma_outsw);
+EXPORT_SYMBOL(dma_memcpy);
 
-void dma_insw(unsigned long addr, void *buf, unsigned short len)
+/**
+ * safe_dma_memcpy - DMA memcpy w/argument checking
+ *
+ * Verify arguments are safe before heading to dma_memcpy().
+ */
+void *safe_dma_memcpy(void *dst, const void *src, size_t size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			(unsigned int)(buf) + len * sizeof(short));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(2);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	if (!access_ok(VERIFY_WRITE, dst, size))
+		return NULL;
+	if (!access_ok(VERIFY_READ, src, size))
+		return NULL;
+	return dma_memcpy(dst, src, size);
 }
-EXPORT_SYMBOL(dma_insw);
+EXPORT_SYMBOL(safe_dma_memcpy);
 
-void dma_outsl(unsigned long addr, const void *buf, unsigned short len)
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
+		     u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-			(unsigned int)(buf) + len * sizeof(long));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(4);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_flush_range(buf, buf + len * size);
+	__dma_memcpy(addr, 0, buf, size, len, dma_size);
 }
-EXPORT_SYMBOL(dma_outsl);
 
-void dma_insl(unsigned long addr, void *buf, unsigned short len)
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
+		    u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-			(unsigned int)(buf) + len * sizeof(long));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(4);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_invalidate_range(buf, buf + len * size);
+	__dma_memcpy(buf, size, addr, 0, len, dma_size);
 }
-EXPORT_SYMBOL(dma_insl);
+
+#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
+{ \
+	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
+} \
+EXPORT_SYMBOL(dma_##io##s##bwl)
+MAKE_DMA_IO(out, b, 1,  8, const);
+MAKE_DMA_IO(in,  b, 1,  8, );
+MAKE_DMA_IO(out, w, 2, 16, const);
+MAKE_DMA_IO(in,  w, 2, 16, );
+MAKE_DMA_IO(out, l, 4, 32, const);
+MAKE_DMA_IO(in,  l, 4, 32, );
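
For reference, the MAKE_DMA_IO() instantiations above generate the six exported wrappers; expanding one of them by hand gives roughly the following (manual expansion for illustration, not text from the commit):

	void dma_outsw(unsigned long addr, const void *buf, unsigned short len)
	{
		/* 2-byte elements, 16-bit MDMA word size */
		_dma_out(addr, (unsigned long)buf, len, 2, WDSIZE_16);
	}
	EXPORT_SYMBOL(dma_outsw);
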
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index b147ed90cad..56b8b4cff99 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -154,6 +154,8 @@ void __init bfin_relocate_l1_mem(void)
 	unsigned long l1_data_b_length;
 	unsigned long l2_length;
 
+	blackfin_dma_early_init();
+
 	l1_code_length = _etext_l1 - _stext_l1;
 	if (l1_code_length > L1_CODE_LENGTH)
 		panic("L1 Instruction SRAM Overflow\n");
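
The hunk above only wires up the early init call; a rough sketch of the intended boot-time sequence follows (the dma_memcpy() call and the l1_code_lma symbol are assumptions for illustration, not shown in this diff):

	/* in bfin_relocate_l1_mem(), before any L1 relocation copies */
	blackfin_dma_early_init();          /* put MDMA0 into a known idle state */
	dma_memcpy(_stext_l1, l1_code_lma,  /* l1_code_lma: hypothetical load address */
		   _etext_l1 - _stext_l1);  /* same size as l1_code_length above */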