Diffstat (limited to 'arch/mips/alchemy/common/dbdma.c')
-rw-r--r--  arch/mips/alchemy/common/dbdma.c | 187
1 file changed, 99 insertions(+), 88 deletions(-)
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index f9201ca2295b..99ae84ce5af3 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -30,6 +30,7 @@
  *
  */
 
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -58,7 +59,6 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
 
 static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
 static int dbdma_initialized;
-static void au1xxx_dbdma_init(void);
 
 static dbdev_tab_t dbdev_tab[] = {
 #ifdef CONFIG_SOC_AU1550
@@ -237,7 +237,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	void (*callback)(int, void *), void *callparam)
 {
 	unsigned long flags;
-	u32 used, chan, rv;
+	u32 used, chan;
 	u32 dcp;
 	int i;
 	dbdev_tab_t *stp, *dtp;
@@ -250,8 +250,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	 * which can't be done successfully during board set up.
 	 */
 	if (!dbdma_initialized)
-		au1xxx_dbdma_init();
-	dbdma_initialized = 1;
+		return 0;
 
 	stp = find_dbdev_id(srcid);
 	if (stp == NULL)
@@ -261,7 +260,6 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		return 0;
 
 	used = 0;
-	rv = 0;
 
 	/* Check to see if we can get both channels. */
 	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
@@ -282,63 +280,65 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		used++;
 	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 
-	if (!used) {
-		/* Let's see if we can allocate a channel for it. */
-		ctp = NULL;
-		chan = 0;
-		spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
-		for (i = 0; i < NUM_DBDMA_CHANS; i++)
-			if (chan_tab_ptr[i] == NULL) {
-				/*
-				 * If kmalloc fails, it is caught below same
-				 * as a channel not available.
-				 */
-				ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
-				chan_tab_ptr[i] = ctp;
-				break;
-			}
-		spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
-
-		if (ctp != NULL) {
-			memset(ctp, 0, sizeof(chan_tab_t));
-			ctp->chan_index = chan = i;
-			dcp = DDMA_CHANNEL_BASE;
-			dcp += (0x0100 * chan);
-			ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
-			cp = (au1x_dma_chan_t *)dcp;
-			ctp->chan_src = stp;
-			ctp->chan_dest = dtp;
-			ctp->chan_callback = callback;
-			ctp->chan_callparam = callparam;
-
-			/* Initialize channel configuration. */
-			i = 0;
-			if (stp->dev_intlevel)
-				i |= DDMA_CFG_SED;
-			if (stp->dev_intpolarity)
-				i |= DDMA_CFG_SP;
-			if (dtp->dev_intlevel)
-				i |= DDMA_CFG_DED;
-			if (dtp->dev_intpolarity)
-				i |= DDMA_CFG_DP;
-			if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
-				(dtp->dev_flags & DEV_FLAGS_SYNC))
-				i |= DDMA_CFG_SYNC;
-			cp->ddma_cfg = i;
-			au_sync();
-
-			/* Return a non-zero value that can be used to
-			 * find the channel information in subsequent
-			 * operations.
-			 */
-			rv = (u32)(&chan_tab_ptr[chan]);
-		} else {
-			/* Release devices */
-			stp->dev_flags &= ~DEV_FLAGS_INUSE;
-			dtp->dev_flags &= ~DEV_FLAGS_INUSE;
-		}
-	}
-	return rv;
+	if (used)
+		return 0;
+
+	/* Let's see if we can allocate a channel for it. */
+	ctp = NULL;
+	chan = 0;
+	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+	for (i = 0; i < NUM_DBDMA_CHANS; i++)
+		if (chan_tab_ptr[i] == NULL) {
+			/*
+			 * If kmalloc fails, it is caught below same
+			 * as a channel not available.
+			 */
+			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
+			chan_tab_ptr[i] = ctp;
+			break;
+		}
+	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+	if (ctp != NULL) {
+		memset(ctp, 0, sizeof(chan_tab_t));
+		ctp->chan_index = chan = i;
+		dcp = DDMA_CHANNEL_BASE;
+		dcp += (0x0100 * chan);
+		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
+		cp = (au1x_dma_chan_t *)dcp;
+		ctp->chan_src = stp;
+		ctp->chan_dest = dtp;
+		ctp->chan_callback = callback;
+		ctp->chan_callparam = callparam;
+
+		/* Initialize channel configuration. */
+		i = 0;
+		if (stp->dev_intlevel)
+			i |= DDMA_CFG_SED;
+		if (stp->dev_intpolarity)
+			i |= DDMA_CFG_SP;
+		if (dtp->dev_intlevel)
+			i |= DDMA_CFG_DED;
+		if (dtp->dev_intpolarity)
+			i |= DDMA_CFG_DP;
+		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+		    (dtp->dev_flags & DEV_FLAGS_SYNC))
+			i |= DDMA_CFG_SYNC;
+		cp->ddma_cfg = i;
+		au_sync();
+
+		/*
+		 * Return a non-zero value that can be used to find the channel
+		 * information in subsequent operations.
+		 */
+		return (u32)(&chan_tab_ptr[chan]);
+	}
+
+	/* Release devices */
+	stp->dev_flags &= ~DEV_FLAGS_INUSE;
+	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+
+	return 0;
 }
 EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
 
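
Note on the hunk above: au1xxx_dbdma_chan_alloc() no longer initializes the DBDMA controller lazily on first use; if the initcall added at the end of this patch has not run yet, it just returns 0, the same value used when a device or channel is unavailable. A minimal caller-side sketch of the resulting contract (not part of the patch; srcid, destid, my_dma_callback and my_dev are placeholders):

	u32 chanid;

	/* srcid/destid are whatever dbdev_tab IDs the driver really uses. */
	chanid = au1xxx_dbdma_chan_alloc(srcid, destid, my_dma_callback, my_dev);
	if (!chanid)
		return -ENODEV;	/* no free channel, or DBDMA core not ready yet */
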
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
  * This updates the source pointer and byte count. Normally used
  * for memory to fifo transfers.
  */
-u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -598,7 +598,7 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 		return 0;
 
 	/* Load up buffer address and byte count. */
-	dp->dscr_source0 = virt_to_phys(buf);
+	dp->dscr_source0 = buf & ~0UL;
 	dp->dscr_cmd1 = nbytes;
 	/* Check flags */
 	if (flags & DDMA_FLAGS_IE)
@@ -625,14 +625,13 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 	/* Return something non-zero. */
 	return nbytes;
 }
-EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
 
 /* Put a destination buffer into the DMA ring.
  * This updates the destination pointer and byte count. Normally used
  * to place an empty buffer into the ring for fifo to memory transfers.
  */
-u32
-_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -662,7 +661,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	if (flags & DDMA_FLAGS_NOIE)
 		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 
-	dp->dscr_dest0 = virt_to_phys(buf);
+	dp->dscr_dest0 = buf & ~0UL;
 	dp->dscr_cmd1 = nbytes;
 #if 0
 	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
@@ -688,7 +687,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	/* Return something non-zero. */
 	return nbytes;
 }
-EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
 
 /*
  * Get a destination buffer into the DMA ring.
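
The put_source/put_dest hunks also change the buffer argument from a kernel virtual pointer to a dma_addr_t, so the address translation (previously virt_to_phys() inside the DBDMA core) moves to the caller. A caller-side sketch, assuming the generic DMA API is used for the mapping (dev, buf, len and chanid are illustrative):

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* Queue the mapped buffer; DDMA_FLAGS_IE sets DSCR_CMD0_IE in the descriptor. */
	au1xxx_dbdma_put_source(chanid, addr, len, DDMA_FLAGS_IE);
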
@@ -871,28 +870,6 @@ static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
 	return IRQ_RETVAL(1);
 }
 
-static void au1xxx_dbdma_init(void)
-{
-	int irq_nr;
-
-	dbdma_gptr->ddma_config = 0;
-	dbdma_gptr->ddma_throttle = 0;
-	dbdma_gptr->ddma_inten = 0xffff;
-	au_sync();
-
-#if defined(CONFIG_SOC_AU1550)
-	irq_nr = AU1550_DDMA_INT;
-#elif defined(CONFIG_SOC_AU1200)
-	irq_nr = AU1200_DDMA_INT;
-#else
-#error Unknown Au1x00 SOC
-#endif
-
-	if (request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
-			"Au1xxx dbdma", (void *)dbdma_gptr))
-		printk(KERN_ERR "Can't get 1550 dbdma irq");
-}
-
 void au1xxx_dbdma_dump(u32 chanid)
 {
 	chan_tab_t *ctp;
@@ -906,7 +883,7 @@ void au1xxx_dbdma_dump(u32 chanid)
 	dtp = ctp->chan_dest;
 	cp = ctp->chan_ptr;
 
-	printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d) \n",
+	printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
 		(u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
 		dtp - dbdev_tab);
 	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
@@ -1041,4 +1018,38 @@ void au1xxx_dbdma_resume(void)
 	}
 }
 #endif /* CONFIG_PM */
+
+static int __init au1xxx_dbdma_init(void)
+{
+	int irq_nr, ret;
+
+	dbdma_gptr->ddma_config = 0;
+	dbdma_gptr->ddma_throttle = 0;
+	dbdma_gptr->ddma_inten = 0xffff;
+	au_sync();
+
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1550:
+		irq_nr = AU1550_DDMA_INT;
+		break;
+	case ALCHEMY_CPU_AU1200:
+		irq_nr = AU1200_DDMA_INT;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
+			"Au1xxx dbdma", (void *)dbdma_gptr);
+	if (ret)
+		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+	else {
+		dbdma_initialized = 1;
+		printk(KERN_INFO "Alchemy DBDMA initialized\n");
+	}
+
+	return ret;
+}
+subsys_initcall(au1xxx_dbdma_init);
+
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
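
Since controller setup now happens in a subsys_initcall() instead of lazily on the first au1xxx_dbdma_chan_alloc() call, users of the API must run at a later initcall level so that dbdma_initialized is already set. A small ordering sketch under that assumption (my_au1x_driver_init, my_srcid and my_destid are hypothetical):

	/* device_initcall runs after subsys_initcall, so DBDMA is ready here. */
	static int __init my_au1x_driver_init(void)
	{
		u32 chanid;

		/* my_srcid/my_destid: real dbdev_tab IDs of the peripheral. */
		chanid = au1xxx_dbdma_chan_alloc(my_srcid, my_destid, NULL, NULL);
		if (!chanid)
			return -ENODEV;

		/* ... allocate the descriptor ring, start the channel, etc. ... */
		return 0;
	}
	device_initcall(my_au1x_driver_init);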