author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /arch/mips/alchemy/common/dbdma.c
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/mips/alchemy/common/dbdma.c')
-rw-r--r--  arch/mips/alchemy/common/dbdma.c  198
1 file changed, 106 insertions(+), 92 deletions(-)
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 19c1c82849ff..99ae84ce5af3 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -30,6 +30,7 @@
  *
  */
 
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -58,7 +59,6 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
 
 static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
 static int dbdma_initialized;
-static void au1xxx_dbdma_init(void);
 
 static dbdev_tab_t dbdev_tab[] = {
 #ifdef CONFIG_SOC_AU1550
@@ -237,7 +237,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		       void (*callback)(int, void *), void *callparam)
 {
 	unsigned long flags;
-	u32 used, chan, rv;
+	u32 used, chan;
 	u32 dcp;
 	int i;
 	dbdev_tab_t *stp, *dtp;
@@ -250,8 +250,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	 * which can't be done successfully during board set up.
 	 */
 	if (!dbdma_initialized)
-		au1xxx_dbdma_init();
-	dbdma_initialized = 1;
+		return 0;
 
 	stp = find_dbdev_id(srcid);
 	if (stp == NULL)
@@ -261,7 +260,6 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		return 0;
 
 	used = 0;
-	rv = 0;
 
 	/* Check to see if we can get both channels. */
 	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
@@ -282,63 +280,65 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		used++;
 	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 
-	if (!used) {
-		/* Let's see if we can allocate a channel for it. */
-		ctp = NULL;
-		chan = 0;
-		spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
-		for (i = 0; i < NUM_DBDMA_CHANS; i++)
-			if (chan_tab_ptr[i] == NULL) {
-				/*
-				 * If kmalloc fails, it is caught below same
-				 * as a channel not available.
-				 */
-				ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
-				chan_tab_ptr[i] = ctp;
-				break;
-			}
-		spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
-
-		if (ctp != NULL) {
-			memset(ctp, 0, sizeof(chan_tab_t));
-			ctp->chan_index = chan = i;
-			dcp = DDMA_CHANNEL_BASE;
-			dcp += (0x0100 * chan);
-			ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
-			cp = (au1x_dma_chan_t *)dcp;
-			ctp->chan_src = stp;
-			ctp->chan_dest = dtp;
-			ctp->chan_callback = callback;
-			ctp->chan_callparam = callparam;
-
-			/* Initialize channel configuration. */
-			i = 0;
-			if (stp->dev_intlevel)
-				i |= DDMA_CFG_SED;
-			if (stp->dev_intpolarity)
-				i |= DDMA_CFG_SP;
-			if (dtp->dev_intlevel)
-				i |= DDMA_CFG_DED;
-			if (dtp->dev_intpolarity)
-				i |= DDMA_CFG_DP;
-			if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
-				(dtp->dev_flags & DEV_FLAGS_SYNC))
-				i |= DDMA_CFG_SYNC;
-			cp->ddma_cfg = i;
-			au_sync();
-
-			/* Return a non-zero value that can be used to
-			 * find the channel information in subsequent
-			 * operations.
-			 */
-			rv = (u32)(&chan_tab_ptr[chan]);
-		} else {
-			/* Release devices */
-			stp->dev_flags &= ~DEV_FLAGS_INUSE;
-			dtp->dev_flags &= ~DEV_FLAGS_INUSE;
-		}
-	}
-	return rv;
+	if (used)
+		return 0;
+
+	/* Let's see if we can allocate a channel for it. */
+	ctp = NULL;
+	chan = 0;
+	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
+	for (i = 0; i < NUM_DBDMA_CHANS; i++)
+		if (chan_tab_ptr[i] == NULL) {
+			/*
+			 * If kmalloc fails, it is caught below same
+			 * as a channel not available.
+			 */
+			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
+			chan_tab_ptr[i] = ctp;
+			break;
+		}
+	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
+
+	if (ctp != NULL) {
+		memset(ctp, 0, sizeof(chan_tab_t));
+		ctp->chan_index = chan = i;
+		dcp = DDMA_CHANNEL_BASE;
+		dcp += (0x0100 * chan);
+		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
+		cp = (au1x_dma_chan_t *)dcp;
+		ctp->chan_src = stp;
+		ctp->chan_dest = dtp;
+		ctp->chan_callback = callback;
+		ctp->chan_callparam = callparam;
+
+		/* Initialize channel configuration. */
+		i = 0;
+		if (stp->dev_intlevel)
+			i |= DDMA_CFG_SED;
+		if (stp->dev_intpolarity)
+			i |= DDMA_CFG_SP;
+		if (dtp->dev_intlevel)
+			i |= DDMA_CFG_DED;
+		if (dtp->dev_intpolarity)
+			i |= DDMA_CFG_DP;
+		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+		    (dtp->dev_flags & DEV_FLAGS_SYNC))
+			i |= DDMA_CFG_SYNC;
+		cp->ddma_cfg = i;
+		au_sync();
+
+		/*
+		 * Return a non-zero value that can be used to find the channel
+		 * information in subsequent operations.
+		 */
+		return (u32)(&chan_tab_ptr[chan]);
+	}
+
+	/* Release devices */
+	stp->dev_flags &= ~DEV_FLAGS_INUSE;
+	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
+
+	return 0;
 }
 EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
 
@@ -412,8 +412,11 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		if (desc_base == 0)
 			return 0;
 
+		ctp->cdb_membase = desc_base;
 		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
-	}
+	} else
+		ctp->cdb_membase = desc_base;
+
 	dp = (au1x_ddma_desc_t *)desc_base;
 
 	/* Keep track of the base descriptor. */
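[The new cdb_membase field records the address kmalloc() actually returned, because ALIGN_ADDR() may round desc_base up and a rounded pointer must never reach kfree(); the matching free-side fix appears in the chan_free hunk below. In miniature, using the names from this patch (size is a placeholder):]

	u32 membase = (u32)kmalloc(size, GFP_KERNEL | GFP_DMA);
	u32 aligned = ALIGN_ADDR(membase, sizeof(au1x_ddma_desc_t));

	ctp->cdb_membase = membase;		/* freed in au1xxx_dbdma_chan_free() */
	dp = (au1x_ddma_desc_t *)aligned;	/* hardware only sees the aligned ring */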
@@ -569,7 +572,7 @@ EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
  * This updates the source pointer and byte count. Normally used
  * for memory to fifo transfers.
  */
-u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -595,7 +598,7 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 		return 0;
 
 	/* Load up buffer address and byte count. */
-	dp->dscr_source0 = virt_to_phys(buf);
+	dp->dscr_source0 = buf & ~0UL;
 	dp->dscr_cmd1 = nbytes;
 	/* Check flags */
 	if (flags & DDMA_FLAGS_IE)
@@ -613,7 +616,7 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 	dma_cache_wback_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	au_sync();
-	dma_cache_wback_inv((unsigned long)dp, sizeof(dp));
+	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 	ctp->chan_ptr->ddma_dbell = 0;
 
 	/* Get next descriptor pointer. */
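[The sizeof(dp) to sizeof(*dp) change is a genuine bug fix: dp is a pointer, so the old call wrote back only the size of the pointer itself rather than the whole descriptor the hardware reads. A two-line illustration:]

	au1x_ddma_desc_t *dp;
	/* sizeof(dp)  == 4 on these 32-bit SoCs: the pointer only        */
	/* sizeof(*dp) == the full au1x_ddma_desc_t, which must be flushed */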
@@ -622,14 +625,13 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 	/* Return something non-zero. */
 	return nbytes;
 }
-EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
 
 /* Put a destination buffer into the DMA ring.
  * This updates the destination pointer and byte count. Normally used
  * to place an empty buffer into the ring for fifo to memory transfers.
  */
-u32
-_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -659,7 +661,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	if (flags & DDMA_FLAGS_NOIE)
 		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 
-	dp->dscr_dest0 = virt_to_phys(buf);
+	dp->dscr_dest0 = buf & ~0UL;
 	dp->dscr_cmd1 = nbytes;
 #if 0
 	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
@@ -676,7 +678,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	dma_cache_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	au_sync();
-	dma_cache_wback_inv((unsigned long)dp, sizeof(dp));
+	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
 	ctp->chan_ptr->ddma_dbell = 0;
 
 	/* Get next descriptor pointer. */
@@ -685,7 +687,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	/* Return something non-zero. */
 	return nbytes;
 }
-EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
 
 /*
  * Get a destination buffer into the DMA ring.
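[Both put routines now take a dma_addr_t instead of a kernel virtual pointer, and the leading underscore is gone: the caller performs the address translation once, up front. A sketch of an adapted caller, assuming a KSEG0 buffer for which virt_to_phys() is a valid translation; the function and variable names are hypothetical:]

	#include <asm/io.h>		/* virt_to_phys() */

	static int example_queue_tx(u32 chanid, void *buf, int nbytes)
	{
		dma_addr_t paddr = virt_to_phys(buf);

		/* dbdma no longer calls virt_to_phys() internally */
		if (au1xxx_dbdma_put_source(chanid, paddr, nbytes,
					    DDMA_FLAGS_IE) == 0)
			return -EBUSY;	/* descriptor ring currently full */
		return 0;
	}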
@@ -831,7 +833,7 @@ void au1xxx_dbdma_chan_free(u32 chanid)
 
 	au1xxx_dbdma_stop(chanid);
 
-	kfree((void *)ctp->chan_desc_base);
+	kfree((void *)ctp->cdb_membase);
 
 	stp->dev_flags &= ~DEV_FLAGS_INUSE;
 	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
@@ -868,28 +870,6 @@ static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
 	return IRQ_RETVAL(1);
 }
 
-static void au1xxx_dbdma_init(void)
-{
-	int irq_nr;
-
-	dbdma_gptr->ddma_config = 0;
-	dbdma_gptr->ddma_throttle = 0;
-	dbdma_gptr->ddma_inten = 0xffff;
-	au_sync();
-
-#if defined(CONFIG_SOC_AU1550)
-	irq_nr = AU1550_DDMA_INT;
-#elif defined(CONFIG_SOC_AU1200)
-	irq_nr = AU1200_DDMA_INT;
-#else
-	#error Unknown Au1x00 SOC
-#endif
-
-	if (request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
-			"Au1xxx dbdma", (void *)dbdma_gptr))
-		printk(KERN_ERR "Can't get 1550 dbdma irq");
-}
-
 void au1xxx_dbdma_dump(u32 chanid)
 {
 	chan_tab_t *ctp;
@@ -903,7 +883,7 @@ void au1xxx_dbdma_dump(u32 chanid)
 	dtp = ctp->chan_dest;
 	cp = ctp->chan_ptr;
 
-	printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d) \n",
+	printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
 	       (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
 	       dtp - dbdev_tab);
 	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
@@ -1038,4 +1018,38 @@ void au1xxx_dbdma_resume(void)
 	}
 }
 #endif /* CONFIG_PM */
+
+static int __init au1xxx_dbdma_init(void)
+{
+	int irq_nr, ret;
+
+	dbdma_gptr->ddma_config = 0;
+	dbdma_gptr->ddma_throttle = 0;
+	dbdma_gptr->ddma_inten = 0xffff;
+	au_sync();
+
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1550:
+		irq_nr = AU1550_DDMA_INT;
+		break;
+	case ALCHEMY_CPU_AU1200:
+		irq_nr = AU1200_DDMA_INT;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
+			"Au1xxx dbdma", (void *)dbdma_gptr);
+	if (ret)
+		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+	else {
+		dbdma_initialized = 1;
+		printk(KERN_INFO "Alchemy DBDMA initialized\n");
+	}
+
+	return ret;
+}
+subsys_initcall(au1xxx_dbdma_init);
+
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */