Diffstat (limited to 'arch/mips/au1000/common/dbdma.c')
-rw-r--r--	arch/mips/au1000/common/dbdma.c	389

1 file changed, 184 insertions(+), 205 deletions(-)
diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c
index 53377dfc0640..42d555236de1 100644
--- a/arch/mips/au1000/common/dbdma.c
+++ b/arch/mips/au1000/common/dbdma.c
@@ -53,12 +53,11 @@
  */
 static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
 
-/* I couldn't find a macro that did this......
-*/
+/* I couldn't find a macro that did this... */
 #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
 
 static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
-static int dbdma_initialized=0;
+static int dbdma_initialized;
 static void au1xxx_dbdma_init(void);
 
 static dbdev_tab_t dbdev_tab[] = {
@@ -149,7 +148,7 @@ static dbdev_tab_t dbdev_tab[] = {
 
 	{ DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
 
-#endif // CONFIG_SOC_AU1200
+#endif /* CONFIG_SOC_AU1200 */
 
 	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 	{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
@@ -177,8 +176,7 @@ static dbdev_tab_t dbdev_tab[] = {
 
 static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
 
-static dbdev_tab_t *
-find_dbdev_id(u32 id)
+static dbdev_tab_t *find_dbdev_id(u32 id)
 {
 	int i;
 	dbdev_tab_t *p;
@@ -190,29 +188,27 @@ find_dbdev_id(u32 id)
 	return NULL;
 }
 
-void * au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
+void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
 {
 	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 }
 EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
 
-u32
-au1xxx_ddma_add_device(dbdev_tab_t *dev)
+u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
 {
 	u32 ret = 0;
-	dbdev_tab_t *p=NULL;
-	static u16 new_id=0x1000;
+	dbdev_tab_t *p;
+	static u16 new_id = 0x1000;
 
 	p = find_dbdev_id(~0);
-	if ( NULL != p )
-	{
+	if (NULL != p) {
 		memcpy(p, dev, sizeof(dbdev_tab_t));
 		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
 		ret = p->dev_id;
 		new_id++;
 #if 0
-		printk("add_device: id:%x flags:%x padd:%x\n",
-			p->dev_id, p->dev_flags, p->dev_physaddr );
+		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
+			p->dev_id, p->dev_flags, p->dev_physaddr);
 #endif
 	}
 
@@ -220,10 +216,8 @@ au1xxx_ddma_add_device(dbdev_tab_t *dev)
 }
 EXPORT_SYMBOL(au1xxx_ddma_add_device);
 
-/* Allocate a channel and return a non-zero descriptor if successful.
-*/
-u32
-au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
+/* Allocate a channel and return a non-zero descriptor if successful. */
+u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	void (*callback)(int, void *), void *callparam)
 {
 	unsigned long flags;
@@ -234,7 +228,8 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	chan_tab_t *ctp;
 	au1x_dma_chan_t *cp;
 
-	/* We do the intialization on the first channel allocation.
+	/*
+	 * We do the intialization on the first channel allocation.
 	 * We have to wait because of the interrupt handler initialization
 	 * which can't be done successfully during board set up.
 	 */
@@ -242,16 +237,17 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		au1xxx_dbdma_init();
 	dbdma_initialized = 1;
 
-	if ((stp = find_dbdev_id(srcid)) == NULL)
+	stp = find_dbdev_id(srcid);
+	if (stp == NULL)
 		return 0;
-	if ((dtp = find_dbdev_id(destid)) == NULL)
+	dtp = find_dbdev_id(destid);
+	if (dtp == NULL)
 		return 0;
 
 	used = 0;
 	rv = 0;
 
-	/* Check to see if we can get both channels.
-	 */
+	/* Check to see if we can get both channels. */
 	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
 	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
 	    (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
@@ -261,35 +257,30 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		    (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
 			/* Got destination */
 			dtp->dev_flags |= DEV_FLAGS_INUSE;
-		}
-		else {
-			/* Can't get dest. Release src.
-			*/
+		} else {
+			/* Can't get dest. Release src. */
 			stp->dev_flags &= ~DEV_FLAGS_INUSE;
 			used++;
 		}
-	}
-	else {
+	} else
 		used++;
-	}
 	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 
 	if (!used) {
-		/* Let's see if we can allocate a channel for it.
-		*/
+		/* Let's see if we can allocate a channel for it. */
 		ctp = NULL;
 		chan = 0;
 		spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
-		for (i=0; i<NUM_DBDMA_CHANS; i++) {
+		for (i = 0; i < NUM_DBDMA_CHANS; i++)
 			if (chan_tab_ptr[i] == NULL) {
-				/* If kmalloc fails, it is caught below same
+				/*
+				 * If kmalloc fails, it is caught below same
 				 * as a channel not available.
 				 */
 				ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
 				chan_tab_ptr[i] = ctp;
 				break;
 			}
-		}
 		spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
 
 		if (ctp != NULL) {
@@ -304,8 +295,7 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 			ctp->chan_callback = callback;
 			ctp->chan_callparam = callparam;
 
-			/* Initialize channel configuration.
-			*/
+			/* Initialize channel configuration. */
 			i = 0;
 			if (stp->dev_intlevel)
 				i |= DDMA_CFG_SED;
@@ -326,8 +316,7 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 			 * operations.
 			 */
 			rv = (u32)(&chan_tab_ptr[chan]);
-		}
-		else {
+		} else {
 			/* Release devices */
 			stp->dev_flags &= ~DEV_FLAGS_INUSE;
 			dtp->dev_flags &= ~DEV_FLAGS_INUSE;
@@ -337,11 +326,11 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 }
 EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
 
-/* Set the device width if source or destination is a FIFO.
+/*
+ * Set the device width if source or destination is a FIFO.
  * Should be 8, 16, or 32 bits.
  */
-u32
-au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
+u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
 {
 	u32 rv;
 	chan_tab_t *ctp;
@@ -365,10 +354,8 @@ au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
 }
 EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
 
-/* Allocate a descriptor ring, initializing as much as possible.
-*/
-u32
-au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
+/* Allocate a descriptor ring, initializing as much as possible. */
+u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 {
 	int i;
 	u32 desc_base, srcid, destid;
@@ -378,43 +365,45 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 	dbdev_tab_t *stp, *dtp;
 	au1x_ddma_desc_t *dp;
 
-	/* I guess we could check this to be within the
+	/*
+	 * I guess we could check this to be within the
 	 * range of the table......
 	 */
 	ctp = *((chan_tab_t **)chanid);
 	stp = ctp->chan_src;
 	dtp = ctp->chan_dest;
 
-	/* The descriptors must be 32-byte aligned. There is a
+	/*
+	 * The descriptors must be 32-byte aligned. There is a
 	 * possibility the allocation will give us such an address,
 	 * and if we try that first we are likely to not waste larger
 	 * slabs of memory.
 	 */
 	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
 		GFP_KERNEL|GFP_DMA);
 	if (desc_base == 0)
 		return 0;
 
 	if (desc_base & 0x1f) {
-		/* Lost....do it again, allocate extra, and round
+		/*
+		 * Lost....do it again, allocate extra, and round
 		 * the address base.
 		 */
 		kfree((const void *)desc_base);
 		i = entries * sizeof(au1x_ddma_desc_t);
 		i += (sizeof(au1x_ddma_desc_t) - 1);
-		if ((desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA)) == 0)
+		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
+		if (desc_base == 0)
 			return 0;
 
 		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
 	}
 	dp = (au1x_ddma_desc_t *)desc_base;
 
-	/* Keep track of the base descriptor.
-	*/
+	/* Keep track of the base descriptor. */
 	ctp->chan_desc_base = dp;
 
-	/* Initialize the rings with as much information as we know.
-	*/
+	/* Initialize the rings with as much information as we know. */
 	srcid = stp->dev_id;
 	destid = dtp->dev_id;
 
@@ -426,11 +415,12 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
 	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
 
-	/* is it mem to mem transfer? */
-	if(((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) || (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
-		((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) || (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS))) {
-		cmd0 |= DSCR_CMD0_MEM;
-	}
+	/* Is it mem to mem transfer? */
+	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
+	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
+	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
+	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
+		cmd0 |= DSCR_CMD0_MEM;
 
 	switch (stp->dev_devwidth) {
 	case 8:
@@ -458,15 +448,17 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		break;
 	}
 
-	/* If the device is marked as an in/out FIFO, ensure it is
+	/*
+	 * If the device is marked as an in/out FIFO, ensure it is
 	 * set non-coherent.
 	 */
 	if (stp->dev_flags & DEV_FLAGS_IN)
-		cmd0 |= DSCR_CMD0_SN;		/* Source in fifo */
+		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
 	if (dtp->dev_flags & DEV_FLAGS_OUT)
-		cmd0 |= DSCR_CMD0_DN;		/* Destination out fifo */
+		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */
 
-	/* Set up source1. For now, assume no stride and increment.
+	/*
+	 * Set up source1. For now, assume no stride and increment.
 	 * A channel attribute update can change this later.
 	 */
 	switch (stp->dev_tsize) {
@@ -485,19 +477,19 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		break;
 	}
 
-	/* If source input is fifo, set static address.
-	*/
+	/* If source input is FIFO, set static address. */
 	if (stp->dev_flags & DEV_FLAGS_IN) {
-		if ( stp->dev_flags & DEV_FLAGS_BURSTABLE )
+		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
 			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
 		else
 			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
-
 	}
+
 	if (stp->dev_physaddr)
 		src0 = stp->dev_physaddr;
 
-	/* Set up dest1. For now, assume no stride and increment.
+	/*
+	 * Set up dest1. For now, assume no stride and increment.
 	 * A channel attribute update can change this later.
 	 */
 	switch (dtp->dev_tsize) {
@@ -516,22 +508,24 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		break;
 	}
 
-	/* If destination output is fifo, set static address.
-	*/
+	/* If destination output is FIFO, set static address. */
 	if (dtp->dev_flags & DEV_FLAGS_OUT) {
-		if ( dtp->dev_flags & DEV_FLAGS_BURSTABLE )
+		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
 			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
 		else
 			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
 	}
+
 	if (dtp->dev_physaddr)
 		dest0 = dtp->dev_physaddr;
 
 #if 0
-	printk("did:%x sid:%x cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
-		dtp->dev_id, stp->dev_id, cmd0, cmd1, src0, src1, dest0, dest1 );
+	printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
+	       "source1:%x dest0:%x dest1:%x\n",
+	       dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
+	       src1, dest0, dest1);
 #endif
-	for (i=0; i<entries; i++) {
+	for (i = 0; i < entries; i++) {
 		dp->dscr_cmd0 = cmd0;
 		dp->dscr_cmd1 = cmd1;
 		dp->dscr_source0 = src0;
@@ -545,49 +539,49 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		dp++;
 	}
 
-	/* Make last descrptor point to the first.
-	*/
+	/* Make last descrptor point to the first. */
 	dp--;
 	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
 	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 
-	return (u32)(ctp->chan_desc_base);
+	return (u32)ctp->chan_desc_base;
 }
 EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
 
-/* Put a source buffer into the DMA ring.
+/*
+ * Put a source buffer into the DMA ring.
  * This updates the source pointer and byte count. Normally used
  * for memory to fifo transfers.
  */
-u32
-_au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 
-	/* I guess we could check this to be within the
+	/*
+	 * I guess we could check this to be within the
 	 * range of the table......
 	 */
-	ctp = *((chan_tab_t **)chanid);
+	ctp = *(chan_tab_t **)chanid;
 
-	/* We should have multiple callers for a particular channel,
+	/*
+	 * We should have multiple callers for a particular channel,
 	 * an interrupt doesn't affect this pointer nor the descriptor,
 	 * so no locking should be needed.
 	 */
 	dp = ctp->put_ptr;
 
-	/* If the descriptor is valid, we are way ahead of the DMA
+	/*
+	 * If the descriptor is valid, we are way ahead of the DMA
 	 * engine, so just return an error condition.
 	 */
-	if (dp->dscr_cmd0 & DSCR_CMD0_V) {
+	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 		return 0;
-	}
 
-	/* Load up buffer address and byte count.
-	*/
+	/* Load up buffer address and byte count. */
 	dp->dscr_source0 = virt_to_phys(buf);
 	dp->dscr_cmd1 = nbytes;
 	/* Check flags */
 	if (flags & DDMA_FLAGS_IE)
 		dp->dscr_cmd0 |= DSCR_CMD0_IE;
 	if (flags & DDMA_FLAGS_NOIE)
@@ -595,23 +589,21 @@ _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 
 	/*
 	 * There is an errata on the Au1200/Au1550 parts that could result
-	 * in "stale" data being DMA'd. It has to do with the snoop logic on
-	 * the dache eviction buffer. NONCOHERENT_IO is on by default for
-	 * these parts. If it is fixedin the future, these dma_cache_inv will
+	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
+	 * the cache eviction buffer. DMA_NONCOHERENT is on by default for
+	 * these parts. If it is fixed in the future, these dma_cache_inv will
 	 * just be nothing more than empty macros. See io.h.
-	 * */
+	 */
 	dma_cache_wback_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	au_sync();
 	dma_cache_wback_inv((unsigned long)dp, sizeof(dp));
 	ctp->chan_ptr->ddma_dbell = 0;
 
-	/* Get next descriptor pointer.
-	*/
+	/* Get next descriptor pointer. */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
-	/* return something not zero.
-	*/
+	/* Return something non-zero. */
 	return nbytes;
 }
 EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
@@ -654,81 +646,77 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 	dp->dscr_dest0 = virt_to_phys(buf);
 	dp->dscr_cmd1 = nbytes;
 #if 0
-	printk("cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
 	    dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
-	    dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1 );
+	    dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 #endif
 	/*
 	 * There is an errata on the Au1200/Au1550 parts that could result in
-	 * "stale" data being DMA'd. It has to do with the snoop logic on the
-	 * dache eviction buffer. NONCOHERENT_IO is on by default for these
-	 * parts. If it is fixedin the future, these dma_cache_inv will just
+	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
+	 * cache eviction buffer. DMA_NONCOHERENT is on by default for these
+	 * parts. If it is fixed in the future, these dma_cache_inv will just
 	 * be nothing more than empty macros. See io.h.
-	 * */
+	 */
 	dma_cache_inv((unsigned long)buf, nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
 	au_sync();
 	dma_cache_wback_inv((unsigned long)dp, sizeof(dp));
 	ctp->chan_ptr->ddma_dbell = 0;
 
-	/* Get next descriptor pointer.
-	*/
+	/* Get next descriptor pointer. */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
-	/* return something not zero.
-	*/
+	/* Return something non-zero. */
 	return nbytes;
 }
 EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
 
-/* Get a destination buffer into the DMA ring.
+/*
+ * Get a destination buffer into the DMA ring.
  * Normally used to get a full buffer from the ring during fifo
  * to memory transfers. This does not set the valid bit, you will
  * have to put another destination buffer to keep the DMA going.
  */
-u32
-au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
+u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 	u32 rv;
 
-	/* I guess we could check this to be within the
+	/*
+	 * I guess we could check this to be within the
 	 * range of the table......
 	 */
 	ctp = *((chan_tab_t **)chanid);
 
-	/* We should have multiple callers for a particular channel,
+	/*
+	 * We should have multiple callers for a particular channel,
 	 * an interrupt doesn't affect this pointer nor the descriptor,
 	 * so no locking should be needed.
 	 */
 	dp = ctp->get_ptr;
 
-	/* If the descriptor is valid, we are way ahead of the DMA
+	/*
+	 * If the descriptor is valid, we are way ahead of the DMA
 	 * engine, so just return an error condition.
 	 */
 	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 		return 0;
 
-	/* Return buffer address and byte count.
-	*/
+	/* Return buffer address and byte count. */
 	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
 	*nbytes = dp->dscr_cmd1;
 	rv = dp->dscr_stat;
 
-	/* Get next descriptor pointer.
-	*/
+	/* Get next descriptor pointer. */
 	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
-	/* return something not zero.
-	*/
+	/* Return something non-zero. */
 	return rv;
 }
-
 EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
 
-void
-au1xxx_dbdma_stop(u32 chanid)
+void au1xxx_dbdma_stop(u32 chanid)
 {
 	chan_tab_t *ctp;
 	au1x_dma_chan_t *cp;
@@ -743,7 +731,7 @@ au1xxx_dbdma_stop(u32 chanid)
 		udelay(1);
 		halt_timeout++;
 		if (halt_timeout > 100) {
-			printk("warning: DMA channel won't halt\n");
+			printk(KERN_WARNING "warning: DMA channel won't halt\n");
 			break;
 		}
 	}
@@ -753,12 +741,12 @@ au1xxx_dbdma_stop(u32 chanid)
 }
 EXPORT_SYMBOL(au1xxx_dbdma_stop);
 
-/* Start using the current descriptor pointer. If the dbdma encounters
- * a not valid descriptor, it will stop. In this case, we can just
+/*
+ * Start using the current descriptor pointer. If the DBDMA encounters
+ * a non-valid descriptor, it will stop. In this case, we can just
  * continue by adding a buffer to the list and starting again.
  */
-void
-au1xxx_dbdma_start(u32 chanid)
+void au1xxx_dbdma_start(u32 chanid)
 {
 	chan_tab_t *ctp;
 	au1x_dma_chan_t *cp;
@@ -773,8 +761,7 @@ au1xxx_dbdma_start(u32 chanid)
 }
 EXPORT_SYMBOL(au1xxx_dbdma_start);
 
-void
-au1xxx_dbdma_reset(u32 chanid)
+void au1xxx_dbdma_reset(u32 chanid)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -784,14 +771,14 @@ au1xxx_dbdma_reset(u32 chanid)
 	ctp = *((chan_tab_t **)chanid);
 	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
 
-	/* Run through the descriptors and reset the valid indicator.
-	*/
+	/* Run through the descriptors and reset the valid indicator. */
 	dp = ctp->chan_desc_base;
 
 	do {
 		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
-		/* reset our SW status -- this is used to determine
-		 * if a descriptor is in use by upper level SW. Since
+		/*
+		 * Reset our software status -- this is used to determine
+		 * if a descriptor is in use by upper level software. Since
 		 * posting can reset 'V' bit.
 		 */
 		dp->sw_status = 0;
@@ -800,8 +787,7 @@ au1xxx_dbdma_reset(u32 chanid)
 }
 EXPORT_SYMBOL(au1xxx_dbdma_reset);
 
-u32
-au1xxx_get_dma_residue(u32 chanid)
+u32 au1xxx_get_dma_residue(u32 chanid)
 {
 	chan_tab_t *ctp;
 	au1x_dma_chan_t *cp;
@@ -810,18 +796,15 @@ au1xxx_get_dma_residue(u32 chanid)
 	ctp = *((chan_tab_t **)chanid);
 	cp = ctp->chan_ptr;
 
-	/* This is only valid if the channel is stopped.
-	*/
+	/* This is only valid if the channel is stopped. */
 	rv = cp->ddma_bytecnt;
 	au_sync();
 
 	return rv;
 }
-
 EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
 
-void
-au1xxx_dbdma_chan_free(u32 chanid)
+void au1xxx_dbdma_chan_free(u32 chanid)
 {
 	chan_tab_t *ctp;
 	dbdev_tab_t *stp, *dtp;
@@ -842,8 +825,7 @@ au1xxx_dbdma_chan_free(u32 chanid)
 }
 EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
 
-static irqreturn_t
-dbdma_interrupt(int irq, void *dev_id)
+static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
 {
 	u32 intstat;
 	u32 chan_index;
@@ -859,13 +841,12 @@ dbdma_interrupt(int irq, void *dev_id)
 	cp = ctp->chan_ptr;
 	dp = ctp->cur_ptr;
 
-	/* Reset interrupt.
-	*/
+	/* Reset interrupt. */
 	cp->ddma_irq = 0;
 	au_sync();
 
 	if (ctp->chan_callback)
-		(ctp->chan_callback)(irq, ctp->chan_callparam);
+		ctp->chan_callback(irq, ctp->chan_callparam);
 
 	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 	return IRQ_RETVAL(1);
@@ -890,47 +871,47 @@ static void au1xxx_dbdma_init(void)
 
 	if (request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
 			"Au1xxx dbdma", (void *)dbdma_gptr))
-		printk("Can't get 1550 dbdma irq");
+		printk(KERN_ERR "Can't get 1550 dbdma irq");
 }
 
-void
-au1xxx_dbdma_dump(u32 chanid)
+void au1xxx_dbdma_dump(u32 chanid)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 	dbdev_tab_t *stp, *dtp;
 	au1x_dma_chan_t *cp;
 	u32 i = 0;
 
 	ctp = *((chan_tab_t **)chanid);
 	stp = ctp->chan_src;
 	dtp = ctp->chan_dest;
 	cp = ctp->chan_ptr;
 
-	printk("Chan %x, stp %x (dev %d) dtp %x (dev %d) \n",
-		(u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp, dtp - dbdev_tab);
-	printk("desc base %x, get %x, put %x, cur %x\n",
-		(u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
-		(u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
-
-	printk("dbdma chan %x\n", (u32)cp);
-	printk("cfg %08x, desptr %08x, statptr %08x\n",
-		cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
-	printk("dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
-		cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat, cp->ddma_bytecnt);
-
-
-	/* Run through the descriptors
-	 */
+	printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d) \n",
+	       (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
+	       dtp - dbdev_tab);
+	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
+	       (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
+	       (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
+
+	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
+	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
+	       cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
+	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
+	       cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
+	       cp->ddma_bytecnt);
+
+	/* Run through the descriptors */
 	dp = ctp->chan_desc_base;
 
 	do {
-		printk("Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
-			i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
-		printk("src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
-			dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
-		printk("stat %08x, nxtptr %08x\n",
-			dp->dscr_stat, dp->dscr_nxtptr);
+		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
+		       i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
+		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
+		       dp->dscr_source0, dp->dscr_source1,
+		       dp->dscr_dest0, dp->dscr_dest1);
+		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
+		       dp->dscr_stat, dp->dscr_nxtptr);
 		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 	} while (dp != ctp->chan_desc_base);
 }
@@ -938,32 +919,33 @@ au1xxx_dbdma_dump(u32 chanid)
 /* Put a descriptor into the DMA ring.
  * This updates the source/destination pointers and byte count.
  */
-u32
-au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr )
+u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
-	u32 nbytes=0;
+	u32 nbytes = 0;
 
-	/* I guess we could check this to be within the
-	 * range of the table......
-	 */
+	/*
+	 * I guess we could check this to be within the
+	 * range of the table......
+	 */
 	ctp = *((chan_tab_t **)chanid);
 
-	/* We should have multiple callers for a particular channel,
-	 * an interrupt doesn't affect this pointer nor the descriptor,
-	 * so no locking should be needed.
-	 */
+	/*
+	 * We should have multiple callers for a particular channel,
+	 * an interrupt doesn't affect this pointer nor the descriptor,
+	 * so no locking should be needed.
+	 */
 	dp = ctp->put_ptr;
 
-	/* If the descriptor is valid, we are way ahead of the DMA
-	 * engine, so just return an error condition.
-	 */
+	/*
+	 * If the descriptor is valid, we are way ahead of the DMA
+	 * engine, so just return an error condition.
+	 */
 	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 		return 0;
 
-	/* Load up buffer addresses and byte count.
-	*/
+	/* Load up buffer addresses and byte count. */
 	dp->dscr_dest0 = dscr->dscr_dest0;
 	dp->dscr_source0 = dscr->dscr_source0;
 	dp->dscr_dest1 = dscr->dscr_dest1;
@@ -975,14 +957,11 @@ au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr )
 	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
 	ctp->chan_ptr->ddma_dbell = 0;
 
-	/* Get next descriptor pointer.
-	*/
+	/* Get next descriptor pointer. */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
-	/* return something not zero.
-	*/
+	/* Return something non-zero. */
 	return nbytes;
 }
 
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
-
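
A note on the ALIGN_ADDR macro this patch keeps: it rounds an address up to the next multiple of a power-of-two alignment, which is why au1xxx_dbdma_ring_alloc() can over-allocate by one descriptor size and round the base when kmalloc returns an unaligned pointer. A minimal user-space sketch of the same idiom (the test address is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same idiom as the patch: round x up to the next multiple of a,
 * where a must be a power of two. */
#define ALIGN_ADDR(x, a) ((((uint32_t)(x)) + (a - 1)) & ~(a - 1))

int main(void)
{
	uint32_t addr = 0x80001234;	/* hypothetical unaligned allocation result */

	/* dbdma descriptors are 32 bytes and must be 32-byte aligned,
	 * hence the allocate-extra-then-round dance in the patch. */
	printf("%#x -> %#x\n", addr, ALIGN_ADDR(addr, 32));
	/* prints: 0x80001234 -> 0x80001240 */
	return 0;
}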
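
For context, the exported functions reformatted here are normally used together by board drivers. The following is a hedged sketch of a memory-to-FIFO transfer setup, not code from this patch: MY_SRC_ID is a placeholder device id, and error handling is abbreviated; only functions, flags, and pseudo-device ids that appear in the diff above are used.

/* Sketch only: allocate a channel from a hypothetical source device to
 * the DSCR_CMD0_ALWAYS pseudo-device, size the ring, post one buffer,
 * and kick the engine. */
static u32 chanid;

static void my_callback(int irq, void *param)
{
	/* Invoked from dbdma_interrupt() when a descriptor completes. */
}

static int my_setup(void *buf, int nbytes)
{
	chanid = au1xxx_dbdma_chan_alloc(MY_SRC_ID, DSCR_CMD0_ALWAYS,
					 my_callback, NULL);
	if (!chanid)
		return -EBUSY;

	au1xxx_dbdma_set_devwidth(chanid, 32);		/* FIFO width in bits */
	if (!au1xxx_dbdma_ring_alloc(chanid, 4))	/* 4 descriptors */
		return -ENOMEM;

	/* Post one source buffer, interrupt on completion, then start. */
	if (!_au1xxx_dbdma_put_source(chanid, buf, nbytes, DDMA_FLAGS_IE))
		return -EAGAIN;
	au1xxx_dbdma_start(chanid);
	return 0;
}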