Diffstat (limited to 'drivers')
 drivers/dma/fsldma.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 47 insertions(+), 24 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e411..f18d1bde0439 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
+	u64 snoop_bits;
+
+	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+		? FSL_DMA_SNEN : 0;
+
 	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
-		64);
+		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+		| snoop_bits, 64);
 }
 
 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
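
The first hunk computes the snoop-enable bit once from the channel's feature flags and ORs it into the end-of-list word instead of dropping it. A standalone sketch of that feature-conditional bitmask pattern follows; the mask values are illustrative stand-ins, not the driver's real register layout:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative values only -- not the driver's real masks */
	#define IP_MASK	0x00000300u	/* hypothetical controller-family field */
	#define IP_83XX	0x00000100u
	#define SNEN	0x00000010u	/* hypothetical snoop-enable link bit */
	#define EOL	0x00000001u	/* hypothetical end-of-list link bit */

	static uint64_t mark_eol(uint64_t next_ln_addr, uint32_t feature)
	{
		/* 83xx-class parts keep snooping enabled in the EOL word;
		 * other families leave the bit clear, as in the hunk above */
		uint64_t snoop_bits = ((feature & IP_MASK) == IP_83XX) ? SNEN : 0;

		return next_ln_addr | EOL | snoop_bits;
	}

	int main(void)
	{
		printf("0x%llx\n", (unsigned long long)mark_eol(0x1000, IP_83XX));
		return 0;
	}
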
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	desc->async_tx.cookie = cookie;
-	fsl_chan->common.cookie = desc->async_tx.cookie;
+	list_for_each_entry(desc, &tx->tx_list, node) {
+		cookie++;
+		if (cookie < 0)
+			cookie = 1;
 
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+		desc->async_tx.cookie = cookie;
+	}
+
+	fsl_chan->common.cookie = cookie;
+	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
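
The tx_submit rework assigns a distinct cookie to every descriptor hanging off the transaction rather than only the last one, then stores the final value back into the channel. A minimal sketch of that loop, with an array standing in for the kernel's tx_list and a plain int for dma_cookie_t:

	#include <stdio.h>

	typedef int dma_cookie_t;	/* the kernel type is a signed int too */

	struct desc {
		dma_cookie_t cookie;
	};

	static dma_cookie_t submit(struct desc *descs, int n, dma_cookie_t chan_cookie)
	{
		int i;

		for (i = 0; i < n; i++) {
			chan_cookie++;
			if (chan_cookie < 0)	/* signed wrap: restart at 1 */
				chan_cookie = 1;
			descs[i].cookie = chan_cookie;
		}
		return chan_cookie;	/* caller stores this in the channel */
	}

	int main(void)
	{
		struct desc d[3];
		dma_cookie_t last = submit(d, 3, 41);

		printf("cookies %d..%d\n", d[0].cookie, last);	/* 42..44 */
		return 0;
	}
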
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+	struct list_head *list;
 	size_t copy;
-	LIST_HEAD(link_chain);
 
 	if (!chan)
 		return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		if (!new) {
 			dev_err(fsl_chan->dev,
 					"No free memory for link descriptor\n");
-			return NULL;
+			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
-	return first ? &first->async_tx : NULL;
+	return &first->async_tx;
+
+fail:
+	if (!first)
+		return NULL;
+
+	list = &first->async_tx.tx_list;
+	list_for_each_entry_safe_reverse(new, prev, list, node) {
+		list_del(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
 }
 
 /**
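
The new fail label unwinds a partially built descriptor chain: each node is visited in reverse, unlinked, and returned to the DMA pool, so an allocation failure mid-chain no longer leaks the descriptors already allocated. A self-contained sketch of that unwind pattern, with malloc/free standing in for the DMA pool and open-coded pointer surgery in place of the kernel's list macros:

	#include <stdlib.h>
	#include <stdio.h>

	struct node {
		struct node *prev, *next;
	};

	/* head is a circular sentinel, like the kernel's struct list_head */
	static void unwind(struct node *head)
	{
		struct node *n = head->prev;	/* start from the tail */

		while (n != head) {
			struct node *keep = n->prev;	/* "safe": saved before free */

			n->prev->next = n->next;	/* list_del() equivalent */
			n->next->prev = n->prev;
			free(n);			/* dma_pool_free() stand-in */
			n = keep;
		}
	}

	int main(void)
	{
		struct node head = { &head, &head };
		int i;

		for (i = 0; i < 3; i++) {	/* build a partial chain */
			struct node *n = malloc(sizeof(*n));
			n->prev = head.prev;
			n->next = &head;
			head.prev->next = n;
			head.prev = n;
		}

		unwind(&head);			/* tear it down in reverse */
		printf("empty: %d\n", head.next == &head);
		return 0;
	}
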
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	dma_addr_t next_dest_addr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
 	if (!dma_is_idle(fsl_chan))
-		return;
+		goto out_unlock;
 
 	dma_halt(fsl_chan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	/* Find the first un-transfer desciptor */
 	for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@
 			fsl_chan->common.cookie) == DMA_SUCCESS);
 		ld_node = ld_node->next);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
-			(void *)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+			(unsigned long long)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
 		set_cdar(fsl_chan, 0);
 		set_ndar(fsl_chan, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 }
 
 /**
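
These two hunks widen the locking in fsl_chan_xfer_ld_queue: the descriptor lock is now taken before the idle check rather than after it, closing the window where the queue could change between the check and the walk, and every path leaves through a single out_unlock label. A sketch of that shape under stated assumptions (a pthread mutex standing in for the spinlock, trivial stubs for the hardware calls):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;

	static bool dma_is_idle(void)    { return true; }	/* stub */
	static void start_transfer(void) { puts("start"); }	/* stub */

	static void xfer_ld_queue(void)
	{
		pthread_mutex_lock(&desc_lock);	/* now covers the idle check too */

		if (!dma_is_idle())
			goto out_unlock;	/* early exit still drops the lock */

		start_transfer();		/* queue walk + start, all locked */

	out_unlock:
		pthread_mutex_unlock(&desc_lock);
	}

	int main(void)
	{
		xfer_ld_queue();
		return 0;
	}
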
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
-			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+			(unsigned long long)get_cdar(fsl_chan),
+			(unsigned long long)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
-	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
 		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
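
The probe fix is a classic off-by-one: with FSL_DMA_MAX_CHANS_PER_DEVICE channels the valid ids run 0 through MAX-1, so the old '>' test accepted id == MAX and indexed one past the end of the per-device channel array. A tiny sketch, with a made-up MAX_CHANS in place of the driver's constant:

	#include <stdio.h>

	#define MAX_CHANS 8	/* stand-in for FSL_DMA_MAX_CHANS_PER_DEVICE */

	static int chan_used[MAX_CHANS];

	static int claim_channel(int id)
	{
		/* valid ids are 0..MAX_CHANS-1; the old '>' test let
		 * id == MAX_CHANS through and wrote one past the array */
		if (id >= MAX_CHANS)
			return -1;
		chan_used[id] = 1;
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", claim_channel(7), claim_channel(8));	/* 0 -1 */
		return 0;
	}
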
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at %p...\n",
-			match->compatible, (void *)fdev->reg.start);
+			"controller at 0x%llx...\n",
+			match->compatible, (unsigned long long)fdev->reg.start);
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 			- fdev->reg.start + 1);
 
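
Several hunks in this patch replace (void *)/%p casts of DMA and resource addresses with (unsigned long long)/%llx. The reason: dma_addr_t and resource_size_t can be 64-bit even on 32-bit PowerPC builds, so squeezing them through a pointer truncates the upper bits. A short sketch, with a local typedef standing in for the kernel's dma_addr_t:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t dma_addr_t;	/* may be wider than a pointer */

	int main(void)
	{
		dma_addr_t reg_start = 0x100003000ULL;	/* above 4 GiB */

		/* printing this via (void *)/%p on a 32-bit build would
		 * drop the top bits; widening to unsigned long long and
		 * using %llx never does */
		printf("controller at 0x%llx...\n",
			(unsigned long long)reg_start);
		return 0;
	}
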