Diffstat (limited to 'drivers/dma/fsldma.c')

 drivers/dma/fsldma.c | 305 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 277 insertions(+), 28 deletions(-)
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f18d1bde0439..296f9e747fac 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -12,6 +12,11 @@
  * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
  * The support for MPC8349 DMA contorller is also added.
  *
+ * This driver instructs the DMA controller to issue the PCI Read Multiple
+ * command for PCI read operations, instead of using the default PCI Read Line
+ * command. Please be aware that this setting may result in read pre-fetching
+ * on some platforms.
+ *
  * This is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -29,6 +34,7 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
+#include <asm/fsldma.h>
 #include "fsldma.h"
 
 static void dma_init(struct fsl_dma_chan *fsl_chan)
@@ -49,9 +55,10 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 	case FSL_DMA_IP_83XX:
 		/* Set the channel to below modes:
 		 * EOTIE - End-of-transfer interrupt enable
+		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
-				32);
+		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
 
@@ -136,15 +143,16 @@ static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 
 static void dma_start(struct fsl_dma_chan *fsl_chan)
 {
-	u32 mr_set = 0;;
+	u32 mr_set = 0;
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
 		mr_set |= FSL_DMA_MR_EMP_EN;
-	} else
+	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
 			& ~FSL_DMA_MR_EMP_EN, 32);
+	}
 
 	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
 		mr_set |= FSL_DMA_MR_EMS_EN;
@@ -273,28 +281,40 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
 }
 
 /**
- * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * fsl_chan_set_request_count - Set DMA Request Count for external control
  * @fsl_chan : Freescale DMA channel
- * @size : Pause control size, 0 for disable external pause control.
- *         The maximum is 1024.
+ * @size : Number of bytes to transfer in a single request
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA request count is how many bytes are allowed to transfer before
+ * pausing the channel, after which a new assertion of DREQ# resumes channel
+ * operation.
  *
- * The Freescale DMA channel can be controlled by the external
- * signal DREQ#. The pause control size is how many bytes are allowed
- * to transfer before pausing the channel, after which a new assertion
- * of DREQ# resumes channel operation.
+ * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
 {
-	if (size > 1024)
-		return;
+	BUG_ON(size > 1024);
+	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+			| ((__ilog2(size) << 24) & 0x0f000000),
+		32);
+}
 
-	if (size) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| ((__ilog2(size) << 24) & 0x0f000000),
-			32);
+/**
+ * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * @fsl_chan : Freescale DMA channel
+ * @enable : 0 is disabled, 1 is enabled.
+ *
+ * The Freescale DMA channel can be controlled by the external signal DREQ#.
+ * The DMA Request Count feature should be used in addition to this feature
+ * to set the number of bytes to transfer before pausing the channel.
+ */
+static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+{
+	if (enable)
 		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
-	} else
+	else
 		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 }
 
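A note on the encoding used by fsl_chan_set_request_count() above: the request count is stored as a power of two in bits 24-27 of the mode register, which is what the mask 0x0f000000 enforces. The stand-alone sketch below works through that exact expression for a few sizes; ilog2_u32() is a hypothetical stand-in for the kernel's __ilog2() (floor of log2), added here only so the snippet compiles on its own.

    #include <stdio.h>

    /* Stand-in for the kernel's __ilog2(): floor(log2(n)) for n > 0 */
    static unsigned int ilog2_u32(unsigned int n)
    {
            unsigned int r = 0;

            while (n >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            /* Mirrors the driver's ((__ilog2(size) << 24) & 0x0f000000) */
            unsigned int sizes[] = { 64, 512, 1024 };
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("size %4u -> MR field 0x%08x\n", sizes[i],
                           (ilog2_u32(sizes[i]) << 24) & 0x0f000000);
            return 0;
    }

This prints 0x06000000, 0x09000000, and 0x0a000000 respectively, showing why 1024 (exponent 10) is the largest size that fits the 4-bit field.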
@@ -319,7 +339,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
-	struct fsl_desc_sw *desc;
+	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+	struct fsl_desc_sw *child;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -327,7 +348,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	list_for_each_entry(desc, &tx->tx_list, node) {
+	list_for_each_entry(child, &desc->tx_list, node) {
 		cookie++;
 		if (cookie < 0)
 			cookie = 1;
@@ -336,8 +357,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	fsl_chan->common.cookie = cookie;
-	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
-	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
+	append_ld_queue(fsl_chan, desc);
+	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
@@ -359,6 +380,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
 	if (desc_sw) {
 		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+		INIT_LIST_HEAD(&desc_sw->tx_list);
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 						&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
@@ -448,7 +470,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
 	new->async_tx.flags = flags;
 
 	/* Insert the link descriptor to the LD ring */
-	list_add_tail(&new->node, &new->async_tx.tx_list);
+	list_add_tail(&new->node, &new->tx_list);
 
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
@@ -506,7 +528,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		dma_dest += copy;
 
 		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->async_tx.tx_list);
+		list_add_tail(&new->node, &first->tx_list);
 	} while (len);
 
 	new->async_tx.flags = flags; /* client is in control of this ack */
@@ -521,7 +543,7 @@ fail:
 	if (!first)
 		return NULL;
 
-	list = &first->async_tx.tx_list;
+	list = &first->tx_list;
 	list_for_each_entry_safe_reverse(new, prev, list, node) {
 		list_del(&new->node);
 		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
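The recurring async_tx.tx_list to tx_list substitutions in the hunks above, together with the INIT_LIST_HEAD() added in fsl_dma_alloc_descriptor(), move the transaction's descriptor chain onto the driver's own software descriptor instead of the generic dma_async_tx_descriptor. A minimal sketch of the resulting layout, inferred from the usage in this diff rather than quoted from fsldma.h:

    struct fsl_desc_sw {
            struct fsl_dma_ld_hw hw;        /* hardware link descriptor */
            struct list_head node;          /* entry in the channel's ld_queue */
            struct list_head tx_list;       /* descriptors chained in one transaction */
            struct dma_async_tx_descriptor async_tx;
    };

With this, the "first" descriptor of a multi-descriptor transaction owns the whole chain, and fsl_dma_tx_submit() can splice desc->tx_list onto the channel queue without touching the generic descriptor.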
@@ -531,6 +553,229 @@ fail:
 }
 
 /**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
+{
+	struct fsl_dma_chan *fsl_chan;
+	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+	struct fsl_dma_slave *slave;
+	struct list_head *tx_list;
+	size_t copy;
+
+	int i;
+	struct scatterlist *sg;
+	size_t sg_used;
+	size_t hw_used;
+	struct fsl_dma_hw_addr *hw;
+	dma_addr_t dma_dst, dma_src;
+
+	if (!chan)
+		return NULL;
+
+	if (!chan->private)
+		return NULL;
+
+	fsl_chan = to_fsl_chan(chan);
+	slave = chan->private;
+
+	if (list_empty(&slave->addresses))
+		return NULL;
+
+	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
+	hw_used = 0;
+
+	/*
+	 * Build the hardware transaction to copy from the scatterlist to
+	 * the hardware, or from the hardware to the scatterlist
+	 *
+	 * If you are copying from the hardware to the scatterlist and it
+	 * takes two hardware entries to fill an entire page, then both
+	 * hardware entries will be coalesced into the same page
+	 *
+	 * If you are copying from the scatterlist to the hardware and a
+	 * single page can fill two hardware entries, then the data will
+	 * be read out of the page into the first hardware entry, and so on
+	 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		sg_used = 0;
+
+		/* Loop until the entire scatterlist entry is used */
+		while (sg_used < sg_dma_len(sg)) {
+
+			/*
+			 * If we've used up the current hardware address/length
+			 * pair, we need to load a new one
+			 *
+			 * This is done in a while loop so that descriptors with
+			 * length == 0 will be skipped
+			 */
+			while (hw_used >= hw->length) {
+
+				/*
+				 * If the current hardware entry is the last
+				 * entry in the list, we're finished
+				 */
+				if (list_is_last(&hw->entry, &slave->addresses))
+					goto finished;
+
+				/* Get the next hardware address/length pair */
+				hw = list_entry(hw->entry.next,
+						struct fsl_dma_hw_addr, entry);
+				hw_used = 0;
+			}
+
+			/* Allocate the link descriptor from DMA pool */
+			new = fsl_dma_alloc_descriptor(fsl_chan);
+			if (!new) {
+				dev_err(fsl_chan->dev, "No free memory for "
+						       "link descriptor\n");
+				goto fail;
+			}
+#ifdef FSL_DMA_LD_DEBUG
+			dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+#endif
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the DMA controller limit
+			 */
+			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+					     hw->length - hw_used);
+			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
+
+			/*
+			 * DMA_FROM_DEVICE
+			 * from the hardware to the scatterlist
+			 *
+			 * DMA_TO_DEVICE
+			 * from the scatterlist to the hardware
+			 */
+			if (direction == DMA_FROM_DEVICE) {
+				dma_src = hw->address + hw_used;
+				dma_dst = sg_dma_address(sg) + sg_used;
+			} else {
+				dma_src = sg_dma_address(sg) + sg_used;
+				dma_dst = hw->address + hw_used;
+			}
+
+			/* Fill in the descriptor */
+			set_desc_cnt(fsl_chan, &new->hw, copy);
+			set_desc_src(fsl_chan, &new->hw, dma_src);
+			set_desc_dest(fsl_chan, &new->hw, dma_dst);
+
+			/*
+			 * If this is not the first descriptor, chain the
+			 * current descriptor after the previous descriptor
+			 */
+			if (!first) {
+				first = new;
+			} else {
+				set_desc_next(fsl_chan, &prev->hw,
						new->async_tx.phys);
+			}
+
+			new->async_tx.cookie = 0;
+			async_tx_ack(&new->async_tx);
+
+			prev = new;
+			sg_used += copy;
+			hw_used += copy;
+
+			/* Insert the link descriptor into the LD ring */
+			list_add_tail(&new->node, &first->tx_list);
+		}
+	}
+
+finished:
+
+	/* All of the hardware address/length pairs had length == 0 */
+	if (!first || !new)
+		return NULL;
+
+	new->async_tx.flags = flags;
+	new->async_tx.cookie = -EBUSY;
+
+	/* Set End-of-link to the last link descriptor of new list */
+	set_ld_eol(fsl_chan, new);
+
+	/* Enable extra controller features */
+	if (fsl_chan->set_src_loop_size)
+		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+
+	if (fsl_chan->set_dest_loop_size)
+		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+
+	if (fsl_chan->toggle_ext_start)
+		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+
+	if (fsl_chan->toggle_ext_pause)
+		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+
+	if (fsl_chan->set_request_count)
+		fsl_chan->set_request_count(fsl_chan, slave->request_count);
+
+	return &first->async_tx;
+
+fail:
+	/* If first was not set, then we failed to allocate the very first
+	 * descriptor, and we're done */
+	if (!first)
+		return NULL;
+
+	/*
+	 * First is set, so all of the descriptors we allocated have been added
+	 * to first->tx_list, INCLUDING "first" itself. Therefore we
+	 * must traverse the list backwards freeing each descriptor in turn
+	 *
+	 * We're re-using variables for the loop, oh well
+	 */
+	tx_list = &first->tx_list;
+	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
+		list_del_init(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
+}
+
+static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+{
+	struct fsl_dma_chan *fsl_chan;
+	struct fsl_desc_sw *desc, *tmp;
+	unsigned long flags;
+
+	if (!chan)
+		return;
+
+	fsl_chan = to_fsl_chan(chan);
+
+	/* Halt the DMA engine */
+	dma_halt(fsl_chan);
+
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+	/* Remove and free all of the descriptors in the LD queue */
+	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
+		list_del(&desc->node);
+		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+	}
+
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+}
+
+/**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
  * @fsl_chan : Freescale DMA channel
  */
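fsl_dma_prep_slave_sg() takes all of its device-side configuration from chan->private, and the new #include <asm/fsldma.h> suggests that is where the slave structures live. That header is not part of this diff; the sketch below reconstructs the two structures purely from the fields the function touches, so the exact types and any additional members are assumptions.

    /* One device-side address/length window the transfer walks through */
    struct fsl_dma_hw_addr {
            struct list_head entry;         /* linked into fsl_dma_slave.addresses */
            dma_addr_t address;             /* device bus address */
            size_t length;                  /* bytes available at this address */
    };

    struct fsl_dma_slave {
            struct list_head addresses;     /* list of struct fsl_dma_hw_addr */
            unsigned int request_count;     /* bytes per DREQ# assertion */
            unsigned int src_loop_size;     /* source address loop size */
            unsigned int dst_loop_size;     /* destination address loop size */
            bool external_start;            /* start channel via external signal */
            bool external_pause;            /* pause channel via external signal */
    };

Because the address list is walked independently of the scatterlist, one scatterlist entry may span several hardware windows and vice versa, which is exactly the coalescing behavior the block comment in the function describes.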
@@ -871,11 +1116,12 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 
 	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
-		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
 	case FSL_DMA_IP_83XX:
+		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
 		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+		new_fsl_chan->set_request_count = fsl_chan_set_request_count;
 	}
 
 	spin_lock_init(&new_fsl_chan->desc_lock);
@@ -955,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
+	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
+	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
 	fdev->common.dev = &dev->dev;
 
 	fdev->irq = irq_of_parse_and_map(dev->node, 0);
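With DMA_SLAVE in the capability mask and the device_prep_slave_sg/device_terminate_all hooks wired up, a client can drive a channel through the standard dmaengine flow of this era. The sketch below is illustrative only; the filter predicate, function names, and error handling are assumptions, not code from this patch.

    static bool fsldma_filter(struct dma_chan *chan, void *param)
    {
            /* Illustrative: accept the first DMA_SLAVE-capable channel */
            return true;
    }

    static int fsldma_do_slave_xfer(struct fsl_dma_slave *slave,
                                    struct scatterlist *sgl, unsigned int sg_len)
    {
            struct dma_async_tx_descriptor *tx;
            struct dma_chan *chan;
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            chan = dma_request_channel(mask, fsldma_filter, NULL);
            if (!chan)
                    return -ENODEV;

            /* fsl_dma_prep_slave_sg() reads its configuration from here */
            chan->private = slave;

            tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                    DMA_TO_DEVICE, 0);
            if (!tx) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            tx->tx_submit(tx);                      /* returns the cookie */
            chan->device->device_issue_pending(chan);

            return 0;
    }

If the transfer needs to be aborted, device_terminate_all() halts the engine and frees every descriptor still sitting on the channel's LD queue, as implemented above.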