-rw-r--r--   Documentation/powerpc/dts-bindings/fsl/dma.txt  |    8
-rw-r--r--   arch/arm/include/asm/hardware/iop3xx-adma.h     |   12
-rw-r--r--   arch/arm/mach-u300/include/mach/coh901318.h     |    2
-rw-r--r--   drivers/dma/Kconfig                             |   23
-rw-r--r--   drivers/dma/Makefile                            |    8
-rw-r--r--   drivers/dma/coh901318.c                         |  182
-rw-r--r--   drivers/dma/coh901318_lli.c                     |   23
-rw-r--r--   drivers/dma/dmatest.c                           |    8
-rw-r--r--   drivers/dma/fsldma.c                            | 1177
-rw-r--r--   drivers/dma/fsldma.h                            |   35
-rw-r--r--   drivers/dma/ioat/dma.c                          |   46
-rw-r--r--   drivers/dma/ioat/dma.h                          |   11
-rw-r--r--   drivers/dma/ioat/dma_v2.c                       |   70
-rw-r--r--   drivers/dma/ioat/dma_v2.h                       |    6
-rw-r--r--   drivers/dma/ioat/dma_v3.c                       |   64
-rw-r--r--   drivers/dma/ioat/registers.h                    |    2
-rw-r--r--   drivers/dma/ipu/ipu_idmac.c                     |   15
-rw-r--r--   drivers/dma/mpc512x_dma.c                       |  800
-rw-r--r--   drivers/dma/ppc4xx/adma.c                       |    2
-rw-r--r--   include/linux/dmaengine.h                       |    2
20 files changed, 1717 insertions(+), 779 deletions(-)
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/powerpc/dts-bindings/fsl/dma.txt
index 0732cdd05ba1..2a4b4bce6110 100644
--- a/Documentation/powerpc/dts-bindings/fsl/dma.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/dma.txt
@@ -44,21 +44,29 @@ Example:
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <0>;
 		reg = <0 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@80 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <1>;
 		reg = <0x80 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@100 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <2>;
 		reg = <0x100 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@180 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <3>;
 		reg = <0x180 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 };

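Note: the binding example above now gives every dma-channel node its own interrupt-parent/interrupts pair. As a rough illustration (not part of this patch; the function and names below are invented for the sketch), a driver probing such a child node would typically map and request that interrupt along these lines:

    #include <linux/of.h>
    #include <linux/of_irq.h>
    #include <linux/interrupt.h>

    /* Placeholder handler for the sketch */
    static irqreturn_t example_dma_chan_irq(int irq, void *data)
    {
            /* acknowledge and handle the per-channel interrupt here */
            return IRQ_HANDLED;
    }

    /*
     * Map the "interrupts" property of a dma-channel child node and install
     * a handler.  irq_of_parse_and_map() returns 0 on failure.
     */
    static int example_request_channel_irq(struct device_node *child, void *chan)
    {
            unsigned int irq = irq_of_parse_and_map(child, 0);

            if (!irq)
                    return -ENODEV;

            return request_irq(irq, example_dma_chan_irq, IRQF_SHARED,
                               "dma-channel", chan);
    }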
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 1a8c7279a28b..9b28f1243bdc 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}

-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;

 	return slot_cnt;
 }
@@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}

-	if (len)
-		slot_cnt += *slots_per_op;
+	slot_cnt += *slots_per_op;

 	return slot_cnt;
 }
@@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

-		if (len) {
-			iter = iop_hw_desc_slot_idx(hw_desc, i);
-			iter->byte_count = len;
-		}
+		iter = iop_hw_desc_slot_idx(hw_desc, i);
+		iter->byte_count = len;
 	}
 }

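Note: all three hunks above drop a trailing "if (len)" guard, so the slots (or byte count) for the final, possibly partial chunk are now accounted for unconditionally; presumably len is never zero at that point. A simplified, hypothetical sketch of the slot-count arithmetic, assuming len > 0:

    #include <linux/types.h>

    /*
     * Count slots for a transfer split into max_bytes-sized chunks; the
     * last chunk is added unconditionally, mirroring the change above.
     */
    static int example_slot_count(size_t len, int slots_per_op, size_t max_bytes)
    {
            int slot_cnt = 0;

            while (len > max_bytes) {
                    len -= max_bytes;
                    slot_cnt += slots_per_op;
            }
            slot_cnt += slots_per_op;   /* final, possibly partial chunk */

            return slot_cnt;
    }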
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index f4cfee9c7d28..b8155b4e5ffa 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -53,7 +53,7 @@ struct coh901318_params {
  * struct coh_dma_channel - dma channel base
  * @name: ascii name of dma channel
  * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
+ * @desc_nbr_max: number of preallocated descriptors
  * @priority_high: prio of channel, 0 low otherwise high.
  * @param: configuration parameters
  * @dev_addr: physical address of periphal connected to channel
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e02d74b1e892..c27f80e5d531 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
 	  DMA Device drivers supported by the configured arch, it may
 	  be empty in some cases.

+config DMADEVICES_DEBUG
+	bool "DMA Engine debugging"
+	depends on DMADEVICES != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+	bool "DMA Engine verbose debugging"
+	depends on DMADEVICES_DEBUG != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here.  This enables deeper (more verbose) debugging of
+	  the DMA engine core and drivers.
+
+
 if DMADEVICES

 comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
 	  Elo Plus is the DMA controller on 85xx and 86xx parts.

+config MPC512X_DMA
+	tristate "Freescale MPC512x built-in DMA engine support"
+	depends on PPC_MPC512x
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Freescale MPC512x built-in DMA engine.
+
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 807053d48232..22bba3d5e2b6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,17 @@
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+	EXTRA_CFLAGS	+= -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+	EXTRA_CFLAGS	+= -DVERBOSE_DEBUG
+endif
+
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
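Note: the new Kconfig options take effect through the -DDEBUG / -DVERBOSE_DEBUG defines added to the Makefile above. In the kernel, dev_dbg() only produces output when DEBUG (or dynamic debug) is enabled, and dev_vdbg() additionally requires VERBOSE_DEBUG, so these options switch on debug statements that already exist in the DMA drivers rather than adding new ones. A minimal illustration (the function and messages are invented):

    #include <linux/device.h>

    static void example_dma_debug(struct device *dev, int chan_id)
    {
            /* compiled in when the file is built with -DDEBUG */
            dev_dbg(dev, "channel %d: descriptor queued\n", chan_id);

            /* additionally requires -DVERBOSE_DEBUG */
            dev_vdbg(dev, "channel %d: verbose per-descriptor trace\n", chan_id);
    }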
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 64a937262a40..1656fdcdb6c2 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
 	unsigned int sg_len;
 	struct coh901318_lli *data;
 	enum dma_data_direction dir;
-	int pending_irqs;
 	unsigned long flags;
 };

@@ -72,7 +71,6 @@ struct coh901318_chan {

 	unsigned long nbr_active_done;
 	unsigned long busy;
-	int pending_irqs;

 	struct coh901318_base *base;
 };
@@ -80,18 +78,16 @@ struct coh901318_chan {
 static void coh901318_list_print(struct coh901318_chan *cohc,
 				 struct coh901318_lli *lli)
 {
-	struct coh901318_lli *l;
-	dma_addr_t addr = virt_to_phys(lli);
+	struct coh901318_lli *l = lli;
 	int i = 0;

-	while (addr) {
-		l = phys_to_virt(addr);
+	while (l) {
 		dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
-			 ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+			 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
 			 i, l, l->control, l->src_addr, l->dst_addr,
-			 l->link_addr, phys_to_virt(l->link_addr));
+			 l->link_addr, l->virt_link_addr);
 		i++;
-		addr = l->link_addr;
+		l = l->virt_link_addr;
 	}
 }

@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 		goto err_kmalloc;
 	tmp = dev_buf;

-	tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");

 	for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
 		if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
 		 * TODO: alloc a pile of descs instead of just one,
 		 * avoid many small allocations.
 		 */
-		desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+		desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
 		if (desc == NULL)
 			goto out;
 		INIT_LIST_HEAD(&desc->node);
+		dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
 	} else {
 		/* Reuse an old desc. */
 		desc = list_first_entry(&cohc->free,
 					struct coh901318_desc,
 					node);
 		list_del(&desc->node);
+		/* Initialize it a bit so it's not insane */
+		desc->sg = NULL;
+		desc->sg_len = 0;
+		desc->desc.callback = NULL;
+		desc->desc.callback_param = NULL;
 	}

 out:
@@ -364,10 +366,6 @@ static void
 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
 {
 	list_add_tail(&desc->node, &cohc->active);
-
-	BUG_ON(cohc->pending_irqs != 0);
-
-	cohc->pending_irqs = desc->pending_irqs;
 }

 static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
 	return cohd_que;
 }

+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
 static void dma_tasklet(unsigned long data)
 {
 	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
 	dma_async_tx_callback callback;
 	void *callback_param;

+	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+		 " nbr_active_done %ld\n", __func__,
+		 cohc->id, cohc->nbr_active_done);
+
 	spin_lock_irqsave(&cohc->lock, flags);

-	/* get first active entry from list */
+	/* get first active descriptor entry from list */
 	cohd_fin = coh901318_first_active_get(cohc);

-	BUG_ON(cohd_fin->pending_irqs == 0);
-
 	if (cohd_fin == NULL)
 		goto err;

-	cohd_fin->pending_irqs--;
-	cohc->completed = cohd_fin->desc.cookie;
+	/* locate callback to client */
+	callback = cohd_fin->desc.callback;
+	callback_param = cohd_fin->desc.callback_param;

-	if (cohc->nbr_active_done == 0)
-		return;
+	/* sign this job as completed on the channel */
+	cohc->completed = cohd_fin->desc.cookie;

-	if (!cohd_fin->pending_irqs) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
-	}
+	/* release the lli allocation and remove the descriptor */
+	coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);

-	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
-		 " nbr_active_done %ld\n", __func__,
-		 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+	/* return desc to free-list */
+	coh901318_desc_remove(cohd_fin);
+	coh901318_desc_free(cohc, cohd_fin);

-	/* callback to client */
-	callback = cohd_fin->desc.callback;
-	callback_param = cohd_fin->desc.callback_param;
-
-	if (!cohd_fin->pending_irqs) {
-		coh901318_desc_remove(cohd_fin);
+	spin_unlock_irqrestore(&cohc->lock, flags);

-		/* return desc to free-list */
-		coh901318_desc_free(cohc, cohd_fin);
-	}
+	/* Call the callback when we're done */
+	if (callback)
+		callback(callback_param);

-	if (cohc->nbr_active_done)
-		cohc->nbr_active_done--;
+	spin_lock_irqsave(&cohc->lock, flags);

+	/*
+	 * If another interrupt fired while the tasklet was scheduling,
+	 * we don't get called twice, so we have this number of active
+	 * counter that keep track of the number of IRQs expected to
+	 * be handled for this channel. If there happen to be more than
+	 * one IRQ to be ack:ed, we simply schedule this tasklet again.
+	 */
+	cohc->nbr_active_done--;
 	if (cohc->nbr_active_done) {
+		dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+			"came in while we were scheduling this tasklet\n");
 		if (cohc_chan_conf(cohc)->priority_high)
 			tasklet_hi_schedule(&cohc->tasklet);
 		else
 			tasklet_schedule(&cohc->tasklet);
 	}
-	spin_unlock_irqrestore(&cohc->lock, flags);

-	if (callback)
-		callback(callback_param);
+	spin_unlock_irqrestore(&cohc->lock, flags);

 	return;

@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 	if (!cohc->allocated)
 		return;

-	BUG_ON(cohc->pending_irqs == 0);
+	spin_lock(&cohc->lock);

-	cohc->pending_irqs--;
 	cohc->nbr_active_done++;

-	if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+	if (coh901318_queue_start(cohc) == NULL)
 		cohc->busy = 0;

 	BUG_ON(list_empty(&cohc->active));

+	spin_unlock(&cohc->lock);
+
 	if (cohc_chan_conf(cohc)->priority_high)
 		tasklet_hi_schedule(&cohc->tasklet);
 	else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	int lli_len;
 	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+	int ret;

 	spin_lock_irqsave(&cohc->lock, flg);

@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (data == NULL)
 		goto err;

-	cohd = coh901318_desc_get(cohc);
-	cohd->sg = NULL;
-	cohd->sg_len = 0;
-	cohd->data = data;
-
-	cohd->pending_irqs =
-		coh901318_lli_fill_memcpy(
-			&cohc->base->pool, data, src, size, dest,
-			cohc_chan_param(cohc)->ctrl_lli_chained,
-			ctrl_last);
-	cohd->flags = flags;
+	ret = coh901318_lli_fill_memcpy(
+		&cohc->base->pool, data, src, size, dest,
+		cohc_chan_param(cohc)->ctrl_lli_chained,
+		ctrl_last);
+	if (ret)
+		goto err;

 	COH_DBG(coh901318_list_print(cohc, data));

-	dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+	/* Pick a descriptor to handle this transfer */
+	cohd = coh901318_desc_get(cohc);
+	cohd->data = data;
+	cohd->flags = flags;
 	cohd->desc.tx_submit = coh901318_tx_submit;

 	spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	struct coh901318_lli *data;
 	struct coh901318_desc *cohd;
+	const struct coh901318_params *params;
 	struct scatterlist *sg;
 	int len = 0;
 	int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
 	u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
 	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+	u32 config;
 	unsigned long flg;
+	int ret;

 	if (!sgl)
 		goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	/* Trigger interrupt after last lli */
 	ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;

-	cohd = coh901318_desc_get(cohc);
-	cohd->sg = NULL;
-	cohd->sg_len = 0;
-	cohd->dir = direction;
+	params = cohc_chan_param(cohc);
+	config = params->config;

 	if (direction == DMA_TO_DEVICE) {
 		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
 			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;

+		config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
 		ctrl_chained |= tx_flags;
 		ctrl_last |= tx_flags;
 		ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
 			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;

+		config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
 		ctrl_chained |= rx_flags;
 		ctrl_last |= rx_flags;
 		ctrl |= rx_flags;
 	} else
 		goto err_direction;

-	dma_async_tx_descriptor_init(&cohd->desc, chan);
-
-	cohd->desc.tx_submit = coh901318_tx_submit;
-
+	coh901318_set_conf(cohc, config);

 	/* The dma only supports transmitting packages up to
 	 * MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		len += factor;
 	}

+	pr_debug("Allocate %d lli:s for this transfer\n", len);
 	data = coh901318_lli_alloc(&cohc->base->pool, len);

 	if (data == NULL)
 		goto err_dma_alloc;

 	/* initiate allocated data list */
-	cohd->pending_irqs =
-		coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
-				      cohc_dev_addr(cohc),
-				      ctrl_chained,
-				      ctrl,
-				      ctrl_last,
-				      direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
-	cohd->data = data;
-
-	cohd->flags = flags;
+	ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+				    cohc_dev_addr(cohc),
+				    ctrl_chained,
+				    ctrl,
+				    ctrl_last,
+				    direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+	if (ret)
+		goto err_lli_fill;

 	COH_DBG(coh901318_list_print(cohc, data));

+	/* Pick a descriptor to handle this transfer */
+	cohd = coh901318_desc_get(cohc);
+	cohd->dir = direction;
+	cohd->flags = flags;
+	cohd->desc.tx_submit = coh901318_tx_submit;
+	cohd->data = data;
+
 	spin_unlock_irqrestore(&cohc->lock, flg);

 	return &cohd->desc;
+ err_lli_fill:
 err_dma_alloc:
 err_direction:
-	coh901318_desc_remove(cohd);
-	coh901318_desc_free(cohc, cohd);
 	spin_unlock_irqrestore(&cohc->lock, flg);
 out:
 	return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
 		/* release the lli allocation*/
 		coh901318_lli_free(&cohc->base->pool, &cohd->data);

-		coh901318_desc_remove(cohd);
-
 		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
 		coh901318_desc_free(cohc, cohd);
 	}

@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
 		/* release the lli allocation*/
 		coh901318_lli_free(&cohc->base->pool, &cohd->data);

-		coh901318_desc_remove(cohd);
-
 		/* return desc to free-list */
+		coh901318_desc_remove(cohd);
 		coh901318_desc_free(cohc, cohd);
 	}


 	cohc->nbr_active_done = 0;
 	cohc->busy = 0;
-	cohc->pending_irqs = 0;

 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,

 		spin_lock_init(&cohc->lock);

-		cohc->pending_irqs = 0;
 		cohc->nbr_active_done = 0;
 		cohc->busy = 0;
 		INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
 	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
+	/*
+	 * This controller can only access address at even 32bit boundaries,
+	 * i.e. 2^2
+	 */
+	base->dma_memcpy.copy_align = 2;
 	err = dma_async_device_register(&base->dma_memcpy);

 	if (err)
 		goto err_register_memcpy;

-	dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+	dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
 		(u32) base->virtbase);

 	return err;
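Note: the new copy_align = 2 advertises the controller's alignment requirement through the dmaengine API; the value is a power-of-two exponent, so 2 means memcpy addresses and lengths should be 4-byte aligned. An illustrative client-side check (a sketch, not taken from the patch):

    #include <linux/types.h>

    /* True if src, dst and len all satisfy a 2^copy_align byte alignment. */
    static bool example_copy_aligned(dma_addr_t src, dma_addr_t dst,
                                     size_t len, unsigned int copy_align)
    {
            size_t mask = ((size_t)1 << copy_align) - 1;

            return ((src | dst | len) & mask) == 0;
    }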
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f238a4d..71d58c1a1e86 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)

 	lli = head;
 	lli->phy_this = phy;
+	lli->link_addr = 0x00000000;
+	lli->virt_link_addr = 0x00000000U;

 	for (i = 1; i < len; i++) {
 		lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)

 		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
 		lli->phy_this = phy;
+		lli->link_addr = 0x00000000;
+		lli->virt_link_addr = 0x00000000U;

 		lli_prev->link_addr = phy;
 		lli_prev->virt_link_addr = lli;
 	}

-	lli->link_addr = 0x00000000U;
-
 	spin_unlock(&pool->lock);

 	return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;

-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }

 int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 	lli->src_addr = src;
 	lli->dst_addr = dst;

-	/* One irq per single transfer */
-	return 1;
+	return 0;
 }

 int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	u32 ctrl_sg;
 	dma_addr_t src = 0;
 	dma_addr_t dst = 0;
-	int nbr_of_irq = 0;
 	u32 bytes_to_transfer;
 	u32 elem_size;

@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		ctrl_sg = ctrl ? ctrl : ctrl_last;


-		if ((ctrl_sg & ctrl_irq_mask))
-			nbr_of_irq++;
-
 		if (dir == DMA_TO_DEVICE)
 			/* increment source address */
-			src = sg_dma_address(sg);
+			src = sg_phys(sg);
 		else
 			/* increment destination address */
-			dst = sg_dma_address(sg);
+			dst = sg_phys(sg);

 		bytes_to_transfer = sg_dma_len(sg);

@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 	}
 	spin_unlock(&pool->lock);

-	/* There can be many IRQs per sg transfer */
-	return nbr_of_irq;
+	return 0;
 err:
 	spin_unlock(&pool->lock);
 	return -EINVAL;
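Note: with the allocator changes above, every item in a freshly allocated LLI chain starts with link_addr and virt_link_addr cleared, so the chain is explicitly zero/NULL-terminated and can be walked through the kernel-virtual links, as the reworked coh901318_list_print() now does. A rough sketch of such a traversal (the struct layout is abridged and renamed for illustration):

    #include <linux/types.h>

    struct example_lli {
            u32 control;
            dma_addr_t src_addr;
            dma_addr_t dst_addr;
            dma_addr_t link_addr;               /* bus address of next item, 0 ends the chain */
            struct example_lli *virt_link_addr; /* CPU-visible pointer to the next item */
    };

    static unsigned int example_lli_chain_len(const struct example_lli *lli)
    {
            unsigned int n = 0;

            for (; lli; lli = lli->virt_link_addr)
                    n++;

            return n;
    }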
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 948d563941c9..6fa55fe3dd24 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
 	dma_cookie_t cookie;
 	enum dma_status status;
 	enum dma_ctrl_flags flags;
-	u8 pq_coefs[pq_sources];
+	u8 pq_coefs[pq_sources + 1];
 	int ret;
 	int src_cnt;
 	int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
 	} else if (thread->type == DMA_PQ) {
 		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
 		dst_cnt = 2;
-		for (i = 0; i < pq_sources; i++)
+		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dma_dsts[0] + dst_off,
-						      dma_srcs, xor_sources,
+						      dma_srcs, src_cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
 			dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
 			for (i = 0; i < dst_cnt; i++)
 				dma_pq[i] = dma_dsts[i] + dst_off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
-						     pq_sources, pq_coefs,
+						     src_cnt, pq_coefs,
 						     len, flags);
 		}

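Note on the dmatest fix: for PQ tests the source count is forced odd with "src_cnt = pq_sources | 1", which equals pq_sources + 1 whenever the pq_sources module parameter is even. The coefficient array therefore needs pq_sources + 1 entries, and the preparation calls above now consistently pass src_cnt instead of the raw module parameters. A tiny standalone illustration of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pq_sources = 4;            /* example parameter value */
            unsigned int src_cnt = pq_sources | 1;  /* 4 | 1 == 5 > pq_sources */

            printf("pq_sources=%u -> src_cnt=%u\n", pq_sources, src_cnt);
            return 0;
    }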
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e747fac..bbb4be5a3ff4 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"

-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
 {
 	/* Reset the channel */
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+	DMA_OUT(chan, &chan->regs->mr, 0, 32);

-	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+	switch (chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
 		/* Set the channel to below modes:
 		 * EIE - Error interrupt enable
 		 * EOSIE - End of segments interrupt enable (basic mode)
 		 * EOLNIE - End of links interrupt enable
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
 			| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
 		break;
 	case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 		 * EOTIE - End-of-transfer interrupt enable
 		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
 			| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
-
 }

-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *chan, u32 val)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+	DMA_OUT(chan, &chan->regs->sr, val, 32);
 }

-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+	return DMA_IN(chan, &chan->regs->sr, 32);
 }

-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *chan,
 				struct fsl_dma_ld_hw *hw, u32 count)
 {
-	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+	hw->count = CPU_TO_DMA(chan, count, 32);
 }

-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *chan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
 	u64 snoop_bits;

-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }

-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
-				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+static void set_desc_dst(struct fsldma_chan *chan,
+				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
 	u64 snoop_bits;

-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }

-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *chan,
 				struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
 	u64 snoop_bits;

-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 		? FSL_DMA_SNEN : 0;
-	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
-}
-
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
-{
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
 }

-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }

-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
 {
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }

-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+	return DMA_IN(chan, &chan->regs->ndar, 64);
 }

-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *chan)
 {
-	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+	return DMA_IN(chan, &chan->regs->bcr, 32);
 }

-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *chan)
 {
-	u32 sr = get_sr(fsl_chan);
+	u32 sr = get_sr(chan);
 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }

-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *chan)
 {
-	u32 mr_set = 0;
+	u32 mode;

-	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
-		mr_set |= FSL_DMA_MR_EMP_EN;
-	} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-				& ~FSL_DMA_MR_EMP_EN, 32);
+	mode = DMA_IN(chan, &chan->regs->mr, 32);
+
+	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+			mode |= FSL_DMA_MR_EMP_EN;
+		} else {
+			mode &= ~FSL_DMA_MR_EMP_EN;
+		}
 	}

-	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
-		mr_set |= FSL_DMA_MR_EMS_EN;
+	if (chan->feature & FSL_DMA_CHAN_START_EXT)
+		mode |= FSL_DMA_MR_EMS_EN;
 	else
-		mr_set |= FSL_DMA_MR_CS;
+		mode |= FSL_DMA_MR_CS;

-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| mr_set, 32);
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }

-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *chan)
 {
+	u32 mode;
 	int i;

-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
-		32);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
-		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+	mode = DMA_IN(chan, &chan->regs->mr, 32);
+	mode |= FSL_DMA_MR_CA;
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);

 	for (i = 0; i < 100; i++) {
-		if (dma_is_idle(fsl_chan))
-			break;
+		if (dma_is_idle(chan))
+			return;
+
 		udelay(10);
 	}
-	if (i >= 100 && !dma_is_idle(fsl_chan))
-		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+
+	if (!dma_is_idle(chan))
+		dev_err(chan->dev, "DMA halt timeout!\n");
 }

-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *chan,
 			struct fsl_desc_sw *desc)
 {
 	u64 snoop_bits;

-	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
 		? FSL_DMA_SNEN : 0;

-	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
 			| snoop_bits, 64);
 }

-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
-			struct fsl_desc_sw *new_desc)
-{
-	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
-
-	if (list_empty(&fsl_chan->ld_queue))
-		return;
-
-	/* Link to the new descriptor physical address and
-	 * Enable End-of-segment interrupt for
-	 * the last link descriptor.
-	 * (the previous node's next link descriptor)
-	 *
-	 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
-	 */
-	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-			new_desc->async_tx.phys | FSL_DMA_EOSIE |
-			(((fsl_chan->feature & FSL_DMA_IP_MASK)
-				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
 /**
  * fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @size     : Address loop size, 0 for disable loop
  *
  * The set source address hold transfer size. The source
@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
  * SA + 1 ... and so on.
  */
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(chan, &chan->regs->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_SAHE), 32);
+		mode &= ~FSL_DMA_MR_SAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
-			32);
+		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
 		break;
 	}
+
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }

 /**
- * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
+ * @chan : Freescale DMA channel
  * @size     : Address loop size, 0 for disable loop
  *
  * The set destination address hold transfer size. The destination
@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
  * TA + 1 ... and so on.
  */
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
 {
+	u32 mode;
+
+	mode = DMA_IN(chan, &chan->regs->mr, 32);
+
 	switch (size) {
 	case 0:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
-			(~FSL_DMA_MR_DAHE), 32);
+		mode &= ~FSL_DMA_MR_DAHE;
 		break;
 	case 1:
 	case 2:
 	case 4:
 	case 8:
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
-			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
-			32);
+		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
 		break;
 	}
+
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }

 /**
  * fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @size     : Number of bytes to transfer in a single request
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
  *
  * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
 {
+	u32 mode;
+
 	BUG_ON(size > 1024);
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
-		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
-			| ((__ilog2(size) << 24) & 0x0f000000),
-		32);
+
+	mode = DMA_IN(chan, &chan->regs->mr, 32);
+	mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }

 /**
  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @enable   : 0 is disabled, 1 is enabled.
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
  * The DMA Request Count feature should be used in addition to this feature
  * to set the number of bytes to transfer before pausing the channel.
  */
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
 {
 	if (enable)
-		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
 	else
-		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 }

 /**
  * fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @enable   : 0 is disabled, 1 is enabled.
  *
  * If enable the external start, the channel can be started by an
| @@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) | |||
| 328 | * transfer immediately. The DMA channel will wait for the | 309 | * transfer immediately. The DMA channel will wait for the |
| 329 | * control pin asserted. | 310 | * control pin asserted. |
| 330 | */ | 311 | */ |
| 331 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | 312 | static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) |
| 332 | { | 313 | { |
| 333 | if (enable) | 314 | if (enable) |
| 334 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | 315 | chan->feature |= FSL_DMA_CHAN_START_EXT; |
| 335 | else | 316 | else |
| 336 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 317 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
| 318 | } | ||
| 319 | |||
| 320 | static void append_ld_queue(struct fsldma_chan *chan, | ||
| 321 | struct fsl_desc_sw *desc) | ||
| 322 | { | ||
| 323 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | ||
| 324 | |||
| 325 | if (list_empty(&chan->ld_pending)) | ||
| 326 | goto out_splice; | ||
| 327 | |||
| 328 | /* | ||
| 329 | * Add the hardware descriptor to the chain of hardware descriptors | ||
| 330 | * that already exists in memory. | ||
| 331 | * | ||
| 332 | * This will un-set the EOL bit of the existing transaction, and the | ||
| 333 | * last link in this transaction will become the EOL descriptor. | ||
| 334 | */ | ||
| 335 | set_desc_next(chan, &tail->hw, desc->async_tx.phys); | ||
| 336 | |||
| 337 | /* | ||
| 338 | * Add the software descriptor and all children to the list | ||
| 339 | * of pending transactions | ||
| 340 | */ | ||
| 341 | out_splice: | ||
| 342 | list_splice_tail_init(&desc->tx_list, &chan->ld_pending); | ||
| 337 | } | 343 | } |
| 338 | 344 | ||
| 339 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 345 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
| 340 | { | 346 | { |
| 341 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | 347 | struct fsldma_chan *chan = to_fsl_chan(tx->chan); |
| 342 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | 348 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
| 343 | struct fsl_desc_sw *child; | 349 | struct fsl_desc_sw *child; |
| 344 | unsigned long flags; | 350 | unsigned long flags; |
| 345 | dma_cookie_t cookie; | 351 | dma_cookie_t cookie; |
| 346 | 352 | ||
| 347 | /* cookie increment and adding to ld_queue must be atomic */ | 353 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 348 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
| 349 | 354 | ||
| 350 | cookie = fsl_chan->common.cookie; | 355 | /* |
| 356 | * assign cookies to all of the software descriptors | ||
| 357 | * that make up this transaction | ||
| 358 | */ | ||
| 359 | cookie = chan->common.cookie; | ||
| 351 | list_for_each_entry(child, &desc->tx_list, node) { | 360 | list_for_each_entry(child, &desc->tx_list, node) { |
| 352 | cookie++; | 361 | cookie++; |
| 353 | if (cookie < 0) | 362 | if (cookie < 0) |
| 354 | cookie = 1; | 363 | cookie = 1; |
| 355 | 364 | ||
| 356 | desc->async_tx.cookie = cookie; | 365 | child->async_tx.cookie = cookie; |
| 357 | } | 366 | } |
| 358 | 367 | ||
| 359 | fsl_chan->common.cookie = cookie; | 368 | chan->common.cookie = cookie; |
| 360 | append_ld_queue(fsl_chan, desc); | 369 | |
| 361 | list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); | 370 | /* put this transaction onto the tail of the pending queue */ |
| 371 | append_ld_queue(chan, desc); | ||
| 362 | 372 | ||
| 363 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 373 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
| 364 | 374 | ||
| 365 | return cookie; | 375 | return cookie; |
| 366 | } | 376 | } |
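The submit path assigns one cookie per child descriptor and wraps rather than ever handing out a negative value. A minimal sketch of that rule, pulled out as a hypothetical helper (not part of the driver) for illustration:

        /* hypothetical helper mirroring the wrap-around rule in fsl_dma_tx_submit() */
        static dma_cookie_t fsldma_next_cookie(dma_cookie_t cookie)
        {
                if (++cookie < 0)       /* dma_cookie_t is signed; negatives mean errors */
                        cookie = 1;     /* so valid cookies wrap back to 1 */
                return cookie;
        }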
| 367 | 377 | ||
| 368 | /** | 378 | /** |
| 369 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | 379 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. |
| 370 | * @fsl_chan : Freescale DMA channel | 380 | * @chan : Freescale DMA channel |
| 371 | * | 381 | * |
| 372 | * Return - The descriptor allocated. NULL on failure. | 382 | * Return - The descriptor allocated. NULL on failure. |

| 373 | */ | 383 | */ |
| 374 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | 384 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( |
| 375 | struct fsl_dma_chan *fsl_chan) | 385 | struct fsldma_chan *chan) |
| 376 | { | 386 | { |
| 387 | struct fsl_desc_sw *desc; | ||
| 377 | dma_addr_t pdesc; | 388 | dma_addr_t pdesc; |
| 378 | struct fsl_desc_sw *desc_sw; | 389 | |
| 379 | 390 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | |
| 380 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | 391 | if (!desc) { |
| 381 | if (desc_sw) { | 392 | dev_dbg(chan->dev, "out of memory for link desc\n"); |
| 382 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | 393 | return NULL; |
| 383 | INIT_LIST_HEAD(&desc_sw->tx_list); | ||
| 384 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
| 385 | &fsl_chan->common); | ||
| 386 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
| 387 | desc_sw->async_tx.phys = pdesc; | ||
| 388 | } | 394 | } |
| 389 | 395 | ||
| 390 | return desc_sw; | 396 | memset(desc, 0, sizeof(*desc)); |
| 397 | INIT_LIST_HEAD(&desc->tx_list); | ||
| 398 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | ||
| 399 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | ||
| 400 | desc->async_tx.phys = pdesc; | ||
| 401 | |||
| 402 | return desc; | ||
| 391 | } | 403 | } |
| 392 | 404 | ||
| 393 | 405 | ||
| 394 | /** | 406 | /** |
| 395 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 407 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
| 396 | * @fsl_chan : Freescale DMA channel | 408 | * @chan : Freescale DMA channel |
| 397 | * | 409 | * |
| 398 | * This function will create a dma pool for descriptor allocation. | 410 | * This function will create a dma pool for descriptor allocation. |
| 399 | * | 411 | * |
| 400 | * Return - The number of descriptors allocated. | 412 | * Return - The number of descriptors allocated. |
| 401 | */ | 413 | */ |
| 402 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | 414 | static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) |
| 403 | { | 415 | { |
| 404 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 416 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
| 405 | 417 | ||
| 406 | /* Has this channel already been allocated? */ | 418 | /* Has this channel already been allocated? */ |
| 407 | if (fsl_chan->desc_pool) | 419 | if (chan->desc_pool) |
| 408 | return 1; | 420 | return 1; |
| 409 | 421 | ||
| 410 | /* We need the descriptor to be aligned to 32bytes | 422 | /* |
| 423 | * We need the descriptor to be aligned to 32bytes | ||
| 411 | * for meeting FSL DMA specification requirement. | 424 | * for meeting FSL DMA specification requirement. |
| 412 | */ | 425 | */ |
| 413 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | 426 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", |
| 414 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | 427 | chan->dev, |
| 415 | 32, 0); | 428 | sizeof(struct fsl_desc_sw), |
| 416 | if (!fsl_chan->desc_pool) { | 429 | __alignof__(struct fsl_desc_sw), 0); |
| 417 | dev_err(fsl_chan->dev, "No memory for channel %d " | 430 | if (!chan->desc_pool) { |
| 418 | "descriptor dma pool.\n", fsl_chan->id); | 431 | dev_err(chan->dev, "unable to allocate channel %d " |
| 419 | return 0; | 432 | "descriptor pool\n", chan->id); |
| 433 | return -ENOMEM; | ||
| 420 | } | 434 | } |
| 421 | 435 | ||
| 436 | /* there is at least one descriptor free to be allocated */ | ||
| 422 | return 1; | 437 | return 1; |
| 423 | } | 438 | } |
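Taken together with fsl_dma_alloc_descriptor() above and fsl_dma_free_chan_resources() below, the descriptor memory follows the standard dma_pool lifecycle. A condensed sketch of the calls involved (error handling omitted):

        /* created once per channel (alloc_chan_resources) */
        pool = dma_pool_create("fsl_dma_engine_desc_pool", dev,
                               sizeof(struct fsl_desc_sw),
                               __alignof__(struct fsl_desc_sw), 0);

        /* one allocation per link descriptor (fsl_dma_alloc_descriptor) */
        desc = dma_pool_alloc(pool, GFP_ATOMIC, &phys);  /* phys is what the hardware chases */

        /* per-descriptor release, then teardown (free_chan_resources) */
        dma_pool_free(pool, desc, phys);
        dma_pool_destroy(pool);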
| 424 | 439 | ||
| 425 | /** | 440 | /** |
| 426 | * fsl_dma_free_chan_resources - Free all resources of the channel. | 441 | * fsldma_free_desc_list - Free all descriptors in a queue |
| 427 | * @fsl_chan : Freescale DMA channel | 442 | * @chan: Freescale DMA channel |
| 443 | * @list: the list to free | ||
| 444 | * | ||
| 445 | * LOCKING: must hold chan->desc_lock | ||
| 428 | */ | 446 | */ |
| 429 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | 447 | static void fsldma_free_desc_list(struct fsldma_chan *chan, |
| 448 | struct list_head *list) | ||
| 430 | { | 449 | { |
| 431 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
| 432 | struct fsl_desc_sw *desc, *_desc; | 450 | struct fsl_desc_sw *desc, *_desc; |
| 433 | unsigned long flags; | ||
| 434 | 451 | ||
| 435 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | 452 | list_for_each_entry_safe(desc, _desc, list, node) { |
| 436 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 453 | list_del(&desc->node); |
| 437 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | 454 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
| 438 | #ifdef FSL_DMA_LD_DEBUG | 455 | } |
| 439 | dev_dbg(fsl_chan->dev, | 456 | } |
| 440 | "LD %p will be released.\n", desc); | 457 | |
| 441 | #endif | 458 | static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, |
| 459 | struct list_head *list) | ||
| 460 | { | ||
| 461 | struct fsl_desc_sw *desc, *_desc; | ||
| 462 | |||
| 463 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | ||
| 442 | list_del(&desc->node); | 464 | list_del(&desc->node); |
| 443 | /* free link descriptor */ | 465 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
| 444 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
| 445 | } | 466 | } |
| 446 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 467 | } |
| 447 | dma_pool_destroy(fsl_chan->desc_pool); | 468 | |
| 469 | /** | ||
| 470 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
| 471 | * @chan : Freescale DMA channel | ||
| 472 | */ | ||
| 473 | static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | ||
| 474 | { | ||
| 475 | struct fsldma_chan *chan = to_fsl_chan(dchan); | ||
| 476 | unsigned long flags; | ||
| 477 | |||
| 478 | dev_dbg(chan->dev, "Free all channel resources.\n"); | ||
| 479 | spin_lock_irqsave(&chan->desc_lock, flags); | ||
| 480 | fsldma_free_desc_list(chan, &chan->ld_pending); | ||
| 481 | fsldma_free_desc_list(chan, &chan->ld_running); | ||
| 482 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 448 | 483 | ||
| 449 | fsl_chan->desc_pool = NULL; | 484 | dma_pool_destroy(chan->desc_pool); |
| 485 | chan->desc_pool = NULL; | ||
| 450 | } | 486 | } |
| 451 | 487 | ||
| 452 | static struct dma_async_tx_descriptor * | 488 | static struct dma_async_tx_descriptor * |
| 453 | fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) | 489 | fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) |
| 454 | { | 490 | { |
| 455 | struct fsl_dma_chan *fsl_chan; | 491 | struct fsldma_chan *chan; |
| 456 | struct fsl_desc_sw *new; | 492 | struct fsl_desc_sw *new; |
| 457 | 493 | ||
| 458 | if (!chan) | 494 | if (!dchan) |
| 459 | return NULL; | 495 | return NULL; |
| 460 | 496 | ||
| 461 | fsl_chan = to_fsl_chan(chan); | 497 | chan = to_fsl_chan(dchan); |
| 462 | 498 | ||
| 463 | new = fsl_dma_alloc_descriptor(fsl_chan); | 499 | new = fsl_dma_alloc_descriptor(chan); |
| 464 | if (!new) { | 500 | if (!new) { |
| 465 | dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); | 501 | dev_err(chan->dev, "No free memory for link descriptor\n"); |
| 466 | return NULL; | 502 | return NULL; |
| 467 | } | 503 | } |
| 468 | 504 | ||
| @@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) | |||
| 473 | list_add_tail(&new->node, &new->tx_list); | 509 | list_add_tail(&new->node, &new->tx_list); |
| 474 | 510 | ||
| 475 | /* Set End-of-link to the last link descriptor of new list*/ | 511 | /* Set End-of-link to the last link descriptor of new list*/ |
| 476 | set_ld_eol(fsl_chan, new); | 512 | set_ld_eol(chan, new); |
| 477 | 513 | ||
| 478 | return &new->async_tx; | 514 | return &new->async_tx; |
| 479 | } | 515 | } |
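fsl_dma_prep_interrupt() backs the DMA_INTERRUPT capability: a NULL (zero-length) descriptor whose only job is to raise a completion callback. A hedged sketch of how a dmaengine client might use it, via the generic API rather than driver internals (my_callback/my_data are placeholders):

        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = my_callback;     /* invoked from the cleanup tasklet */
        tx->callback_param = my_data;
        cookie = tx->tx_submit(tx);     /* lands in fsl_dma_tx_submit() */
        dma_async_issue_pending(chan);  /* kicks fsl_dma_memcpy_issue_pending() */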
| 480 | 516 | ||
| 481 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | 517 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( |
| 482 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 518 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, |
| 483 | size_t len, unsigned long flags) | 519 | size_t len, unsigned long flags) |
| 484 | { | 520 | { |
| 485 | struct fsl_dma_chan *fsl_chan; | 521 | struct fsldma_chan *chan; |
| 486 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | 522 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; |
| 487 | struct list_head *list; | ||
| 488 | size_t copy; | 523 | size_t copy; |
| 489 | 524 | ||
| 490 | if (!chan) | 525 | if (!dchan) |
| 491 | return NULL; | 526 | return NULL; |
| 492 | 527 | ||
| 493 | if (!len) | 528 | if (!len) |
| 494 | return NULL; | 529 | return NULL; |
| 495 | 530 | ||
| 496 | fsl_chan = to_fsl_chan(chan); | 531 | chan = to_fsl_chan(dchan); |
| 497 | 532 | ||
| 498 | do { | 533 | do { |
| 499 | 534 | ||
| 500 | /* Allocate the link descriptor from DMA pool */ | 535 | /* Allocate the link descriptor from DMA pool */ |
| 501 | new = fsl_dma_alloc_descriptor(fsl_chan); | 536 | new = fsl_dma_alloc_descriptor(chan); |
| 502 | if (!new) { | 537 | if (!new) { |
| 503 | dev_err(fsl_chan->dev, | 538 | dev_err(chan->dev, |
| 504 | "No free memory for link descriptor\n"); | 539 | "No free memory for link descriptor\n"); |
| 505 | goto fail; | 540 | goto fail; |
| 506 | } | 541 | } |
| 507 | #ifdef FSL_DMA_LD_DEBUG | 542 | #ifdef FSL_DMA_LD_DEBUG |
| 508 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | 543 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); |
| 509 | #endif | 544 | #endif |
| 510 | 545 | ||
| 511 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | 546 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
| 512 | 547 | ||
| 513 | set_desc_cnt(fsl_chan, &new->hw, copy); | 548 | set_desc_cnt(chan, &new->hw, copy); |
| 514 | set_desc_src(fsl_chan, &new->hw, dma_src); | 549 | set_desc_src(chan, &new->hw, dma_src); |
| 515 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | 550 | set_desc_dst(chan, &new->hw, dma_dst); |
| 516 | 551 | ||
| 517 | if (!first) | 552 | if (!first) |
| 518 | first = new; | 553 | first = new; |
| 519 | else | 554 | else |
| 520 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | 555 | set_desc_next(chan, &prev->hw, new->async_tx.phys); |
| 521 | 556 | ||
| 522 | new->async_tx.cookie = 0; | 557 | new->async_tx.cookie = 0; |
| 523 | async_tx_ack(&new->async_tx); | 558 | async_tx_ack(&new->async_tx); |
| @@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
| 525 | prev = new; | 560 | prev = new; |
| 526 | len -= copy; | 561 | len -= copy; |
| 527 | dma_src += copy; | 562 | dma_src += copy; |
| 528 | dma_dest += copy; | 563 | dma_dst += copy; |
| 529 | 564 | ||
| 530 | /* Insert the link descriptor to the LD ring */ | 565 | /* Insert the link descriptor to the LD ring */ |
| 531 | list_add_tail(&new->node, &first->tx_list); | 566 | list_add_tail(&new->node, &first->tx_list); |
| @@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
| 535 | new->async_tx.cookie = -EBUSY; | 570 | new->async_tx.cookie = -EBUSY; |
| 536 | 571 | ||
| 537 | /* Set End-of-link to the last link descriptor of new list*/ | 572 | /* Set End-of-link to the last link descriptor of new list*/ |
| 538 | set_ld_eol(fsl_chan, new); | 573 | set_ld_eol(chan, new); |
| 539 | 574 | ||
| 540 | return &first->async_tx; | 575 | return &first->async_tx; |
| 541 | 576 | ||
| @@ -543,12 +578,7 @@ fail: | |||
| 543 | if (!first) | 578 | if (!first) |
| 544 | return NULL; | 579 | return NULL; |
| 545 | 580 | ||
| 546 | list = &first->tx_list; | 581 | fsldma_free_desc_list_reverse(chan, &first->tx_list); |
| 547 | list_for_each_entry_safe_reverse(new, prev, list, node) { | ||
| 548 | list_del(&new->node); | ||
| 549 | dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | ||
| 550 | } | ||
| 551 | |||
| 552 | return NULL; | 582 | return NULL; |
| 553 | } | 583 | } |
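From a client's point of view the memcpy path above is reached through the generic dmaengine API; the driver then splits the request into FSL_DMA_BCR_MAX_CNT-sized link descriptors as shown. A hedged sketch of the client side (buffer names are placeholders, mapping and error handling reduced to the essentials):

        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        dma_addr_t src, dst;

        src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
        dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        cookie = tx->tx_submit(tx);
        dma_async_issue_pending(chan);

        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
                cpu_relax();            /* polled for brevity; real users set tx->callback */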
| 554 | 584 | ||
| @@ -565,13 +595,12 @@ fail: | |||
| 565 | * chan->private variable. | 595 | * chan->private variable. |
| 566 | */ | 596 | */ |
| 567 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 597 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
| 568 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 598 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
| 569 | enum dma_data_direction direction, unsigned long flags) | 599 | enum dma_data_direction direction, unsigned long flags) |
| 570 | { | 600 | { |
| 571 | struct fsl_dma_chan *fsl_chan; | 601 | struct fsldma_chan *chan; |
| 572 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 602 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
| 573 | struct fsl_dma_slave *slave; | 603 | struct fsl_dma_slave *slave; |
| 574 | struct list_head *tx_list; | ||
| 575 | size_t copy; | 604 | size_t copy; |
| 576 | 605 | ||
| 577 | int i; | 606 | int i; |
| @@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | |||
| 581 | struct fsl_dma_hw_addr *hw; | 610 | struct fsl_dma_hw_addr *hw; |
| 582 | dma_addr_t dma_dst, dma_src; | 611 | dma_addr_t dma_dst, dma_src; |
| 583 | 612 | ||
| 584 | if (!chan) | 613 | if (!dchan) |
| 585 | return NULL; | 614 | return NULL; |
| 586 | 615 | ||
| 587 | if (!chan->private) | 616 | if (!dchan->private) |
| 588 | return NULL; | 617 | return NULL; |
| 589 | 618 | ||
| 590 | fsl_chan = to_fsl_chan(chan); | 619 | chan = to_fsl_chan(dchan); |
| 591 | slave = chan->private; | 620 | slave = dchan->private; |
| 592 | 621 | ||
| 593 | if (list_empty(&slave->addresses)) | 622 | if (list_empty(&slave->addresses)) |
| 594 | return NULL; | 623 | return NULL; |
| @@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | |||
| 637 | } | 666 | } |
| 638 | 667 | ||
| 639 | /* Allocate the link descriptor from DMA pool */ | 668 | /* Allocate the link descriptor from DMA pool */ |
| 640 | new = fsl_dma_alloc_descriptor(fsl_chan); | 669 | new = fsl_dma_alloc_descriptor(chan); |
| 641 | if (!new) { | 670 | if (!new) { |
| 642 | dev_err(fsl_chan->dev, "No free memory for " | 671 | dev_err(chan->dev, "No free memory for " |
| 643 | "link descriptor\n"); | 672 | "link descriptor\n"); |
| 644 | goto fail; | 673 | goto fail; |
| 645 | } | 674 | } |
| 646 | #ifdef FSL_DMA_LD_DEBUG | 675 | #ifdef FSL_DMA_LD_DEBUG |
| 647 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | 676 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); |
| 648 | #endif | 677 | #endif |
| 649 | 678 | ||
| 650 | /* | 679 | /* |
| @@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | |||
| 671 | } | 700 | } |
| 672 | 701 | ||
| 673 | /* Fill in the descriptor */ | 702 | /* Fill in the descriptor */ |
| 674 | set_desc_cnt(fsl_chan, &new->hw, copy); | 703 | set_desc_cnt(chan, &new->hw, copy); |
| 675 | set_desc_src(fsl_chan, &new->hw, dma_src); | 704 | set_desc_src(chan, &new->hw, dma_src); |
| 676 | set_desc_dest(fsl_chan, &new->hw, dma_dst); | 705 | set_desc_dst(chan, &new->hw, dma_dst); |
| 677 | 706 | ||
| 678 | /* | 707 | /* |
| 679 | * If this is not the first descriptor, chain the | 708 | * If this is not the first descriptor, chain the |
| @@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | |||
| 682 | if (!first) { | 711 | if (!first) { |
| 683 | first = new; | 712 | first = new; |
| 684 | } else { | 713 | } else { |
| 685 | set_desc_next(fsl_chan, &prev->hw, | 714 | set_desc_next(chan, &prev->hw, |
| 686 | new->async_tx.phys); | 715 | new->async_tx.phys); |
| 687 | } | 716 | } |
| 688 | 717 | ||
| @@ -708,23 +737,23 @@ finished: | |||
| 708 | new->async_tx.cookie = -EBUSY; | 737 | new->async_tx.cookie = -EBUSY; |
| 709 | 738 | ||
| 710 | /* Set End-of-link to the last link descriptor of new list */ | 739 | /* Set End-of-link to the last link descriptor of new list */ |
| 711 | set_ld_eol(fsl_chan, new); | 740 | set_ld_eol(chan, new); |
| 712 | 741 | ||
| 713 | /* Enable extra controller features */ | 742 | /* Enable extra controller features */ |
| 714 | if (fsl_chan->set_src_loop_size) | 743 | if (chan->set_src_loop_size) |
| 715 | fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); | 744 | chan->set_src_loop_size(chan, slave->src_loop_size); |
| 716 | 745 | ||
| 717 | if (fsl_chan->set_dest_loop_size) | 746 | if (chan->set_dst_loop_size) |
| 718 | fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); | 747 | chan->set_dst_loop_size(chan, slave->dst_loop_size); |
| 719 | 748 | ||
| 720 | if (fsl_chan->toggle_ext_start) | 749 | if (chan->toggle_ext_start) |
| 721 | fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); | 750 | chan->toggle_ext_start(chan, slave->external_start); |
| 722 | 751 | ||
| 723 | if (fsl_chan->toggle_ext_pause) | 752 | if (chan->toggle_ext_pause) |
| 724 | fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); | 753 | chan->toggle_ext_pause(chan, slave->external_pause); |
| 725 | 754 | ||
| 726 | if (fsl_chan->set_request_count) | 755 | if (chan->set_request_count) |
| 727 | fsl_chan->set_request_count(fsl_chan, slave->request_count); | 756 | chan->set_request_count(chan, slave->request_count); |
| 728 | 757 | ||
| 729 | return &first->async_tx; | 758 | return &first->async_tx; |
| 730 | 759 | ||
| @@ -741,215 +770,216 @@ fail: | |||
| 741 | * | 770 | * |
| 742 | * We're re-using variables for the loop, oh well | 771 | * We're re-using variables for the loop, oh well |
| 743 | */ | 772 | */ |
| 744 | tx_list = &first->tx_list; | 773 | fsldma_free_desc_list_reverse(chan, &first->tx_list); |
| 745 | list_for_each_entry_safe_reverse(new, prev, tx_list, node) { | ||
| 746 | list_del_init(&new->node); | ||
| 747 | dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); | ||
| 748 | } | ||
| 749 | |||
| 750 | return NULL; | 774 | return NULL; |
| 751 | } | 775 | } |
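The slave path is driven entirely through dchan->private: the client fills in a struct fsl_dma_slave (an address list plus the loop-size, external-control and request-count knobs consumed at the end of the function) before calling the prep routine. A heavily hedged sketch of that hand-off, using only the fields referenced above (placeholder values; the exact structure and any list-building helpers live in the arch header, not in this patch):

        struct fsl_dma_slave slave;

        INIT_LIST_HEAD(&slave.addresses);
        /* ... append the device-side FIFO address(es) here; helper not shown in this patch ... */

        slave.src_loop_size  = 0;              /* knobs consumed at the end of prep_slave_sg */
        slave.dst_loop_size  = fifo_width;     /* placeholder values */
        slave.external_start = 0;
        slave.external_pause = 0;
        slave.request_count  = burst_bytes;

        dchan->private = &slave;
        tx = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len, DMA_TO_DEVICE, 0);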
| 752 | 776 | ||
| 753 | static void fsl_dma_device_terminate_all(struct dma_chan *chan) | 777 | static void fsl_dma_device_terminate_all(struct dma_chan *dchan) |
| 754 | { | 778 | { |
| 755 | struct fsl_dma_chan *fsl_chan; | 779 | struct fsldma_chan *chan; |
| 756 | struct fsl_desc_sw *desc, *tmp; | ||
| 757 | unsigned long flags; | 780 | unsigned long flags; |
| 758 | 781 | ||
| 759 | if (!chan) | 782 | if (!dchan) |
| 760 | return; | 783 | return; |
| 761 | 784 | ||
| 762 | fsl_chan = to_fsl_chan(chan); | 785 | chan = to_fsl_chan(dchan); |
| 763 | 786 | ||
| 764 | /* Halt the DMA engine */ | 787 | /* Halt the DMA engine */ |
| 765 | dma_halt(fsl_chan); | 788 | dma_halt(chan); |
| 766 | 789 | ||
| 767 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 790 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 768 | 791 | ||
| 769 | /* Remove and free all of the descriptors in the LD queue */ | 792 | /* Remove and free all of the descriptors in the LD queue */ |
| 770 | list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { | 793 | fsldma_free_desc_list(chan, &chan->ld_pending); |
| 771 | list_del(&desc->node); | 794 | fsldma_free_desc_list(chan, &chan->ld_running); |
| 772 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
| 773 | } | ||
| 774 | 795 | ||
| 775 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 796 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
| 776 | } | 797 | } |
| 777 | 798 | ||
| 778 | /** | 799 | /** |
| 779 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 800 | * fsl_dma_update_completed_cookie - Update the completed cookie. |
| 780 | * @fsl_chan : Freescale DMA channel | 801 | * @chan : Freescale DMA channel |
| 802 | * | ||
| 803 | * CONTEXT: hardirq | ||
| 781 | */ | 804 | */ |
| 782 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | 805 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) |
| 783 | { | 806 | { |
| 784 | struct fsl_desc_sw *cur_desc, *desc; | 807 | struct fsl_desc_sw *desc; |
| 785 | dma_addr_t ld_phy; | 808 | unsigned long flags; |
| 809 | dma_cookie_t cookie; | ||
| 786 | 810 | ||
| 787 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | 811 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 788 | 812 | ||
| 789 | if (ld_phy) { | 813 | if (list_empty(&chan->ld_running)) { |
| 790 | cur_desc = NULL; | 814 | dev_dbg(chan->dev, "no running descriptors\n"); |
| 791 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | 815 | goto out_unlock; |
| 792 | if (desc->async_tx.phys == ld_phy) { | 816 | } |
| 793 | cur_desc = desc; | ||
| 794 | break; | ||
| 795 | } | ||
| 796 | 817 | ||
| 797 | if (cur_desc && cur_desc->async_tx.cookie) { | 818 | /* Get the last descriptor, update the cookie to that */ |
| 798 | if (dma_is_idle(fsl_chan)) | 819 | desc = to_fsl_desc(chan->ld_running.prev); |
| 799 | fsl_chan->completed_cookie = | 820 | if (dma_is_idle(chan)) |
| 800 | cur_desc->async_tx.cookie; | 821 | cookie = desc->async_tx.cookie; |
| 801 | else | 822 | else { |
| 802 | fsl_chan->completed_cookie = | 823 | cookie = desc->async_tx.cookie - 1; |
| 803 | cur_desc->async_tx.cookie - 1; | 824 | if (unlikely(cookie < DMA_MIN_COOKIE)) |
| 804 | } | 825 | cookie = DMA_MAX_COOKIE; |
| 805 | } | 826 | } |
| 827 | |||
| 828 | chan->completed_cookie = cookie; | ||
| 829 | |||
| 830 | out_unlock: | ||
| 831 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 832 | } | ||
| 833 | |||
| 834 | /** | ||
| 835 | * fsldma_desc_status - Check the status of a descriptor | ||
| 836 | * @chan: Freescale DMA channel | ||
| 837 | * @desc: DMA SW descriptor | ||
| 838 | * | ||
| 839 | * This function will return the status of the given descriptor | ||
| 840 | */ | ||
| 841 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, | ||
| 842 | struct fsl_desc_sw *desc) | ||
| 843 | { | ||
| 844 | return dma_async_is_complete(desc->async_tx.cookie, | ||
| 845 | chan->completed_cookie, | ||
| 846 | chan->common.cookie); | ||
| 806 | } | 847 | } |
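fsldma_desc_status() leans on the generic cookie comparison from dmaengine.h. A simplified view of what that helper does: a cookie is complete if it falls at or before the last completed cookie, taking wrap-around of the signed cookie space into account:

        /* simplified view of the generic helper used above */
        static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
                        dma_cookie_t last_complete, dma_cookie_t last_used)
        {
                if (last_complete <= last_used) {
                        if ((cookie <= last_complete) || (cookie > last_used))
                                return DMA_SUCCESS;
                } else {
                        if ((cookie <= last_complete) && (cookie > last_used))
                                return DMA_SUCCESS;
                }
                return DMA_IN_PROGRESS;
        }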
| 807 | 848 | ||
| 808 | /** | 849 | /** |
| 809 | * fsl_chan_ld_cleanup - Clean up link descriptors | 850 | * fsl_chan_ld_cleanup - Clean up link descriptors |
| 810 | * @fsl_chan : Freescale DMA channel | 851 | * @chan : Freescale DMA channel |
| 811 | * | 852 | * |
| 812 | * This function cleans up the ld_queue of the DMA channel. | 853 | * This function cleans up the ld_queue of the DMA channel. |
| 813 | * If 'in_intr' is set, the function will move the link descriptor to | ||
| 814 | * the recycle list. Otherwise, free it directly. | ||
| 815 | */ | 854 | */ |
| 816 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | 855 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) |
| 817 | { | 856 | { |
| 818 | struct fsl_desc_sw *desc, *_desc; | 857 | struct fsl_desc_sw *desc, *_desc; |
| 819 | unsigned long flags; | 858 | unsigned long flags; |
| 820 | 859 | ||
| 821 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 860 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 822 | 861 | ||
| 823 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | 862 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); |
| 824 | fsl_chan->completed_cookie); | 863 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { |
| 825 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
| 826 | dma_async_tx_callback callback; | 864 | dma_async_tx_callback callback; |
| 827 | void *callback_param; | 865 | void *callback_param; |
| 828 | 866 | ||
| 829 | if (dma_async_is_complete(desc->async_tx.cookie, | 867 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) |
| 830 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
| 831 | == DMA_IN_PROGRESS) | ||
| 832 | break; | 868 | break; |
| 833 | 869 | ||
| 834 | callback = desc->async_tx.callback; | 870 | /* Remove from the list of running transactions */ |
| 835 | callback_param = desc->async_tx.callback_param; | ||
| 836 | |||
| 837 | /* Remove from ld_queue list */ | ||
| 838 | list_del(&desc->node); | 871 | list_del(&desc->node); |
| 839 | 872 | ||
| 840 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", | ||
| 841 | desc); | ||
| 842 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
| 843 | |||
| 844 | /* Run the link descriptor callback function */ | 873 | /* Run the link descriptor callback function */ |
| 874 | callback = desc->async_tx.callback; | ||
| 875 | callback_param = desc->async_tx.callback_param; | ||
| 845 | if (callback) { | 876 | if (callback) { |
| 846 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 877 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
| 847 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | 878 | dev_dbg(chan->dev, "LD %p callback\n", desc); |
| 848 | desc); | ||
| 849 | callback(callback_param); | 879 | callback(callback_param); |
| 850 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 880 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 851 | } | 881 | } |
| 882 | |||
| 883 | /* Run any dependencies, then free the descriptor */ | ||
| 884 | dma_run_dependencies(&desc->async_tx); | ||
| 885 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
| 852 | } | 886 | } |
| 853 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 887 | |
| 888 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
| 854 | } | 889 | } |
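Note that desc_lock is dropped around the callback, presumably so that a callback which submits more work does not deadlock on the same lock. Callbacks still run from the cleanup tasklet, so they must not sleep; a minimal example of a suitable callback (names are illustrative):

        /* runs in tasklet context via fsl_chan_ld_cleanup(); must not sleep */
        static void my_transfer_done(void *param)
        {
                struct completion *done = param;

                complete(done);         /* complete() is safe in atomic context */
        }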
| 855 | 890 | ||
| 856 | /** | 891 | /** |
| 857 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | 892 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
| 858 | * @fsl_chan : Freescale DMA channel | 893 | * @chan : Freescale DMA channel |
| 894 | * | ||
| 895 | * This will make sure that any pending transactions will be run. | ||
| 896 | * If the DMA controller is idle, it will be started. Otherwise, | ||
| 897 | * the DMA controller's interrupt handler will start any pending | ||
| 898 | * transactions when it becomes idle. | ||
| 859 | */ | 899 | */ |
| 860 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | 900 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) |
| 861 | { | 901 | { |
| 862 | struct list_head *ld_node; | 902 | struct fsl_desc_sw *desc; |
| 863 | dma_addr_t next_dest_addr; | ||
| 864 | unsigned long flags; | 903 | unsigned long flags; |
| 865 | 904 | ||
| 866 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | 905 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 867 | 906 | ||
| 868 | if (!dma_is_idle(fsl_chan)) | 907 | /* |
| 908 | * If the list of pending descriptors is empty, then we | ||
| 909 | * don't need to do any work at all | ||
| 910 | */ | ||
| 911 | if (list_empty(&chan->ld_pending)) { | ||
| 912 | dev_dbg(chan->dev, "no pending LDs\n"); | ||
| 869 | goto out_unlock; | 913 | goto out_unlock; |
| 914 | } | ||
| 870 | 915 | ||
| 871 | dma_halt(fsl_chan); | 916 | /* |
| 917 | * The DMA controller is not idle, which means the interrupt | ||
| 918 | * handler will start any queued transactions when it runs | ||
| 919 | * at the end of the current transaction | ||
| 920 | */ | ||
| 921 | if (!dma_is_idle(chan)) { | ||
| 922 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
| 923 | goto out_unlock; | ||
| 924 | } | ||
| 872 | 925 | ||
| 873 | /* If there are some link descriptors | 926 | /* |
| 874 | * not transfered in queue. We need to start it. | 927 | * TODO: |
| 928 | * make sure the dma_halt() function really un-wedges the | ||
| 929 | * controller as much as possible | ||
| 875 | */ | 930 | */ |
| 931 | dma_halt(chan); | ||
| 876 | 932 | ||
| 877 | /* Find the first un-transfer desciptor */ | 933 | /* |
| 878 | for (ld_node = fsl_chan->ld_queue.next; | 934 | * If there are some link descriptors which have not been |
| 879 | (ld_node != &fsl_chan->ld_queue) | 935 | * transferred, we need to start the controller |
| 880 | && (dma_async_is_complete( | 936 | */ |
| 881 | to_fsl_desc(ld_node)->async_tx.cookie, | 937 | |
| 882 | fsl_chan->completed_cookie, | 938 | /* |
| 883 | fsl_chan->common.cookie) == DMA_SUCCESS); | 939 | * Move all elements from the queue of pending transactions |
| 884 | ld_node = ld_node->next); | 940 | * onto the list of running transactions |
| 885 | 941 | */ | |
| 886 | if (ld_node != &fsl_chan->ld_queue) { | 942 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); |
| 887 | /* Get the ld start address from ld_queue */ | 943 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); |
| 888 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | 944 | |
| 889 | dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", | 945 | /* |
| 890 | (unsigned long long)next_dest_addr); | 946 | * Program the descriptor's address into the DMA controller, |
| 891 | set_cdar(fsl_chan, next_dest_addr); | 947 | * then start the DMA transaction |
| 892 | dma_start(fsl_chan); | 948 | */ |
| 893 | } else { | 949 | set_cdar(chan, desc->async_tx.phys); |
| 894 | set_cdar(fsl_chan, 0); | 950 | dma_start(chan); |
| 895 | set_ndar(fsl_chan, 0); | ||
| 896 | } | ||
| 897 | 951 | ||
| 898 | out_unlock: | 952 | out_unlock: |
| 899 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | 953 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
| 900 | } | 954 | } |
| 901 | 955 | ||
| 902 | /** | 956 | /** |
| 903 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | 957 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command |
| 904 | * @fsl_chan : Freescale DMA channel | 958 | * @chan : Freescale DMA channel |
| 905 | */ | 959 | */ |
| 906 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | 960 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
| 907 | { | 961 | { |
| 908 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 962 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
| 909 | 963 | fsl_chan_xfer_ld_queue(chan); | |
| 910 | #ifdef FSL_DMA_LD_DEBUG | ||
| 911 | struct fsl_desc_sw *ld; | ||
| 912 | unsigned long flags; | ||
| 913 | |||
| 914 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
| 915 | if (list_empty(&fsl_chan->ld_queue)) { | ||
| 916 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
| 917 | return; | ||
| 918 | } | ||
| 919 | |||
| 920 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
| 921 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
| 922 | int i; | ||
| 923 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
| 924 | fsl_chan->id, ld->async_tx.phys); | ||
| 925 | for (i = 0; i < 8; i++) | ||
| 926 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
| 927 | i, *(((u32 *)&ld->hw) + i)); | ||
| 928 | } | ||
| 929 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
| 930 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
| 931 | #endif | ||
| 932 | |||
| 933 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
| 934 | } | 964 | } |
| 935 | 965 | ||
| 936 | /** | 966 | /** |
| 937 | * fsl_dma_is_complete - Determine the DMA status | 967 | * fsl_dma_is_complete - Determine the DMA status |
| 938 | * @fsl_chan : Freescale DMA channel | 968 | * @chan : Freescale DMA channel |
| 939 | */ | 969 | */ |
| 940 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | 970 | static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, |
| 941 | dma_cookie_t cookie, | 971 | dma_cookie_t cookie, |
| 942 | dma_cookie_t *done, | 972 | dma_cookie_t *done, |
| 943 | dma_cookie_t *used) | 973 | dma_cookie_t *used) |
| 944 | { | 974 | { |
| 945 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | 975 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
| 946 | dma_cookie_t last_used; | 976 | dma_cookie_t last_used; |
| 947 | dma_cookie_t last_complete; | 977 | dma_cookie_t last_complete; |
| 948 | 978 | ||
| 949 | fsl_chan_ld_cleanup(fsl_chan); | 979 | fsl_chan_ld_cleanup(chan); |
| 950 | 980 | ||
| 951 | last_used = chan->cookie; | 981 | last_used = dchan->cookie; |
| 952 | last_complete = fsl_chan->completed_cookie; | 982 | last_complete = chan->completed_cookie; |
| 953 | 983 | ||
| 954 | if (done) | 984 | if (done) |
| 955 | *done = last_complete; | 985 | *done = last_complete; |
| @@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | |||
| 960 | return dma_async_is_complete(cookie, last_complete, last_used); | 990 | return dma_async_is_complete(cookie, last_complete, last_used); |
| 961 | } | 991 | } |
| 962 | 992 | ||
| 963 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | 993 | /*----------------------------------------------------------------------------*/ |
| 994 | /* Interrupt Handling */ | ||
| 995 | /*----------------------------------------------------------------------------*/ | ||
| 996 | |||
| 997 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | ||
| 964 | { | 998 | { |
| 965 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | 999 | struct fsldma_chan *chan = data; |
| 966 | u32 stat; | ||
| 967 | int update_cookie = 0; | 1000 | int update_cookie = 0; |
| 968 | int xfer_ld_q = 0; | 1001 | int xfer_ld_q = 0; |
| 1002 | u32 stat; | ||
| 969 | 1003 | ||
| 970 | stat = get_sr(fsl_chan); | 1004 | /* save and clear the status register */ |
| 971 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | 1005 | stat = get_sr(chan); |
| 972 | fsl_chan->id, stat); | 1006 | set_sr(chan, stat); |
| 973 | set_sr(fsl_chan, stat); /* Clear the event register */ | 1007 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); |
| 974 | 1008 | ||
| 975 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 1009 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
| 976 | if (!stat) | 1010 | if (!stat) |
| 977 | return IRQ_NONE; | 1011 | return IRQ_NONE; |
| 978 | 1012 | ||
| 979 | if (stat & FSL_DMA_SR_TE) | 1013 | if (stat & FSL_DMA_SR_TE) |
| 980 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | 1014 | dev_err(chan->dev, "Transfer Error!\n"); |
| 981 | 1015 | ||
| 982 | /* Programming Error | 1016 | /* |
| 1017 | * Programming Error | ||
| 983 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will | 1018 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will |
| 984 | * triger a PE interrupt. | 1019 | * triger a PE interrupt. |
| 985 | */ | 1020 | */ |
| 986 | if (stat & FSL_DMA_SR_PE) { | 1021 | if (stat & FSL_DMA_SR_PE) { |
| 987 | dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); | 1022 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); |
| 988 | if (get_bcr(fsl_chan) == 0) { | 1023 | if (get_bcr(chan) == 0) { |
| 989 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | 1024 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. |
| 990 | * Now, update the completed cookie, and continue the | 1025 | * Now, update the completed cookie, and continue the |
| 991 | * next uncompleted transfer. | 1026 | * next uncompleted transfer. |
| @@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | |||
| 996 | stat &= ~FSL_DMA_SR_PE; | 1031 | stat &= ~FSL_DMA_SR_PE; |
| 997 | } | 1032 | } |
| 998 | 1033 | ||
| 999 | /* If the link descriptor segment transfer finishes, | 1034 | /* |
| 1035 | * If the link descriptor segment transfer finishes, | ||
| 1000 | * we will recycle the used descriptor. | 1036 | * we will recycle the used descriptor. |
| 1001 | */ | 1037 | */ |
| 1002 | if (stat & FSL_DMA_SR_EOSI) { | 1038 | if (stat & FSL_DMA_SR_EOSI) { |
| 1003 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | 1039 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); |
| 1004 | dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", | 1040 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", |
| 1005 | (unsigned long long)get_cdar(fsl_chan), | 1041 | (unsigned long long)get_cdar(chan), |
| 1006 | (unsigned long long)get_ndar(fsl_chan)); | 1042 | (unsigned long long)get_ndar(chan)); |
| 1007 | stat &= ~FSL_DMA_SR_EOSI; | 1043 | stat &= ~FSL_DMA_SR_EOSI; |
| 1008 | update_cookie = 1; | 1044 | update_cookie = 1; |
| 1009 | } | 1045 | } |
| 1010 | 1046 | ||
| 1011 | /* For MPC8349, EOCDI event need to update cookie | 1047 | /* |
| 1048 | * For MPC8349, EOCDI event need to update cookie | ||
| 1012 | * and start the next transfer if it exist. | 1049 | * and start the next transfer if it exist. |
| 1013 | */ | 1050 | */ |
| 1014 | if (stat & FSL_DMA_SR_EOCDI) { | 1051 | if (stat & FSL_DMA_SR_EOCDI) { |
| 1015 | dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); | 1052 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); |
| 1016 | stat &= ~FSL_DMA_SR_EOCDI; | 1053 | stat &= ~FSL_DMA_SR_EOCDI; |
| 1017 | update_cookie = 1; | 1054 | update_cookie = 1; |
| 1018 | xfer_ld_q = 1; | 1055 | xfer_ld_q = 1; |
| 1019 | } | 1056 | } |
| 1020 | 1057 | ||
| 1021 | /* If it current transfer is the end-of-transfer, | 1058 | /* |
| 1059 | * If it current transfer is the end-of-transfer, | ||
| 1022 | * we should clear the Channel Start bit for | 1060 | * we should clear the Channel Start bit for |
| 1023 | * prepare next transfer. | 1061 | * prepare next transfer. |
| 1024 | */ | 1062 | */ |
| 1025 | if (stat & FSL_DMA_SR_EOLNI) { | 1063 | if (stat & FSL_DMA_SR_EOLNI) { |
| 1026 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | 1064 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); |
| 1027 | stat &= ~FSL_DMA_SR_EOLNI; | 1065 | stat &= ~FSL_DMA_SR_EOLNI; |
| 1028 | xfer_ld_q = 1; | 1066 | xfer_ld_q = 1; |
| 1029 | } | 1067 | } |
| 1030 | 1068 | ||
| 1031 | if (update_cookie) | 1069 | if (update_cookie) |
| 1032 | fsl_dma_update_completed_cookie(fsl_chan); | 1070 | fsl_dma_update_completed_cookie(chan); |
| 1033 | if (xfer_ld_q) | 1071 | if (xfer_ld_q) |
| 1034 | fsl_chan_xfer_ld_queue(fsl_chan); | 1072 | fsl_chan_xfer_ld_queue(chan); |
| 1035 | if (stat) | 1073 | if (stat) |
| 1036 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | 1074 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); |
| 1037 | stat); | ||
| 1038 | 1075 | ||
| 1039 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | 1076 | dev_dbg(chan->dev, "irq: Exit\n"); |
| 1040 | tasklet_schedule(&fsl_chan->tasklet); | 1077 | tasklet_schedule(&chan->tasklet); |
| 1041 | return IRQ_HANDLED; | 1078 | return IRQ_HANDLED; |
| 1042 | } | 1079 | } |
| 1043 | 1080 | ||
| 1044 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | 1081 | static void dma_do_tasklet(unsigned long data) |
| 1045 | { | 1082 | { |
| 1046 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | 1083 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
| 1047 | u32 gsr; | 1084 | fsl_chan_ld_cleanup(chan); |
| 1048 | int ch_nr; | 1085 | } |
| 1086 | |||
| 1087 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | ||
| 1088 | { | ||
| 1089 | struct fsldma_device *fdev = data; | ||
| 1090 | struct fsldma_chan *chan; | ||
| 1091 | unsigned int handled = 0; | ||
| 1092 | u32 gsr, mask; | ||
| 1093 | int i; | ||
| 1049 | 1094 | ||
| 1050 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | 1095 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) |
| 1051 | : in_le32(fdev->reg_base); | 1096 | : in_le32(fdev->regs); |
| 1052 | ch_nr = (32 - ffs(gsr)) / 8; | 1097 | mask = 0xff000000; |
| 1098 | dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); | ||
| 1099 | |||
| 1100 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
| 1101 | chan = fdev->chan[i]; | ||
| 1102 | if (!chan) | ||
| 1103 | continue; | ||
| 1104 | |||
| 1105 | if (gsr & mask) { | ||
| 1106 | dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); | ||
| 1107 | fsldma_chan_irq(irq, chan); | ||
| 1108 | handled++; | ||
| 1109 | } | ||
| 1053 | 1110 | ||
| 1054 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | 1111 | gsr &= ~mask; |
| 1055 | fdev->chan[ch_nr]) : IRQ_NONE; | 1112 | mask >>= 8; |
| 1113 | } | ||
| 1114 | |||
| 1115 | return IRQ_RETVAL(handled); | ||
| 1056 | } | 1116 | } |
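The new controller-level handler walks every channel because the shared status register packs one byte of status per channel, channel 0 in the most significant byte (hence the 0xff000000 mask shifted right by 8 per iteration). The removed handler instead derived a single channel index from the first set bit; that arithmetic, pulled out as an illustrative helper with a guard for an empty status word:

        /* which channel raised the interrupt, per the 8-bits-per-channel layout */
        static int fsldma_chan_from_gsr(u32 gsr)
        {
                if (!gsr)
                        return -1;
                return (32 - ffs(gsr)) / 8;     /* mirrors the removed ch_nr computation */
        }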
| 1057 | 1117 | ||
| 1058 | static void dma_do_tasklet(unsigned long data) | 1118 | static void fsldma_free_irqs(struct fsldma_device *fdev) |
| 1059 | { | 1119 | { |
| 1060 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | 1120 | struct fsldma_chan *chan; |
| 1061 | fsl_chan_ld_cleanup(fsl_chan); | 1121 | int i; |
| 1122 | |||
| 1123 | if (fdev->irq != NO_IRQ) { | ||
| 1124 | dev_dbg(fdev->dev, "free per-controller IRQ\n"); | ||
| 1125 | free_irq(fdev->irq, fdev); | ||
| 1126 | return; | ||
| 1127 | } | ||
| 1128 | |||
| 1129 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
| 1130 | chan = fdev->chan[i]; | ||
| 1131 | if (chan && chan->irq != NO_IRQ) { | ||
| 1132 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); | ||
| 1133 | free_irq(chan->irq, chan); | ||
| 1134 | } | ||
| 1135 | } | ||
| 1062 | } | 1136 | } |
| 1063 | 1137 | ||
| 1064 | static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, | 1138 | static int fsldma_request_irqs(struct fsldma_device *fdev) |
| 1139 | { | ||
| 1140 | struct fsldma_chan *chan; | ||
| 1141 | int ret; | ||
| 1142 | int i; | ||
| 1143 | |||
| 1144 | /* if we have a per-controller IRQ, use that */ | ||
| 1145 | if (fdev->irq != NO_IRQ) { | ||
| 1146 | dev_dbg(fdev->dev, "request per-controller IRQ\n"); | ||
| 1147 | ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, | ||
| 1148 | "fsldma-controller", fdev); | ||
| 1149 | return ret; | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | /* no per-controller IRQ, use the per-channel IRQs */ | ||
| 1153 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
| 1154 | chan = fdev->chan[i]; | ||
| 1155 | if (!chan) | ||
| 1156 | continue; | ||
| 1157 | |||
| 1158 | if (chan->irq == NO_IRQ) { | ||
| 1159 | dev_err(fdev->dev, "no interrupts property defined for " | ||
| 1160 | "DMA channel %d. Please fix your " | ||
| 1161 | "device tree\n", chan->id); | ||
| 1162 | ret = -ENODEV; | ||
| 1163 | goto out_unwind; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); | ||
| 1167 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, | ||
| 1168 | "fsldma-chan", chan); | ||
| 1169 | if (ret) { | ||
| 1170 | dev_err(fdev->dev, "unable to request IRQ for DMA " | ||
| 1171 | "channel %d\n", chan->id); | ||
| 1172 | goto out_unwind; | ||
| 1173 | } | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | return 0; | ||
| 1177 | |||
| 1178 | out_unwind: | ||
| 1179 | for (/* none */; i >= 0; i--) { | ||
| 1180 | chan = fdev->chan[i]; | ||
| 1181 | if (!chan) | ||
| 1182 | continue; | ||
| 1183 | |||
| 1184 | if (chan->irq == NO_IRQ) | ||
| 1185 | continue; | ||
| 1186 | |||
| 1187 | free_irq(chan->irq, chan); | ||
| 1188 | } | ||
| 1189 | |||
| 1190 | return ret; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | /*----------------------------------------------------------------------------*/ | ||
| 1194 | /* OpenFirmware Subsystem */ | ||
| 1195 | /*----------------------------------------------------------------------------*/ | ||
| 1196 | |||
| 1197 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | ||
| 1065 | struct device_node *node, u32 feature, const char *compatible) | 1198 | struct device_node *node, u32 feature, const char *compatible) |
| 1066 | { | 1199 | { |
| 1067 | struct fsl_dma_chan *new_fsl_chan; | 1200 | struct fsldma_chan *chan; |
| 1201 | struct resource res; | ||
| 1068 | int err; | 1202 | int err; |
| 1069 | 1203 | ||
| 1070 | /* alloc channel */ | 1204 | /* alloc channel */ |
| 1071 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | 1205 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
| 1072 | if (!new_fsl_chan) { | 1206 | if (!chan) { |
| 1073 | dev_err(fdev->dev, "No free memory for allocating " | 1207 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); |
| 1074 | "dma channels!\n"); | 1208 | err = -ENOMEM; |
| 1075 | return -ENOMEM; | 1209 | goto out_return; |
| 1076 | } | 1210 | } |
| 1077 | 1211 | ||
| 1078 | /* get dma channel register base */ | 1212 | /* ioremap registers for use */ |
| 1079 | err = of_address_to_resource(node, 0, &new_fsl_chan->reg); | 1213 | chan->regs = of_iomap(node, 0); |
| 1080 | if (err) { | 1214 | if (!chan->regs) { |
| 1081 | dev_err(fdev->dev, "Can't get %s property 'reg'\n", | 1215 | dev_err(fdev->dev, "unable to ioremap registers\n"); |
| 1082 | node->full_name); | 1216 | err = -ENOMEM; |
| 1083 | goto err_no_reg; | 1217 | goto out_free_chan; |
| 1084 | } | 1218 | } |
| 1085 | 1219 | ||
| 1086 | new_fsl_chan->feature = feature; | 1220 | err = of_address_to_resource(node, 0, &res); |
| 1221 | if (err) { | ||
| 1222 | dev_err(fdev->dev, "unable to find 'reg' property\n"); | ||
| 1223 | goto out_iounmap_regs; | ||
| 1224 | } | ||
| 1087 | 1225 | ||
| 1226 | chan->feature = feature; | ||
| 1088 | if (!fdev->feature) | 1227 | if (!fdev->feature) |
| 1089 | fdev->feature = new_fsl_chan->feature; | 1228 | fdev->feature = chan->feature; |
| 1090 | 1229 | ||
| 1091 | /* If the DMA device's feature is different than its channels', | 1230 | /* |
| 1092 | * report the bug. | 1231 | * If the DMA device's feature is different than the feature |
| 1232 | * of its channels, report the bug | ||
| 1093 | */ | 1233 | */ |
| 1094 | WARN_ON(fdev->feature != new_fsl_chan->feature); | 1234 | WARN_ON(fdev->feature != chan->feature); |
| 1095 | 1235 | ||
| 1096 | new_fsl_chan->dev = fdev->dev; | 1236 | chan->dev = fdev->dev; |
| 1097 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | 1237 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; |
| 1098 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | 1238 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { |
| 1099 | 1239 | dev_err(fdev->dev, "too many channels for device\n"); | |
| 1100 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
| 1101 | if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
| 1102 | dev_err(fdev->dev, "There is no %d channel!\n", | ||
| 1103 | new_fsl_chan->id); | ||
| 1104 | err = -EINVAL; | 1240 | err = -EINVAL; |
| 1105 | goto err_no_chan; | 1241 | goto out_iounmap_regs; |
| 1106 | } | 1242 | } |
| 1107 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
| 1108 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
| 1109 | (unsigned long)new_fsl_chan); | ||
| 1110 | 1243 | ||
| 1111 | /* Init the channel */ | 1244 | fdev->chan[chan->id] = chan; |
| 1112 | dma_init(new_fsl_chan); | 1245 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
| 1246 | |||
| 1247 | /* Initialize the channel */ | ||
| 1248 | dma_init(chan); | ||
| 1113 | 1249 | ||
| 1114 | /* Clear cdar registers */ | 1250 | /* Clear cdar registers */ |
| 1115 | set_cdar(new_fsl_chan, 0); | 1251 | set_cdar(chan, 0); |
| 1116 | 1252 | ||
| 1117 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | 1253 | switch (chan->feature & FSL_DMA_IP_MASK) { |
| 1118 | case FSL_DMA_IP_85XX: | 1254 | case FSL_DMA_IP_85XX: |
| 1119 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | 1255 | chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; |
| 1120 | case FSL_DMA_IP_83XX: | 1256 | case FSL_DMA_IP_83XX: |
| 1121 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | 1257 | chan->toggle_ext_start = fsl_chan_toggle_ext_start; |
| 1122 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | 1258 | chan->set_src_loop_size = fsl_chan_set_src_loop_size; |
| 1123 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | 1259 | chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; |
| 1124 | new_fsl_chan->set_request_count = fsl_chan_set_request_count; | 1260 | chan->set_request_count = fsl_chan_set_request_count; |
| 1125 | } | 1261 | } |
| 1126 | 1262 | ||
| 1127 | spin_lock_init(&new_fsl_chan->desc_lock); | 1263 | spin_lock_init(&chan->desc_lock); |
| 1128 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | 1264 | INIT_LIST_HEAD(&chan->ld_pending); |
| 1265 | INIT_LIST_HEAD(&chan->ld_running); | ||
| 1266 | |||
| 1267 | chan->common.device = &fdev->common; | ||
| 1129 | 1268 | ||
| 1130 | new_fsl_chan->common.device = &fdev->common; | 1269 | /* find the IRQ line, if it exists in the device tree */ |
| 1270 | chan->irq = irq_of_parse_and_map(node, 0); | ||
| 1131 | 1271 | ||
| 1132 | /* Add the channel to DMA device channel list */ | 1272 | /* Add the channel to DMA device channel list */ |
| 1133 | list_add_tail(&new_fsl_chan->common.device_node, | 1273 | list_add_tail(&chan->common.device_node, &fdev->common.channels); |
| 1134 | &fdev->common.channels); | ||
| 1135 | fdev->common.chancnt++; | 1274 | fdev->common.chancnt++; |
| 1136 | 1275 | ||
| 1137 | new_fsl_chan->irq = irq_of_parse_and_map(node, 0); | 1276 | dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, |
| 1138 | if (new_fsl_chan->irq != NO_IRQ) { | 1277 | chan->irq != NO_IRQ ? chan->irq : fdev->irq); |
| 1139 | err = request_irq(new_fsl_chan->irq, | ||
| 1140 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
| 1141 | "fsldma-channel", new_fsl_chan); | ||
| 1142 | if (err) { | ||
| 1143 | dev_err(fdev->dev, "DMA channel %s request_irq error " | ||
| 1144 | "with return %d\n", node->full_name, err); | ||
| 1145 | goto err_no_irq; | ||
| 1146 | } | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
| 1150 | compatible, | ||
| 1151 | new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq); | ||
| 1152 | 1278 | ||
| 1153 | return 0; | 1279 | return 0; |
| 1154 | 1280 | ||
| 1155 | err_no_irq: | 1281 | out_iounmap_regs: |
| 1156 | list_del(&new_fsl_chan->common.device_node); | 1282 | iounmap(chan->regs); |
| 1157 | err_no_chan: | 1283 | out_free_chan: |
| 1158 | iounmap(new_fsl_chan->reg_base); | 1284 | kfree(chan); |
| 1159 | err_no_reg: | 1285 | out_return: |
| 1160 | kfree(new_fsl_chan); | ||
| 1161 | return err; | 1286 | return err; |
| 1162 | } | 1287 | } |
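The channel id above is computed from the channel's register address alone: subtract the 0x100 base offset, keep the low 12 bits, and divide by the 0x80-byte size of a channel register block (this reading of the constants is an inference from the arithmetic, not stated in the patch). In isolation:

        /* illustrative only: how chan->id falls out of the channel's 'reg' address */
        static unsigned int fsl_chan_id_from_reg(resource_size_t start)
        {
                return ((start - 0x100) & 0xfff) >> 7;  /* >> 7 == divide by 0x80 */
        }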
| 1163 | 1288 | ||
| 1164 | static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) | 1289 | static void fsl_dma_chan_remove(struct fsldma_chan *chan) |
| 1165 | { | 1290 | { |
| 1166 | if (fchan->irq != NO_IRQ) | 1291 | irq_dispose_mapping(chan->irq); |
| 1167 | free_irq(fchan->irq, fchan); | 1292 | list_del(&chan->common.device_node); |
| 1168 | list_del(&fchan->common.device_node); | 1293 | iounmap(chan->regs); |
| 1169 | iounmap(fchan->reg_base); | 1294 | kfree(chan); |
| 1170 | kfree(fchan); | ||
| 1171 | } | 1295 | } |
| 1172 | 1296 | ||
| 1173 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | 1297 | static int __devinit fsldma_of_probe(struct of_device *op, |
| 1174 | const struct of_device_id *match) | 1298 | const struct of_device_id *match) |
| 1175 | { | 1299 | { |
| 1176 | int err; | 1300 | struct fsldma_device *fdev; |
| 1177 | struct fsl_dma_device *fdev; | ||
| 1178 | struct device_node *child; | 1301 | struct device_node *child; |
| 1302 | int err; | ||
| 1179 | 1303 | ||
| 1180 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | 1304 | fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); |
| 1181 | if (!fdev) { | 1305 | if (!fdev) { |
| 1182 | dev_err(&dev->dev, "No enough memory for 'priv'\n"); | 1306 | dev_err(&op->dev, "No enough memory for 'priv'\n"); |
| 1183 | return -ENOMEM; | 1307 | err = -ENOMEM; |
| 1308 | goto out_return; | ||
| 1184 | } | 1309 | } |
| 1185 | fdev->dev = &dev->dev; | 1310 | |
| 1311 | fdev->dev = &op->dev; | ||
| 1186 | INIT_LIST_HEAD(&fdev->common.channels); | 1312 | INIT_LIST_HEAD(&fdev->common.channels); |
| 1187 | 1313 | ||
| 1188 | /* get DMA controller register base */ | 1314 | /* ioremap the registers for use */ |
| 1189 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | 1315 | fdev->regs = of_iomap(op->node, 0); |
| 1190 | if (err) { | 1316 | if (!fdev->regs) { |
| 1191 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | 1317 | dev_err(&op->dev, "unable to ioremap registers\n"); |
| 1192 | dev->node->full_name); | 1318 | err = -ENOMEM; |
| 1193 | goto err_no_reg; | 1319 | goto out_free_fdev; |
| 1194 | } | 1320 | } |
| 1195 | 1321 | ||
| 1196 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | 1322 | /* map the channel IRQ if it exists, but don't hookup the handler yet */ |
| 1197 | "controller at 0x%llx...\n", | 1323 | fdev->irq = irq_of_parse_and_map(op->node, 0); |
| 1198 | match->compatible, (unsigned long long)fdev->reg.start); | ||
| 1199 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
| 1200 | - fdev->reg.start + 1); | ||
| 1201 | 1324 | ||
| 1202 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1325 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
| 1203 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1326 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
| @@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, | |||
| 1210 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1333 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
| 1211 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1334 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; |
| 1212 | fdev->common.device_terminate_all = fsl_dma_device_terminate_all; | 1335 | fdev->common.device_terminate_all = fsl_dma_device_terminate_all; |
| 1213 | fdev->common.dev = &dev->dev; | 1336 | fdev->common.dev = &op->dev; |
| 1214 | 1337 | ||
| 1215 | fdev->irq = irq_of_parse_and_map(dev->node, 0); | 1338 | dev_set_drvdata(&op->dev, fdev); |
| 1216 | if (fdev->irq != NO_IRQ) { | ||
| 1217 | err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
| 1218 | "fsldma-device", fdev); | ||
| 1219 | if (err) { | ||
| 1220 | dev_err(&dev->dev, "DMA device request_irq error " | ||
| 1221 | "with return %d\n", err); | ||
| 1222 | goto err; | ||
| 1223 | } | ||
| 1224 | } | ||
| 1225 | |||
| 1226 | dev_set_drvdata(&(dev->dev), fdev); | ||
| 1227 | 1339 | ||
| 1228 | /* We cannot use of_platform_bus_probe() because there is no | 1340 | /* |
| 1229 | * of_platform_bus_remove. Instead, we manually instantiate every DMA | 1341 | * We cannot use of_platform_bus_probe() because there is no |
| 1342 | * of_platform_bus_remove(). Instead, we manually instantiate every DMA | ||
| 1230 | * channel object. | 1343 | * channel object. |
| 1231 | */ | 1344 | */ |
| 1232 | for_each_child_of_node(dev->node, child) { | 1345 | for_each_child_of_node(op->node, child) { |
| 1233 | if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) | 1346 | if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { |
| 1234 | fsl_dma_chan_probe(fdev, child, | 1347 | fsl_dma_chan_probe(fdev, child, |
| 1235 | FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, | 1348 | FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, |
| 1236 | "fsl,eloplus-dma-channel"); | 1349 | "fsl,eloplus-dma-channel"); |
| 1237 | if (of_device_is_compatible(child, "fsl,elo-dma-channel")) | 1350 | } |
| 1351 | |||
| 1352 | if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { | ||
| 1238 | fsl_dma_chan_probe(fdev, child, | 1353 | fsl_dma_chan_probe(fdev, child, |
| 1239 | FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, | 1354 | FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, |
| 1240 | "fsl,elo-dma-channel"); | 1355 | "fsl,elo-dma-channel"); |
| 1356 | } | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | /* | ||
| 1360 | * Hookup the IRQ handler(s) | ||
| 1361 | * | ||
| 1362 | * If we have a per-controller interrupt, we prefer that to the | ||
| 1363 | * per-channel interrupts to reduce the number of shared interrupt | ||
| 1364 | * handlers on the same IRQ line | ||
| 1365 | */ | ||
| 1366 | err = fsldma_request_irqs(fdev); | ||
| 1367 | if (err) { | ||
| 1368 | dev_err(fdev->dev, "unable to request IRQs\n"); | ||
| 1369 | goto out_free_fdev; | ||
| 1241 | } | 1370 | } |
| 1242 | 1371 | ||
| 1243 | dma_async_device_register(&fdev->common); | 1372 | dma_async_device_register(&fdev->common); |
| 1244 | return 0; | 1373 | return 0; |
| 1245 | 1374 | ||
| 1246 | err: | 1375 | out_free_fdev: |
| 1247 | iounmap(fdev->reg_base); | 1376 | irq_dispose_mapping(fdev->irq); |
| 1248 | err_no_reg: | ||
| 1249 | kfree(fdev); | 1377 | kfree(fdev); |
| 1378 | out_return: | ||
| 1250 | return err; | 1379 | return err; |
| 1251 | } | 1380 | } |
| 1252 | 1381 | ||
| 1253 | static int of_fsl_dma_remove(struct of_device *of_dev) | 1382 | static int fsldma_of_remove(struct of_device *op) |
| 1254 | { | 1383 | { |
| 1255 | struct fsl_dma_device *fdev; | 1384 | struct fsldma_device *fdev; |
| 1256 | unsigned int i; | 1385 | unsigned int i; |
| 1257 | 1386 | ||
| 1258 | fdev = dev_get_drvdata(&of_dev->dev); | 1387 | fdev = dev_get_drvdata(&op->dev); |
| 1259 | |||
| 1260 | dma_async_device_unregister(&fdev->common); | 1388 | dma_async_device_unregister(&fdev->common); |
| 1261 | 1389 | ||
| 1262 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) | 1390 | fsldma_free_irqs(fdev); |
| 1391 | |||
| 1392 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
| 1263 | if (fdev->chan[i]) | 1393 | if (fdev->chan[i]) |
| 1264 | fsl_dma_chan_remove(fdev->chan[i]); | 1394 | fsl_dma_chan_remove(fdev->chan[i]); |
| 1395 | } | ||
| 1265 | 1396 | ||
| 1266 | if (fdev->irq != NO_IRQ) | 1397 | iounmap(fdev->regs); |
| 1267 | free_irq(fdev->irq, fdev); | 1398 | dev_set_drvdata(&op->dev, NULL); |
| 1268 | |||
| 1269 | iounmap(fdev->reg_base); | ||
| 1270 | |||
| 1271 | kfree(fdev); | 1399 | kfree(fdev); |
| 1272 | dev_set_drvdata(&of_dev->dev, NULL); | ||
| 1273 | 1400 | ||
| 1274 | return 0; | 1401 | return 0; |
| 1275 | } | 1402 | } |
| 1276 | 1403 | ||
| 1277 | static struct of_device_id of_fsl_dma_ids[] = { | 1404 | static const struct of_device_id fsldma_of_ids[] = { |
| 1278 | { .compatible = "fsl,eloplus-dma", }, | 1405 | { .compatible = "fsl,eloplus-dma", }, |
| 1279 | { .compatible = "fsl,elo-dma", }, | 1406 | { .compatible = "fsl,elo-dma", }, |
| 1280 | {} | 1407 | {} |
| 1281 | }; | 1408 | }; |
| 1282 | 1409 | ||
| 1283 | static struct of_platform_driver of_fsl_dma_driver = { | 1410 | static struct of_platform_driver fsldma_of_driver = { |
| 1284 | .name = "fsl-elo-dma", | 1411 | .name = "fsl-elo-dma", |
| 1285 | .match_table = of_fsl_dma_ids, | 1412 | .match_table = fsldma_of_ids, |
| 1286 | .probe = of_fsl_dma_probe, | 1413 | .probe = fsldma_of_probe, |
| 1287 | .remove = of_fsl_dma_remove, | 1414 | .remove = fsldma_of_remove, |
| 1288 | }; | 1415 | }; |
| 1289 | 1416 | ||
| 1290 | static __init int of_fsl_dma_init(void) | 1417 | /*----------------------------------------------------------------------------*/ |
| 1418 | /* Module Init / Exit */ | ||
| 1419 | /*----------------------------------------------------------------------------*/ | ||
| 1420 | |||
| 1421 | static __init int fsldma_init(void) | ||
| 1291 | { | 1422 | { |
| 1292 | int ret; | 1423 | int ret; |
| 1293 | 1424 | ||
| 1294 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1425 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); |
| 1295 | 1426 | ||
| 1296 | ret = of_register_platform_driver(&of_fsl_dma_driver); | 1427 | ret = of_register_platform_driver(&fsldma_of_driver); |
| 1297 | if (ret) | 1428 | if (ret) |
| 1298 | pr_err("fsldma: failed to register platform driver\n"); | 1429 | pr_err("fsldma: failed to register platform driver\n"); |
| 1299 | 1430 | ||
| 1300 | return ret; | 1431 | return ret; |
| 1301 | } | 1432 | } |
| 1302 | 1433 | ||
| 1303 | static void __exit of_fsl_dma_exit(void) | 1434 | static void __exit fsldma_exit(void) |
| 1304 | { | 1435 | { |
| 1305 | of_unregister_platform_driver(&of_fsl_dma_driver); | 1436 | of_unregister_platform_driver(&fsldma_of_driver); |
| 1306 | } | 1437 | } |
| 1307 | 1438 | ||
| 1308 | subsys_initcall(of_fsl_dma_init); | 1439 | subsys_initcall(fsldma_init); |
| 1309 | module_exit(of_fsl_dma_exit); | 1440 | module_exit(fsldma_exit); |
| 1310 | 1441 | ||
| 1311 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); | 1442 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); |
| 1312 | MODULE_LICENSE("GPL"); | 1443 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 0df14cbb8ca3..cb4d6ff51597 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
| @@ -92,11 +92,9 @@ struct fsl_desc_sw { | |||
| 92 | struct list_head node; | 92 | struct list_head node; |
| 93 | struct list_head tx_list; | 93 | struct list_head tx_list; |
| 94 | struct dma_async_tx_descriptor async_tx; | 94 | struct dma_async_tx_descriptor async_tx; |
| 95 | struct list_head *ld; | ||
| 96 | void *priv; | ||
| 97 | } __attribute__((aligned(32))); | 95 | } __attribute__((aligned(32))); |
| 98 | 96 | ||
| 99 | struct fsl_dma_chan_regs { | 97 | struct fsldma_chan_regs { |
| 100 | u32 mr; /* 0x00 - Mode Register */ | 98 | u32 mr; /* 0x00 - Mode Register */ |
| 101 | u32 sr; /* 0x04 - Status Register */ | 99 | u32 sr; /* 0x04 - Status Register */ |
| 102 | u64 cdar; /* 0x08 - Current descriptor address register */ | 100 | u64 cdar; /* 0x08 - Current descriptor address register */ |
| @@ -106,20 +104,19 @@ struct fsl_dma_chan_regs { | |||
| 106 | u64 ndar; /* 0x24 - Next Descriptor Address Register */ | 104 | u64 ndar; /* 0x24 - Next Descriptor Address Register */ |
| 107 | }; | 105 | }; |
| 108 | 106 | ||
| 109 | struct fsl_dma_chan; | 107 | struct fsldma_chan; |
| 110 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | 108 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 |
| 111 | 109 | ||
| 112 | struct fsl_dma_device { | 110 | struct fsldma_device { |
| 113 | void __iomem *reg_base; /* DGSR register base */ | 111 | void __iomem *regs; /* DGSR register base */ |
| 114 | struct resource reg; /* Resource for register */ | ||
| 115 | struct device *dev; | 112 | struct device *dev; |
| 116 | struct dma_device common; | 113 | struct dma_device common; |
| 117 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | 114 | struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; |
| 118 | u32 feature; /* The same as DMA channels */ | 115 | u32 feature; /* The same as DMA channels */ |
| 119 | int irq; /* Channel IRQ */ | 116 | int irq; /* Channel IRQ */ |
| 120 | }; | 117 | }; |
| 121 | 118 | ||
| 122 | /* Define macros for fsl_dma_chan->feature property */ | 119 | /* Define macros for fsldma_chan->feature property */ |
| 123 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | 120 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 |
| 124 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | 121 | #define FSL_DMA_BIG_ENDIAN 0x00000001 |
| 125 | 122 | ||
| @@ -130,28 +127,28 @@ struct fsl_dma_device { | |||
| 130 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | 127 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 |
| 131 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | 128 | #define FSL_DMA_CHAN_START_EXT 0x00002000 |
| 132 | 129 | ||
| 133 | struct fsl_dma_chan { | 130 | struct fsldma_chan { |
| 134 | struct fsl_dma_chan_regs __iomem *reg_base; | 131 | struct fsldma_chan_regs __iomem *regs; |
| 135 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | 132 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ |
| 136 | spinlock_t desc_lock; /* Descriptor operation lock */ | 133 | spinlock_t desc_lock; /* Descriptor operation lock */ |
| 137 | struct list_head ld_queue; /* Link descriptors queue */ | 134 | struct list_head ld_pending; /* Link descriptors queue */ |
| 135 | struct list_head ld_running; /* Link descriptors queue */ | ||
| 138 | struct dma_chan common; /* DMA common channel */ | 136 | struct dma_chan common; /* DMA common channel */ |
| 139 | struct dma_pool *desc_pool; /* Descriptors pool */ | 137 | struct dma_pool *desc_pool; /* Descriptors pool */ |
| 140 | struct device *dev; /* Channel device */ | 138 | struct device *dev; /* Channel device */ |
| 141 | struct resource reg; /* Resource for register */ | ||
| 142 | int irq; /* Channel IRQ */ | 139 | int irq; /* Channel IRQ */ |
| 143 | int id; /* Raw id of this channel */ | 140 | int id; /* Raw id of this channel */ |
| 144 | struct tasklet_struct tasklet; | 141 | struct tasklet_struct tasklet; |
| 145 | u32 feature; | 142 | u32 feature; |
| 146 | 143 | ||
| 147 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); | 144 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); |
| 148 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | 145 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); |
| 149 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | 146 | void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size); |
| 150 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | 147 | void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size); |
| 151 | void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); | 148 | void (*set_request_count)(struct fsldma_chan *fsl_chan, int size); |
| 152 | }; | 149 | }; |
| 153 | 150 | ||
| 154 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | 151 | #define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common) |
| 155 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | 152 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) |
| 156 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | 153 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) |
| 157 | 154 | ||
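Splitting the old ld_queue into ld_pending and ld_running makes the descriptor life cycle explicit: submission only ever appends to the pending list, and issuing work means moving a whole batch onto the running list before pointing the hardware at it. A rough sketch of that hand-off, assuming the struct fsldma_chan above, a hypothetical chan_start_hw() kick helper, and a simplified "hardware idle" test (empty ld_running):

    /* Assumes "fsldma.h" as shown above; chan_start_hw() is hypothetical */
    extern void chan_start_hw(struct fsldma_chan *chan);

    static void example_issue_pending(struct fsldma_chan *chan)
    {
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);

        /* nothing new to issue, or the hardware is still working ld_running */
        if (list_empty(&chan->ld_pending) || !list_empty(&chan->ld_running)) {
            spin_unlock_irqrestore(&chan->desc_lock, flags);
            return;
        }

        /* hand the whole pending batch to the hardware in one go */
        list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
        chan_start_hw(chan);

        spin_unlock_irqrestore(&chan->desc_lock, flags);
    }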
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index dcc4ab78b32b..5d0e42b263df 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
| @@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) | |||
| 94 | return IRQ_HANDLED; | 94 | return IRQ_HANDLED; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | static void ioat1_cleanup_tasklet(unsigned long data); | ||
| 98 | |||
| 99 | /* common channel initialization */ | 97 | /* common channel initialization */ |
| 100 | void ioat_init_channel(struct ioatdma_device *device, | 98 | void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) |
| 101 | struct ioat_chan_common *chan, int idx, | ||
| 102 | void (*timer_fn)(unsigned long), | ||
| 103 | void (*tasklet)(unsigned long), | ||
| 104 | unsigned long ioat) | ||
| 105 | { | 99 | { |
| 106 | struct dma_device *dma = &device->common; | 100 | struct dma_device *dma = &device->common; |
| 101 | struct dma_chan *c = &chan->common; | ||
| 102 | unsigned long data = (unsigned long) c; | ||
| 107 | 103 | ||
| 108 | chan->device = device; | 104 | chan->device = device; |
| 109 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); | 105 | chan->reg_base = device->reg_base + (0x80 * (idx + 1)); |
| @@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device, | |||
| 112 | list_add_tail(&chan->common.device_node, &dma->channels); | 108 | list_add_tail(&chan->common.device_node, &dma->channels); |
| 113 | device->idx[idx] = chan; | 109 | device->idx[idx] = chan; |
| 114 | init_timer(&chan->timer); | 110 | init_timer(&chan->timer); |
| 115 | chan->timer.function = timer_fn; | 111 | chan->timer.function = device->timer_fn; |
| 116 | chan->timer.data = ioat; | 112 | chan->timer.data = data; |
| 117 | tasklet_init(&chan->cleanup_task, tasklet, ioat); | 113 | tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); |
| 118 | tasklet_disable(&chan->cleanup_task); | 114 | tasklet_disable(&chan->cleanup_task); |
| 119 | } | 115 | } |
| 120 | 116 | ||
| 121 | static void ioat1_timer_event(unsigned long data); | ||
| 122 | |||
| 123 | /** | 117 | /** |
| 124 | * ioat1_dma_enumerate_channels - find and initialize the device's channels | 118 | * ioat1_dma_enumerate_channels - find and initialize the device's channels |
| 125 | * @device: the device to be enumerated | 119 | * @device: the device to be enumerated |
| @@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) | |||
| 155 | if (!ioat) | 149 | if (!ioat) |
| 156 | break; | 150 | break; |
| 157 | 151 | ||
| 158 | ioat_init_channel(device, &ioat->base, i, | 152 | ioat_init_channel(device, &ioat->base, i); |
| 159 | ioat1_timer_event, | ||
| 160 | ioat1_cleanup_tasklet, | ||
| 161 | (unsigned long) ioat); | ||
| 162 | ioat->xfercap = xfercap; | 153 | ioat->xfercap = xfercap; |
| 163 | spin_lock_init(&ioat->desc_lock); | 154 | spin_lock_init(&ioat->desc_lock); |
| 164 | INIT_LIST_HEAD(&ioat->free_desc); | 155 | INIT_LIST_HEAD(&ioat->free_desc); |
| @@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, | |||
| 532 | return &desc->txd; | 523 | return &desc->txd; |
| 533 | } | 524 | } |
| 534 | 525 | ||
| 535 | static void ioat1_cleanup_tasklet(unsigned long data) | 526 | static void ioat1_cleanup_event(unsigned long data) |
| 536 | { | 527 | { |
| 537 | struct ioat_dma_chan *chan = (void *)data; | 528 | struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); |
| 538 | 529 | ||
| 539 | ioat1_cleanup(chan); | 530 | ioat1_cleanup(ioat); |
| 540 | writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); | 531 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
| 541 | } | 532 | } |
| 542 | 533 | ||
| 543 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | 534 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, |
| @@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) | |||
| 687 | 678 | ||
| 688 | static void ioat1_timer_event(unsigned long data) | 679 | static void ioat1_timer_event(unsigned long data) |
| 689 | { | 680 | { |
| 690 | struct ioat_dma_chan *ioat = (void *) data; | 681 | struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); |
| 691 | struct ioat_chan_common *chan = &ioat->base; | 682 | struct ioat_chan_common *chan = &ioat->base; |
| 692 | 683 | ||
| 693 | dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); | 684 | dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); |
| @@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data) | |||
| 734 | spin_unlock_bh(&chan->cleanup_lock); | 725 | spin_unlock_bh(&chan->cleanup_lock); |
| 735 | } | 726 | } |
| 736 | 727 | ||
| 737 | static enum dma_status | 728 | enum dma_status |
| 738 | ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, | 729 | ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, |
| 739 | dma_cookie_t *done, dma_cookie_t *used) | 730 | dma_cookie_t *done, dma_cookie_t *used) |
| 740 | { | 731 | { |
| 741 | struct ioat_dma_chan *ioat = to_ioat_chan(c); | 732 | struct ioat_chan_common *chan = to_chan_common(c); |
| 733 | struct ioatdma_device *device = chan->device; | ||
| 742 | 734 | ||
| 743 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | 735 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) |
| 744 | return DMA_SUCCESS; | 736 | return DMA_SUCCESS; |
| 745 | 737 | ||
| 746 | ioat1_cleanup(ioat); | 738 | device->cleanup_fn((unsigned long) c); |
| 747 | 739 | ||
| 748 | return ioat_is_complete(c, cookie, done, used); | 740 | return ioat_is_complete(c, cookie, done, used); |
| 749 | } | 741 | } |
| @@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1199 | device->intr_quirk = ioat1_intr_quirk; | 1191 | device->intr_quirk = ioat1_intr_quirk; |
| 1200 | device->enumerate_channels = ioat1_enumerate_channels; | 1192 | device->enumerate_channels = ioat1_enumerate_channels; |
| 1201 | device->self_test = ioat_dma_self_test; | 1193 | device->self_test = ioat_dma_self_test; |
| 1194 | device->timer_fn = ioat1_timer_event; | ||
| 1195 | device->cleanup_fn = ioat1_cleanup_event; | ||
| 1202 | dma = &device->common; | 1196 | dma = &device->common; |
| 1203 | dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | 1197 | dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; |
| 1204 | dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; | 1198 | dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; |
| 1205 | dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; | 1199 | dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; |
| 1206 | dma->device_free_chan_resources = ioat1_dma_free_chan_resources; | 1200 | dma->device_free_chan_resources = ioat1_dma_free_chan_resources; |
| 1207 | dma->device_is_tx_complete = ioat1_dma_is_complete; | 1201 | dma->device_is_tx_complete = ioat_is_dma_complete; |
| 1208 | 1202 | ||
| 1209 | err = ioat_probe(device); | 1203 | err = ioat_probe(device); |
| 1210 | if (err) | 1204 | if (err) |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index bbc3e78ef333..4f747a254074 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
| @@ -61,7 +61,7 @@ | |||
| 61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) | 61 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) |
| 62 | * @enumerate_channels: hw version specific channel enumeration | 62 | * @enumerate_channels: hw version specific channel enumeration |
| 63 | * @reset_hw: hw version specific channel (re)initialization | 63 | * @reset_hw: hw version specific channel (re)initialization |
| 64 | * @cleanup_tasklet: select between the v2 and v3 cleanup routines | 64 | * @cleanup_fn: select between the v2 and v3 cleanup routines |
| 65 | * @timer_fn: select between the v2 and v3 timer watchdog routines | 65 | * @timer_fn: select between the v2 and v3 timer watchdog routines |
| 66 | * @self_test: hardware version specific self test for each supported op type | 66 | * @self_test: hardware version specific self test for each supported op type |
| 67 | * | 67 | * |
| @@ -80,7 +80,7 @@ struct ioatdma_device { | |||
| 80 | void (*intr_quirk)(struct ioatdma_device *device); | 80 | void (*intr_quirk)(struct ioatdma_device *device); |
| 81 | int (*enumerate_channels)(struct ioatdma_device *device); | 81 | int (*enumerate_channels)(struct ioatdma_device *device); |
| 82 | int (*reset_hw)(struct ioat_chan_common *chan); | 82 | int (*reset_hw)(struct ioat_chan_common *chan); |
| 83 | void (*cleanup_tasklet)(unsigned long data); | 83 | void (*cleanup_fn)(unsigned long data); |
| 84 | void (*timer_fn)(unsigned long data); | 84 | void (*timer_fn)(unsigned long data); |
| 85 | int (*self_test)(struct ioatdma_device *device); | 85 | int (*self_test)(struct ioatdma_device *device); |
| 86 | }; | 86 | }; |
| @@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, | |||
| 337 | void __iomem *iobase); | 337 | void __iomem *iobase); |
| 338 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); | 338 | unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); |
| 339 | void ioat_init_channel(struct ioatdma_device *device, | 339 | void ioat_init_channel(struct ioatdma_device *device, |
| 340 | struct ioat_chan_common *chan, int idx, | 340 | struct ioat_chan_common *chan, int idx); |
| 341 | void (*timer_fn)(unsigned long), | 341 | enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, |
| 342 | void (*tasklet)(unsigned long), | 342 | dma_cookie_t *done, dma_cookie_t *used); |
| 343 | unsigned long ioat); | ||
| 344 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | 343 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, |
| 345 | size_t len, struct ioat_dma_descriptor *hw); | 344 | size_t len, struct ioat_dma_descriptor *hw); |
| 346 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | 345 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
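With cleanup_fn and timer_fn carried on struct ioatdma_device, the common channel init no longer needs per-version callback arguments: each probe routine installs its ops once, and ioat_init_channel() reads them back. A hedged sketch of that pattern in isolation, with deliberately reduced structures (demo_device, demo_chan) rather than the driver's real ones:

    #include <linux/timer.h>
    #include <linux/interrupt.h>

    /* Hypothetical reduced versions of the ioat structures, for illustration only */
    struct demo_device {
        void (*cleanup_fn)(unsigned long data);   /* version-specific tasklet body */
        void (*timer_fn)(unsigned long data);     /* version-specific watchdog     */
    };

    struct demo_chan {
        struct demo_device *device;
        struct timer_list timer;
        struct tasklet_struct cleanup_task;
    };

    /* Common init: wires up whatever callbacks the probe routine installed */
    static void demo_init_channel(struct demo_device *device, struct demo_chan *chan)
    {
        unsigned long data = (unsigned long)chan;

        chan->device = device;
        init_timer(&chan->timer);
        chan->timer.function = device->timer_fn;
        chan->timer.data = data;
        tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
        tasklet_disable(&chan->cleanup_task);
    }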
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5cc37afe2bc1..1ed5d66d7dca 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order, | |||
| 51 | 51 | ||
| 52 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) | 52 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) |
| 53 | { | 53 | { |
| 54 | void * __iomem reg_base = ioat->base.reg_base; | 54 | struct ioat_chan_common *chan = &ioat->base; |
| 55 | 55 | ||
| 56 | ioat->pending = 0; | ||
| 57 | ioat->dmacount += ioat2_ring_pending(ioat); | 56 | ioat->dmacount += ioat2_ring_pending(ioat); |
| 58 | ioat->issued = ioat->head; | 57 | ioat->issued = ioat->head; |
| 59 | /* make descriptor updates globally visible before notifying channel */ | 58 | /* make descriptor updates globally visible before notifying channel */ |
| 60 | wmb(); | 59 | wmb(); |
| 61 | writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); | 60 | writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); |
| 62 | dev_dbg(to_dev(&ioat->base), | 61 | dev_dbg(to_dev(chan), |
| 63 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", | 62 | "%s: head: %#x tail: %#x issued: %#x count: %#x\n", |
| 64 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); | 63 | __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | void ioat2_issue_pending(struct dma_chan *chan) | 66 | void ioat2_issue_pending(struct dma_chan *c) |
| 68 | { | 67 | { |
| 69 | struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); | 68 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
| 70 | 69 | ||
| 71 | spin_lock_bh(&ioat->ring_lock); | 70 | if (ioat2_ring_pending(ioat)) { |
| 72 | if (ioat->pending == 1) | 71 | spin_lock_bh(&ioat->ring_lock); |
| 73 | __ioat2_issue_pending(ioat); | 72 | __ioat2_issue_pending(ioat); |
| 74 | spin_unlock_bh(&ioat->ring_lock); | 73 | spin_unlock_bh(&ioat->ring_lock); |
| 74 | } | ||
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | /** | 77 | /** |
| 78 | * ioat2_update_pending - log pending descriptors | 78 | * ioat2_update_pending - log pending descriptors |
| 79 | * @ioat: ioat2+ channel | 79 | * @ioat: ioat2+ channel |
| 80 | * | 80 | * |
| 81 | * set pending to '1' unless pending is already set to '2', pending == 2 | 81 | * Check if the number of unsubmitted descriptors has exceeded the |
| 82 | * indicates that submission is temporarily blocked due to an in-flight | 82 | * watermark. Called with ring_lock held |
| 83 | * reset. If we are already above the ioat_pending_level threshold then | ||
| 84 | * just issue pending. | ||
| 85 | * | ||
| 86 | * called with ring_lock held | ||
| 87 | */ | 83 | */ |
| 88 | static void ioat2_update_pending(struct ioat2_dma_chan *ioat) | 84 | static void ioat2_update_pending(struct ioat2_dma_chan *ioat) |
| 89 | { | 85 | { |
| 90 | if (unlikely(ioat->pending == 2)) | 86 | if (ioat2_ring_pending(ioat) > ioat_pending_level) |
| 91 | return; | ||
| 92 | else if (ioat2_ring_pending(ioat) > ioat_pending_level) | ||
| 93 | __ioat2_issue_pending(ioat); | 87 | __ioat2_issue_pending(ioat); |
| 94 | else | ||
| 95 | ioat->pending = 1; | ||
| 96 | } | 88 | } |
| 97 | 89 | ||
| 98 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) | 90 | static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) |
| @@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | |||
| 166 | seen_current = true; | 158 | seen_current = true; |
| 167 | } | 159 | } |
| 168 | ioat->tail += i; | 160 | ioat->tail += i; |
| 169 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | 161 | BUG_ON(active && !seen_current); /* no active descs have written a completion? */ |
| 170 | 162 | ||
| 171 | chan->last_completion = phys_complete; | 163 | chan->last_completion = phys_complete; |
| 172 | if (ioat->head == ioat->tail) { | 164 | if (ioat->head == ioat->tail) { |
| @@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) | |||
| 207 | spin_unlock_bh(&chan->cleanup_lock); | 199 | spin_unlock_bh(&chan->cleanup_lock); |
| 208 | } | 200 | } |
| 209 | 201 | ||
| 210 | void ioat2_cleanup_tasklet(unsigned long data) | 202 | void ioat2_cleanup_event(unsigned long data) |
| 211 | { | 203 | { |
| 212 | struct ioat2_dma_chan *ioat = (void *) data; | 204 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
| 213 | 205 | ||
| 214 | ioat2_cleanup(ioat); | 206 | ioat2_cleanup(ioat); |
| 215 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | 207 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
| @@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | |||
| 291 | 283 | ||
| 292 | void ioat2_timer_event(unsigned long data) | 284 | void ioat2_timer_event(unsigned long data) |
| 293 | { | 285 | { |
| 294 | struct ioat2_dma_chan *ioat = (void *) data; | 286 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
| 295 | struct ioat_chan_common *chan = &ioat->base; | 287 | struct ioat_chan_common *chan = &ioat->base; |
| 296 | 288 | ||
| 297 | spin_lock_bh(&chan->cleanup_lock); | 289 | spin_lock_bh(&chan->cleanup_lock); |
| @@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) | |||
| 397 | if (!ioat) | 389 | if (!ioat) |
| 398 | break; | 390 | break; |
| 399 | 391 | ||
| 400 | ioat_init_channel(device, &ioat->base, i, | 392 | ioat_init_channel(device, &ioat->base, i); |
| 401 | device->timer_fn, | ||
| 402 | device->cleanup_tasklet, | ||
| 403 | (unsigned long) ioat); | ||
| 404 | ioat->xfercap_log = xfercap_log; | 393 | ioat->xfercap_log = xfercap_log; |
| 405 | spin_lock_init(&ioat->ring_lock); | 394 | spin_lock_init(&ioat->ring_lock); |
| 406 | if (device->reset_hw(&ioat->base)) { | 395 | if (device->reset_hw(&ioat->base)) { |
| @@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | |||
| 546 | ioat->head = 0; | 535 | ioat->head = 0; |
| 547 | ioat->issued = 0; | 536 | ioat->issued = 0; |
| 548 | ioat->tail = 0; | 537 | ioat->tail = 0; |
| 549 | ioat->pending = 0; | ||
| 550 | ioat->alloc_order = order; | 538 | ioat->alloc_order = order; |
| 551 | spin_unlock_bh(&ioat->ring_lock); | 539 | spin_unlock_bh(&ioat->ring_lock); |
| 552 | 540 | ||
| @@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) | |||
| 701 | 689 | ||
| 702 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | 690 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); |
| 703 | spin_unlock_bh(&chan->cleanup_lock); | 691 | spin_unlock_bh(&chan->cleanup_lock); |
| 704 | device->timer_fn((unsigned long) ioat); | 692 | device->timer_fn((unsigned long) &chan->common); |
| 705 | } else | 693 | } else |
| 706 | spin_unlock_bh(&chan->cleanup_lock); | 694 | spin_unlock_bh(&chan->cleanup_lock); |
| 707 | return -ENOMEM; | 695 | return -ENOMEM; |
| @@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) | |||
| 785 | 773 | ||
| 786 | tasklet_disable(&chan->cleanup_task); | 774 | tasklet_disable(&chan->cleanup_task); |
| 787 | del_timer_sync(&chan->timer); | 775 | del_timer_sync(&chan->timer); |
| 788 | device->cleanup_tasklet((unsigned long) ioat); | 776 | device->cleanup_fn((unsigned long) c); |
| 789 | device->reset_hw(chan); | 777 | device->reset_hw(chan); |
| 790 | 778 | ||
| 791 | spin_lock_bh(&ioat->ring_lock); | 779 | spin_lock_bh(&ioat->ring_lock); |
| @@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c) | |||
| 815 | 803 | ||
| 816 | chan->last_completion = 0; | 804 | chan->last_completion = 0; |
| 817 | chan->completion_dma = 0; | 805 | chan->completion_dma = 0; |
| 818 | ioat->pending = 0; | ||
| 819 | ioat->dmacount = 0; | 806 | ioat->dmacount = 0; |
| 820 | } | 807 | } |
| 821 | 808 | ||
| 822 | enum dma_status | ||
| 823 | ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
| 824 | dma_cookie_t *done, dma_cookie_t *used) | ||
| 825 | { | ||
| 826 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
| 827 | struct ioatdma_device *device = ioat->base.device; | ||
| 828 | |||
| 829 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | ||
| 830 | return DMA_SUCCESS; | ||
| 831 | |||
| 832 | device->cleanup_tasklet((unsigned long) ioat); | ||
| 833 | |||
| 834 | return ioat_is_complete(c, cookie, done, used); | ||
| 835 | } | ||
| 836 | |||
| 837 | static ssize_t ring_size_show(struct dma_chan *c, char *page) | 809 | static ssize_t ring_size_show(struct dma_chan *c, char *page) |
| 838 | { | 810 | { |
| 839 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 811 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
| @@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | |||
| 874 | 846 | ||
| 875 | device->enumerate_channels = ioat2_enumerate_channels; | 847 | device->enumerate_channels = ioat2_enumerate_channels; |
| 876 | device->reset_hw = ioat2_reset_hw; | 848 | device->reset_hw = ioat2_reset_hw; |
| 877 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | 849 | device->cleanup_fn = ioat2_cleanup_event; |
| 878 | device->timer_fn = ioat2_timer_event; | 850 | device->timer_fn = ioat2_timer_event; |
| 879 | device->self_test = ioat_dma_self_test; | 851 | device->self_test = ioat_dma_self_test; |
| 880 | dma = &device->common; | 852 | dma = &device->common; |
| @@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | |||
| 882 | dma->device_issue_pending = ioat2_issue_pending; | 854 | dma->device_issue_pending = ioat2_issue_pending; |
| 883 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 855 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
| 884 | dma->device_free_chan_resources = ioat2_free_chan_resources; | 856 | dma->device_free_chan_resources = ioat2_free_chan_resources; |
| 885 | dma->device_is_tx_complete = ioat2_is_complete; | 857 | dma->device_is_tx_complete = ioat_is_dma_complete; |
| 886 | 858 | ||
| 887 | err = ioat_probe(device); | 859 | err = ioat_probe(device); |
| 888 | if (err) | 860 | if (err) |
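Dropping the ->pending flag works because the ring indices already encode the same information: descriptors between ->issued and ->head are prepared but not yet notified to the hardware, so issue_pending() can simply test that count under the ring lock. A small standalone model of the index arithmetic (power-of-two ring, 16-bit counters that are allowed to wrap):

    #include <stdio.h>
    #include <stdint.h>

    /* prepped-but-not-issued descriptors; ring_size must be a power of two */
    static unsigned int ring_pending(uint16_t head, uint16_t issued,
                                     unsigned int ring_size)
    {
        return (uint16_t)(head - issued) & (ring_size - 1);
    }

    /* issued-but-not-cleaned descriptors */
    static unsigned int ring_active(uint16_t head, uint16_t tail,
                                    unsigned int ring_size)
    {
        return (uint16_t)(head - tail) & (ring_size - 1);
    }

    int main(void)
    {
        /* head has wrapped past issued and tail; the counts still come out right */
        printf("pending=%u active=%u\n",
               ring_pending(3, 62, 64),   /* -> 5 */
               ring_active(3, 60, 64));   /* -> 7 */
        return 0;
    }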
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 3afad8da43cc..ef2871fd7868 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
| @@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order; | |||
| 47 | * @head: allocated index | 47 | * @head: allocated index |
| 48 | * @issued: hardware notification point | 48 | * @issued: hardware notification point |
| 49 | * @tail: cleanup index | 49 | * @tail: cleanup index |
| 50 | * @pending: lock free indicator for issued != head | ||
| 51 | * @dmacount: identical to 'head' except for occasionally resetting to zero | 50 | * @dmacount: identical to 'head' except for occasionally resetting to zero |
| 52 | * @alloc_order: log2 of the number of allocated descriptors | 51 | * @alloc_order: log2 of the number of allocated descriptors |
| 53 | * @ring: software ring buffer implementation of hardware ring | 52 | * @ring: software ring buffer implementation of hardware ring |
| @@ -61,7 +60,6 @@ struct ioat2_dma_chan { | |||
| 61 | u16 tail; | 60 | u16 tail; |
| 62 | u16 dmacount; | 61 | u16 dmacount; |
| 63 | u16 alloc_order; | 62 | u16 alloc_order; |
| 64 | int pending; | ||
| 65 | struct ioat_ring_ent **ring; | 63 | struct ioat_ring_ent **ring; |
| 66 | spinlock_t ring_lock; | 64 | spinlock_t ring_lock; |
| 67 | }; | 65 | }; |
| @@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | |||
| 178 | void ioat2_issue_pending(struct dma_chan *chan); | 176 | void ioat2_issue_pending(struct dma_chan *chan); |
| 179 | int ioat2_alloc_chan_resources(struct dma_chan *c); | 177 | int ioat2_alloc_chan_resources(struct dma_chan *c); |
| 180 | void ioat2_free_chan_resources(struct dma_chan *c); | 178 | void ioat2_free_chan_resources(struct dma_chan *c); |
| 181 | enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, | ||
| 182 | dma_cookie_t *done, dma_cookie_t *used); | ||
| 183 | void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); | 179 | void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); |
| 184 | bool reshape_ring(struct ioat2_dma_chan *ioat, int order); | 180 | bool reshape_ring(struct ioat2_dma_chan *ioat, int order); |
| 185 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); | 181 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); |
| 186 | void ioat2_cleanup_tasklet(unsigned long data); | 182 | void ioat2_cleanup_event(unsigned long data); |
| 187 | void ioat2_timer_event(unsigned long data); | 183 | void ioat2_timer_event(unsigned long data); |
| 188 | int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); | 184 | int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); |
| 189 | int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); | 185 | int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 9908c9e94b2d..26febc56dab1 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
| @@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) | |||
| 293 | } | 293 | } |
| 294 | } | 294 | } |
| 295 | ioat->tail += i; | 295 | ioat->tail += i; |
| 296 | BUG_ON(!seen_current); /* no active descs have written a completion? */ | 296 | BUG_ON(active && !seen_current); /* no active descs have written a completion? */ |
| 297 | chan->last_completion = phys_complete; | 297 | chan->last_completion = phys_complete; |
| 298 | if (ioat->head == ioat->tail) { | 298 | |
| 299 | active = ioat2_ring_active(ioat); | ||
| 300 | if (active == 0) { | ||
| 299 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", | 301 | dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", |
| 300 | __func__); | 302 | __func__); |
| 301 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); | 303 | clear_bit(IOAT_COMPLETION_PENDING, &chan->state); |
| 302 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | 304 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); |
| 303 | } | 305 | } |
| 306 | /* 5 microsecond delay per pending descriptor */ | ||
| 307 | writew(min((5 * active), IOAT_INTRDELAY_MASK), | ||
| 308 | chan->device->reg_base + IOAT_INTRDELAY_OFFSET); | ||
| 304 | } | 309 | } |
| 305 | 310 | ||
| 306 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) | 311 | /* try to cleanup, but yield (via spin_trylock) to incoming submissions |
| 312 | * with the expectation that we will immediately poll again shortly | ||
| 313 | */ | ||
| 314 | static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) | ||
| 307 | { | 315 | { |
| 308 | struct ioat_chan_common *chan = &ioat->base; | 316 | struct ioat_chan_common *chan = &ioat->base; |
| 309 | unsigned long phys_complete; | 317 | unsigned long phys_complete; |
| @@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) | |||
| 329 | spin_unlock_bh(&chan->cleanup_lock); | 337 | spin_unlock_bh(&chan->cleanup_lock); |
| 330 | } | 338 | } |
| 331 | 339 | ||
| 332 | static void ioat3_cleanup_tasklet(unsigned long data) | 340 | /* run cleanup now because we already delayed the interrupt via INTRDELAY */ |
| 341 | static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) | ||
| 342 | { | ||
| 343 | struct ioat_chan_common *chan = &ioat->base; | ||
| 344 | unsigned long phys_complete; | ||
| 345 | |||
| 346 | prefetch(chan->completion); | ||
| 347 | |||
| 348 | spin_lock_bh(&chan->cleanup_lock); | ||
| 349 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | ||
| 350 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 351 | return; | ||
| 352 | } | ||
| 353 | spin_lock_bh(&ioat->ring_lock); | ||
| 354 | |||
| 355 | __cleanup(ioat, phys_complete); | ||
| 356 | |||
| 357 | spin_unlock_bh(&ioat->ring_lock); | ||
| 358 | spin_unlock_bh(&chan->cleanup_lock); | ||
| 359 | } | ||
| 360 | |||
| 361 | static void ioat3_cleanup_event(unsigned long data) | ||
| 333 | { | 362 | { |
| 334 | struct ioat2_dma_chan *ioat = (void *) data; | 363 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
| 335 | 364 | ||
| 336 | ioat3_cleanup(ioat); | 365 | ioat3_cleanup_sync(ioat); |
| 337 | writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, | 366 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
| 338 | ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | ||
| 339 | } | 367 | } |
| 340 | 368 | ||
| 341 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | 369 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) |
| 342 | { | 370 | { |
| 343 | struct ioat_chan_common *chan = &ioat->base; | 371 | struct ioat_chan_common *chan = &ioat->base; |
| 344 | unsigned long phys_complete; | 372 | unsigned long phys_complete; |
| 345 | u32 status; | ||
| 346 | |||
| 347 | status = ioat_chansts(chan); | ||
| 348 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
| 349 | ioat_suspend(chan); | ||
| 350 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
| 351 | status = ioat_chansts(chan); | ||
| 352 | cpu_relax(); | ||
| 353 | } | ||
| 354 | 373 | ||
| 374 | ioat2_quiesce(chan, 0); | ||
| 355 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 375 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
| 356 | __cleanup(ioat, phys_complete); | 376 | __cleanup(ioat, phys_complete); |
| 357 | 377 | ||
| @@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | |||
| 360 | 380 | ||
| 361 | static void ioat3_timer_event(unsigned long data) | 381 | static void ioat3_timer_event(unsigned long data) |
| 362 | { | 382 | { |
| 363 | struct ioat2_dma_chan *ioat = (void *) data; | 383 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
| 364 | struct ioat_chan_common *chan = &ioat->base; | 384 | struct ioat_chan_common *chan = &ioat->base; |
| 365 | 385 | ||
| 366 | spin_lock_bh(&chan->cleanup_lock); | 386 | spin_lock_bh(&chan->cleanup_lock); |
| @@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, | |||
| 426 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | 446 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) |
| 427 | return DMA_SUCCESS; | 447 | return DMA_SUCCESS; |
| 428 | 448 | ||
| 429 | ioat3_cleanup(ioat); | 449 | ioat3_cleanup_poll(ioat); |
| 430 | 450 | ||
| 431 | return ioat_is_complete(c, cookie, done, used); | 451 | return ioat_is_complete(c, cookie, done, used); |
| 432 | } | 452 | } |
| @@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
| 1239 | 1259 | ||
| 1240 | if (is_raid_device) { | 1260 | if (is_raid_device) { |
| 1241 | dma->device_is_tx_complete = ioat3_is_complete; | 1261 | dma->device_is_tx_complete = ioat3_is_complete; |
| 1242 | device->cleanup_tasklet = ioat3_cleanup_tasklet; | 1262 | device->cleanup_fn = ioat3_cleanup_event; |
| 1243 | device->timer_fn = ioat3_timer_event; | 1263 | device->timer_fn = ioat3_timer_event; |
| 1244 | } else { | 1264 | } else { |
| 1245 | dma->device_is_tx_complete = ioat2_is_complete; | 1265 | dma->device_is_tx_complete = ioat_is_dma_complete; |
| 1246 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | 1266 | device->cleanup_fn = ioat2_cleanup_event; |
| 1247 | device->timer_fn = ioat2_timer_event; | 1267 | device->timer_fn = ioat2_timer_event; |
| 1248 | } | 1268 | } |
| 1249 | 1269 | ||
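The new INTRDELAY programming in __cleanup() scales interrupt coalescing with load: roughly 5 microseconds of delay per descriptor still active on the ring, clamped to the 14-bit field the hardware provides. The arithmetic on its own, as a tiny worked example:

    #include <stdio.h>

    #define IOAT_INTRDELAY_MASK 0x3FFF      /* 14-bit delay field, from registers.h */

    /* 5 microsecond delay per still-active descriptor, clamped to the field width */
    static unsigned int intr_delay(unsigned int active)
    {
        unsigned int delay = 5 * active;

        return delay < IOAT_INTRDELAY_MASK ? delay : IOAT_INTRDELAY_MASK;
    }

    int main(void)
    {
        printf("%u\n", intr_delay(0));      /* idle ring        -> 0     */
        printf("%u\n", intr_delay(100));    /* 100 descriptors  -> 500   */
        printf("%u\n", intr_delay(5000));   /* clamped          -> 16383 */
        return 0;
    }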
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index e8ae63baf588..1391798542b6 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
| @@ -60,7 +60,7 @@ | |||
| 60 | #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ | 60 | #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ |
| 61 | 61 | ||
| 62 | #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ | 62 | #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ |
| 63 | #define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ | 63 | #define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */ |
| 64 | #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ | 64 | #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ |
| 65 | 65 | ||
| 66 | #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ | 66 | #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index e80bae1673fa..2a446397c884 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
| @@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
| 348 | break; | 348 | break; |
| 349 | case IPU_PIX_FMT_BGRA32: | 349 | case IPU_PIX_FMT_BGRA32: |
| 350 | case IPU_PIX_FMT_BGR32: | 350 | case IPU_PIX_FMT_BGR32: |
| 351 | case IPU_PIX_FMT_ABGR32: | ||
| 351 | params->ip.bpp = 0; | 352 | params->ip.bpp = 0; |
| 352 | params->ip.pfs = 4; | 353 | params->ip.pfs = 4; |
| 353 | params->ip.npb = 7; | 354 | params->ip.npb = 7; |
| @@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
| 376 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | 377 | params->ip.wid2 = 7; /* Blue bit width - 1 */ |
| 377 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | 378 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ |
| 378 | break; | 379 | break; |
| 379 | case IPU_PIX_FMT_ABGR32: | ||
| 380 | params->ip.bpp = 0; | ||
| 381 | params->ip.pfs = 4; | ||
| 382 | params->ip.npb = 7; | ||
| 383 | params->ip.sat = 2; /* SAT = 32-bit access */ | ||
| 384 | params->ip.ofs0 = 8; /* Red bit offset */ | ||
| 385 | params->ip.ofs1 = 16; /* Green bit offset */ | ||
| 386 | params->ip.ofs2 = 24; /* Blue bit offset */ | ||
| 387 | params->ip.ofs3 = 0; /* Alpha bit offset */ | ||
| 388 | params->ip.wid0 = 7; /* Red bit width - 1 */ | ||
| 389 | params->ip.wid1 = 7; /* Green bit width - 1 */ | ||
| 390 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | ||
| 391 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | ||
| 392 | break; | ||
| 393 | case IPU_PIX_FMT_UYVY: | 380 | case IPU_PIX_FMT_UYVY: |
| 394 | params->ip.bpp = 2; | 381 | params->ip.bpp = 2; |
| 395 | params->ip.pfs = 6; | 382 | params->ip.pfs = 6; |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c new file mode 100644 index 000000000000..3fdf1f46bd63 --- /dev/null +++ b/drivers/dma/mpc512x_dma.c | |||
| @@ -0,0 +1,800 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008. | ||
| 3 | * Copyright (C) Semihalf 2009 | ||
| 4 | * | ||
| 5 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | ||
| 6 | * (defines, structures and comments) was taken from MPC5121 DMA driver | ||
| 7 | * written by Hongjun Chen <hong-jun.chen@freescale.com>. | ||
| 8 | * | ||
| 9 | * Approved as OSADL project by a majority of OSADL members and funded | ||
| 10 | * by OSADL membership fees in 2009; for details see www.osadl.org. | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify it | ||
| 13 | * under the terms of the GNU General Public License as published by the Free | ||
| 14 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 15 | * any later version. | ||
| 16 | * | ||
| 17 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 20 | * more details. | ||
| 21 | * | ||
| 22 | * You should have received a copy of the GNU General Public License along with | ||
| 23 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 24 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 25 | * | ||
| 26 | * The full GNU General Public License is included in this distribution in the | ||
| 27 | * file called COPYING. | ||
| 28 | */ | ||
| 29 | |||
| 30 | /* | ||
| 31 | * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory | ||
| 32 | * transfers are supported (tested using the dmatest module). | ||
| 33 | */ | ||
| 34 | |||
| 35 | #include <linux/module.h> | ||
| 36 | #include <linux/dmaengine.h> | ||
| 37 | #include <linux/dma-mapping.h> | ||
| 38 | #include <linux/interrupt.h> | ||
| 39 | #include <linux/io.h> | ||
| 40 | #include <linux/of_device.h> | ||
| 41 | #include <linux/of_platform.h> | ||
| 42 | |||
| 43 | #include <linux/random.h> | ||
| 44 | |||
| 45 | /* Number of DMA Transfer descriptors allocated per channel */ | ||
| 46 | #define MPC_DMA_DESCRIPTORS 64 | ||
| 47 | |||
| 48 | /* Macro definitions */ | ||
| 49 | #define MPC_DMA_CHANNELS 64 | ||
| 50 | #define MPC_DMA_TCD_OFFSET 0x1000 | ||
| 51 | |||
| 52 | /* Arbitration mode of group and channel */ | ||
| 53 | #define MPC_DMA_DMACR_EDCG (1 << 31) | ||
| 54 | #define MPC_DMA_DMACR_ERGA (1 << 3) | ||
| 55 | #define MPC_DMA_DMACR_ERCA (1 << 2) | ||
| 56 | |||
| 57 | /* Error codes */ | ||
| 58 | #define MPC_DMA_DMAES_VLD (1 << 31) | ||
| 59 | #define MPC_DMA_DMAES_GPE (1 << 15) | ||
| 60 | #define MPC_DMA_DMAES_CPE (1 << 14) | ||
| 61 | #define MPC_DMA_DMAES_ERRCHN(err) \ | ||
| 62 | (((err) >> 8) & 0x3f) | ||
| 63 | #define MPC_DMA_DMAES_SAE (1 << 7) | ||
| 64 | #define MPC_DMA_DMAES_SOE (1 << 6) | ||
| 65 | #define MPC_DMA_DMAES_DAE (1 << 5) | ||
| 66 | #define MPC_DMA_DMAES_DOE (1 << 4) | ||
| 67 | #define MPC_DMA_DMAES_NCE (1 << 3) | ||
| 68 | #define MPC_DMA_DMAES_SGE (1 << 2) | ||
| 69 | #define MPC_DMA_DMAES_SBE (1 << 1) | ||
| 70 | #define MPC_DMA_DMAES_DBE (1 << 0) | ||
| 71 | |||
| 72 | #define MPC_DMA_TSIZE_1 0x00 | ||
| 73 | #define MPC_DMA_TSIZE_2 0x01 | ||
| 74 | #define MPC_DMA_TSIZE_4 0x02 | ||
| 75 | #define MPC_DMA_TSIZE_16 0x04 | ||
| 76 | #define MPC_DMA_TSIZE_32 0x05 | ||
| 77 | |||
| 78 | /* MPC5121 DMA engine registers */ | ||
| 79 | struct __attribute__ ((__packed__)) mpc_dma_regs { | ||
| 80 | /* 0x00 */ | ||
| 81 | u32 dmacr; /* DMA control register */ | ||
| 82 | u32 dmaes; /* DMA error status */ | ||
| 83 | /* 0x08 */ | ||
| 84 | u32 dmaerqh; /* DMA enable request high(channels 63~32) */ | ||
| 85 | u32 dmaerql; /* DMA enable request low(channels 31~0) */ | ||
| 86 | u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */ | ||
| 87 | u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */ | ||
| 88 | /* 0x18 */ | ||
| 89 | u8 dmaserq; /* DMA set enable request */ | ||
| 90 | u8 dmacerq; /* DMA clear enable request */ | ||
| 91 | u8 dmaseei; /* DMA set enable error interrupt */ | ||
| 92 | u8 dmaceei; /* DMA clear enable error interrupt */ | ||
| 93 | /* 0x1c */ | ||
| 94 | u8 dmacint; /* DMA clear interrupt request */ | ||
| 95 | u8 dmacerr; /* DMA clear error */ | ||
| 96 | u8 dmassrt; /* DMA set start bit */ | ||
| 97 | u8 dmacdne; /* DMA clear DONE status bit */ | ||
| 98 | /* 0x20 */ | ||
| 99 | u32 dmainth; /* DMA interrupt request high(ch63~32) */ | ||
| 100 | u32 dmaintl; /* DMA interrupt request low(ch31~0) */ | ||
| 101 | u32 dmaerrh; /* DMA error high(ch63~32) */ | ||
| 102 | u32 dmaerrl; /* DMA error low(ch31~0) */ | ||
| 103 | /* 0x30 */ | ||
| 104 | u32 dmahrsh; /* DMA hw request status high(ch63~32) */ | ||
| 105 | u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ | ||
| 106 | u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ | ||
| 107 | u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ | ||
| 108 | /* 0x40 ~ 0xff */ | ||
| 109 | u32 reserve0[48]; /* Reserved */ | ||
| 110 | /* 0x100 */ | ||
| 111 | u8 dchpri[MPC_DMA_CHANNELS]; | ||
| 112 | /* DMA channels(0~63) priority */ | ||
| 113 | }; | ||
| 114 | |||
| 115 | struct __attribute__ ((__packed__)) mpc_dma_tcd { | ||
| 116 | /* 0x00 */ | ||
| 117 | u32 saddr; /* Source address */ | ||
| 118 | |||
| 119 | u32 smod:5; /* Source address modulo */ | ||
| 120 | u32 ssize:3; /* Source data transfer size */ | ||
| 121 | u32 dmod:5; /* Destination address modulo */ | ||
| 122 | u32 dsize:3; /* Destination data transfer size */ | ||
| 123 | u32 soff:16; /* Signed source address offset */ | ||
| 124 | |||
| 125 | /* 0x08 */ | ||
| 126 | u32 nbytes; /* Inner "minor" byte count */ | ||
| 127 | u32 slast; /* Last source address adjustment */ | ||
| 128 | u32 daddr; /* Destination address */ | ||
| 129 | |||
| 130 | /* 0x14 */ | ||
| 131 | u32 citer_elink:1; /* Enable channel-to-channel linking on | ||
| 132 | * minor loop complete | ||
| 133 | */ | ||
| 134 | u32 citer_linkch:6; /* Link channel for minor loop complete */ | ||
| 135 | u32 citer:9; /* Current "major" iteration count */ | ||
| 136 | u32 doff:16; /* Signed destination address offset */ | ||
| 137 | |||
| 138 | /* 0x18 */ | ||
| 139 | u32 dlast_sga; /* Last Destination address adjustment/scatter | ||
| 140 | * gather address | ||
| 141 | */ | ||
| 142 | |||
| 143 | /* 0x1c */ | ||
| 144 | u32 biter_elink:1; /* Enable channel-to-channel linking on major | ||
| 145 | * loop complete | ||
| 146 | */ | ||
| 147 | u32 biter_linkch:6; | ||
| 148 | u32 biter:9; /* Beginning "major" iteration count */ | ||
| 149 | u32 bwc:2; /* Bandwidth control */ | ||
| 150 | u32 major_linkch:6; /* Link channel number */ | ||
| 151 | u32 done:1; /* Channel done */ | ||
| 152 | u32 active:1; /* Channel active */ | ||
| 153 | u32 major_elink:1; /* Enable channel-to-channel linking on major | ||
| 154 | * loop complete | ||
| 155 | */ | ||
| 156 | u32 e_sg:1; /* Enable scatter/gather processing */ | ||
| 157 | u32 d_req:1; /* Disable request */ | ||
| 158 | u32 int_half:1; /* Enable an interrupt when major counter is | ||
| 159 | * half complete | ||
| 160 | */ | ||
| 161 | u32 int_maj:1; /* Enable an interrupt when major iteration | ||
| 162 | * count completes | ||
| 163 | */ | ||
| 164 | u32 start:1; /* Channel start */ | ||
| 165 | }; | ||
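To make the TCD fields concrete, here is a rough sketch of how a single memory-to-memory transfer descriptor might be populated for a length that is a multiple of 32 bytes. The field choices (32-byte bursts, one major-loop iteration, interrupt on completion) are illustrative assumptions, not lifted verbatim from the driver's prep routine:

    /* Sketch: assumes struct mpc_dma_tcd above; dst/src are bus addresses, len in bytes */
    static void example_fill_memcpy_tcd(struct mpc_dma_tcd *tcd,
                                        u32 dst, u32 src, u32 len)
    {
        memset(tcd, 0, sizeof(*tcd));

        tcd->saddr = src;
        tcd->daddr = dst;

        tcd->ssize = MPC_DMA_TSIZE_32;   /* 32-byte source bursts              */
        tcd->dsize = MPC_DMA_TSIZE_32;   /* 32-byte destination bursts         */
        tcd->soff  = 32;                 /* advance source by 32 per burst     */
        tcd->doff  = 32;                 /* advance destination by 32 per burst*/

        tcd->nbytes = len;               /* whole transfer in one minor loop   */
        tcd->biter  = 1;                 /* single major-loop iteration        */
        tcd->citer  = 1;
        tcd->int_maj = 1;                /* interrupt when the major loop ends */
    }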
| 166 | |||
| 167 | struct mpc_dma_desc { | ||
| 168 | struct dma_async_tx_descriptor desc; | ||
| 169 | struct mpc_dma_tcd *tcd; | ||
| 170 | dma_addr_t tcd_paddr; | ||
| 171 | int error; | ||
| 172 | struct list_head node; | ||
| 173 | }; | ||
| 174 | |||
| 175 | struct mpc_dma_chan { | ||
| 176 | struct dma_chan chan; | ||
| 177 | struct list_head free; | ||
| 178 | struct list_head prepared; | ||
| 179 | struct list_head queued; | ||
| 180 | struct list_head active; | ||
| 181 | struct list_head completed; | ||
| 182 | struct mpc_dma_tcd *tcd; | ||
| 183 | dma_addr_t tcd_paddr; | ||
| 184 | dma_cookie_t completed_cookie; | ||
| 185 | |||
| 186 | /* Lock for this structure */ | ||
| 187 | spinlock_t lock; | ||
| 188 | }; | ||
| 189 | |||
| 190 | struct mpc_dma { | ||
| 191 | struct dma_device dma; | ||
| 192 | struct tasklet_struct tasklet; | ||
| 193 | struct mpc_dma_chan channels[MPC_DMA_CHANNELS]; | ||
| 194 | struct mpc_dma_regs __iomem *regs; | ||
| 195 | struct mpc_dma_tcd __iomem *tcd; | ||
| 196 | int irq; | ||
| 197 | uint error_status; | ||
| 198 | |||
| 199 | /* Lock for error_status field in this structure */ | ||
| 200 | spinlock_t error_status_lock; | ||
| 201 | }; | ||
| 202 | |||
| 203 | #define DRV_NAME "mpc512x_dma" | ||
| 204 | |||
| 205 | /* Convert struct dma_chan to struct mpc_dma_chan */ | ||
| 206 | static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) | ||
| 207 | { | ||
| 208 | return container_of(c, struct mpc_dma_chan, chan); | ||
| 209 | } | ||
| 210 | |||
| 211 | /* Convert struct dma_chan to struct mpc_dma */ | ||
| 212 | static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) | ||
| 213 | { | ||
| 214 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); | ||
| 215 | return container_of(mchan, struct mpc_dma, channels[c->chan_id]); | ||
| 216 | } | ||
| 217 | |||
| 218 | /* | ||
| 219 | * Execute all queued DMA descriptors. | ||
| 220 | * | ||
| 221 | * The following requirements must be met when calling mpc_dma_execute(): | ||
| 222 | * a) mchan->lock is acquired, | ||
| 223 | * b) mchan->active list is empty, | ||
| 224 | * c) mchan->queued list contains at least one entry. | ||
| 225 | */ | ||
| 226 | static void mpc_dma_execute(struct mpc_dma_chan *mchan) | ||
| 227 | { | ||
| 228 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); | ||
| 229 | struct mpc_dma_desc *first = NULL; | ||
| 230 | struct mpc_dma_desc *prev = NULL; | ||
| 231 | struct mpc_dma_desc *mdesc; | ||
| 232 | int cid = mchan->chan.chan_id; | ||
| 233 | |||
| 234 | /* Move all queued descriptors to active list */ | ||
| 235 | list_splice_tail_init(&mchan->queued, &mchan->active); | ||
| 236 | |||
| 237 | /* Chain descriptors into one transaction */ | ||
| 238 | list_for_each_entry(mdesc, &mchan->active, node) { | ||
| 239 | if (!first) | ||
| 240 | first = mdesc; | ||
| 241 | |||
| 242 | if (!prev) { | ||
| 243 | prev = mdesc; | ||
| 244 | continue; | ||
| 245 | } | ||
| 246 | |||
| 247 | prev->tcd->dlast_sga = mdesc->tcd_paddr; | ||
| 248 | prev->tcd->e_sg = 1; | ||
| 249 | mdesc->tcd->start = 1; | ||
| 250 | |||
| 251 | prev = mdesc; | ||
| 252 | } | ||
| 253 | |||
| 254 | prev->tcd->start = 0; | ||
| 255 | prev->tcd->int_maj = 1; | ||
| 256 | |||
| 257 | /* Send first descriptor in chain into hardware */ | ||
| 258 | memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); | ||
| 259 | out_8(&mdma->regs->dmassrt, cid); | ||
| 260 | } | ||
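Because mpc_dma_execute() assumes its preconditions rather than checking them, every caller has to establish them first. A hedged sketch of the calling pattern the comment above describes (this is how the submit and interrupt paths use it, reduced to its essentials):

    /* Sketch of the caller contract: lock held, nothing active, something queued */
    static void example_try_start(struct mpc_dma_chan *mchan)
    {
        unsigned long flags;

        spin_lock_irqsave(&mchan->lock, flags);

        if (list_empty(&mchan->active) && !list_empty(&mchan->queued))
            mpc_dma_execute(mchan);     /* preconditions (a), (b), (c) all hold */

        spin_unlock_irqrestore(&mchan->lock, flags);
    }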
| 261 | |||
| 262 | /* Handle interrupt on one half of DMA controller (32 channels) */ | ||
| 263 | static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) | ||
| 264 | { | ||
| 265 | struct mpc_dma_chan *mchan; | ||
| 266 | struct mpc_dma_desc *mdesc; | ||
| 267 | u32 status = is | es; | ||
| 268 | int ch; | ||
| 269 | |||
| 270 | while ((ch = fls(status) - 1) >= 0) { | ||
| 271 | status &= ~(1 << ch); | ||
| 272 | mchan = &mdma->channels[ch + off]; | ||
| 273 | |||
| 274 | spin_lock(&mchan->lock); | ||
| 275 | |||
| 276 | /* Check error status */ | ||
| 277 | if (es & (1 << ch)) | ||
| 278 | list_for_each_entry(mdesc, &mchan->active, node) | ||
| 279 | mdesc->error = -EIO; | ||
| 280 | |||
| 281 | /* Execute queued descriptors */ | ||
| 282 | list_splice_tail_init(&mchan->active, &mchan->completed); | ||
| 283 | if (!list_empty(&mchan->queued)) | ||
| 284 | mpc_dma_execute(mchan); | ||
| 285 | |||
| 286 | spin_unlock(&mchan->lock); | ||
| 287 | } | ||
| 288 | } | ||
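The fls()-based loop above services the interrupt/error word from the highest set bit downward, clearing one bit per pass. A tiny standalone model of that scan, with a local stand-in for the kernel's fls() so it can run on its own:

    #include <stdio.h>

    /* Minimal stand-in for fls(): position of the highest set bit, 1-based, 0 for 0 */
    static int demo_fls(unsigned int x)
    {
        int pos = 0;

        while (x) {
            pos++;
            x >>= 1;
        }
        return pos;
    }

    int main(void)
    {
        unsigned int status = (1u << 31) | (1u << 5) | (1u << 0);
        int ch;

        /* same shape as the driver loop: highest channel first, one bit per pass */
        while ((ch = demo_fls(status) - 1) >= 0) {
            status &= ~(1u << ch);
            printf("service channel %d\n", ch);   /* prints 31, then 5, then 0 */
        }
        return 0;
    }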
| 289 | |||
| 290 | /* Interrupt handler */ | ||
| 291 | static irqreturn_t mpc_dma_irq(int irq, void *data) | ||
| 292 | { | ||
| 293 | struct mpc_dma *mdma = data; | ||
| 294 | uint es; | ||
| 295 | |||
| 296 | /* Save error status register */ | ||
| 297 | es = in_be32(&mdma->regs->dmaes); | ||
| 298 | spin_lock(&mdma->error_status_lock); | ||
| 299 | if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0) | ||
| 300 | mdma->error_status = es; | ||
| 301 | spin_unlock(&mdma->error_status_lock); | ||
| 302 | |||
| 303 | /* Handle interrupt on each channel */ | ||
| 304 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), | ||
| 305 | in_be32(&mdma->regs->dmaerrh), 32); | ||
| 306 | mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), | ||
| 307 | in_be32(&mdma->regs->dmaerrl), 0); | ||
| 308 | |||
| 309 | /* Ack interrupt on all channels */ | ||
| 310 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | ||
| 311 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | ||
| 312 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | ||
| 313 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
| 314 | |||
| 315 | /* Schedule tasklet */ | ||
| 316 | tasklet_schedule(&mdma->tasklet); | ||
| 317 | |||
| 318 | return IRQ_HANDLED; | ||
| 319 | } | ||
| 320 | |||
| 321 | /* DMA Tasklet */ | ||
| 322 | static void mpc_dma_tasklet(unsigned long data) | ||
| 323 | { | ||
| 324 | struct mpc_dma *mdma = (void *)data; | ||
| 325 | dma_cookie_t last_cookie = 0; | ||
| 326 | struct mpc_dma_chan *mchan; | ||
| 327 | struct mpc_dma_desc *mdesc; | ||
| 328 | struct dma_async_tx_descriptor *desc; | ||
| 329 | unsigned long flags; | ||
| 330 | LIST_HEAD(list); | ||
| 331 | uint es; | ||
| 332 | int i; | ||
| 333 | |||
| 334 | spin_lock_irqsave(&mdma->error_status_lock, flags); | ||
| 335 | es = mdma->error_status; | ||
| 336 | mdma->error_status = 0; | ||
| 337 | spin_unlock_irqrestore(&mdma->error_status_lock, flags); | ||
| 338 | |||
| 339 | /* Print nice error report */ | ||
| 340 | if (es) { | ||
| 341 | dev_err(mdma->dma.dev, | ||
| 342 | "Hardware reported following error(s) on channel %u:\n", | ||
| 343 | MPC_DMA_DMAES_ERRCHN(es)); | ||
| 344 | |||
| 345 | if (es & MPC_DMA_DMAES_GPE) | ||
| 346 | dev_err(mdma->dma.dev, "- Group Priority Error\n"); | ||
| 347 | if (es & MPC_DMA_DMAES_CPE) | ||
| 348 | dev_err(mdma->dma.dev, "- Channel Priority Error\n"); | ||
| 349 | if (es & MPC_DMA_DMAES_SAE) | ||
| 350 | dev_err(mdma->dma.dev, "- Source Address Error\n"); | ||
| 351 | if (es & MPC_DMA_DMAES_SOE) | ||
| 352 | dev_err(mdma->dma.dev, "- Source Offset" | ||
| 353 | " Configuration Error\n"); | ||
| 354 | if (es & MPC_DMA_DMAES_DAE) | ||
| 355 | dev_err(mdma->dma.dev, "- Destination Address" | ||
| 356 | " Error\n"); | ||
| 357 | if (es & MPC_DMA_DMAES_DOE) | ||
| 358 | dev_err(mdma->dma.dev, "- Destination Offset" | ||
| 359 | " Configuration Error\n"); | ||
| 360 | if (es & MPC_DMA_DMAES_NCE) | ||
| 361 | dev_err(mdma->dma.dev, "- NBytes/Citer" | ||
| 362 | " Configuration Error\n"); | ||
| 363 | if (es & MPC_DMA_DMAES_SGE) | ||
| 364 | dev_err(mdma->dma.dev, "- Scatter/Gather" | ||
| 365 | " Configuration Error\n"); | ||
| 366 | if (es & MPC_DMA_DMAES_SBE) | ||
| 367 | dev_err(mdma->dma.dev, "- Source Bus Error\n"); | ||
| 368 | if (es & MPC_DMA_DMAES_DBE) | ||
| 369 | dev_err(mdma->dma.dev, "- Destination Bus Error\n"); | ||
| 370 | } | ||
| 371 | |||
| 372 | for (i = 0; i < mdma->dma.chancnt; i++) { | ||
| 373 | mchan = &mdma->channels[i]; | ||
| 374 | |||
| 375 | /* Get all completed descriptors */ | ||
| 376 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 377 | if (!list_empty(&mchan->completed)) | ||
| 378 | list_splice_tail_init(&mchan->completed, &list); | ||
| 379 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 380 | |||
| 381 | if (list_empty(&list)) | ||
| 382 | continue; | ||
| 383 | |||
| 384 | /* Execute callbacks and run dependencies */ | ||
| 385 | list_for_each_entry(mdesc, &list, node) { | ||
| 386 | desc = &mdesc->desc; | ||
| 387 | |||
| 388 | if (desc->callback) | ||
| 389 | desc->callback(desc->callback_param); | ||
| 390 | |||
| 391 | last_cookie = desc->cookie; | ||
| 392 | dma_run_dependencies(desc); | ||
| 393 | } | ||
| 394 | |||
| 395 | /* Free descriptors */ | ||
| 396 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 397 | list_splice_tail_init(&list, &mchan->free); | ||
| 398 | mchan->completed_cookie = last_cookie; | ||
| 399 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 400 | } | ||
| 401 | } | ||
| 402 | |||
| 403 | /* Submit descriptor to hardware */ | ||
| 404 | static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
| 405 | { | ||
| 406 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan); | ||
| 407 | struct mpc_dma_desc *mdesc; | ||
| 408 | unsigned long flags; | ||
| 409 | dma_cookie_t cookie; | ||
| 410 | |||
| 411 | mdesc = container_of(txd, struct mpc_dma_desc, desc); | ||
| 412 | |||
| 413 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 414 | |||
| 415 | /* Move descriptor to queue */ | ||
| 416 | list_move_tail(&mdesc->node, &mchan->queued); | ||
| 417 | |||
| 418 | /* If channel is idle, execute all queued descriptors */ | ||
| 419 | if (list_empty(&mchan->active)) | ||
| 420 | mpc_dma_execute(mchan); | ||
| 421 | |||
| 422 | /* Update cookie */ | ||
| 423 | cookie = mchan->chan.cookie + 1; | ||
| 424 | if (cookie <= 0) | ||
| 425 | cookie = 1; | ||
| 426 | |||
| 427 | mchan->chan.cookie = cookie; | ||
| 428 | mdesc->desc.cookie = cookie; | ||
| 429 | |||
| 430 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 431 | |||
| 432 | return cookie; | ||
| 433 | } | ||
| 434 | |||
| 435 | /* Alloc channel resources */ | ||
| 436 | static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 437 | { | ||
| 438 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); | ||
| 439 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | ||
| 440 | struct mpc_dma_desc *mdesc; | ||
| 441 | struct mpc_dma_tcd *tcd; | ||
| 442 | dma_addr_t tcd_paddr; | ||
| 443 | unsigned long flags; | ||
| 444 | LIST_HEAD(descs); | ||
| 445 | int i; | ||
| 446 | |||
| 447 | /* Alloc DMA memory for Transfer Control Descriptors */ | ||
| 448 | tcd = dma_alloc_coherent(mdma->dma.dev, | ||
| 449 | MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), | ||
| 450 | &tcd_paddr, GFP_KERNEL); | ||
| 451 | if (!tcd) | ||
| 452 | return -ENOMEM; | ||
| 453 | |||
| 454 | /* Alloc descriptors for this channel */ | ||
| 455 | for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { | ||
| 456 | mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); | ||
| 457 | if (!mdesc) { | ||
| 458 | dev_notice(mdma->dma.dev, "Memory allocation error. " | ||
| 459 | "Allocated only %u descriptors\n", i); | ||
| 460 | break; | ||
| 461 | } | ||
| 462 | |||
| 463 | dma_async_tx_descriptor_init(&mdesc->desc, chan); | ||
| 464 | mdesc->desc.flags = DMA_CTRL_ACK; | ||
| 465 | mdesc->desc.tx_submit = mpc_dma_tx_submit; | ||
| 466 | |||
| 467 | mdesc->tcd = &tcd[i]; | ||
| 468 | mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd)); | ||
| 469 | |||
| 470 | list_add_tail(&mdesc->node, &descs); | ||
| 471 | } | ||
| 472 | |||
| 473 | /* Return error only if no descriptors were allocated */ | ||
| 474 | if (i == 0) { | ||
| 475 | dma_free_coherent(mdma->dma.dev, | ||
| 476 | MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), | ||
| 477 | tcd, tcd_paddr); | ||
| 478 | return -ENOMEM; | ||
| 479 | } | ||
| 480 | |||
| 481 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 482 | mchan->tcd = tcd; | ||
| 483 | mchan->tcd_paddr = tcd_paddr; | ||
| 484 | list_splice_tail_init(&descs, &mchan->free); | ||
| 485 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 486 | |||
| 487 | /* Enable Error Interrupt */ | ||
| 488 | out_8(&mdma->regs->dmaseei, chan->chan_id); | ||
| 489 | |||
| 490 | return 0; | ||
| 491 | } | ||
| 492 | |||
| 493 | /* Free channel resources */ | ||
| 494 | static void mpc_dma_free_chan_resources(struct dma_chan *chan) | ||
| 495 | { | ||
| 496 | struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); | ||
| 497 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | ||
| 498 | struct mpc_dma_desc *mdesc, *tmp; | ||
| 499 | struct mpc_dma_tcd *tcd; | ||
| 500 | dma_addr_t tcd_paddr; | ||
| 501 | unsigned long flags; | ||
| 502 | LIST_HEAD(descs); | ||
| 503 | |||
| 504 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 505 | |||
| 506 | /* Channel must be idle */ | ||
| 507 | BUG_ON(!list_empty(&mchan->prepared)); | ||
| 508 | BUG_ON(!list_empty(&mchan->queued)); | ||
| 509 | BUG_ON(!list_empty(&mchan->active)); | ||
| 510 | BUG_ON(!list_empty(&mchan->completed)); | ||
| 511 | |||
| 512 | /* Detach descriptor list and TCD memory from the channel */ | ||
| 513 | list_splice_tail_init(&mchan->free, &descs); | ||
| 514 | tcd = mchan->tcd; | ||
| 515 | tcd_paddr = mchan->tcd_paddr; | ||
| 516 | |||
| 517 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 518 | |||
| 519 | /* Free DMA memory used by descriptors */ | ||
| 520 | dma_free_coherent(mdma->dma.dev, | ||
| 521 | MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), | ||
| 522 | tcd, tcd_paddr); | ||
| 523 | |||
| 524 | /* Free descriptors */ | ||
| 525 | list_for_each_entry_safe(mdesc, tmp, &descs, node) | ||
| 526 | kfree(mdesc); | ||
| 527 | |||
| 528 | /* Disable Error Interrupt */ | ||
| 529 | out_8(&mdma->regs->dmaceei, chan->chan_id); | ||
| 530 | } | ||
| 531 | |||
| 532 | /* Send all pending descriptors to hardware */ | ||
| 533 | static void mpc_dma_issue_pending(struct dma_chan *chan) | ||
| 534 | { | ||
| 535 | /* | ||
| 536 | * We are posting descriptors to the hardware as soon as | ||
| 537 | * they are ready, so this function does nothing. | ||
| 538 | */ | ||
| 539 | } | ||
| 540 | |||
| 541 | /* Check request completion status */ | ||
| 542 | static enum dma_status | ||
| 543 | mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, | ||
| 544 | dma_cookie_t *done, dma_cookie_t *used) | ||
| 545 | { | ||
| 546 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | ||
| 547 | unsigned long flags; | ||
| 548 | dma_cookie_t last_used; | ||
| 549 | dma_cookie_t last_complete; | ||
| 550 | |||
| 551 | spin_lock_irqsave(&mchan->lock, flags); | ||
| 552 | last_used = mchan->chan.cookie; | ||
| 553 | last_complete = mchan->completed_cookie; | ||
| 554 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
| 555 | |||
| 556 | if (done) | ||
| 557 | *done = last_complete; | ||
| 558 | |||
| 559 | if (used) | ||
| 560 | *used = last_used; | ||
| 561 | |||
| 562 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
| 563 | } | ||
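For callers, this hook sits behind the generic dma_async_is_tx_complete() wrapper, so completion of a submitted cookie is typically polled roughly as below (a sketch only; chan and cookie are assumed to come from an earlier prep/submit, and a real client would normally sleep or rely on the descriptor callback rather than busy-wait):

	dma_cookie_t last, used;

	while (dma_async_is_tx_complete(chan, cookie, &last, &used) == DMA_IN_PROGRESS)
		cpu_relax();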
| 564 | |||
| 565 | /* Prepare descriptor for memory to memory copy */ | ||
| 566 | static struct dma_async_tx_descriptor * | ||
| 567 | mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, | ||
| 568 | size_t len, unsigned long flags) | ||
| 569 | { | ||
| 570 | struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); | ||
| 571 | struct mpc_dma_desc *mdesc = NULL; | ||
| 572 | struct mpc_dma_tcd *tcd; | ||
| 573 | unsigned long iflags; | ||
| 574 | |||
| 575 | /* Get free descriptor */ | ||
| 576 | spin_lock_irqsave(&mchan->lock, iflags); | ||
| 577 | if (!list_empty(&mchan->free)) { | ||
| 578 | mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, | ||
| 579 | node); | ||
| 580 | list_del(&mdesc->node); | ||
| 581 | } | ||
| 582 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
| 583 | |||
| 584 | if (!mdesc) | ||
| 585 | return NULL; | ||
| 586 | |||
| 587 | mdesc->error = 0; | ||
| 588 | tcd = mdesc->tcd; | ||
| 589 | |||
| 590 | /* Prepare Transfer Control Descriptor for this transaction */ | ||
| 591 | memset(tcd, 0, sizeof(struct mpc_dma_tcd)); | ||
| 592 | |||
| 593 | if (IS_ALIGNED(src | dst | len, 32)) { | ||
| 594 | tcd->ssize = MPC_DMA_TSIZE_32; | ||
| 595 | tcd->dsize = MPC_DMA_TSIZE_32; | ||
| 596 | tcd->soff = 32; | ||
| 597 | tcd->doff = 32; | ||
| 598 | } else if (IS_ALIGNED(src | dst | len, 16)) { | ||
| 599 | tcd->ssize = MPC_DMA_TSIZE_16; | ||
| 600 | tcd->dsize = MPC_DMA_TSIZE_16; | ||
| 601 | tcd->soff = 16; | ||
| 602 | tcd->doff = 16; | ||
| 603 | } else if (IS_ALIGNED(src | dst | len, 4)) { | ||
| 604 | tcd->ssize = MPC_DMA_TSIZE_4; | ||
| 605 | tcd->dsize = MPC_DMA_TSIZE_4; | ||
| 606 | tcd->soff = 4; | ||
| 607 | tcd->doff = 4; | ||
| 608 | } else if (IS_ALIGNED(src | dst | len, 2)) { | ||
| 609 | tcd->ssize = MPC_DMA_TSIZE_2; | ||
| 610 | tcd->dsize = MPC_DMA_TSIZE_2; | ||
| 611 | tcd->soff = 2; | ||
| 612 | tcd->doff = 2; | ||
| 613 | } else { | ||
| 614 | tcd->ssize = MPC_DMA_TSIZE_1; | ||
| 615 | tcd->dsize = MPC_DMA_TSIZE_1; | ||
| 616 | tcd->soff = 1; | ||
| 617 | tcd->doff = 1; | ||
| 618 | } | ||
| 619 | |||
| 620 | tcd->saddr = src; | ||
| 621 | tcd->daddr = dst; | ||
| 622 | tcd->nbytes = len; | ||
| 623 | tcd->biter = 1; | ||
| 624 | tcd->citer = 1; | ||
| 625 | |||
| 626 | /* Place descriptor in prepared list */ | ||
| 627 | spin_lock_irqsave(&mchan->lock, iflags); | ||
| 628 | list_add_tail(&mdesc->node, &mchan->prepared); | ||
| 629 | spin_unlock_irqrestore(&mchan->lock, iflags); | ||
| 630 | |||
| 631 | return &mdesc->desc; | ||
| 632 | } | ||
| 633 | |||
| 634 | static int __devinit mpc_dma_probe(struct of_device *op, | ||
| 635 | const struct of_device_id *match) | ||
| 636 | { | ||
| 637 | struct device_node *dn = op->node; | ||
| 638 | struct device *dev = &op->dev; | ||
| 639 | struct dma_device *dma; | ||
| 640 | struct mpc_dma *mdma; | ||
| 641 | struct mpc_dma_chan *mchan; | ||
| 642 | struct resource res; | ||
| 643 | ulong regs_start, regs_size; | ||
| 644 | int retval, i; | ||
| 645 | |||
| 646 | mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); | ||
| 647 | if (!mdma) { | ||
| 648 | dev_err(dev, "Memory exhausted!\n"); | ||
| 649 | return -ENOMEM; | ||
| 650 | } | ||
| 651 | |||
| 652 | mdma->irq = irq_of_parse_and_map(dn, 0); | ||
| 653 | if (mdma->irq == NO_IRQ) { | ||
| 654 | dev_err(dev, "Error mapping IRQ!\n"); | ||
| 655 | return -EINVAL; | ||
| 656 | } | ||
| 657 | |||
| 658 | retval = of_address_to_resource(dn, 0, &res); | ||
| 659 | if (retval) { | ||
| 660 | dev_err(dev, "Error parsing memory region!\n"); | ||
| 661 | return retval; | ||
| 662 | } | ||
| 663 | |||
| 664 | regs_start = res.start; | ||
| 665 | regs_size = res.end - res.start + 1; | ||
| 666 | |||
| 667 | if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { | ||
| 668 | dev_err(dev, "Error requesting memory region!\n"); | ||
| 669 | return -EBUSY; | ||
| 670 | } | ||
| 671 | |||
| 672 | mdma->regs = devm_ioremap(dev, regs_start, regs_size); | ||
| 673 | if (!mdma->regs) { | ||
| 674 | dev_err(dev, "Error mapping memory region!\n"); | ||
| 675 | return -ENOMEM; | ||
| 676 | } | ||
| 677 | |||
| 678 | mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) | ||
| 679 | + MPC_DMA_TCD_OFFSET); | ||
| 680 | |||
| 681 | retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, | ||
| 682 | mdma); | ||
| 683 | if (retval) { | ||
| 684 | dev_err(dev, "Error requesting IRQ!\n"); | ||
| 685 | return -EINVAL; | ||
| 686 | } | ||
| 687 | |||
| 688 | spin_lock_init(&mdma->error_status_lock); | ||
| 689 | |||
| 690 | dma = &mdma->dma; | ||
| 691 | dma->dev = dev; | ||
| 692 | dma->chancnt = MPC_DMA_CHANNELS; | ||
| 693 | dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; | ||
| 694 | dma->device_free_chan_resources = mpc_dma_free_chan_resources; | ||
| 695 | dma->device_issue_pending = mpc_dma_issue_pending; | ||
| 696 | dma->device_is_tx_complete = mpc_dma_is_tx_complete; | ||
| 697 | dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; | ||
| 698 | |||
| 699 | INIT_LIST_HEAD(&dma->channels); | ||
| 700 | dma_cap_set(DMA_MEMCPY, dma->cap_mask); | ||
| 701 | |||
| 702 | for (i = 0; i < dma->chancnt; i++) { | ||
| 703 | mchan = &mdma->channels[i]; | ||
| 704 | |||
| 705 | mchan->chan.device = dma; | ||
| 706 | mchan->chan.chan_id = i; | ||
| 707 | mchan->chan.cookie = 1; | ||
| 708 | mchan->completed_cookie = mchan->chan.cookie; | ||
| 709 | |||
| 710 | INIT_LIST_HEAD(&mchan->free); | ||
| 711 | INIT_LIST_HEAD(&mchan->prepared); | ||
| 712 | INIT_LIST_HEAD(&mchan->queued); | ||
| 713 | INIT_LIST_HEAD(&mchan->active); | ||
| 714 | INIT_LIST_HEAD(&mchan->completed); | ||
| 715 | |||
| 716 | spin_lock_init(&mchan->lock); | ||
| 717 | list_add_tail(&mchan->chan.device_node, &dma->channels); | ||
| 718 | } | ||
| 719 | |||
| 720 | tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); | ||
| 721 | |||
| 722 | /* | ||
| 723 | * Configure DMA Engine: | ||
| 724 | * - Dynamic clock, | ||
| 725 | * - Round-robin group arbitration, | ||
| 726 | * - Round-robin channel arbitration. | ||
| 727 | */ | ||
| 728 | out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | | ||
| 729 | MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); | ||
| 730 | |||
| 731 | /* Disable hardware DMA requests */ | ||
| 732 | out_be32(&mdma->regs->dmaerqh, 0); | ||
| 733 | out_be32(&mdma->regs->dmaerql, 0); | ||
| 734 | |||
| 735 | /* Disable error interrupts */ | ||
| 736 | out_be32(&mdma->regs->dmaeeih, 0); | ||
| 737 | out_be32(&mdma->regs->dmaeeil, 0); | ||
| 738 | |||
| 739 | /* Clear interrupt status */ | ||
| 740 | out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); | ||
| 741 | out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); | ||
| 742 | out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); | ||
| 743 | out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); | ||
| 744 | |||
| 745 | /* Route interrupts to IPIC */ | ||
| 746 | out_be32(&mdma->regs->dmaihsa, 0); | ||
| 747 | out_be32(&mdma->regs->dmailsa, 0); | ||
| 748 | |||
| 749 | /* Register DMA engine */ | ||
| 750 | dev_set_drvdata(dev, mdma); | ||
| 751 | retval = dma_async_device_register(dma); | ||
| 752 | if (retval) { | ||
| 753 | devm_free_irq(dev, mdma->irq, mdma); | ||
| 754 | irq_dispose_mapping(mdma->irq); | ||
| 755 | } | ||
| 756 | |||
| 757 | return retval; | ||
| 758 | } | ||
| 759 | |||
| 760 | static int __devexit mpc_dma_remove(struct of_device *op) | ||
| 761 | { | ||
| 762 | struct device *dev = &op->dev; | ||
| 763 | struct mpc_dma *mdma = dev_get_drvdata(dev); | ||
| 764 | |||
| 765 | dma_async_device_unregister(&mdma->dma); | ||
| 766 | devm_free_irq(dev, mdma->irq, mdma); | ||
| 767 | irq_dispose_mapping(mdma->irq); | ||
| 768 | |||
| 769 | return 0; | ||
| 770 | } | ||
| 771 | |||
| 772 | static struct of_device_id mpc_dma_match[] = { | ||
| 773 | { .compatible = "fsl,mpc5121-dma", }, | ||
| 774 | {}, | ||
| 775 | }; | ||
| 776 | |||
| 777 | static struct of_platform_driver mpc_dma_driver = { | ||
| 778 | .match_table = mpc_dma_match, | ||
| 779 | .probe = mpc_dma_probe, | ||
| 780 | .remove = __devexit_p(mpc_dma_remove), | ||
| 781 | .driver = { | ||
| 782 | .name = DRV_NAME, | ||
| 783 | .owner = THIS_MODULE, | ||
| 784 | }, | ||
| 785 | }; | ||
| 786 | |||
| 787 | static int __init mpc_dma_init(void) | ||
| 788 | { | ||
| 789 | return of_register_platform_driver(&mpc_dma_driver); | ||
| 790 | } | ||
| 791 | module_init(mpc_dma_init); | ||
| 792 | |||
| 793 | static void __exit mpc_dma_exit(void) | ||
| 794 | { | ||
| 795 | of_unregister_platform_driver(&mpc_dma_driver); | ||
| 796 | } | ||
| 797 | module_exit(mpc_dma_exit); | ||
| 798 | |||
| 799 | MODULE_LICENSE("GPL"); | ||
| 800 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); | ||
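Since the engine only advertises DMA_MEMCPY, the natural consumer of this driver is the generic dmaengine memcpy path. Below is a minimal sketch of such a client, assuming kmalloc'd src/dst buffers of size len; example_memcpy() and the reduced error handling are illustrative, while the dmaengine calls themselves are the standard API of this kernel generation:

	#include <linux/dmaengine.h>
	#include <linux/sched.h>

	static int example_memcpy(void *dst, void *src, size_t len)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		dma_cookie_t cookie;

		/* Ask dmaengine for any channel that can do plain memcpy */
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		/* Maps both buffers, prepares a descriptor and submits it */
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		if (dma_submit_error(cookie)) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		/* A no-op for this driver (transfers start at submit), kept for portability */
		dma_async_issue_pending(chan);

		/* Busy-wait only for the sake of the sketch */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
			cpu_relax();

		dma_release_channel(chan);
		return 0;
	}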
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0a3478e910f0..e69d87f24a25 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
| @@ -4940,7 +4940,7 @@ out_free: | |||
| 4940 | return ret; | 4940 | return ret; |
| 4941 | } | 4941 | } |
| 4942 | 4942 | ||
| 4943 | static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = { | 4943 | static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = { |
| 4944 | { .compatible = "ibm,dma-440spe", }, | 4944 | { .compatible = "ibm,dma-440spe", }, |
| 4945 | { .compatible = "amcc,xor-accelerator", }, | 4945 | { .compatible = "amcc,xor-accelerator", }, |
| 4946 | {}, | 4946 | {}, |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 21fd9b7c6a40..20ea12c86fd0 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -31,6 +31,8 @@ | |||
| 31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code | 31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code |
| 32 | */ | 32 | */ |
| 33 | typedef s32 dma_cookie_t; | 33 | typedef s32 dma_cookie_t; |
| 34 | #define DMA_MIN_COOKIE 1 | ||
| 35 | #define DMA_MAX_COOKIE INT_MAX | ||
| 34 | 36 | ||
| 35 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) | 37 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) |
| 36 | 38 | ||
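The new DMA_MIN_COOKIE/DMA_MAX_COOKIE bounds give drivers a named constant for the cookie wrap check that mpc_dma_tx_submit() above still open-codes with a literal 1; with these defines the same check could be written as below (an equivalent illustration, not part of this patch):

	cookie = mchan->chan.cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;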
