author | David Woodhouse <dwmw2@infradead.org> | 2008-04-22 07:34:25 -0400
committer | David Woodhouse <dwmw2@infradead.org> | 2008-04-22 07:34:25 -0400
commit | f838bad1b3be8ca0c785ee0e0c570dfda74cf377 (patch)
tree | 5a842a8056a708cfad55a20fa8ab733dd94b0903 /drivers/dma
parent | dd919660aacdf4adfcd279556aa03e595f7f0fc2 (diff)
parent | 807501475fce0ebe68baedf87f202c3e4ee0d12c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/Kconfig | 11
-rw-r--r-- | drivers/dma/Makefile | 1
-rw-r--r-- | drivers/dma/dmaengine.c | 23
-rw-r--r-- | drivers/dma/fsldma.c | 1128
-rw-r--r-- | drivers/dma/fsldma.h | 197
-rw-r--r-- | drivers/dma/ioat_dca.c | 4
-rw-r--r-- | drivers/dma/ioat_dma.c | 34
-rw-r--r-- | drivers/dma/iop-adma.c | 92
8 files changed, 1402 insertions, 88 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a703deffb795..6239c3df30ac 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC |
8 | depends on !HIGHMEM64G | 8 | depends on !HIGHMEM64G |
9 | help | 9 | help |
10 | DMA engines can do asynchronous data transfers without | 10 | DMA engines can do asynchronous data transfers without |
@@ -37,6 +37,15 @@ config INTEL_IOP_ADMA | |||
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config FSL_DMA | ||
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | ||
42 | depends on PPC | ||
43 | select DMA_ENGINE | ||
44 | ---help--- | ||
45 | Enable support for the Freescale DMA engine. It currently supports | ||
46 | the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | ||
47 | The MPC8349 and MPC8360 are also supported. | ||
48 | |||
40 | config DMA_ENGINE | 49 | config DMA_ENGINE |
41 | bool | 50 | bool |
42 | 51 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b152cd84e123..c8036d945902 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o | |||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 29965231b912..97b329e76798 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -42,9 +42,9 @@ | |||
42 | * | 42 | * |
43 | * Each device has a kref, which is initialized to 1 when the device is | 43 | * Each device has a kref, which is initialized to 1 when the device is |
44 | * registered. A kref_get is done for each device registered. When the | 44 | * registered. A kref_get is done for each device registered. When the |
45 | * device is released, the coresponding kref_put is done in the release | 45 | * device is released, the corresponding kref_put is done in the release |
46 | * method. Every time one of the device's channels is allocated to a client, | 46 | * method. Every time one of the device's channels is allocated to a client, |
47 | * a kref_get occurs. When the channel is freed, the coresponding kref_put | 47 | * a kref_get occurs. When the channel is freed, the corresponding kref_put |
48 | * happens. The device's release function does a completion, so | 48 | * happens. The device's release function does a completion, so |
49 | * unregister_device does a remove event, device_unregister, a kref_put | 49 | * unregister_device does a remove event, device_unregister, a kref_put |
50 | * for the first reference, then waits on the completion for all other | 50 | * for the first reference, then waits on the completion for all other |
@@ -53,7 +53,7 @@ | |||
53 | * Each channel has an open-coded implementation of Rusty Russell's "bigref," | 53 | * Each channel has an open-coded implementation of Rusty Russell's "bigref," |
54 | * with a kref and a per_cpu local_t. A dma_chan_get is called when a client | 54 | * with a kref and a per_cpu local_t. A dma_chan_get is called when a client |
55 | * signals that it wants to use a channel, and dma_chan_put is called when | 55 | * signals that it wants to use a channel, and dma_chan_put is called when |
56 | * a channel is removed or a client using it is unregesitered. A client can | 56 | * a channel is removed or a client using it is unregistered. A client can |
57 | * take extra references per outstanding transaction, as is the case with | 57 | * take extra references per outstanding transaction, as is the case with |
58 | * the NET DMA client. The release function does a kref_put on the device. | 58 | * the NET DMA client. The release function does a kref_put on the device. |
59 | * -ChrisL, DanW | 59 | * -ChrisL, DanW |
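The comment above describes the device lifetime protocol in words. As a hedged, self-contained sketch of that same kref-plus-completion pattern (the struct and helper names below are illustrative, not the actual dmaengine code):

```c
/*
 * Minimal sketch of the kref/completion protocol described above.
 * The struct and function names are illustrative, not the real
 * dmaengine ones.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct example_dev {
	struct kref refcount;
	struct completion done;
};

static void example_release(struct kref *kref)
{
	struct example_dev *d = container_of(kref, struct example_dev,
					     refcount);
	complete(&d->done);		/* release signals the waiter below */
}

static void example_register(struct example_dev *d)
{
	init_completion(&d->done);
	kref_init(&d->refcount);	/* registered device starts at 1 */
}

static void example_chan_get(struct example_dev *d)
{
	kref_get(&d->refcount);		/* one get per allocated channel */
}

static void example_chan_put(struct example_dev *d)
{
	kref_put(&d->refcount, example_release);
}

static void example_unregister(struct example_dev *d)
{
	kref_put(&d->refcount, example_release);	/* drop initial ref */
	wait_for_completion(&d->done);	/* wait for remaining channel refs */
}
```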
@@ -357,12 +357,11 @@ int dma_async_device_register(struct dma_device *device) | |||
357 | !device->device_prep_dma_zero_sum); | 357 | !device->device_prep_dma_zero_sum); |
358 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | 358 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && |
359 | !device->device_prep_dma_memset); | 359 | !device->device_prep_dma_memset); |
360 | BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && | 360 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
361 | !device->device_prep_dma_interrupt); | 361 | !device->device_prep_dma_interrupt); |
362 | 362 | ||
363 | BUG_ON(!device->device_alloc_chan_resources); | 363 | BUG_ON(!device->device_alloc_chan_resources); |
364 | BUG_ON(!device->device_free_chan_resources); | 364 | BUG_ON(!device->device_free_chan_resources); |
365 | BUG_ON(!device->device_dependency_added); | ||
366 | BUG_ON(!device->device_is_tx_complete); | 365 | BUG_ON(!device->device_is_tx_complete); |
367 | BUG_ON(!device->device_issue_pending); | 366 | BUG_ON(!device->device_issue_pending); |
368 | BUG_ON(!device->dev); | 367 | BUG_ON(!device->dev); |
@@ -479,7 +478,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | |||
479 | 478 | ||
480 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); | 479 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); |
481 | dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); | 480 | dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); |
482 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); | 481 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, |
482 | DMA_CTRL_ACK); | ||
483 | 483 | ||
484 | if (!tx) { | 484 | if (!tx) { |
485 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 485 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); |
@@ -487,7 +487,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | |||
487 | return -ENOMEM; | 487 | return -ENOMEM; |
488 | } | 488 | } |
489 | 489 | ||
490 | tx->ack = 1; | ||
491 | tx->callback = NULL; | 490 | tx->callback = NULL; |
492 | cookie = tx->tx_submit(tx); | 491 | cookie = tx->tx_submit(tx); |
493 | 492 | ||
@@ -525,7 +524,8 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | |||
525 | 524 | ||
526 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); | 525 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); |
527 | dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); | 526 | dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); |
528 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); | 527 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, |
528 | DMA_CTRL_ACK); | ||
529 | 529 | ||
530 | if (!tx) { | 530 | if (!tx) { |
531 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 531 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); |
@@ -533,7 +533,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | |||
533 | return -ENOMEM; | 533 | return -ENOMEM; |
534 | } | 534 | } |
535 | 535 | ||
536 | tx->ack = 1; | ||
537 | tx->callback = NULL; | 536 | tx->callback = NULL; |
538 | cookie = tx->tx_submit(tx); | 537 | cookie = tx->tx_submit(tx); |
539 | 538 | ||
@@ -574,7 +573,8 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
574 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); | 573 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); |
575 | dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, | 574 | dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, |
576 | DMA_FROM_DEVICE); | 575 | DMA_FROM_DEVICE); |
577 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0); | 576 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, |
577 | DMA_CTRL_ACK); | ||
578 | 578 | ||
579 | if (!tx) { | 579 | if (!tx) { |
580 | dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); | 580 | dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); |
@@ -582,7 +582,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
582 | return -ENOMEM; | 582 | return -ENOMEM; |
583 | } | 583 | } |
584 | 584 | ||
585 | tx->ack = 1; | ||
586 | tx->callback = NULL; | 585 | tx->callback = NULL; |
587 | cookie = tx->tx_submit(tx); | 586 | cookie = tx->tx_submit(tx); |
588 | 587 | ||
@@ -600,8 +599,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
600 | { | 599 | { |
601 | tx->chan = chan; | 600 | tx->chan = chan; |
602 | spin_lock_init(&tx->lock); | 601 | spin_lock_init(&tx->lock); |
603 | INIT_LIST_HEAD(&tx->depend_node); | ||
604 | INIT_LIST_HEAD(&tx->depend_list); | ||
605 | } | 602 | } |
606 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | 603 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); |
607 | 604 | ||
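All three memcpy helpers above make the same change: instead of storing `tx->ack = 1` after the descriptor is prepared, the acknowledgement is requested up front by passing `DMA_CTRL_ACK` to the prep callback. A hedged sketch of the resulting submission pattern, using only the calls visible in this hunk; `dma_copy_example()` is an illustrative name, not a kernel API:

```c
/*
 * Hedged sketch of the submission pattern used by the memcpy helpers
 * above; dma_copy_example() is an illustrative name, not a kernel API.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static dma_cookie_t dma_copy_example(struct dma_chan *chan, void *dest,
				     void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* the DMA_CTRL_ACK flag replaces the old "tx->ack = 1" store */
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	return tx->tx_submit(tx);	/* returns the transaction cookie */
}
```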
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
new file mode 100644
index 000000000000..054eabffc185
--- /dev/null
+++ b/drivers/dma/fsldma.c
@@ -0,0 +1,1128 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: | ||
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
9 | * | ||
10 | * Description: | ||
11 | * DMA engine driver for the Freescale MPC8540 DMA controller, which is | ||
12 | * also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on. | ||
13 | * Support for the MPC8349 DMA controller is also added. | ||
14 | * | ||
15 | * This is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | |||
32 | #include "fsldma.h" | ||
33 | |||
34 | static void dma_init(struct fsl_dma_chan *fsl_chan) | ||
35 | { | ||
36 | /* Reset the channel */ | ||
37 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); | ||
38 | |||
39 | switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
40 | case FSL_DMA_IP_85XX: | ||
41 | /* Set the channel to the following modes: | ||
42 | * EIE - Error interrupt enable | ||
43 | * EOSIE - End of segments interrupt enable (basic mode) | ||
44 | * EOLNIE - End of links interrupt enable | ||
45 | */ | ||
46 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | ||
47 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
48 | break; | ||
49 | case FSL_DMA_IP_83XX: | ||
50 | /* Set the channel to the following modes: | ||
51 | * EOTIE - End-of-transfer interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, | ||
54 | 32); | ||
55 | break; | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) | ||
61 | { | ||
62 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); | ||
63 | } | ||
64 | |||
65 | static u32 get_sr(struct fsl_dma_chan *fsl_chan) | ||
66 | { | ||
67 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); | ||
68 | } | ||
69 | |||
70 | static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, | ||
71 | struct fsl_dma_ld_hw *hw, u32 count) | ||
72 | { | ||
73 | hw->count = CPU_TO_DMA(fsl_chan, count, 32); | ||
74 | } | ||
75 | |||
76 | static void set_desc_src(struct fsl_dma_chan *fsl_chan, | ||
77 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | ||
78 | { | ||
79 | u64 snoop_bits; | ||
80 | |||
81 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
82 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
83 | hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | ||
84 | } | ||
85 | |||
86 | static void set_desc_dest(struct fsl_dma_chan *fsl_chan, | ||
87 | struct fsl_dma_ld_hw *hw, dma_addr_t dest) | ||
88 | { | ||
89 | u64 snoop_bits; | ||
90 | |||
91 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
92 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
93 | hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); | ||
94 | } | ||
95 | |||
96 | static void set_desc_next(struct fsl_dma_chan *fsl_chan, | ||
97 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | ||
98 | { | ||
99 | u64 snoop_bits; | ||
100 | |||
101 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
102 | ? FSL_DMA_SNEN : 0; | ||
103 | hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | ||
104 | } | ||
105 | |||
106 | static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
107 | { | ||
108 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); | ||
109 | } | ||
110 | |||
111 | static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) | ||
112 | { | ||
113 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; | ||
114 | } | ||
115 | |||
116 | static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
117 | { | ||
118 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); | ||
119 | } | ||
120 | |||
121 | static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) | ||
122 | { | ||
123 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); | ||
124 | } | ||
125 | |||
126 | static u32 get_bcr(struct fsl_dma_chan *fsl_chan) | ||
127 | { | ||
128 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); | ||
129 | } | ||
130 | |||
131 | static int dma_is_idle(struct fsl_dma_chan *fsl_chan) | ||
132 | { | ||
133 | u32 sr = get_sr(fsl_chan); | ||
134 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | ||
135 | } | ||
136 | |||
137 | static void dma_start(struct fsl_dma_chan *fsl_chan) | ||
138 | { | ||
139 | u32 mr_set = 0; | ||
140 | |||
141 | if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | ||
142 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); | ||
143 | mr_set |= FSL_DMA_MR_EMP_EN; | ||
144 | } else | ||
145 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
146 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
147 | & ~FSL_DMA_MR_EMP_EN, 32); | ||
148 | |||
149 | if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | ||
150 | mr_set |= FSL_DMA_MR_EMS_EN; | ||
151 | else | ||
152 | mr_set |= FSL_DMA_MR_CS; | ||
153 | |||
154 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
155 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
156 | | mr_set, 32); | ||
157 | } | ||
158 | |||
159 | static void dma_halt(struct fsl_dma_chan *fsl_chan) | ||
160 | { | ||
161 | int i = 0; | ||
162 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
163 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, | ||
164 | 32); | ||
165 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
166 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS | ||
167 | | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); | ||
168 | |||
169 | while (!dma_is_idle(fsl_chan) && (i++ < 100)) | ||
170 | udelay(10); | ||
171 | if (i >= 100 && !dma_is_idle(fsl_chan)) | ||
172 | dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | ||
173 | } | ||
174 | |||
175 | static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | ||
176 | struct fsl_desc_sw *desc) | ||
177 | { | ||
178 | desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
179 | DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, | ||
180 | 64); | ||
181 | } | ||
182 | |||
183 | static void append_ld_queue(struct fsl_dma_chan *fsl_chan, | ||
184 | struct fsl_desc_sw *new_desc) | ||
185 | { | ||
186 | struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | ||
187 | |||
188 | if (list_empty(&fsl_chan->ld_queue)) | ||
189 | return; | ||
190 | |||
191 | /* Link to the new descriptor physical address and | ||
192 | * Enable End-of-segment interrupt for | ||
193 | * the last link descriptor. | ||
194 | * (the previous node's next link descriptor) | ||
195 | * | ||
196 | * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set. | ||
197 | */ | ||
198 | queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
199 | new_desc->async_tx.phys | FSL_DMA_EOSIE | | ||
200 | (((fsl_chan->feature & FSL_DMA_IP_MASK) | ||
201 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); | ||
202 | } | ||
203 | |||
204 | /** | ||
205 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | ||
206 | * @fsl_chan : Freescale DMA channel | ||
207 | * @size : Address loop size, 0 to disable the loop | ||
208 | * | ||
209 | * Set the source address hold transfer size. While the DMA is | ||
210 | * transferring data from the source address (SA), it holds or loops | ||
211 | * over a window of that size: if the loop size is 4, the DMA will | ||
212 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, | ||
213 | * SA + 1 ... and so on. | ||
214 | */ | ||
215 | static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
216 | { | ||
217 | switch (size) { | ||
218 | case 0: | ||
219 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
220 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
221 | (~FSL_DMA_MR_SAHE), 32); | ||
222 | break; | ||
223 | case 1: | ||
224 | case 2: | ||
225 | case 4: | ||
226 | case 8: | ||
227 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
228 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
229 | FSL_DMA_MR_SAHE | (__ilog2(size) << 14), | ||
230 | 32); | ||
231 | break; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | /** | ||
236 | * fsl_chan_set_dest_loop_size - Set destination address hold transfer size | ||
237 | * @fsl_chan : Freescale DMA channel | ||
238 | * @size : Address loop size, 0 to disable the loop | ||
239 | * | ||
240 | * Set the destination address hold transfer size. While the DMA is | ||
241 | * transferring data to the destination address (TA), it holds or loops | ||
242 | * over a window of that size: if the loop size is 4, the DMA will | ||
243 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, | ||
244 | * TA + 1 ... and so on. | ||
245 | */ | ||
246 | static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
247 | { | ||
248 | switch (size) { | ||
249 | case 0: | ||
250 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
251 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
252 | (~FSL_DMA_MR_DAHE), 32); | ||
253 | break; | ||
254 | case 1: | ||
255 | case 2: | ||
256 | case 4: | ||
257 | case 8: | ||
258 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
259 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
260 | FSL_DMA_MR_DAHE | (__ilog2(size) << 16), | ||
261 | 32); | ||
262 | break; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | /** | ||
267 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | ||
268 | * @fsl_chan : Freescale DMA channel | ||
269 | * @size : Pause control size, 0 to disable external pause control. | ||
270 | * The maximum is 1024. | ||
271 | * | ||
272 | * The Freescale DMA channel can be controlled by the external | ||
273 | * signal DREQ#. The pause control size is how many bytes are allowed | ||
274 | * to transfer before pausing the channel, after which a new assertion | ||
275 | * of DREQ# resumes channel operation. | ||
276 | */ | ||
277 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | ||
278 | { | ||
279 | if (size > 1024) | ||
280 | return; | ||
281 | |||
282 | if (size) { | ||
283 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
284 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
285 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
286 | 32); | ||
287 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | ||
288 | } else | ||
289 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | ||
290 | } | ||
291 | |||
292 | /** | ||
293 | * fsl_chan_toggle_ext_start - Toggle channel external start status | ||
294 | * @fsl_chan : Freescale DMA channel | ||
295 | * @enable : 0 is disabled, 1 is enabled. | ||
296 | * | ||
297 | * If the external start is enabled, the channel can be started by an | ||
298 | * external DMA start pin, so dma_start() does not start the | ||
299 | * transfer immediately. The DMA channel will wait until the | ||
300 | * control pin is asserted. | ||
301 | */ | ||
302 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | ||
303 | { | ||
304 | if (enable) | ||
305 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | ||
306 | else | ||
307 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | ||
308 | } | ||
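The four channel controls above are not part of the generic dmaengine API; the probe code later in this file stores them as function pointers in struct fsl_dma_chan (the external pause/start toggles only on the 85xx block). A hedged sketch of how board code that knows it owns a Freescale channel might use them; the function and the chosen values are illustrative:

```c
/*
 * Illustrative board-specific configuration, not part of this driver:
 * it assumes the caller knows "chan" was provided by fsldma, so the
 * to_fsl_chan() conversion from fsldma.h is valid.
 */
#include "fsldma.h"

static void example_configure_ext_dma(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* hold/loop over a 4-byte source window (e.g. a FIFO register) */
	if (fsl_chan->set_src_loop_size)
		fsl_chan->set_src_loop_size(fsl_chan, 4);

	/* pause after 1024 bytes and let DREQ# pace the transfer... */
	if (fsl_chan->toggle_ext_pause)
		fsl_chan->toggle_ext_pause(fsl_chan, 1024);

	/* ...and let the external start pin kick the channel off */
	if (fsl_chan->toggle_ext_start)
		fsl_chan->toggle_ext_start(fsl_chan, 1);
}
```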
309 | |||
310 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
311 | { | ||
312 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | ||
313 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | ||
314 | unsigned long flags; | ||
315 | dma_cookie_t cookie; | ||
316 | |||
317 | /* cookie increment and adding to ld_queue must be atomic */ | ||
318 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
319 | |||
320 | cookie = fsl_chan->common.cookie; | ||
321 | cookie++; | ||
322 | if (cookie < 0) | ||
323 | cookie = 1; | ||
324 | desc->async_tx.cookie = cookie; | ||
325 | fsl_chan->common.cookie = desc->async_tx.cookie; | ||
326 | |||
327 | append_ld_queue(fsl_chan, desc); | ||
328 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | ||
329 | |||
330 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
331 | |||
332 | return cookie; | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | ||
337 | * @fsl_chan : Freescale DMA channel | ||
338 | * | ||
339 | * Return - The descriptor allocated. NULL on failure. | ||
340 | */ | ||
341 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | ||
342 | struct fsl_dma_chan *fsl_chan) | ||
343 | { | ||
344 | dma_addr_t pdesc; | ||
345 | struct fsl_desc_sw *desc_sw; | ||
346 | |||
347 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
348 | if (desc_sw) { | ||
349 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | ||
350 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
351 | &fsl_chan->common); | ||
352 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
353 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | ||
354 | desc_sw->async_tx.phys = pdesc; | ||
355 | } | ||
356 | |||
357 | return desc_sw; | ||
358 | } | ||
359 | |||
360 | |||
361 | /** | ||
362 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | ||
363 | * @fsl_chan : Freescale DMA channel | ||
364 | * | ||
365 | * This function will create a dma pool for descriptor allocation. | ||
366 | * | ||
367 | * Return - The number of descriptors allocated. | ||
368 | */ | ||
369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | ||
370 | { | ||
371 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
372 | LIST_HEAD(tmp_list); | ||
373 | |||
374 | /* We need the descriptor to be aligned to 32 bytes | ||
375 | * to meet the FSL DMA specification requirement. | ||
376 | */ | ||
377 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | ||
378 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | ||
379 | 32, 0); | ||
380 | if (!fsl_chan->desc_pool) { | ||
381 | dev_err(fsl_chan->dev, "No memory for channel %d " | ||
382 | "descriptor dma pool.\n", fsl_chan->id); | ||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | return 1; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
391 | * @fsl_chan : Freescale DMA channel | ||
392 | */ | ||
393 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | ||
394 | { | ||
395 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
396 | struct fsl_desc_sw *desc, *_desc; | ||
397 | unsigned long flags; | ||
398 | |||
399 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | ||
400 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
401 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
402 | #ifdef FSL_DMA_LD_DEBUG | ||
403 | dev_dbg(fsl_chan->dev, | ||
404 | "LD %p will be released.\n", desc); | ||
405 | #endif | ||
406 | list_del(&desc->node); | ||
407 | /* free link descriptor */ | ||
408 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
409 | } | ||
410 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
411 | dma_pool_destroy(fsl_chan->desc_pool); | ||
412 | } | ||
413 | |||
414 | static struct dma_async_tx_descriptor * | ||
415 | fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) | ||
416 | { | ||
417 | struct fsl_dma_chan *fsl_chan; | ||
418 | struct fsl_desc_sw *new; | ||
419 | |||
420 | if (!chan) | ||
421 | return NULL; | ||
422 | |||
423 | fsl_chan = to_fsl_chan(chan); | ||
424 | |||
425 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
426 | if (!new) { | ||
427 | dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); | ||
428 | return NULL; | ||
429 | } | ||
430 | |||
431 | new->async_tx.cookie = -EBUSY; | ||
432 | new->async_tx.flags = flags; | ||
433 | |||
434 | /* Insert the link descriptor to the LD ring */ | ||
435 | list_add_tail(&new->node, &new->async_tx.tx_list); | ||
436 | |||
437 | /* Set End-of-link on the last link descriptor of the new list */ | ||
438 | set_ld_eol(fsl_chan, new); | ||
439 | |||
440 | return &new->async_tx; | ||
441 | } | ||
442 | |||
443 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | ||
444 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
445 | size_t len, unsigned long flags) | ||
446 | { | ||
447 | struct fsl_dma_chan *fsl_chan; | ||
448 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | ||
449 | size_t copy; | ||
450 | LIST_HEAD(link_chain); | ||
451 | |||
452 | if (!chan) | ||
453 | return NULL; | ||
454 | |||
455 | if (!len) | ||
456 | return NULL; | ||
457 | |||
458 | fsl_chan = to_fsl_chan(chan); | ||
459 | |||
460 | do { | ||
461 | |||
462 | /* Allocate the link descriptor from DMA pool */ | ||
463 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
464 | if (!new) { | ||
465 | dev_err(fsl_chan->dev, | ||
466 | "No free memory for link descriptor\n"); | ||
467 | return NULL; | ||
468 | } | ||
469 | #ifdef FSL_DMA_LD_DEBUG | ||
470 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
471 | #endif | ||
472 | |||
473 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | ||
474 | |||
475 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
476 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
477 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | ||
478 | |||
479 | if (!first) | ||
480 | first = new; | ||
481 | else | ||
482 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | ||
483 | |||
484 | new->async_tx.cookie = 0; | ||
485 | async_tx_ack(&new->async_tx); | ||
486 | |||
487 | prev = new; | ||
488 | len -= copy; | ||
489 | dma_src += copy; | ||
490 | dma_dest += copy; | ||
491 | |||
492 | /* Insert the link descriptor to the LD ring */ | ||
493 | list_add_tail(&new->node, &first->async_tx.tx_list); | ||
494 | } while (len); | ||
495 | |||
496 | new->async_tx.flags = flags; /* client is in control of this ack */ | ||
497 | new->async_tx.cookie = -EBUSY; | ||
498 | |||
499 | /* Set End-of-link on the last link descriptor of the new list */ | ||
500 | set_ld_eol(fsl_chan, new); | ||
501 | |||
502 | return first ? &first->async_tx : NULL; | ||
503 | } | ||
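fsl_dma_prep_memcpy() above splits one request into as many hardware link descriptors as the byte-count register allows (FSL_DMA_BCR_MAX_CNT, defined in fsldma.h below, is 0x03ffffff). A small, hypothetical helper that just counts the descriptors such a request would need, mirroring the loop above:

```c
/*
 * Hypothetical helper, not part of the driver: counts how many link
 * descriptors fsl_dma_prep_memcpy() would build for a given length.
 * FSL_DMA_BCR_MAX_CNT is 0x03ffffff (one byte short of 64 MiB), so a
 * 100 MiB copy, for example, takes two descriptors.
 */
#include <linux/kernel.h>
#include "fsldma.h"

static unsigned int fsl_dma_example_nr_descs(size_t len)
{
	unsigned int n = 0;

	while (len) {
		size_t copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		len -= copy;
		n++;			/* one link descriptor per chunk */
	}

	return n;
}
```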
504 | |||
505 | /** | ||
506 | * fsl_dma_update_completed_cookie - Update the completed cookie. | ||
507 | * @fsl_chan : Freescale DMA channel | ||
508 | */ | ||
509 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | ||
510 | { | ||
511 | struct fsl_desc_sw *cur_desc, *desc; | ||
512 | dma_addr_t ld_phy; | ||
513 | |||
514 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | ||
515 | |||
516 | if (ld_phy) { | ||
517 | cur_desc = NULL; | ||
518 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | ||
519 | if (desc->async_tx.phys == ld_phy) { | ||
520 | cur_desc = desc; | ||
521 | break; | ||
522 | } | ||
523 | |||
524 | if (cur_desc && cur_desc->async_tx.cookie) { | ||
525 | if (dma_is_idle(fsl_chan)) | ||
526 | fsl_chan->completed_cookie = | ||
527 | cur_desc->async_tx.cookie; | ||
528 | else | ||
529 | fsl_chan->completed_cookie = | ||
530 | cur_desc->async_tx.cookie - 1; | ||
531 | } | ||
532 | } | ||
533 | } | ||
534 | |||
535 | /** | ||
536 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
537 | * @fsl_chan : Freescale DMA channel | ||
538 | * | ||
539 | * This function cleans up the ld_queue of the DMA channel: completed | ||
540 | * link descriptors are removed from the queue, freed back to the | ||
541 | * descriptor pool, and their completion callbacks are run. | ||
542 | */ | ||
543 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | ||
544 | { | ||
545 | struct fsl_desc_sw *desc, *_desc; | ||
546 | unsigned long flags; | ||
547 | |||
548 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
549 | |||
550 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | ||
551 | fsl_chan->completed_cookie); | ||
552 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
553 | dma_async_tx_callback callback; | ||
554 | void *callback_param; | ||
555 | |||
556 | if (dma_async_is_complete(desc->async_tx.cookie, | ||
557 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
558 | == DMA_IN_PROGRESS) | ||
559 | break; | ||
560 | |||
561 | callback = desc->async_tx.callback; | ||
562 | callback_param = desc->async_tx.callback_param; | ||
563 | |||
564 | /* Remove from ld_queue list */ | ||
565 | list_del(&desc->node); | ||
566 | |||
567 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n", | ||
568 | desc); | ||
569 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
570 | |||
571 | /* Run the link descriptor callback function */ | ||
572 | if (callback) { | ||
573 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
574 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | ||
575 | desc); | ||
576 | callback(callback_param); | ||
577 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
578 | } | ||
579 | } | ||
580 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
581 | } | ||
582 | |||
583 | /** | ||
584 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | ||
585 | * @fsl_chan : Freescale DMA channel | ||
586 | */ | ||
587 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | ||
588 | { | ||
589 | struct list_head *ld_node; | ||
590 | dma_addr_t next_dest_addr; | ||
591 | unsigned long flags; | ||
592 | |||
593 | if (!dma_is_idle(fsl_chan)) | ||
594 | return; | ||
595 | |||
596 | dma_halt(fsl_chan); | ||
597 | |||
598 | /* If there are link descriptors in the queue | ||
599 | * that have not been transferred, we need to start them. | ||
600 | */ | ||
601 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
602 | |||
603 | /* Find the first untransferred descriptor */ | ||
604 | for (ld_node = fsl_chan->ld_queue.next; | ||
605 | (ld_node != &fsl_chan->ld_queue) | ||
606 | && (dma_async_is_complete( | ||
607 | to_fsl_desc(ld_node)->async_tx.cookie, | ||
608 | fsl_chan->completed_cookie, | ||
609 | fsl_chan->common.cookie) == DMA_SUCCESS); | ||
610 | ld_node = ld_node->next); | ||
611 | |||
612 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
613 | |||
614 | if (ld_node != &fsl_chan->ld_queue) { | ||
615 | /* Get the ld start address from ld_queue */ | ||
616 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | ||
617 | dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n", | ||
618 | (void *)next_dest_addr); | ||
619 | set_cdar(fsl_chan, next_dest_addr); | ||
620 | dma_start(fsl_chan); | ||
621 | } else { | ||
622 | set_cdar(fsl_chan, 0); | ||
623 | set_ndar(fsl_chan, 0); | ||
624 | } | ||
625 | } | ||
626 | |||
627 | /** | ||
628 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | ||
629 | * @fsl_chan : Freescale DMA channel | ||
630 | */ | ||
631 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
632 | { | ||
633 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
634 | |||
635 | #ifdef FSL_DMA_LD_DEBUG | ||
636 | struct fsl_desc_sw *ld; | ||
637 | unsigned long flags; | ||
638 | |||
639 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
640 | if (list_empty(&fsl_chan->ld_queue)) { | ||
641 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
642 | return; | ||
643 | } | ||
644 | |||
645 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
646 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
647 | int i; | ||
648 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
649 | fsl_chan->id, ld->async_tx.phys); | ||
650 | for (i = 0; i < 8; i++) | ||
651 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
652 | i, *(((u32 *)&ld->hw) + i)); | ||
653 | } | ||
654 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
655 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
656 | #endif | ||
657 | |||
658 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
659 | } | ||
660 | |||
661 | /** | ||
662 | * fsl_dma_is_complete - Determine the DMA status | ||
663 | * @fsl_chan : Freescale DMA channel | ||
664 | */ | ||
665 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | ||
666 | dma_cookie_t cookie, | ||
667 | dma_cookie_t *done, | ||
668 | dma_cookie_t *used) | ||
669 | { | ||
670 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
671 | dma_cookie_t last_used; | ||
672 | dma_cookie_t last_complete; | ||
673 | |||
674 | fsl_chan_ld_cleanup(fsl_chan); | ||
675 | |||
676 | last_used = chan->cookie; | ||
677 | last_complete = fsl_chan->completed_cookie; | ||
678 | |||
679 | if (done) | ||
680 | *done = last_complete; | ||
681 | |||
682 | if (used) | ||
683 | *used = last_used; | ||
684 | |||
685 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
686 | } | ||
687 | |||
688 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | ||
689 | { | ||
690 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
691 | u32 stat; | ||
692 | int update_cookie = 0; | ||
693 | int xfer_ld_q = 0; | ||
694 | |||
695 | stat = get_sr(fsl_chan); | ||
696 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | ||
697 | fsl_chan->id, stat); | ||
698 | set_sr(fsl_chan, stat); /* Clear the event register */ | ||
699 | |||
700 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | ||
701 | if (!stat) | ||
702 | return IRQ_NONE; | ||
703 | |||
704 | if (stat & FSL_DMA_SR_TE) | ||
705 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | ||
706 | |||
707 | /* Programming Error | ||
708 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will | ||
709 | * trigger a PE interrupt. | ||
710 | */ | ||
711 | if (stat & FSL_DMA_SR_PE) { | ||
712 | dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); | ||
713 | if (get_bcr(fsl_chan) == 0) { | ||
714 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | ||
715 | * Now, update the completed cookie, and continue the | ||
716 | * next uncompleted transfer. | ||
717 | */ | ||
718 | update_cookie = 1; | ||
719 | xfer_ld_q = 1; | ||
720 | } | ||
721 | stat &= ~FSL_DMA_SR_PE; | ||
722 | } | ||
723 | |||
724 | /* If the link descriptor segment transfer finishes, | ||
725 | * we will recycle the used descriptor. | ||
726 | */ | ||
727 | if (stat & FSL_DMA_SR_EOSI) { | ||
728 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | ||
729 | dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", | ||
730 | (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); | ||
731 | stat &= ~FSL_DMA_SR_EOSI; | ||
732 | update_cookie = 1; | ||
733 | } | ||
734 | |||
735 | /* For MPC8349, the EOCDI event needs to update the cookie | ||
736 | * and start the next transfer if one exists. | ||
737 | */ | ||
738 | if (stat & FSL_DMA_SR_EOCDI) { | ||
739 | dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); | ||
740 | stat &= ~FSL_DMA_SR_EOCDI; | ||
741 | update_cookie = 1; | ||
742 | xfer_ld_q = 1; | ||
743 | } | ||
744 | |||
745 | /* If the current transfer is the end-of-transfer, | ||
746 | * we should clear the Channel Start bit to | ||
747 | * prepare for the next transfer. | ||
748 | */ | ||
749 | if (stat & FSL_DMA_SR_EOLNI) { | ||
750 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | ||
751 | stat &= ~FSL_DMA_SR_EOLNI; | ||
752 | xfer_ld_q = 1; | ||
753 | } | ||
754 | |||
755 | if (update_cookie) | ||
756 | fsl_dma_update_completed_cookie(fsl_chan); | ||
757 | if (xfer_ld_q) | ||
758 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
759 | if (stat) | ||
760 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | ||
761 | stat); | ||
762 | |||
763 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | ||
764 | tasklet_schedule(&fsl_chan->tasklet); | ||
765 | return IRQ_HANDLED; | ||
766 | } | ||
767 | |||
768 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | ||
769 | { | ||
770 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | ||
771 | u32 gsr; | ||
772 | int ch_nr; | ||
773 | |||
774 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | ||
775 | : in_le32(fdev->reg_base); | ||
776 | ch_nr = (32 - ffs(gsr)) / 8; | ||
777 | |||
778 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | ||
779 | fdev->chan[ch_nr]) : IRQ_NONE; | ||
780 | } | ||
781 | |||
782 | static void dma_do_tasklet(unsigned long data) | ||
783 | { | ||
784 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
785 | fsl_chan_ld_cleanup(fsl_chan); | ||
786 | } | ||
787 | |||
788 | static void fsl_dma_callback_test(void *param) | ||
789 | { | ||
790 | struct fsl_dma_chan *fsl_chan = param; | ||
791 | if (fsl_chan) | ||
792 | dev_dbg(fsl_chan->dev, "selftest: callback is ok!\n"); | ||
793 | } | ||
794 | |||
795 | static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | ||
796 | { | ||
797 | struct dma_chan *chan; | ||
798 | int err = 0; | ||
799 | dma_addr_t dma_dest, dma_src; | ||
800 | dma_cookie_t cookie; | ||
801 | u8 *src, *dest; | ||
802 | int i; | ||
803 | size_t test_size; | ||
804 | struct dma_async_tx_descriptor *tx1, *tx2, *tx3; | ||
805 | |||
806 | test_size = 4096; | ||
807 | |||
808 | src = kmalloc(test_size * 2, GFP_KERNEL); | ||
809 | if (!src) { | ||
810 | dev_err(fsl_chan->dev, | ||
811 | "selftest: Cannot alloc memory for test!\n"); | ||
812 | err = -ENOMEM; | ||
813 | goto out; | ||
814 | } | ||
815 | |||
816 | dest = src + test_size; | ||
817 | |||
818 | for (i = 0; i < test_size; i++) | ||
819 | src[i] = (u8) i; | ||
820 | |||
821 | chan = &fsl_chan->common; | ||
822 | |||
823 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
824 | dev_err(fsl_chan->dev, | ||
825 | "selftest: Cannot alloc resources for DMA\n"); | ||
826 | err = -ENODEV; | ||
827 | goto out; | ||
828 | } | ||
829 | |||
830 | /* TX 1 */ | ||
831 | dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, | ||
832 | DMA_TO_DEVICE); | ||
833 | dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, | ||
834 | DMA_FROM_DEVICE); | ||
835 | tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); | ||
836 | async_tx_ack(tx1); | ||
837 | |||
838 | cookie = fsl_dma_tx_submit(tx1); | ||
839 | fsl_dma_memcpy_issue_pending(chan); | ||
840 | msleep(2); | ||
841 | |||
842 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
843 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
844 | err = -ENODEV; | ||
845 | goto out; | ||
846 | } | ||
847 | |||
848 | /* Test free and re-alloc channel resources */ | ||
849 | fsl_dma_free_chan_resources(chan); | ||
850 | |||
851 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
852 | dev_err(fsl_chan->dev, | ||
853 | "selftest: Cannot alloc resources for DMA\n"); | ||
854 | err = -ENODEV; | ||
855 | goto free_resources; | ||
856 | } | ||
857 | |||
858 | /* Continue to test | ||
859 | * TX 2 | ||
860 | */ | ||
861 | dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, | ||
862 | test_size / 4, DMA_TO_DEVICE); | ||
863 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, | ||
864 | test_size / 4, DMA_FROM_DEVICE); | ||
865 | tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
866 | async_tx_ack(tx2); | ||
867 | |||
868 | /* TX 3 */ | ||
869 | dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, | ||
870 | test_size / 4, DMA_TO_DEVICE); | ||
871 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, | ||
872 | test_size / 4, DMA_FROM_DEVICE); | ||
873 | tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
874 | async_tx_ack(tx3); | ||
875 | |||
876 | /* Interrupt tx test */ | ||
877 | tx1 = fsl_dma_prep_interrupt(chan, 0); | ||
878 | async_tx_ack(tx1); | ||
879 | cookie = fsl_dma_tx_submit(tx1); | ||
880 | |||
881 | /* Test submitting the prepared txs out of order */ | ||
882 | cookie = fsl_dma_tx_submit(tx3); | ||
883 | cookie = fsl_dma_tx_submit(tx2); | ||
884 | |||
885 | if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) | ||
886 | dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { | ||
887 | tx3->callback = fsl_dma_callback_test; | ||
888 | tx3->callback_param = fsl_chan; | ||
889 | } | ||
890 | fsl_dma_memcpy_issue_pending(chan); | ||
891 | msleep(2); | ||
892 | |||
893 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
894 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
895 | err = -ENODEV; | ||
896 | goto free_resources; | ||
897 | } | ||
898 | |||
899 | err = memcmp(src, dest, test_size); | ||
900 | if (err) { | ||
901 | for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); | ||
902 | i++); | ||
903 | dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is " | ||
904 | "wrong! src 0x%x, dest 0x%x\n", | ||
905 | i, (long)test_size, *(src + i), *(dest + i)); | ||
906 | } | ||
907 | |||
908 | free_resources: | ||
909 | fsl_dma_free_chan_resources(chan); | ||
910 | out: | ||
911 | kfree(src); | ||
912 | return err; | ||
913 | } | ||
914 | |||
915 | static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | ||
916 | const struct of_device_id *match) | ||
917 | { | ||
918 | struct fsl_dma_device *fdev; | ||
919 | struct fsl_dma_chan *new_fsl_chan; | ||
920 | int err; | ||
921 | |||
922 | fdev = dev_get_drvdata(dev->dev.parent); | ||
923 | BUG_ON(!fdev); | ||
924 | |||
925 | /* alloc channel */ | ||
926 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | ||
927 | if (!new_fsl_chan) { | ||
928 | dev_err(&dev->dev, "No free memory for allocating " | ||
929 | "dma channels!\n"); | ||
930 | err = -ENOMEM; | ||
931 | goto err; | ||
932 | } | ||
933 | |||
934 | /* get dma channel register base */ | ||
935 | err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); | ||
936 | if (err) { | ||
937 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
938 | dev->node->full_name); | ||
939 | goto err; | ||
940 | } | ||
941 | |||
942 | new_fsl_chan->feature = *(u32 *)match->data; | ||
943 | |||
944 | if (!fdev->feature) | ||
945 | fdev->feature = new_fsl_chan->feature; | ||
946 | |||
947 | /* If the DMA device's feature is different than its channels', | ||
948 | * report the bug. | ||
949 | */ | ||
950 | WARN_ON(fdev->feature != new_fsl_chan->feature); | ||
951 | |||
952 | new_fsl_chan->dev = &dev->dev; | ||
953 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | ||
954 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | ||
955 | |||
956 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
957 | if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
958 | dev_err(&dev->dev, "There is no channel %d!\n", | ||
959 | new_fsl_chan->id); | ||
960 | err = -EINVAL; | ||
961 | goto err; | ||
962 | } | ||
963 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
964 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
965 | (unsigned long)new_fsl_chan); | ||
966 | |||
967 | /* Init the channel */ | ||
968 | dma_init(new_fsl_chan); | ||
969 | |||
970 | /* Clear cdar registers */ | ||
971 | set_cdar(new_fsl_chan, 0); | ||
972 | |||
973 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
974 | case FSL_DMA_IP_85XX: | ||
975 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | ||
976 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | ||
977 | case FSL_DMA_IP_83XX: | ||
978 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | ||
979 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | ||
980 | } | ||
981 | |||
982 | spin_lock_init(&new_fsl_chan->desc_lock); | ||
983 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | ||
984 | |||
985 | new_fsl_chan->common.device = &fdev->common; | ||
986 | |||
987 | /* Add the channel to DMA device channel list */ | ||
988 | list_add_tail(&new_fsl_chan->common.device_node, | ||
989 | &fdev->common.channels); | ||
990 | fdev->common.chancnt++; | ||
991 | |||
992 | new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); | ||
993 | if (new_fsl_chan->irq != NO_IRQ) { | ||
994 | err = request_irq(new_fsl_chan->irq, | ||
995 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
996 | "fsldma-channel", new_fsl_chan); | ||
997 | if (err) { | ||
998 | dev_err(&dev->dev, "DMA channel %s request_irq error " | ||
999 | "with return %d\n", dev->node->full_name, err); | ||
1000 | goto err; | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | err = fsl_dma_self_test(new_fsl_chan); | ||
1005 | if (err) | ||
1006 | goto err; | ||
1007 | |||
1008 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
1009 | match->compatible, new_fsl_chan->irq); | ||
1010 | |||
1011 | return 0; | ||
1012 | err: | ||
1013 | dma_halt(new_fsl_chan); | ||
1014 | iounmap(new_fsl_chan->reg_base); | ||
1015 | free_irq(new_fsl_chan->irq, new_fsl_chan); | ||
1016 | list_del(&new_fsl_chan->common.device_node); | ||
1017 | kfree(new_fsl_chan); | ||
1018 | return err; | ||
1019 | } | ||
1020 | |||
1021 | const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; | ||
1022 | const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; | ||
1023 | |||
1024 | static struct of_device_id of_fsl_dma_chan_ids[] = { | ||
1025 | { | ||
1026 | .compatible = "fsl,eloplus-dma-channel", | ||
1027 | .data = (void *)&mpc8540_dma_ip_feature, | ||
1028 | }, | ||
1029 | { | ||
1030 | .compatible = "fsl,elo-dma-channel", | ||
1031 | .data = (void *)&mpc8349_dma_ip_feature, | ||
1032 | }, | ||
1033 | {} | ||
1034 | }; | ||
1035 | |||
1036 | static struct of_platform_driver of_fsl_dma_chan_driver = { | ||
1037 | .name = "of-fsl-dma-channel", | ||
1038 | .match_table = of_fsl_dma_chan_ids, | ||
1039 | .probe = of_fsl_dma_chan_probe, | ||
1040 | }; | ||
1041 | |||
1042 | static __init int of_fsl_dma_chan_init(void) | ||
1043 | { | ||
1044 | return of_register_platform_driver(&of_fsl_dma_chan_driver); | ||
1045 | } | ||
1046 | |||
1047 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | ||
1048 | const struct of_device_id *match) | ||
1049 | { | ||
1050 | int err; | ||
1051 | unsigned int irq; | ||
1052 | struct fsl_dma_device *fdev; | ||
1053 | |||
1054 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | ||
1055 | if (!fdev) { | ||
1056 | dev_err(&dev->dev, "Not enough memory for 'priv'\n"); | ||
1057 | err = -ENOMEM; | ||
1058 | goto err; | ||
1059 | } | ||
1060 | fdev->dev = &dev->dev; | ||
1061 | INIT_LIST_HEAD(&fdev->common.channels); | ||
1062 | |||
1063 | /* get DMA controller register base */ | ||
1064 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | ||
1065 | if (err) { | ||
1066 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
1067 | dev->node->full_name); | ||
1068 | goto err; | ||
1069 | } | ||
1070 | |||
1071 | dev_info(&dev->dev, "Probing the Freescale DMA driver for the %s " | ||
1072 | "controller at %p...\n", | ||
1073 | match->compatible, (void *)fdev->reg.start); | ||
1074 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
1075 | - fdev->reg.start + 1); | ||
1076 | |||
1077 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | ||
1078 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1079 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | ||
1080 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | ||
1081 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | ||
1082 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | ||
1083 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | ||
1084 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | ||
1085 | fdev->common.dev = &dev->dev; | ||
1086 | |||
1087 | irq = irq_of_parse_and_map(dev->node, 0); | ||
1088 | if (irq != NO_IRQ) { | ||
1089 | err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
1090 | "fsldma-device", fdev); | ||
1091 | if (err) { | ||
1092 | dev_err(&dev->dev, "DMA device request_irq error " | ||
1093 | "with return %d\n", err); | ||
1094 | goto err; | ||
1095 | } | ||
1096 | } | ||
1097 | |||
1098 | dev_set_drvdata(&(dev->dev), fdev); | ||
1099 | of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); | ||
1100 | |||
1101 | dma_async_device_register(&fdev->common); | ||
1102 | return 0; | ||
1103 | |||
1104 | err: | ||
1105 | iounmap(fdev->reg_base); | ||
1106 | kfree(fdev); | ||
1107 | return err; | ||
1108 | } | ||
1109 | |||
1110 | static struct of_device_id of_fsl_dma_ids[] = { | ||
1111 | { .compatible = "fsl,eloplus-dma", }, | ||
1112 | { .compatible = "fsl,elo-dma", }, | ||
1113 | {} | ||
1114 | }; | ||
1115 | |||
1116 | static struct of_platform_driver of_fsl_dma_driver = { | ||
1117 | .name = "of-fsl-dma", | ||
1118 | .match_table = of_fsl_dma_ids, | ||
1119 | .probe = of_fsl_dma_probe, | ||
1120 | }; | ||
1121 | |||
1122 | static __init int of_fsl_dma_init(void) | ||
1123 | { | ||
1124 | return of_register_platform_driver(&of_fsl_dma_driver); | ||
1125 | } | ||
1126 | |||
1127 | subsys_initcall(of_fsl_dma_chan_init); | ||
1128 | subsys_initcall(of_fsl_dma_init); | ||
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
new file mode 100644
index 000000000000..6faf07ba0d0e
--- /dev/null
+++ b/drivers/dma/fsldma.h
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
6 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
7 | * | ||
8 | * This is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __DMA_FSLDMA_H | ||
15 | #define __DMA_FSLDMA_H | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /* Define data structures needed by Freescale | ||
22 | * MPC8540 and MPC8349 DMA controllers. | ||
23 | */ | ||
24 | #define FSL_DMA_MR_CS 0x00000001 | ||
25 | #define FSL_DMA_MR_CC 0x00000002 | ||
26 | #define FSL_DMA_MR_CA 0x00000008 | ||
27 | #define FSL_DMA_MR_EIE 0x00000040 | ||
28 | #define FSL_DMA_MR_XFE 0x00000020 | ||
29 | #define FSL_DMA_MR_EOLNIE 0x00000100 | ||
30 | #define FSL_DMA_MR_EOLSIE 0x00000080 | ||
31 | #define FSL_DMA_MR_EOSIE 0x00000200 | ||
32 | #define FSL_DMA_MR_CDSM 0x00000010 | ||
33 | #define FSL_DMA_MR_CTM 0x00000004 | ||
34 | #define FSL_DMA_MR_EMP_EN 0x00200000 | ||
35 | #define FSL_DMA_MR_EMS_EN 0x00040000 | ||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | ||
37 | #define FSL_DMA_MR_SAHE 0x00001000 | ||
38 | |||
39 | /* Special MR definition for MPC8349 */ | ||
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | ||
41 | |||
42 | #define FSL_DMA_SR_CH 0x00000020 | ||
43 | #define FSL_DMA_SR_PE 0x00000010 | ||
44 | #define FSL_DMA_SR_CB 0x00000004 | ||
45 | #define FSL_DMA_SR_TE 0x00000080 | ||
46 | #define FSL_DMA_SR_EOSI 0x00000002 | ||
47 | #define FSL_DMA_SR_EOLSI 0x00000001 | ||
48 | #define FSL_DMA_SR_EOCDI 0x00000001 | ||
49 | #define FSL_DMA_SR_EOLNI 0x00000008 | ||
50 | |||
51 | #define FSL_DMA_SATR_SBPATMU 0x20000000 | ||
52 | #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 | ||
53 | #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 | ||
54 | #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 | ||
55 | #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 | ||
56 | #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 | ||
57 | |||
58 | #define FSL_DMA_DATR_DBPATMU 0x20000000 | ||
59 | #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 | ||
60 | #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 | ||
61 | #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 | ||
62 | |||
63 | #define FSL_DMA_EOL ((u64)0x1) | ||
64 | #define FSL_DMA_SNEN ((u64)0x10) | ||
65 | #define FSL_DMA_EOSIE 0x8 | ||
66 | #define FSL_DMA_NLDA_MASK (~(u64)0x1f) | ||
67 | |||
68 | #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu | ||
69 | |||
70 | #define FSL_DMA_DGSR_TE 0x80 | ||
71 | #define FSL_DMA_DGSR_CH 0x20 | ||
72 | #define FSL_DMA_DGSR_PE 0x10 | ||
73 | #define FSL_DMA_DGSR_EOLNI 0x08 | ||
74 | #define FSL_DMA_DGSR_CB 0x04 | ||
75 | #define FSL_DMA_DGSR_EOSI 0x02 | ||
76 | #define FSL_DMA_DGSR_EOLSI 0x01 | ||
77 | |||
78 | typedef u64 __bitwise v64; | ||
79 | typedef u32 __bitwise v32; | ||
80 | |||
81 | struct fsl_dma_ld_hw { | ||
82 | v64 src_addr; | ||
83 | v64 dst_addr; | ||
84 | v64 next_ln_addr; | ||
85 | v32 count; | ||
86 | v32 reserve; | ||
87 | } __attribute__((aligned(32))); | ||
88 | |||
89 | struct fsl_desc_sw { | ||
90 | struct fsl_dma_ld_hw hw; | ||
91 | struct list_head node; | ||
92 | struct dma_async_tx_descriptor async_tx; | ||
93 | struct list_head *ld; | ||
94 | void *priv; | ||
95 | } __attribute__((aligned(32))); | ||
96 | |||
97 | struct fsl_dma_chan_regs { | ||
98 | u32 mr; /* 0x00 - Mode Register */ | ||
99 | u32 sr; /* 0x04 - Status Register */ | ||
100 | u64 cdar; /* 0x08 - Current descriptor address register */ | ||
101 | u64 sar; /* 0x10 - Source Address Register */ | ||
102 | u64 dar; /* 0x18 - Destination Address Register */ | ||
103 | u32 bcr; /* 0x20 - Byte Count Register */ | ||
104 | u64 ndar; /* 0x24 - Next Descriptor Address Register */ | ||
105 | }; | ||
106 | |||
107 | struct fsl_dma_chan; | ||
108 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | ||
109 | |||
110 | struct fsl_dma_device { | ||
111 | void __iomem *reg_base; /* DGSR register base */ | ||
112 | struct resource reg; /* Resource for register */ | ||
113 | struct device *dev; | ||
114 | struct dma_device common; | ||
115 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | ||
116 | u32 feature; /* The same as DMA channels */ | ||
117 | }; | ||
118 | |||
119 | /* Define macros for fsl_dma_chan->feature property */ | ||
120 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | ||
121 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | ||
122 | |||
123 | #define FSL_DMA_IP_MASK 0x00000ff0 | ||
124 | #define FSL_DMA_IP_85XX 0x00000010 | ||
125 | #define FSL_DMA_IP_83XX 0x00000020 | ||
126 | |||
127 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | ||
128 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | ||
129 | |||
130 | struct fsl_dma_chan { | ||
131 | struct fsl_dma_chan_regs __iomem *reg_base; | ||
132 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
133 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
134 | struct list_head ld_queue; /* Link descriptors queue */ | ||
135 | struct dma_chan common; /* DMA common channel */ | ||
136 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
137 | struct device *dev; /* Channel device */ | ||
138 | struct resource reg; /* Resource for register */ | ||
139 | int irq; /* Channel IRQ */ | ||
140 | int id; /* Raw id of this channel */ | ||
141 | struct tasklet_struct tasklet; | ||
142 | u32 feature; | ||
143 | |||
144 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | ||
145 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | ||
146 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
147 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
148 | }; | ||
149 | |||
150 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | ||
151 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | ||
152 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | ||
153 | |||
154 | #ifndef __powerpc64__ | ||
155 | static u64 in_be64(const u64 __iomem *addr) | ||
156 | { | ||
157 | return ((u64)in_be32((u32 __iomem *)addr) << 32) | | ||
158 | (in_be32((u32 __iomem *)addr + 1)); | ||
159 | } | ||
160 | |||
161 | static void out_be64(u64 __iomem *addr, u64 val) | ||
162 | { | ||
163 | out_be32((u32 __iomem *)addr, val >> 32); | ||
164 | out_be32((u32 __iomem *)addr + 1, (u32)val); | ||
165 | } | ||
166 | |||
167 | /* There are no asm instructions for 64-bit reverse loads and stores */ | ||
168 | static u64 in_le64(const u64 __iomem *addr) | ||
169 | { | ||
170 | return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) | | ||
171 | (in_le32((u32 __iomem *)addr)); | ||
172 | } | ||
173 | |||
174 | static void out_le64(u64 __iomem *addr, u64 val) | ||
175 | { | ||
176 | out_le32((u32 __iomem *)addr + 1, val >> 32); | ||
177 | out_le32((u32 __iomem *)addr, (u32)val); | ||
178 | } | ||
179 | #endif | ||
180 | |||
181 | #define DMA_IN(fsl_chan, addr, width) \ | ||
182 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
183 | in_be##width(addr) : in_le##width(addr)) | ||
184 | #define DMA_OUT(fsl_chan, addr, val, width) \ | ||
185 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
186 | out_be##width(addr, val) : out_le##width(addr, val)) | ||
187 | |||
188 | #define DMA_TO_CPU(fsl_chan, d, width) \ | ||
189 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
190 | be##width##_to_cpu((__force __be##width)(v##width)d) : \ | ||
191 | le##width##_to_cpu((__force __le##width)(v##width)d)) | ||
192 | #define CPU_TO_DMA(fsl_chan, c, width) \ | ||
193 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
194 | (__force v##width)cpu_to_be##width(c) : \ | ||
195 | (__force v##width)cpu_to_le##width(c)) | ||
196 | |||
197 | #endif /* __DMA_FSLDMA_H */ | ||
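The fsldma.h header above gives the driver endianness-agnostic register access: the DMA_IN/DMA_OUT and CPU_TO_DMA/DMA_TO_CPU macros pick the big- or little-endian accessor at run time from the channel's FSL_DMA_BIG_ENDIAN feature bit, and on 32-bit builds the 64-bit registers go through the in/out_{be,le}64 helpers defined just before them. A minimal sketch of how fsldma.c is expected to use these macros; the helper names below are illustrative, not quoted from the patch:

    static u32 fsl_chan_get_status(struct fsl_dma_chan *fsl_chan)
    {
            /* expands to in_be32() or in_le32() depending on the feature bit */
            return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
    }

    static void fsl_chan_set_ndar(struct fsl_dma_chan *fsl_chan, u64 addr)
    {
            /* 64-bit registers use the in/out_{be,le}64 helpers above, which
             * split the access into two 32-bit MMIO operations on 32-bit PPC */
            DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
    }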
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c index 0fa8a98051a8..9e922760b7ff 100644 --- a/drivers/dma/ioat_dca.c +++ b/drivers/dma/ioat_dca.c | |||
@@ -98,7 +98,7 @@ struct ioat_dca_slot { | |||
98 | 98 | ||
99 | struct ioat_dca_priv { | 99 | struct ioat_dca_priv { |
100 | void __iomem *iobase; | 100 | void __iomem *iobase; |
101 | void *dca_base; | 101 | void __iomem *dca_base; |
102 | int max_requesters; | 102 | int max_requesters; |
103 | int requester_count; | 103 | int requester_count; |
104 | u8 tag_map[IOAT_TAG_MAP_LEN]; | 104 | u8 tag_map[IOAT_TAG_MAP_LEN]; |
@@ -338,7 +338,7 @@ static struct dca_ops ioat2_dca_ops = { | |||
338 | .get_tag = ioat2_dca_get_tag, | 338 | .get_tag = ioat2_dca_get_tag, |
339 | }; | 339 | }; |
340 | 340 | ||
341 | static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset) | 341 | static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) |
342 | { | 342 | { |
343 | int slots = 0; | 343 | int slots = 0; |
344 | u32 req; | 344 | u32 req; |
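The two ioat_dca.c hunks are pure annotation fixes: dca_base and the iobase parameter are MMIO pointers, so marking them __iomem lets sparse (make C=1) warn about any direct dereference that bypasses the MMIO accessors. A small illustration of the distinction, using a hypothetical helper that is not part of the patch:

    static u32 dca_read_reg(void __iomem *dca_base, unsigned long offset)
    {
            return readl(dca_base + offset);        /* ok: MMIO accessor      */
            /* return *(u32 *)(dca_base + offset);     sparse would warn here */
    }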
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c1..318e8a22d814 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -212,14 +212,14 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
212 | u32 copy; | 212 | u32 copy; |
213 | size_t len; | 213 | size_t len; |
214 | dma_addr_t src, dst; | 214 | dma_addr_t src, dst; |
215 | int orig_ack; | 215 | unsigned long orig_flags; |
216 | unsigned int desc_count = 0; | 216 | unsigned int desc_count = 0; |
217 | 217 | ||
218 | /* src and dest and len are stored in the initial descriptor */ | 218 | /* src and dest and len are stored in the initial descriptor */ |
219 | len = first->len; | 219 | len = first->len; |
220 | src = first->src; | 220 | src = first->src; |
221 | dst = first->dst; | 221 | dst = first->dst; |
222 | orig_ack = first->async_tx.ack; | 222 | orig_flags = first->async_tx.flags; |
223 | new = first; | 223 | new = first; |
224 | 224 | ||
225 | spin_lock_bh(&ioat_chan->desc_lock); | 225 | spin_lock_bh(&ioat_chan->desc_lock); |
@@ -228,7 +228,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
228 | do { | 228 | do { |
229 | copy = min_t(size_t, len, ioat_chan->xfercap); | 229 | copy = min_t(size_t, len, ioat_chan->xfercap); |
230 | 230 | ||
231 | new->async_tx.ack = 1; | 231 | async_tx_ack(&new->async_tx); |
232 | 232 | ||
233 | hw = new->hw; | 233 | hw = new->hw; |
234 | hw->size = copy; | 234 | hw->size = copy; |
@@ -264,7 +264,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) | |||
264 | } | 264 | } |
265 | 265 | ||
266 | new->tx_cnt = desc_count; | 266 | new->tx_cnt = desc_count; |
267 | new->async_tx.ack = orig_ack; /* client is in control of this ack */ | 267 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ |
268 | 268 | ||
269 | /* store the original values for use in later cleanup */ | 269 | /* store the original values for use in later cleanup */ |
270 | if (new != first) { | 270 | if (new != first) { |
@@ -304,14 +304,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
304 | u32 copy; | 304 | u32 copy; |
305 | size_t len; | 305 | size_t len; |
306 | dma_addr_t src, dst; | 306 | dma_addr_t src, dst; |
307 | int orig_ack; | 307 | unsigned long orig_flags; |
308 | unsigned int desc_count = 0; | 308 | unsigned int desc_count = 0; |
309 | 309 | ||
310 | /* src and dest and len are stored in the initial descriptor */ | 310 | /* src and dest and len are stored in the initial descriptor */ |
311 | len = first->len; | 311 | len = first->len; |
312 | src = first->src; | 312 | src = first->src; |
313 | dst = first->dst; | 313 | dst = first->dst; |
314 | orig_ack = first->async_tx.ack; | 314 | orig_flags = first->async_tx.flags; |
315 | new = first; | 315 | new = first; |
316 | 316 | ||
317 | /* | 317 | /* |
@@ -321,7 +321,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
321 | do { | 321 | do { |
322 | copy = min_t(size_t, len, ioat_chan->xfercap); | 322 | copy = min_t(size_t, len, ioat_chan->xfercap); |
323 | 323 | ||
324 | new->async_tx.ack = 1; | 324 | async_tx_ack(&new->async_tx); |
325 | 325 | ||
326 | hw = new->hw; | 326 | hw = new->hw; |
327 | hw->size = copy; | 327 | hw->size = copy; |
@@ -349,7 +349,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
349 | } | 349 | } |
350 | 350 | ||
351 | new->tx_cnt = desc_count; | 351 | new->tx_cnt = desc_count; |
352 | new->async_tx.ack = orig_ack; /* client is in control of this ack */ | 352 | new->async_tx.flags = orig_flags; /* client is in control of this ack */ |
353 | 353 | ||
354 | /* store the original values for use in later cleanup */ | 354 | /* store the original values for use in later cleanup */ |
355 | if (new != first) { | 355 | if (new != first) { |
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
714 | new->len = len; | 714 | new->len = len; |
715 | new->dst = dma_dest; | 715 | new->dst = dma_dest; |
716 | new->src = dma_src; | 716 | new->src = dma_src; |
717 | new->async_tx.flags = flags; | ||
717 | return &new->async_tx; | 718 | return &new->async_tx; |
718 | } else | 719 | } else |
719 | return NULL; | 720 | return NULL; |
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
741 | new->len = len; | 742 | new->len = len; |
742 | new->dst = dma_dest; | 743 | new->dst = dma_dest; |
743 | new->src = dma_src; | 744 | new->src = dma_src; |
745 | new->async_tx.flags = flags; | ||
744 | return &new->async_tx; | 746 | return &new->async_tx; |
745 | } else | 747 | } else |
746 | return NULL; | 748 | return NULL; |
@@ -840,7 +842,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
840 | * a completed entry, but not the last, so clean | 842 | * a completed entry, but not the last, so clean |
841 | * up if the client is done with the descriptor | 843 | * up if the client is done with the descriptor |
842 | */ | 844 | */ |
843 | if (desc->async_tx.ack) { | 845 | if (async_tx_test_ack(&desc->async_tx)) { |
844 | list_del(&desc->node); | 846 | list_del(&desc->node); |
845 | list_add_tail(&desc->node, | 847 | list_add_tail(&desc->node, |
846 | &ioat_chan->free_desc); | 848 | &ioat_chan->free_desc); |
@@ -922,17 +924,6 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) | |||
922 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 924 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
923 | } | 925 | } |
924 | 926 | ||
925 | static void ioat_dma_dependency_added(struct dma_chan *chan) | ||
926 | { | ||
927 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); | ||
928 | spin_lock_bh(&ioat_chan->desc_lock); | ||
929 | if (ioat_chan->pending == 0) { | ||
930 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
931 | ioat_dma_memcpy_cleanup(ioat_chan); | ||
932 | } else | ||
933 | spin_unlock_bh(&ioat_chan->desc_lock); | ||
934 | } | ||
935 | |||
936 | /** | 927 | /** |
937 | * ioat_dma_is_complete - poll the status of a IOAT DMA transaction | 928 | * ioat_dma_is_complete - poll the status of a IOAT DMA transaction |
938 | * @chan: IOAT DMA channel handle | 929 | * @chan: IOAT DMA channel handle |
@@ -988,7 +979,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) | |||
988 | desc->hw->size = 0; | 979 | desc->hw->size = 0; |
989 | desc->hw->src_addr = 0; | 980 | desc->hw->src_addr = 0; |
990 | desc->hw->dst_addr = 0; | 981 | desc->hw->dst_addr = 0; |
991 | desc->async_tx.ack = 1; | 982 | async_tx_ack(&desc->async_tx); |
992 | switch (ioat_chan->device->version) { | 983 | switch (ioat_chan->device->version) { |
993 | case IOAT_VER_1_2: | 984 | case IOAT_VER_1_2: |
994 | desc->hw->next = 0; | 985 | desc->hw->next = 0; |
@@ -1314,7 +1305,6 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, | |||
1314 | 1305 | ||
1315 | dma_cap_set(DMA_MEMCPY, device->common.cap_mask); | 1306 | dma_cap_set(DMA_MEMCPY, device->common.cap_mask); |
1316 | device->common.device_is_tx_complete = ioat_dma_is_complete; | 1307 | device->common.device_is_tx_complete = ioat_dma_is_complete; |
1317 | device->common.device_dependency_added = ioat_dma_dependency_added; | ||
1318 | switch (device->version) { | 1308 | switch (device->version) { |
1319 | case IOAT_VER_1_2: | 1309 | case IOAT_VER_1_2: |
1320 | device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; | 1310 | device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; |
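The ioat_dma.c changes track the dmaengine API move from the descriptor's standalone ack member to a DMA_CTRL_ACK bit kept in async_tx.flags; the prep routines now record the caller's flags on the descriptor, and the removed device_dependency_added hook is no longer needed because dependency handling is done by the async_tx core. Roughly, the helpers this patch switches to amount to the following paraphrase, assuming the dmaengine.h definitions of this period; see include/linux/dmaengine.h for the authoritative versions:

    static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
    {
            tx->flags |= DMA_CTRL_ACK;      /* descriptor may be reused/freed */
    }

    static inline int async_tx_test_ack(struct dma_async_tx_descriptor *tx)
    {
            return tx->flags & DMA_CTRL_ACK;
    }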
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 3986d54492bd..762b729672e0 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -63,7 +63,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
63 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) | 63 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) |
64 | { | 64 | { |
65 | BUG_ON(desc->async_tx.cookie < 0); | 65 | BUG_ON(desc->async_tx.cookie < 0); |
66 | spin_lock_bh(&desc->async_tx.lock); | ||
67 | if (desc->async_tx.cookie > 0) { | 66 | if (desc->async_tx.cookie > 0) { |
68 | cookie = desc->async_tx.cookie; | 67 | cookie = desc->async_tx.cookie; |
69 | desc->async_tx.cookie = 0; | 68 | desc->async_tx.cookie = 0; |
@@ -101,7 +100,6 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
101 | 100 | ||
102 | /* run dependent operations */ | 101 | /* run dependent operations */ |
103 | async_tx_run_dependencies(&desc->async_tx); | 102 | async_tx_run_dependencies(&desc->async_tx); |
104 | spin_unlock_bh(&desc->async_tx.lock); | ||
105 | 103 | ||
106 | return cookie; | 104 | return cookie; |
107 | } | 105 | } |
@@ -113,7 +111,7 @@ iop_adma_clean_slot(struct iop_adma_desc_slot *desc, | |||
113 | /* the client is allowed to attach dependent operations | 111 | /* the client is allowed to attach dependent operations |
114 | * until 'ack' is set | 112 | * until 'ack' is set |
115 | */ | 113 | */ |
116 | if (!desc->async_tx.ack) | 114 | if (!async_tx_test_ack(&desc->async_tx)) |
117 | return 0; | 115 | return 0; |
118 | 116 | ||
119 | /* leave the last descriptor in the chain | 117 | /* leave the last descriptor in the chain |
@@ -140,7 +138,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
140 | int busy = iop_chan_is_busy(iop_chan); | 138 | int busy = iop_chan_is_busy(iop_chan); |
141 | int seen_current = 0, slot_cnt = 0, slots_per_op = 0; | 139 | int seen_current = 0, slot_cnt = 0, slots_per_op = 0; |
142 | 140 | ||
143 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 141 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
144 | /* free completed slots from the chain starting with | 142 | /* free completed slots from the chain starting with |
145 | * the oldest descriptor | 143 | * the oldest descriptor |
146 | */ | 144 | */ |
@@ -150,7 +148,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
150 | "this_desc: %#x next_desc: %#x ack: %d\n", | 148 | "this_desc: %#x next_desc: %#x ack: %d\n", |
151 | iter->async_tx.cookie, iter->idx, busy, | 149 | iter->async_tx.cookie, iter->idx, busy, |
152 | iter->async_tx.phys, iop_desc_get_next_desc(iter), | 150 | iter->async_tx.phys, iop_desc_get_next_desc(iter), |
153 | iter->async_tx.ack); | 151 | async_tx_test_ack(&iter->async_tx)); |
154 | prefetch(_iter); | 152 | prefetch(_iter); |
155 | prefetch(&_iter->async_tx); | 153 | prefetch(&_iter->async_tx); |
156 | 154 | ||
@@ -257,8 +255,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
257 | 255 | ||
258 | BUG_ON(!seen_current); | 256 | BUG_ON(!seen_current); |
259 | 257 | ||
260 | iop_chan_idle(busy, iop_chan); | ||
261 | |||
262 | if (cookie > 0) { | 258 | if (cookie > 0) { |
263 | iop_chan->completed_cookie = cookie; | 259 | iop_chan->completed_cookie = cookie; |
264 | pr_debug("\tcompleted cookie %d\n", cookie); | 260 | pr_debug("\tcompleted cookie %d\n", cookie); |
@@ -275,8 +271,11 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
275 | 271 | ||
276 | static void iop_adma_tasklet(unsigned long data) | 272 | static void iop_adma_tasklet(unsigned long data) |
277 | { | 273 | { |
278 | struct iop_adma_chan *chan = (struct iop_adma_chan *) data; | 274 | struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; |
279 | __iop_adma_slot_cleanup(chan); | 275 | |
276 | spin_lock(&iop_chan->lock); | ||
277 | __iop_adma_slot_cleanup(iop_chan); | ||
278 | spin_unlock(&iop_chan->lock); | ||
280 | } | 279 | } |
281 | 280 | ||
282 | static struct iop_adma_desc_slot * | 281 | static struct iop_adma_desc_slot * |
@@ -339,9 +338,7 @@ retry: | |||
339 | 338 | ||
340 | /* pre-ack all but the last descriptor */ | 339 | /* pre-ack all but the last descriptor */ |
341 | if (num_slots != slots_per_op) | 340 | if (num_slots != slots_per_op) |
342 | iter->async_tx.ack = 1; | 341 | async_tx_ack(&iter->async_tx); |
343 | else | ||
344 | iter->async_tx.ack = 0; | ||
345 | 342 | ||
346 | list_add_tail(&iter->chain_node, &chain); | 343 | list_add_tail(&iter->chain_node, &chain); |
347 | alloc_tail = iter; | 344 | alloc_tail = iter; |
@@ -438,7 +435,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
438 | spin_unlock_bh(&iop_chan->lock); | 435 | spin_unlock_bh(&iop_chan->lock); |
439 | 436 | ||
440 | dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", | 437 | dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", |
441 | __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx); | 438 | __func__, sw_desc->async_tx.cookie, sw_desc->idx); |
442 | 439 | ||
443 | return cookie; | 440 | return cookie; |
444 | } | 441 | } |
@@ -514,13 +511,13 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) | |||
514 | } | 511 | } |
515 | 512 | ||
516 | static struct dma_async_tx_descriptor * | 513 | static struct dma_async_tx_descriptor * |
517 | iop_adma_prep_dma_interrupt(struct dma_chan *chan) | 514 | iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) |
518 | { | 515 | { |
519 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 516 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
520 | struct iop_adma_desc_slot *sw_desc, *grp_start; | 517 | struct iop_adma_desc_slot *sw_desc, *grp_start; |
521 | int slot_cnt, slots_per_op; | 518 | int slot_cnt, slots_per_op; |
522 | 519 | ||
523 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 520 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
524 | 521 | ||
525 | spin_lock_bh(&iop_chan->lock); | 522 | spin_lock_bh(&iop_chan->lock); |
526 | slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); | 523 | slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); |
@@ -529,6 +526,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan) | |||
529 | grp_start = sw_desc->group_head; | 526 | grp_start = sw_desc->group_head; |
530 | iop_desc_init_interrupt(grp_start, iop_chan); | 527 | iop_desc_init_interrupt(grp_start, iop_chan); |
531 | grp_start->unmap_len = 0; | 528 | grp_start->unmap_len = 0; |
529 | sw_desc->async_tx.flags = flags; | ||
532 | } | 530 | } |
533 | spin_unlock_bh(&iop_chan->lock); | 531 | spin_unlock_bh(&iop_chan->lock); |
534 | 532 | ||
@@ -548,7 +546,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
548 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 546 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); |
549 | 547 | ||
550 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 548 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
551 | __FUNCTION__, len); | 549 | __func__, len); |
552 | 550 | ||
553 | spin_lock_bh(&iop_chan->lock); | 551 | spin_lock_bh(&iop_chan->lock); |
554 | slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); | 552 | slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); |
@@ -561,6 +559,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
561 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); | 559 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); |
562 | sw_desc->unmap_src_cnt = 1; | 560 | sw_desc->unmap_src_cnt = 1; |
563 | sw_desc->unmap_len = len; | 561 | sw_desc->unmap_len = len; |
562 | sw_desc->async_tx.flags = flags; | ||
564 | } | 563 | } |
565 | spin_unlock_bh(&iop_chan->lock); | 564 | spin_unlock_bh(&iop_chan->lock); |
566 | 565 | ||
@@ -580,7 +579,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | |||
580 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 579 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); |
581 | 580 | ||
582 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 581 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
583 | __FUNCTION__, len); | 582 | __func__, len); |
584 | 583 | ||
585 | spin_lock_bh(&iop_chan->lock); | 584 | spin_lock_bh(&iop_chan->lock); |
586 | slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); | 585 | slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); |
@@ -593,6 +592,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | |||
593 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 592 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
594 | sw_desc->unmap_src_cnt = 1; | 593 | sw_desc->unmap_src_cnt = 1; |
595 | sw_desc->unmap_len = len; | 594 | sw_desc->unmap_len = len; |
595 | sw_desc->async_tx.flags = flags; | ||
596 | } | 596 | } |
597 | spin_unlock_bh(&iop_chan->lock); | 597 | spin_unlock_bh(&iop_chan->lock); |
598 | 598 | ||
@@ -614,7 +614,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
614 | 614 | ||
615 | dev_dbg(iop_chan->device->common.dev, | 615 | dev_dbg(iop_chan->device->common.dev, |
616 | "%s src_cnt: %d len: %u flags: %lx\n", | 616 | "%s src_cnt: %d len: %u flags: %lx\n", |
617 | __FUNCTION__, src_cnt, len, flags); | 617 | __func__, src_cnt, len, flags); |
618 | 618 | ||
619 | spin_lock_bh(&iop_chan->lock); | 619 | spin_lock_bh(&iop_chan->lock); |
620 | slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); | 620 | slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); |
@@ -626,6 +626,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
626 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 626 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
627 | sw_desc->unmap_src_cnt = src_cnt; | 627 | sw_desc->unmap_src_cnt = src_cnt; |
628 | sw_desc->unmap_len = len; | 628 | sw_desc->unmap_len = len; |
629 | sw_desc->async_tx.flags = flags; | ||
629 | while (src_cnt--) | 630 | while (src_cnt--) |
630 | iop_desc_set_xor_src_addr(grp_start, src_cnt, | 631 | iop_desc_set_xor_src_addr(grp_start, src_cnt, |
631 | dma_src[src_cnt]); | 632 | dma_src[src_cnt]); |
@@ -648,7 +649,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
648 | return NULL; | 649 | return NULL; |
649 | 650 | ||
650 | dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", | 651 | dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", |
651 | __FUNCTION__, src_cnt, len); | 652 | __func__, src_cnt, len); |
652 | 653 | ||
653 | spin_lock_bh(&iop_chan->lock); | 654 | spin_lock_bh(&iop_chan->lock); |
654 | slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); | 655 | slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); |
@@ -659,9 +660,10 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
659 | iop_desc_set_zero_sum_byte_count(grp_start, len); | 660 | iop_desc_set_zero_sum_byte_count(grp_start, len); |
660 | grp_start->xor_check_result = result; | 661 | grp_start->xor_check_result = result; |
661 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", | 662 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", |
662 | __FUNCTION__, grp_start->xor_check_result); | 663 | __func__, grp_start->xor_check_result); |
663 | sw_desc->unmap_src_cnt = src_cnt; | 664 | sw_desc->unmap_src_cnt = src_cnt; |
664 | sw_desc->unmap_len = len; | 665 | sw_desc->unmap_len = len; |
666 | sw_desc->async_tx.flags = flags; | ||
665 | while (src_cnt--) | 667 | while (src_cnt--) |
666 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, | 668 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, |
667 | dma_src[src_cnt]); | 669 | dma_src[src_cnt]); |
@@ -671,12 +673,6 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
671 | return sw_desc ? &sw_desc->async_tx : NULL; | 673 | return sw_desc ? &sw_desc->async_tx : NULL; |
672 | } | 674 | } |
673 | 675 | ||
674 | static void iop_adma_dependency_added(struct dma_chan *chan) | ||
675 | { | ||
676 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | ||
677 | tasklet_schedule(&iop_chan->irq_tasklet); | ||
678 | } | ||
679 | |||
680 | static void iop_adma_free_chan_resources(struct dma_chan *chan) | 676 | static void iop_adma_free_chan_resources(struct dma_chan *chan) |
681 | { | 677 | { |
682 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); | 678 | struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); |
@@ -700,7 +696,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan) | |||
700 | iop_chan->last_used = NULL; | 696 | iop_chan->last_used = NULL; |
701 | 697 | ||
702 | dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", | 698 | dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", |
703 | __FUNCTION__, iop_chan->slots_allocated); | 699 | __func__, iop_chan->slots_allocated); |
704 | spin_unlock_bh(&iop_chan->lock); | 700 | spin_unlock_bh(&iop_chan->lock); |
705 | 701 | ||
706 | /* one is ok since we left it on there on purpose */ | 702 | /* one is ok since we left it on there on purpose */ |
@@ -753,7 +749,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data) | |||
753 | { | 749 | { |
754 | struct iop_adma_chan *chan = data; | 750 | struct iop_adma_chan *chan = data; |
755 | 751 | ||
756 | dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); | 752 | dev_dbg(chan->device->common.dev, "%s\n", __func__); |
757 | 753 | ||
758 | tasklet_schedule(&chan->irq_tasklet); | 754 | tasklet_schedule(&chan->irq_tasklet); |
759 | 755 | ||
@@ -766,7 +762,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data) | |||
766 | { | 762 | { |
767 | struct iop_adma_chan *chan = data; | 763 | struct iop_adma_chan *chan = data; |
768 | 764 | ||
769 | dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); | 765 | dev_dbg(chan->device->common.dev, "%s\n", __func__); |
770 | 766 | ||
771 | tasklet_schedule(&chan->irq_tasklet); | 767 | tasklet_schedule(&chan->irq_tasklet); |
772 | 768 | ||
@@ -823,7 +819,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
823 | int err = 0; | 819 | int err = 0; |
824 | struct iop_adma_chan *iop_chan; | 820 | struct iop_adma_chan *iop_chan; |
825 | 821 | ||
826 | dev_dbg(device->common.dev, "%s\n", __FUNCTION__); | 822 | dev_dbg(device->common.dev, "%s\n", __func__); |
827 | 823 | ||
828 | src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); | 824 | src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); |
829 | if (!src) | 825 | if (!src) |
@@ -854,11 +850,11 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
854 | src_dma = dma_map_single(dma_chan->device->dev, src, | 850 | src_dma = dma_map_single(dma_chan->device->dev, src, |
855 | IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); | 851 | IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE); |
856 | tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, | 852 | tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, |
857 | IOP_ADMA_TEST_SIZE, 1); | 853 | IOP_ADMA_TEST_SIZE, |
854 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
858 | 855 | ||
859 | cookie = iop_adma_tx_submit(tx); | 856 | cookie = iop_adma_tx_submit(tx); |
860 | iop_adma_issue_pending(dma_chan); | 857 | iop_adma_issue_pending(dma_chan); |
861 | async_tx_ack(tx); | ||
862 | msleep(1); | 858 | msleep(1); |
863 | 859 | ||
864 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != | 860 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != |
@@ -906,7 +902,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
906 | int err = 0; | 902 | int err = 0; |
907 | struct iop_adma_chan *iop_chan; | 903 | struct iop_adma_chan *iop_chan; |
908 | 904 | ||
909 | dev_dbg(device->common.dev, "%s\n", __FUNCTION__); | 905 | dev_dbg(device->common.dev, "%s\n", __func__); |
910 | 906 | ||
911 | for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { | 907 | for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { |
912 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | 908 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); |
@@ -954,11 +950,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
954 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], | 950 | dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], |
955 | 0, PAGE_SIZE, DMA_TO_DEVICE); | 951 | 0, PAGE_SIZE, DMA_TO_DEVICE); |
956 | tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 952 | tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
957 | IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1); | 953 | IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, |
954 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
958 | 955 | ||
959 | cookie = iop_adma_tx_submit(tx); | 956 | cookie = iop_adma_tx_submit(tx); |
960 | iop_adma_issue_pending(dma_chan); | 957 | iop_adma_issue_pending(dma_chan); |
961 | async_tx_ack(tx); | ||
962 | msleep(8); | 958 | msleep(8); |
963 | 959 | ||
964 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != | 960 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != |
@@ -1001,11 +997,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1001 | DMA_TO_DEVICE); | 997 | DMA_TO_DEVICE); |
1002 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, | 998 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, |
1003 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, | 999 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, |
1004 | &zero_sum_result, 1); | 1000 | &zero_sum_result, |
1001 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1005 | 1002 | ||
1006 | cookie = iop_adma_tx_submit(tx); | 1003 | cookie = iop_adma_tx_submit(tx); |
1007 | iop_adma_issue_pending(dma_chan); | 1004 | iop_adma_issue_pending(dma_chan); |
1008 | async_tx_ack(tx); | ||
1009 | msleep(8); | 1005 | msleep(8); |
1010 | 1006 | ||
1011 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | 1007 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { |
@@ -1025,11 +1021,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1025 | /* test memset */ | 1021 | /* test memset */ |
1026 | dma_addr = dma_map_page(dma_chan->device->dev, dest, 0, | 1022 | dma_addr = dma_map_page(dma_chan->device->dev, dest, 0, |
1027 | PAGE_SIZE, DMA_FROM_DEVICE); | 1023 | PAGE_SIZE, DMA_FROM_DEVICE); |
1028 | tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1); | 1024 | tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, |
1025 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1029 | 1026 | ||
1030 | cookie = iop_adma_tx_submit(tx); | 1027 | cookie = iop_adma_tx_submit(tx); |
1031 | iop_adma_issue_pending(dma_chan); | 1028 | iop_adma_issue_pending(dma_chan); |
1032 | async_tx_ack(tx); | ||
1033 | msleep(8); | 1029 | msleep(8); |
1034 | 1030 | ||
1035 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | 1031 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { |
@@ -1057,11 +1053,11 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
1057 | DMA_TO_DEVICE); | 1053 | DMA_TO_DEVICE); |
1058 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, | 1054 | tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs, |
1059 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, | 1055 | IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE, |
1060 | &zero_sum_result, 1); | 1056 | &zero_sum_result, |
1057 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1061 | 1058 | ||
1062 | cookie = iop_adma_tx_submit(tx); | 1059 | cookie = iop_adma_tx_submit(tx); |
1063 | iop_adma_issue_pending(dma_chan); | 1060 | iop_adma_issue_pending(dma_chan); |
1064 | async_tx_ack(tx); | ||
1065 | msleep(8); | 1061 | msleep(8); |
1066 | 1062 | ||
1067 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | 1063 | if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { |
@@ -1159,7 +1155,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1159 | } | 1155 | } |
1160 | 1156 | ||
1161 | dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", | 1157 | dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", |
1162 | __FUNCTION__, adev->dma_desc_pool_virt, | 1158 | __func__, adev->dma_desc_pool_virt, |
1163 | (void *) adev->dma_desc_pool); | 1159 | (void *) adev->dma_desc_pool); |
1164 | 1160 | ||
1165 | adev->id = plat_data->hw_id; | 1161 | adev->id = plat_data->hw_id; |
@@ -1177,7 +1173,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1177 | dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; | 1173 | dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; |
1178 | dma_dev->device_is_tx_complete = iop_adma_is_complete; | 1174 | dma_dev->device_is_tx_complete = iop_adma_is_complete; |
1179 | dma_dev->device_issue_pending = iop_adma_issue_pending; | 1175 | dma_dev->device_issue_pending = iop_adma_issue_pending; |
1180 | dma_dev->device_dependency_added = iop_adma_dependency_added; | ||
1181 | dma_dev->dev = &pdev->dev; | 1176 | dma_dev->dev = &pdev->dev; |
1182 | 1177 | ||
1183 | /* set prep routines based on capability */ | 1178 | /* set prep routines based on capability */ |
@@ -1232,9 +1227,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1232 | } | 1227 | } |
1233 | 1228 | ||
1234 | spin_lock_init(&iop_chan->lock); | 1229 | spin_lock_init(&iop_chan->lock); |
1235 | init_timer(&iop_chan->cleanup_watchdog); | ||
1236 | iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan; | ||
1237 | iop_chan->cleanup_watchdog.function = iop_adma_tasklet; | ||
1238 | INIT_LIST_HEAD(&iop_chan->chain); | 1230 | INIT_LIST_HEAD(&iop_chan->chain); |
1239 | INIT_LIST_HEAD(&iop_chan->all_slots); | 1231 | INIT_LIST_HEAD(&iop_chan->all_slots); |
1240 | INIT_RCU_HEAD(&iop_chan->common.rcu); | 1232 | INIT_RCU_HEAD(&iop_chan->common.rcu); |
@@ -1289,7 +1281,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1289 | dma_cookie_t cookie; | 1281 | dma_cookie_t cookie; |
1290 | int slot_cnt, slots_per_op; | 1282 | int slot_cnt, slots_per_op; |
1291 | 1283 | ||
1292 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 1284 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
1293 | 1285 | ||
1294 | spin_lock_bh(&iop_chan->lock); | 1286 | spin_lock_bh(&iop_chan->lock); |
1295 | slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); | 1287 | slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); |
@@ -1298,7 +1290,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1298 | grp_start = sw_desc->group_head; | 1290 | grp_start = sw_desc->group_head; |
1299 | 1291 | ||
1300 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); | 1292 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); |
1301 | sw_desc->async_tx.ack = 1; | 1293 | async_tx_ack(&sw_desc->async_tx); |
1302 | iop_desc_init_memcpy(grp_start, 0); | 1294 | iop_desc_init_memcpy(grp_start, 0); |
1303 | iop_desc_set_byte_count(grp_start, iop_chan, 0); | 1295 | iop_desc_set_byte_count(grp_start, iop_chan, 0); |
1304 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); | 1296 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); |
@@ -1346,7 +1338,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1346 | dma_cookie_t cookie; | 1338 | dma_cookie_t cookie; |
1347 | int slot_cnt, slots_per_op; | 1339 | int slot_cnt, slots_per_op; |
1348 | 1340 | ||
1349 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 1341 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
1350 | 1342 | ||
1351 | spin_lock_bh(&iop_chan->lock); | 1343 | spin_lock_bh(&iop_chan->lock); |
1352 | slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); | 1344 | slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); |
@@ -1354,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1354 | if (sw_desc) { | 1346 | if (sw_desc) { |
1355 | grp_start = sw_desc->group_head; | 1347 | grp_start = sw_desc->group_head; |
1356 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); | 1348 | list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); |
1357 | sw_desc->async_tx.ack = 1; | 1349 | async_tx_ack(&sw_desc->async_tx); |
1358 | iop_desc_init_null_xor(grp_start, 2, 0); | 1350 | iop_desc_init_null_xor(grp_start, 2, 0); |
1359 | iop_desc_set_byte_count(grp_start, iop_chan, 0); | 1351 | iop_desc_set_byte_count(grp_start, iop_chan, 0); |
1360 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); | 1352 | iop_desc_set_dest_addr(grp_start, iop_chan, 0); |