71 files changed, 1979 insertions, 2272 deletions
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
index e1f343c7a34b..f69bcf5a6343 100644
--- a/Documentation/devicetree/bindings/dma/atmel-dma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -28,7 +28,7 @@ The three cells in order are: | |||
28 | dependent: | 28 | dependent: |
29 | - bit 7-0: peripheral identifier for the hardware handshaking interface. The | 29 | - bit 7-0: peripheral identifier for the hardware handshaking interface. The |
30 | identifier can be different for tx and rx. | 30 | identifier can be different for tx and rx. |
31 | - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP. | 31 | - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP. |
32 | 32 | ||
33 | Example: | 33 | Example: |
34 | 34 | ||
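The binding's own example is elided by this hunk. Purely as an illustration of the third-cell layout described above (bits 7-0 select the peripheral handshaking interface, bits 11-8 the FIFO configuration), a hypothetical consumer could decode the cell as in the following C sketch; the function and variable names are illustrative only and are not taken from any driver.

/*
 * Illustrative sketch only: the field layout follows the binding text above;
 * the names are hypothetical.
 */
#include <linux/types.h>

static void atmel_dma_cell_decode(u32 cell)
{
	u8 perid    = cell & 0xff;		/* bits 7-0: peripheral identifier */
	u8 fifo_cfg = (cell >> 8) & 0xf;	/* bits 11-8: FIFO configuration */

	switch (fifo_cfg) {
	case 0:		/* half FIFO */
		break;
	case 1:		/* ALAP */
		break;
	case 2:		/* ASAP */
		break;
	}
	(void)perid;	/* peripheral id picks the hardware handshaking interface */
}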
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
index 2a4b4bce6110..7fc1b010fa75 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
@@ -1,33 +1,30 @@ | |||
1 | * Freescale 83xx DMA Controller | 1 | * Freescale DMA Controllers |
2 | 2 | ||
3 | Freescale PowerPC 83xx have on chip general purpose DMA controllers. | 3 | ** Freescale Elo DMA Controller |
4 | This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx | ||
5 | series chips such as mpc8315, mpc8349, mpc8379 etc. | ||
4 | 6 | ||
5 | Required properties: | 7 | Required properties: |
6 | 8 | ||
7 | - compatible : compatible list, contains 2 entries, first is | 9 | - compatible : must include "fsl,elo-dma" |
8 | "fsl,CHIP-dma", where CHIP is the processor | 10 | - reg : DMA General Status Register, i.e. DGSR which contains |
9 | (mpc8349, mpc8360, etc.) and the second is | 11 | status for all the 4 DMA channels |
10 | "fsl,elo-dma" | 12 | - ranges : describes the mapping between the address space of the |
11 | - reg : <registers mapping for DMA general status reg> | 13 | DMA channels and the address space of the DMA controller |
12 | - ranges : Should be defined as specified in 1) to describe the | ||
13 | DMA controller channels. | ||
14 | - cell-index : controller index. 0 for controller @ 0x8100 | 14 | - cell-index : controller index. 0 for controller @ 0x8100 |
15 | - interrupts : <interrupt mapping for DMA IRQ> | 15 | - interrupts : interrupt specifier for DMA IRQ |
16 | - interrupt-parent : optional, if needed for interrupt mapping | 16 | - interrupt-parent : optional, if needed for interrupt mapping |
17 | 17 | ||
18 | |||
19 | - DMA channel nodes: | 18 | - DMA channel nodes: |
20 | - compatible : compatible list, contains 2 entries, first is | 19 | - compatible : must include "fsl,elo-dma-channel" |
21 | "fsl,CHIP-dma-channel", where CHIP is the processor | 20 | However, see note below. |
22 | (mpc8349, mpc8350, etc.) and the second is | 21 | - reg : DMA channel specific registers |
23 | "fsl,elo-dma-channel". However, see note below. | 22 | - cell-index : DMA channel index starts at 0. |
24 | - reg : <registers mapping for channel> | ||
25 | - cell-index : dma channel index starts at 0. | ||
26 | 23 | ||
27 | Optional properties: | 24 | Optional properties: |
28 | - interrupts : <interrupt mapping for DMA channel IRQ> | 25 | - interrupts : interrupt specifier for DMA channel IRQ |
29 | (on 83xx this is expected to be identical to | 26 | (on 83xx this is expected to be identical to |
30 | the interrupts property of the parent node) | 27 | the interrupts property of the parent node) |
31 | - interrupt-parent : optional, if needed for interrupt mapping | 28 | - interrupt-parent : optional, if needed for interrupt mapping |
32 | 29 | ||
33 | Example: | 30 | Example: |
@@ -70,30 +67,27 @@ Example: | |||
70 | }; | 67 | }; |
71 | }; | 68 | }; |
72 | 69 | ||
73 | * Freescale 85xx/86xx DMA Controller | 70 | ** Freescale EloPlus DMA Controller |
74 | 71 | This is a 4-channel DMA controller with extended addresses and chaining, | |
75 | Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers. | 72 | mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as |
73 | mpc8540, mpc8641, p4080, bsc9131, etc. | ||
76 | 74 | ||
77 | Required properties: | 75 | Required properties: |
78 | 76 | ||
79 | - compatible : compatible list, contains 2 entries, first is | 77 | - compatible : must include "fsl,eloplus-dma" |
80 | "fsl,CHIP-dma", where CHIP is the processor | 78 | - reg : DMA General Status Register, i.e. DGSR which contains |
81 | (mpc8540, mpc8540, etc.) and the second is | 79 | status for all the 4 DMA channels |
82 | "fsl,eloplus-dma" | ||
83 | - reg : <registers mapping for DMA general status reg> | ||
84 | - cell-index : controller index. 0 for controller @ 0x21000, | 80 | - cell-index : controller index. 0 for controller @ 0x21000, |
85 | 1 for controller @ 0xc000 | 81 | 1 for controller @ 0xc000 |
86 | - ranges : Should be defined as specified in 1) to describe the | 82 | - ranges : describes the mapping between the address space of the |
87 | DMA controller channels. | 83 | DMA channels and the address space of the DMA controller |
88 | 84 | ||
89 | - DMA channel nodes: | 85 | - DMA channel nodes: |
90 | - compatible : compatible list, contains 2 entries, first is | 86 | - compatible : must include "fsl,eloplus-dma-channel" |
91 | "fsl,CHIP-dma-channel", where CHIP is the processor | 87 | However, see note below. |
92 | (mpc8540, mpc8560, etc.) and the second is | 88 | - cell-index : DMA channel index starts at 0. |
93 | "fsl,eloplus-dma-channel". However, see note below. | 89 | - reg : DMA channel specific registers |
94 | - cell-index : dma channel index starts at 0. | 90 | - interrupts : interrupt specifier for DMA channel IRQ |
95 | - reg : <registers mapping for channel> | ||
96 | - interrupts : <interrupt mapping for DMA channel IRQ> | ||
97 | - interrupt-parent : optional, if needed for interrupt mapping | 91 | - interrupt-parent : optional, if needed for interrupt mapping |
98 | 92 | ||
99 | Example: | 93 | Example: |
@@ -134,6 +128,76 @@ Example: | |||
134 | }; | 128 | }; |
135 | }; | 129 | }; |
136 | 130 | ||
131 | ** Freescale Elo3 DMA Controller | ||
132 | This DMA controller has the same function as EloPlus, except that Elo3 has 8 | ||
133 | channels while EloPlus has only 4. It is used in Freescale Txxx and Bxxx | ||
134 | series chips, such as t1040, t4240, b4860. | ||
135 | |||
136 | Required properties: | ||
137 | |||
138 | - compatible : must include "fsl,elo3-dma" | ||
139 | - reg : contains two entries for DMA General Status Registers, | ||
140 | i.e. DGSR0, which includes status for channels 1-4, and | ||
141 | DGSR1 for channels 5-8 | ||
142 | - ranges : describes the mapping between the address space of the | ||
143 | DMA channels and the address space of the DMA controller | ||
144 | |||
145 | - DMA channel nodes: | ||
146 | - compatible : must include "fsl,eloplus-dma-channel" | ||
147 | - reg : DMA channel specific registers | ||
148 | - interrupts : interrupt specifier for DMA channel IRQ | ||
149 | - interrupt-parent : optional, if needed for interrupt mapping | ||
150 | |||
151 | Example: | ||
152 | dma@100300 { | ||
153 | #address-cells = <1>; | ||
154 | #size-cells = <1>; | ||
155 | compatible = "fsl,elo3-dma"; | ||
156 | reg = <0x100300 0x4>, | ||
157 | <0x100600 0x4>; | ||
158 | ranges = <0x0 0x100100 0x500>; | ||
159 | dma-channel@0 { | ||
160 | compatible = "fsl,eloplus-dma-channel"; | ||
161 | reg = <0x0 0x80>; | ||
162 | interrupts = <28 2 0 0>; | ||
163 | }; | ||
164 | dma-channel@80 { | ||
165 | compatible = "fsl,eloplus-dma-channel"; | ||
166 | reg = <0x80 0x80>; | ||
167 | interrupts = <29 2 0 0>; | ||
168 | }; | ||
169 | dma-channel@100 { | ||
170 | compatible = "fsl,eloplus-dma-channel"; | ||
171 | reg = <0x100 0x80>; | ||
172 | interrupts = <30 2 0 0>; | ||
173 | }; | ||
174 | dma-channel@180 { | ||
175 | compatible = "fsl,eloplus-dma-channel"; | ||
176 | reg = <0x180 0x80>; | ||
177 | interrupts = <31 2 0 0>; | ||
178 | }; | ||
179 | dma-channel@300 { | ||
180 | compatible = "fsl,eloplus-dma-channel"; | ||
181 | reg = <0x300 0x80>; | ||
182 | interrupts = <76 2 0 0>; | ||
183 | }; | ||
184 | dma-channel@380 { | ||
185 | compatible = "fsl,eloplus-dma-channel"; | ||
186 | reg = <0x380 0x80>; | ||
187 | interrupts = <77 2 0 0>; | ||
188 | }; | ||
189 | dma-channel@400 { | ||
190 | compatible = "fsl,eloplus-dma-channel"; | ||
191 | reg = <0x400 0x80>; | ||
192 | interrupts = <78 2 0 0>; | ||
193 | }; | ||
194 | dma-channel@480 { | ||
195 | compatible = "fsl,eloplus-dma-channel"; | ||
196 | reg = <0x480 0x80>; | ||
197 | interrupts = <79 2 0 0>; | ||
198 | }; | ||
199 | }; | ||
200 | |||
137 | Note on DMA channel compatible properties: The compatible property must say | 201 | Note on DMA channel compatible properties: The compatible property must say |
138 | "fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA | 202 | "fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA |
139 | driver (fsldma). Any DMA channel used by fsldma cannot be used by another | 203 | driver (fsldma). Any DMA channel used by fsldma cannot be used by another |
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt
index a2b5663eae26..dd77a81bdb80 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmatest.txt
@@ -15,39 +15,48 @@ be built as module or inside kernel. Let's consider those cases. | |||
15 | 15 | ||
16 | Part 2 - When dmatest is built as a module... | 16 | Part 2 - When dmatest is built as a module... |
17 | 17 | ||
18 | After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest | ||
19 | folder with nodes will be created. There are two important files located. First | ||
20 | is the 'run' node that controls run and stop phases of the test, and the second | ||
21 | one, 'results', is used to get the test case results. | ||
22 | |||
23 | Note that in this case test will not run on load automatically. | ||
24 | |||
25 | Example of usage: | 18 | Example of usage: |
19 | % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1 | ||
20 | |||
21 | ...or: | ||
22 | % modprobe dmatest | ||
26 | % echo dma0chan0 > /sys/module/dmatest/parameters/channel | 23 | % echo dma0chan0 > /sys/module/dmatest/parameters/channel |
27 | % echo 2000 > /sys/module/dmatest/parameters/timeout | 24 | % echo 2000 > /sys/module/dmatest/parameters/timeout |
28 | % echo 1 > /sys/module/dmatest/parameters/iterations | 25 | % echo 1 > /sys/module/dmatest/parameters/iterations |
29 | % echo 1 > /sys/kernel/debug/dmatest/run | 26 | % echo 1 > /sys/module/dmatest/parameters/run |
27 | |||
28 | ...or on the kernel command line: | ||
29 | |||
30 | dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1 | ||
30 | 31 | ||
31 | Hint: available channel list could be extracted by running the following | 32 | Hint: available channel list could be extracted by running the following |
32 | command: | 33 | command: |
33 | % ls -1 /sys/class/dma/ | 34 | % ls -1 /sys/class/dma/ |
34 | 35 | ||
35 | After a while you will start to get messages about current status or error like | 36 | Once started, a message like "dmatest: Started 1 threads using dma0chan0" is |
36 | in the original code. | 37 | emitted. After that, only test failure messages are reported until the test |
38 | stops. | ||
37 | 39 | ||
38 | Note that running a new test will not stop any in progress test. | 40 | Note that running a new test will not stop any in progress test. |
39 | 41 | ||
40 | The following command should return actual state of the test. | 42 | The following command returns the state of the test. |
41 | % cat /sys/kernel/debug/dmatest/run | 43 | % cat /sys/module/dmatest/parameters/run |
42 | 44 | ||
43 | To wait for test done the user may perform a busy loop that checks the state. | 45 | To wait for test completion, userspace can poll 'run' until it is false, or use |
44 | 46 | the wait parameter. Specifying 'wait=1' when loading the module causes module | |
45 | % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] | 47 | initialization to pause until a test run has completed, while reading |
46 | > do | 48 | /sys/module/dmatest/parameters/wait waits for any running test to complete |
47 | > echo -n "." | 49 | before returning. For example, the following scripts wait for 42 tests |
48 | > sleep 1 | 50 | to complete before exiting. Note that if 'iterations' is set to 'infinite' then |
49 | > done | 51 | waiting is disabled. |
50 | > echo | 52 | |
53 | Example: | ||
54 | % modprobe dmatest run=1 iterations=42 wait=1 | ||
55 | % modprobe -r dmatest | ||
56 | ...or: | ||
57 | % modprobe dmatest run=1 iterations=42 | ||
58 | % cat /sys/module/dmatest/parameters/wait | ||
59 | % modprobe -r dmatest | ||
51 | 60 | ||
52 | Part 3 - When built-in in the kernel... | 61 | Part 3 - When built-in in the kernel... |
53 | 62 | ||
@@ -62,21 +71,22 @@ case. You always could check them at run-time by running | |||
62 | 71 | ||
63 | Part 4 - Gathering the test results | 72 | Part 4 - Gathering the test results |
64 | 73 | ||
65 | The module provides a storage for the test results in the memory. The gathered | 74 | Test results are printed to the kernel log buffer with the format: |
66 | data could be used after test is done. | ||
67 | 75 | ||
68 | The special file 'results' in the debugfs represents gathered data of the in | 76 | "dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)" |
69 | progress test. The messages collected are printed to the kernel log as well. | ||
70 | 77 | ||
71 | Example of output: | 78 | Example of output: |
72 | % cat /sys/kernel/debug/dmatest/results | 79 | % dmesg | tail -n 1 |
73 | dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) | 80 | dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) |
74 | 81 | ||
75 | The message format is unified across the different types of errors. A number in | 82 | The message format is unified across the different types of errors. A number in |
76 | the parens represents additional information, e.g. error code, error counter, | 83 | the parens represents additional information, e.g. error code, error counter, |
77 | or status. | 84 | or status. A test thread also emits a summary line at completion listing the |
85 | number of tests executed, number that failed, and a result code. | ||
78 | 86 | ||
79 | Comparison between buffers is stored to the dedicated structure. | 87 | Example: |
88 | % dmesg | tail -n 1 | ||
89 | dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0) | ||
80 | 90 | ||
81 | Note that the verify result is now accessible only via file 'results' in the | 91 | The details of a data miscompare error are also emitted, but do not follow the |
82 | debugfs. | 92 | above format. |
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 8e1a0245907f..41bca32409fc 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data) | |||
404 | BIT(slot)); | 404 | BIT(slot)); |
405 | if (edma_cc[ctlr]->intr_data[channel].callback) | 405 | if (edma_cc[ctlr]->intr_data[channel].callback) |
406 | edma_cc[ctlr]->intr_data[channel].callback( | 406 | edma_cc[ctlr]->intr_data[channel].callback( |
407 | channel, DMA_COMPLETE, | 407 | channel, EDMA_DMA_COMPLETE, |
408 | edma_cc[ctlr]->intr_data[channel].data); | 408 | edma_cc[ctlr]->intr_data[channel].data); |
409 | } | 409 | } |
410 | } while (sh_ipr); | 410 | } while (sh_ipr); |
@@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data) | |||
459 | callback) { | 459 | callback) { |
460 | edma_cc[ctlr]->intr_data[k]. | 460 | edma_cc[ctlr]->intr_data[k]. |
461 | callback(k, | 461 | callback(k, |
462 | DMA_CC_ERROR, | 462 | EDMA_DMA_CC_ERROR, |
463 | edma_cc[ctlr]->intr_data | 463 | edma_cc[ctlr]->intr_data |
464 | [k].data); | 464 | [k].data); |
465 | } | 465 | } |
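The two hunks above switch the status value handed to client callbacks from the dmaengine constants to EDMA-private ones. The sketch below shows how a client of the private EDMA API might check the new values; only the EDMA_DMA_* constants come from this change, while the callback signature and header path are assumptions based on the edma_alloc_channel() interface rather than code from this patch.

#include <linux/platform_data/edma.h>

/* Hypothetical client callback; names other than EDMA_DMA_* are assumptions. */
static void sketch_edma_callback(unsigned int channel, u16 ch_status, void *data)
{
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		/* the transfer on 'channel' finished normally */
		break;
	case EDMA_DMA_CC_ERROR:
		/* the channel controller reported an error */
		break;
	}
}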
diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h
index 9b28f1243bdc..240b29ef17db 100644
--- a/arch/arm/include/asm/hardware/iop3xx-adma.h
+++ b/arch/arm/include/asm/hardware/iop3xx-adma.h
@@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, | |||
393 | return slot_cnt; | 393 | return slot_cnt; |
394 | } | 394 | } |
395 | 395 | ||
396 | static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) | ||
397 | { | ||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, | ||
402 | struct iop_adma_chan *chan) | ||
403 | { | ||
404 | union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; | ||
405 | |||
406 | switch (chan->device->id) { | ||
407 | case DMA0_ID: | ||
408 | case DMA1_ID: | ||
409 | return hw_desc.dma->dest_addr; | ||
410 | case AAU_ID: | ||
411 | return hw_desc.aau->dest_addr; | ||
412 | default: | ||
413 | BUG(); | ||
414 | } | ||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | |||
419 | static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, | ||
420 | struct iop_adma_chan *chan) | ||
421 | { | ||
422 | BUG(); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, | 396 | static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, |
427 | struct iop_adma_chan *chan) | 397 | struct iop_adma_chan *chan) |
428 | { | 398 | { |
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 122f86d8c991..250760e08103 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -82,8 +82,6 @@ struct iop_adma_chan { | |||
82 | * @slot_cnt: total slots used in an transaction (group of operations) | 82 | * @slot_cnt: total slots used in an transaction (group of operations) |
83 | * @slots_per_op: number of slots per operation | 83 | * @slots_per_op: number of slots per operation |
84 | * @idx: pool index | 84 | * @idx: pool index |
85 | * @unmap_src_cnt: number of xor sources | ||
86 | * @unmap_len: transaction bytecount | ||
87 | * @tx_list: list of descriptors that are associated with one operation | 85 | * @tx_list: list of descriptors that are associated with one operation |
88 | * @async_tx: support for the async_tx api | 86 | * @async_tx: support for the async_tx api |
89 | * @group_list: list of slots that make up a multi-descriptor transaction | 87 | * @group_list: list of slots that make up a multi-descriptor transaction |
@@ -99,8 +97,6 @@ struct iop_adma_desc_slot { | |||
99 | u16 slot_cnt; | 97 | u16 slot_cnt; |
100 | u16 slots_per_op; | 98 | u16 slots_per_op; |
101 | u16 idx; | 99 | u16 idx; |
102 | u16 unmap_src_cnt; | ||
103 | size_t unmap_len; | ||
104 | struct list_head tx_list; | 100 | struct list_head tx_list; |
105 | struct dma_async_tx_descriptor async_tx; | 101 | struct dma_async_tx_descriptor async_tx; |
106 | union { | 102 | union { |
diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h
index 6d3782d85a9f..a86fd0ed7757 100644
--- a/arch/arm/mach-iop13xx/include/mach/adma.h
+++ b/arch/arm/mach-iop13xx/include/mach/adma.h
@@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) | |||
218 | #define iop_chan_pq_slot_count iop_chan_xor_slot_count | 218 | #define iop_chan_pq_slot_count iop_chan_xor_slot_count |
219 | #define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count | 219 | #define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count |
220 | 220 | ||
221 | static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, | ||
222 | struct iop_adma_chan *chan) | ||
223 | { | ||
224 | struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; | ||
225 | return hw_desc->dest_addr; | ||
226 | } | ||
227 | |||
228 | static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, | ||
229 | struct iop_adma_chan *chan) | ||
230 | { | ||
231 | struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; | ||
232 | return hw_desc->q_dest_addr; | ||
233 | } | ||
234 | |||
235 | static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, | 221 | static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, |
236 | struct iop_adma_chan *chan) | 222 | struct iop_adma_chan *chan) |
237 | { | 223 | { |
@@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, | |||
350 | hw_desc->desc_ctrl = u_desc_ctrl.value; | 336 | hw_desc->desc_ctrl = u_desc_ctrl.value; |
351 | } | 337 | } |
352 | 338 | ||
353 | static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) | ||
354 | { | ||
355 | struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; | ||
356 | union { | ||
357 | u32 value; | ||
358 | struct iop13xx_adma_desc_ctrl field; | ||
359 | } u_desc_ctrl; | ||
360 | |||
361 | u_desc_ctrl.value = hw_desc->desc_ctrl; | ||
362 | return u_desc_ctrl.field.pq_xfer_en; | ||
363 | } | ||
364 | |||
365 | static inline void | 339 | static inline void |
366 | iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, | 340 | iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, |
367 | unsigned long flags) | 341 | unsigned long flags) |
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 4c617bf8cdb2..4f6e48277c46 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -223,13 +223,13 @@ | |||
223 | reg = <0xe2000 0x1000>; | 223 | reg = <0xe2000 0x1000>; |
224 | }; | 224 | }; |
225 | 225 | ||
226 | /include/ "qoriq-dma-0.dtsi" | 226 | /include/ "elo3-dma-0.dtsi" |
227 | dma@100300 { | 227 | dma@100300 { |
228 | fsl,iommu-parent = <&pamu0>; | 228 | fsl,iommu-parent = <&pamu0>; |
229 | fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ | 229 | fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ |
230 | }; | 230 | }; |
231 | 231 | ||
232 | /include/ "qoriq-dma-1.dtsi" | 232 | /include/ "elo3-dma-1.dtsi" |
233 | dma@101300 { | 233 | dma@101300 { |
234 | fsl,iommu-parent = <&pamu0>; | 234 | fsl,iommu-parent = <&pamu0>; |
235 | fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ | 235 | fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ |
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
new file mode 100644
index 000000000000..3c210e0d5201
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-0.dtsi
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ] | ||
3 | * | ||
4 | * Copyright 2013 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | dma0: dma@100300 { | ||
36 | #address-cells = <1>; | ||
37 | #size-cells = <1>; | ||
38 | compatible = "fsl,elo3-dma"; | ||
39 | reg = <0x100300 0x4>, | ||
40 | <0x100600 0x4>; | ||
41 | ranges = <0x0 0x100100 0x500>; | ||
42 | dma-channel@0 { | ||
43 | compatible = "fsl,eloplus-dma-channel"; | ||
44 | reg = <0x0 0x80>; | ||
45 | interrupts = <28 2 0 0>; | ||
46 | }; | ||
47 | dma-channel@80 { | ||
48 | compatible = "fsl,eloplus-dma-channel"; | ||
49 | reg = <0x80 0x80>; | ||
50 | interrupts = <29 2 0 0>; | ||
51 | }; | ||
52 | dma-channel@100 { | ||
53 | compatible = "fsl,eloplus-dma-channel"; | ||
54 | reg = <0x100 0x80>; | ||
55 | interrupts = <30 2 0 0>; | ||
56 | }; | ||
57 | dma-channel@180 { | ||
58 | compatible = "fsl,eloplus-dma-channel"; | ||
59 | reg = <0x180 0x80>; | ||
60 | interrupts = <31 2 0 0>; | ||
61 | }; | ||
62 | dma-channel@300 { | ||
63 | compatible = "fsl,eloplus-dma-channel"; | ||
64 | reg = <0x300 0x80>; | ||
65 | interrupts = <76 2 0 0>; | ||
66 | }; | ||
67 | dma-channel@380 { | ||
68 | compatible = "fsl,eloplus-dma-channel"; | ||
69 | reg = <0x380 0x80>; | ||
70 | interrupts = <77 2 0 0>; | ||
71 | }; | ||
72 | dma-channel@400 { | ||
73 | compatible = "fsl,eloplus-dma-channel"; | ||
74 | reg = <0x400 0x80>; | ||
75 | interrupts = <78 2 0 0>; | ||
76 | }; | ||
77 | dma-channel@480 { | ||
78 | compatible = "fsl,eloplus-dma-channel"; | ||
79 | reg = <0x480 0x80>; | ||
80 | interrupts = <79 2 0 0>; | ||
81 | }; | ||
82 | }; | ||
diff --git a/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
new file mode 100644
index 000000000000..cccf3bb38224
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/elo3-dma-1.dtsi
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ] | ||
3 | * | ||
4 | * Copyright 2013 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | dma1: dma@101300 { | ||
36 | #address-cells = <1>; | ||
37 | #size-cells = <1>; | ||
38 | compatible = "fsl,elo3-dma"; | ||
39 | reg = <0x101300 0x4>, | ||
40 | <0x101600 0x4>; | ||
41 | ranges = <0x0 0x101100 0x500>; | ||
42 | dma-channel@0 { | ||
43 | compatible = "fsl,eloplus-dma-channel"; | ||
44 | reg = <0x0 0x80>; | ||
45 | interrupts = <32 2 0 0>; | ||
46 | }; | ||
47 | dma-channel@80 { | ||
48 | compatible = "fsl,eloplus-dma-channel"; | ||
49 | reg = <0x80 0x80>; | ||
50 | interrupts = <33 2 0 0>; | ||
51 | }; | ||
52 | dma-channel@100 { | ||
53 | compatible = "fsl,eloplus-dma-channel"; | ||
54 | reg = <0x100 0x80>; | ||
55 | interrupts = <34 2 0 0>; | ||
56 | }; | ||
57 | dma-channel@180 { | ||
58 | compatible = "fsl,eloplus-dma-channel"; | ||
59 | reg = <0x180 0x80>; | ||
60 | interrupts = <35 2 0 0>; | ||
61 | }; | ||
62 | dma-channel@300 { | ||
63 | compatible = "fsl,eloplus-dma-channel"; | ||
64 | reg = <0x300 0x80>; | ||
65 | interrupts = <80 2 0 0>; | ||
66 | }; | ||
67 | dma-channel@380 { | ||
68 | compatible = "fsl,eloplus-dma-channel"; | ||
69 | reg = <0x380 0x80>; | ||
70 | interrupts = <81 2 0 0>; | ||
71 | }; | ||
72 | dma-channel@400 { | ||
73 | compatible = "fsl,eloplus-dma-channel"; | ||
74 | reg = <0x400 0x80>; | ||
75 | interrupts = <82 2 0 0>; | ||
76 | }; | ||
77 | dma-channel@480 { | ||
78 | compatible = "fsl,eloplus-dma-channel"; | ||
79 | reg = <0x480 0x80>; | ||
80 | interrupts = <83 2 0 0>; | ||
81 | }; | ||
82 | }; | ||
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 510afa362de1..4143a9733cd0 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -387,8 +387,8 @@ | |||
387 | reg = <0xea000 0x4000>; | 387 | reg = <0xea000 0x4000>; |
388 | }; | 388 | }; |
389 | 389 | ||
390 | /include/ "qoriq-dma-0.dtsi" | 390 | /include/ "elo3-dma-0.dtsi" |
391 | /include/ "qoriq-dma-1.dtsi" | 391 | /include/ "elo3-dma-1.dtsi" |
392 | 392 | ||
393 | /include/ "qoriq-espi-0.dtsi" | 393 | /include/ "qoriq-espi-0.dtsi" |
394 | spi@110000 { | 394 | spi@110000 { |
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 9e62feffb374..f8c0b8dbeb75 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
50 | &dest, 1, &src, 1, len); | 50 | &dest, 1, &src, 1, len); |
51 | struct dma_device *device = chan ? chan->device : NULL; | 51 | struct dma_device *device = chan ? chan->device : NULL; |
52 | struct dma_async_tx_descriptor *tx = NULL; | 52 | struct dma_async_tx_descriptor *tx = NULL; |
53 | struct dmaengine_unmap_data *unmap = NULL; | ||
53 | 54 | ||
54 | if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { | 55 | if (device) |
55 | dma_addr_t dma_dest, dma_src; | 56 | unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO); |
57 | |||
58 | if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { | ||
56 | unsigned long dma_prep_flags = 0; | 59 | unsigned long dma_prep_flags = 0; |
57 | 60 | ||
58 | if (submit->cb_fn) | 61 | if (submit->cb_fn) |
59 | dma_prep_flags |= DMA_PREP_INTERRUPT; | 62 | dma_prep_flags |= DMA_PREP_INTERRUPT; |
60 | if (submit->flags & ASYNC_TX_FENCE) | 63 | if (submit->flags & ASYNC_TX_FENCE) |
61 | dma_prep_flags |= DMA_PREP_FENCE; | 64 | dma_prep_flags |= DMA_PREP_FENCE; |
62 | dma_dest = dma_map_page(device->dev, dest, dest_offset, len, | 65 | |
63 | DMA_FROM_DEVICE); | 66 | unmap->to_cnt = 1; |
64 | 67 | unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len, | |
65 | dma_src = dma_map_page(device->dev, src, src_offset, len, | 68 | DMA_TO_DEVICE); |
66 | DMA_TO_DEVICE); | 69 | unmap->from_cnt = 1; |
67 | 70 | unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len, | |
68 | tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, | 71 | DMA_FROM_DEVICE); |
69 | len, dma_prep_flags); | 72 | unmap->len = len; |
70 | if (!tx) { | 73 | |
71 | dma_unmap_page(device->dev, dma_dest, len, | 74 | tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], |
72 | DMA_FROM_DEVICE); | 75 | unmap->addr[0], len, |
73 | dma_unmap_page(device->dev, dma_src, len, | 76 | dma_prep_flags); |
74 | DMA_TO_DEVICE); | ||
75 | } | ||
76 | } | 77 | } |
77 | 78 | ||
78 | if (tx) { | 79 | if (tx) { |
79 | pr_debug("%s: (async) len: %zu\n", __func__, len); | 80 | pr_debug("%s: (async) len: %zu\n", __func__, len); |
81 | |||
82 | dma_set_unmap(tx, unmap); | ||
80 | async_tx_submit(chan, tx, submit); | 83 | async_tx_submit(chan, tx, submit); |
81 | } else { | 84 | } else { |
82 | void *dest_buf, *src_buf; | 85 | void *dest_buf, *src_buf; |
@@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, | |||
96 | async_tx_sync_epilog(submit); | 99 | async_tx_sync_epilog(submit); |
97 | } | 100 | } |
98 | 101 | ||
102 | dmaengine_unmap_put(unmap); | ||
103 | |||
99 | return tx; | 104 | return tx; |
100 | } | 105 | } |
101 | EXPORT_SYMBOL_GPL(async_memcpy); | 106 | EXPORT_SYMBOL_GPL(async_memcpy); |
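The conversion above is the template for the rest of the series: pages are mapped through a reference-counted struct dmaengine_unmap_data, the structure is attached to the descriptor with dma_set_unmap(), and the caller drops its own reference with dmaengine_unmap_put(). A minimal sketch of that pattern follows; the helper name is hypothetical, mapping-failure handling and channel selection are elided.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
sketch_memcpy_prep(struct dma_chan *chan, struct page *dest, struct page *src,
		   size_t len)
{
	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	/* one source plus one destination: two unmap slots */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (!unmap)
		return NULL;

	unmap->addr[0] = dma_map_page(device->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dest, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = len;

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					    len, DMA_PREP_INTERRUPT);
	if (tx)
		dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */

	dmaengine_unmap_put(unmap);		/* drop the caller's reference */
	return tx;
}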
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 91d5d385899e..d05327caf69d 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -46,49 +46,24 @@ static struct page *pq_scribble_page; | |||
46 | * do_async_gen_syndrome - asynchronously calculate P and/or Q | 46 | * do_async_gen_syndrome - asynchronously calculate P and/or Q |
47 | */ | 47 | */ |
48 | static __async_inline struct dma_async_tx_descriptor * | 48 | static __async_inline struct dma_async_tx_descriptor * |
49 | do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | 49 | do_async_gen_syndrome(struct dma_chan *chan, |
50 | const unsigned char *scfs, unsigned int offset, int disks, | 50 | const unsigned char *scfs, int disks, |
51 | size_t len, dma_addr_t *dma_src, | 51 | struct dmaengine_unmap_data *unmap, |
52 | enum dma_ctrl_flags dma_flags, | ||
52 | struct async_submit_ctl *submit) | 53 | struct async_submit_ctl *submit) |
53 | { | 54 | { |
54 | struct dma_async_tx_descriptor *tx = NULL; | 55 | struct dma_async_tx_descriptor *tx = NULL; |
55 | struct dma_device *dma = chan->device; | 56 | struct dma_device *dma = chan->device; |
56 | enum dma_ctrl_flags dma_flags = 0; | ||
57 | enum async_tx_flags flags_orig = submit->flags; | 57 | enum async_tx_flags flags_orig = submit->flags; |
58 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | 58 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; |
59 | dma_async_tx_callback cb_param_orig = submit->cb_param; | 59 | dma_async_tx_callback cb_param_orig = submit->cb_param; |
60 | int src_cnt = disks - 2; | 60 | int src_cnt = disks - 2; |
61 | unsigned char coefs[src_cnt]; | ||
62 | unsigned short pq_src_cnt; | 61 | unsigned short pq_src_cnt; |
63 | dma_addr_t dma_dest[2]; | 62 | dma_addr_t dma_dest[2]; |
64 | int src_off = 0; | 63 | int src_off = 0; |
65 | int idx; | ||
66 | int i; | ||
67 | 64 | ||
68 | /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ | 65 | if (submit->flags & ASYNC_TX_FENCE) |
69 | if (P(blocks, disks)) | 66 | dma_flags |= DMA_PREP_FENCE; |
70 | dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, | ||
71 | len, DMA_BIDIRECTIONAL); | ||
72 | else | ||
73 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
74 | if (Q(blocks, disks)) | ||
75 | dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, | ||
76 | len, DMA_BIDIRECTIONAL); | ||
77 | else | ||
78 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
79 | |||
80 | /* convert source addresses being careful to collapse 'empty' | ||
81 | * sources and update the coefficients accordingly | ||
82 | */ | ||
83 | for (i = 0, idx = 0; i < src_cnt; i++) { | ||
84 | if (blocks[i] == NULL) | ||
85 | continue; | ||
86 | dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, | ||
87 | DMA_TO_DEVICE); | ||
88 | coefs[idx] = scfs[i]; | ||
89 | idx++; | ||
90 | } | ||
91 | src_cnt = idx; | ||
92 | 67 | ||
93 | while (src_cnt > 0) { | 68 | while (src_cnt > 0) { |
94 | submit->flags = flags_orig; | 69 | submit->flags = flags_orig; |
@@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | |||
100 | if (src_cnt > pq_src_cnt) { | 75 | if (src_cnt > pq_src_cnt) { |
101 | submit->flags &= ~ASYNC_TX_ACK; | 76 | submit->flags &= ~ASYNC_TX_ACK; |
102 | submit->flags |= ASYNC_TX_FENCE; | 77 | submit->flags |= ASYNC_TX_FENCE; |
103 | dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; | ||
104 | submit->cb_fn = NULL; | 78 | submit->cb_fn = NULL; |
105 | submit->cb_param = NULL; | 79 | submit->cb_param = NULL; |
106 | } else { | 80 | } else { |
107 | dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; | ||
108 | submit->cb_fn = cb_fn_orig; | 81 | submit->cb_fn = cb_fn_orig; |
109 | submit->cb_param = cb_param_orig; | 82 | submit->cb_param = cb_param_orig; |
110 | if (cb_fn_orig) | 83 | if (cb_fn_orig) |
111 | dma_flags |= DMA_PREP_INTERRUPT; | 84 | dma_flags |= DMA_PREP_INTERRUPT; |
112 | } | 85 | } |
113 | if (submit->flags & ASYNC_TX_FENCE) | ||
114 | dma_flags |= DMA_PREP_FENCE; | ||
115 | 86 | ||
116 | /* Since we have clobbered the src_list we are committed | 87 | /* Drivers force forward progress in case they can not provide |
117 | * to doing this asynchronously. Drivers force forward | 88 | * a descriptor |
118 | * progress in case they can not provide a descriptor | ||
119 | */ | 89 | */ |
120 | for (;;) { | 90 | for (;;) { |
91 | dma_dest[0] = unmap->addr[disks - 2]; | ||
92 | dma_dest[1] = unmap->addr[disks - 1]; | ||
121 | tx = dma->device_prep_dma_pq(chan, dma_dest, | 93 | tx = dma->device_prep_dma_pq(chan, dma_dest, |
122 | &dma_src[src_off], | 94 | &unmap->addr[src_off], |
123 | pq_src_cnt, | 95 | pq_src_cnt, |
124 | &coefs[src_off], len, | 96 | &scfs[src_off], unmap->len, |
125 | dma_flags); | 97 | dma_flags); |
126 | if (likely(tx)) | 98 | if (likely(tx)) |
127 | break; | 99 | break; |
@@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, | |||
129 | dma_async_issue_pending(chan); | 101 | dma_async_issue_pending(chan); |
130 | } | 102 | } |
131 | 103 | ||
104 | dma_set_unmap(tx, unmap); | ||
132 | async_tx_submit(chan, tx, submit); | 105 | async_tx_submit(chan, tx, submit); |
133 | submit->depend_tx = tx; | 106 | submit->depend_tx = tx; |
134 | 107 | ||
@@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | |||
188 | * set to NULL those buffers will be replaced with the raid6_zero_page | 161 | * set to NULL those buffers will be replaced with the raid6_zero_page |
189 | * in the synchronous path and omitted in the hardware-asynchronous | 162 | * in the synchronous path and omitted in the hardware-asynchronous |
190 | * path. | 163 | * path. |
191 | * | ||
192 | * 'blocks' note: if submit->scribble is NULL then the contents of | ||
193 | * 'blocks' may be overwritten to perform address conversions | ||
194 | * (dma_map_page() or page_address()). | ||
195 | */ | 164 | */ |
196 | struct dma_async_tx_descriptor * | 165 | struct dma_async_tx_descriptor * |
197 | async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | 166 | async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, |
@@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, | |||
202 | &P(blocks, disks), 2, | 171 | &P(blocks, disks), 2, |
203 | blocks, src_cnt, len); | 172 | blocks, src_cnt, len); |
204 | struct dma_device *device = chan ? chan->device : NULL; | 173 | struct dma_device *device = chan ? chan->device : NULL; |
205 | dma_addr_t *dma_src = NULL; | 174 | struct dmaengine_unmap_data *unmap = NULL; |
206 | 175 | ||
207 | BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); | 176 | BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); |
208 | 177 | ||
209 | if (submit->scribble) | 178 | if (device) |
210 | dma_src = submit->scribble; | 179 | unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); |
211 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
212 | dma_src = (dma_addr_t *) blocks; | ||
213 | 180 | ||
214 | if (dma_src && device && | 181 | if (unmap && |
215 | (src_cnt <= dma_maxpq(device, 0) || | 182 | (src_cnt <= dma_maxpq(device, 0) || |
216 | dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && | 183 | dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && |
217 | is_dma_pq_aligned(device, offset, 0, len)) { | 184 | is_dma_pq_aligned(device, offset, 0, len)) { |
185 | struct dma_async_tx_descriptor *tx; | ||
186 | enum dma_ctrl_flags dma_flags = 0; | ||
187 | unsigned char coefs[src_cnt]; | ||
188 | int i, j; | ||
189 | |||
218 | /* run the p+q asynchronously */ | 190 | /* run the p+q asynchronously */ |
219 | pr_debug("%s: (async) disks: %d len: %zu\n", | 191 | pr_debug("%s: (async) disks: %d len: %zu\n", |
220 | __func__, disks, len); | 192 | __func__, disks, len); |
221 | return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, | 193 | |
222 | disks, len, dma_src, submit); | 194 | /* convert source addresses being careful to collapse 'empty' |
195 | * sources and update the coefficients accordingly | ||
196 | */ | ||
197 | unmap->len = len; | ||
198 | for (i = 0, j = 0; i < src_cnt; i++) { | ||
199 | if (blocks[i] == NULL) | ||
200 | continue; | ||
201 | unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset, | ||
202 | len, DMA_TO_DEVICE); | ||
203 | coefs[j] = raid6_gfexp[i]; | ||
204 | unmap->to_cnt++; | ||
205 | j++; | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * DMAs use destinations as sources, | ||
210 | * so use BIDIRECTIONAL mapping | ||
211 | */ | ||
212 | unmap->bidi_cnt++; | ||
213 | if (P(blocks, disks)) | ||
214 | unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), | ||
215 | offset, len, DMA_BIDIRECTIONAL); | ||
216 | else { | ||
217 | unmap->addr[j++] = 0; | ||
218 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | ||
219 | } | ||
220 | |||
221 | unmap->bidi_cnt++; | ||
222 | if (Q(blocks, disks)) | ||
223 | unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), | ||
224 | offset, len, DMA_BIDIRECTIONAL); | ||
225 | else { | ||
226 | unmap->addr[j++] = 0; | ||
227 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | ||
228 | } | ||
229 | |||
230 | tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); | ||
231 | dmaengine_unmap_put(unmap); | ||
232 | return tx; | ||
223 | } | 233 | } |
224 | 234 | ||
235 | dmaengine_unmap_put(unmap); | ||
236 | |||
225 | /* run the pq synchronously */ | 237 | /* run the pq synchronously */ |
226 | pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); | 238 | pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); |
227 | 239 | ||
@@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, | |||
277 | struct dma_async_tx_descriptor *tx; | 289 | struct dma_async_tx_descriptor *tx; |
278 | unsigned char coefs[disks-2]; | 290 | unsigned char coefs[disks-2]; |
279 | enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; | 291 | enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; |
280 | dma_addr_t *dma_src = NULL; | 292 | struct dmaengine_unmap_data *unmap = NULL; |
281 | int src_cnt = 0; | ||
282 | 293 | ||
283 | BUG_ON(disks < 4); | 294 | BUG_ON(disks < 4); |
284 | 295 | ||
285 | if (submit->scribble) | 296 | if (device) |
286 | dma_src = submit->scribble; | 297 | unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO); |
287 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
288 | dma_src = (dma_addr_t *) blocks; | ||
289 | 298 | ||
290 | if (dma_src && device && disks <= dma_maxpq(device, 0) && | 299 | if (unmap && disks <= dma_maxpq(device, 0) && |
291 | is_dma_pq_aligned(device, offset, 0, len)) { | 300 | is_dma_pq_aligned(device, offset, 0, len)) { |
292 | struct device *dev = device->dev; | 301 | struct device *dev = device->dev; |
293 | dma_addr_t *pq = &dma_src[disks-2]; | 302 | dma_addr_t pq[2]; |
294 | int i; | 303 | int i, j = 0, src_cnt = 0; |
295 | 304 | ||
296 | pr_debug("%s: (async) disks: %d len: %zu\n", | 305 | pr_debug("%s: (async) disks: %d len: %zu\n", |
297 | __func__, disks, len); | 306 | __func__, disks, len); |
298 | if (!P(blocks, disks)) | 307 | |
308 | unmap->len = len; | ||
309 | for (i = 0; i < disks-2; i++) | ||
310 | if (likely(blocks[i])) { | ||
311 | unmap->addr[j] = dma_map_page(dev, blocks[i], | ||
312 | offset, len, | ||
313 | DMA_TO_DEVICE); | ||
314 | coefs[j] = raid6_gfexp[i]; | ||
315 | unmap->to_cnt++; | ||
316 | src_cnt++; | ||
317 | j++; | ||
318 | } | ||
319 | |||
320 | if (!P(blocks, disks)) { | ||
321 | pq[0] = 0; | ||
299 | dma_flags |= DMA_PREP_PQ_DISABLE_P; | 322 | dma_flags |= DMA_PREP_PQ_DISABLE_P; |
300 | else | 323 | } else { |
301 | pq[0] = dma_map_page(dev, P(blocks, disks), | 324 | pq[0] = dma_map_page(dev, P(blocks, disks), |
302 | offset, len, | 325 | offset, len, |
303 | DMA_TO_DEVICE); | 326 | DMA_TO_DEVICE); |
304 | if (!Q(blocks, disks)) | 327 | unmap->addr[j++] = pq[0]; |
328 | unmap->to_cnt++; | ||
329 | } | ||
330 | if (!Q(blocks, disks)) { | ||
331 | pq[1] = 0; | ||
305 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; | 332 | dma_flags |= DMA_PREP_PQ_DISABLE_Q; |
306 | else | 333 | } else { |
307 | pq[1] = dma_map_page(dev, Q(blocks, disks), | 334 | pq[1] = dma_map_page(dev, Q(blocks, disks), |
308 | offset, len, | 335 | offset, len, |
309 | DMA_TO_DEVICE); | 336 | DMA_TO_DEVICE); |
337 | unmap->addr[j++] = pq[1]; | ||
338 | unmap->to_cnt++; | ||
339 | } | ||
310 | 340 | ||
311 | if (submit->flags & ASYNC_TX_FENCE) | 341 | if (submit->flags & ASYNC_TX_FENCE) |
312 | dma_flags |= DMA_PREP_FENCE; | 342 | dma_flags |= DMA_PREP_FENCE; |
313 | for (i = 0; i < disks-2; i++) | ||
314 | if (likely(blocks[i])) { | ||
315 | dma_src[src_cnt] = dma_map_page(dev, blocks[i], | ||
316 | offset, len, | ||
317 | DMA_TO_DEVICE); | ||
318 | coefs[src_cnt] = raid6_gfexp[i]; | ||
319 | src_cnt++; | ||
320 | } | ||
321 | |||
322 | for (;;) { | 343 | for (;;) { |
323 | tx = device->device_prep_dma_pq_val(chan, pq, dma_src, | 344 | tx = device->device_prep_dma_pq_val(chan, pq, |
345 | unmap->addr, | ||
324 | src_cnt, | 346 | src_cnt, |
325 | coefs, | 347 | coefs, |
326 | len, pqres, | 348 | len, pqres, |
@@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, | |||
330 | async_tx_quiesce(&submit->depend_tx); | 352 | async_tx_quiesce(&submit->depend_tx); |
331 | dma_async_issue_pending(chan); | 353 | dma_async_issue_pending(chan); |
332 | } | 354 | } |
355 | |||
356 | dma_set_unmap(tx, unmap); | ||
333 | async_tx_submit(chan, tx, submit); | 357 | async_tx_submit(chan, tx, submit); |
334 | 358 | ||
335 | return tx; | 359 | return tx; |
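For the P+Q path the same unmap structure carries both the read-only sources and the bidirectionally mapped P/Q destinations. The sketch below shows that arrangement for the simple case where both P and Q pages exist and the source count fits one descriptor; names are illustrative, error handling is elided, and a caller missing P or Q would instead store 0 and set DMA_PREP_PQ_DISABLE_P/Q as the code above does.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>

static struct dma_async_tx_descriptor *
sketch_pq_prep(struct dma_chan *chan, struct page **blocks, int disks,
	       size_t len)
{
	struct dma_device *dma = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks - 2];
	dma_addr_t dma_dest[2];
	int i;

	unmap = dmaengine_get_unmap_data(dma->dev, disks, GFP_NOIO);
	if (!unmap)
		return NULL;
	unmap->len = len;

	/* sources are only read by the engine */
	for (i = 0; i < disks - 2; i++) {
		unmap->addr[i] = dma_map_page(dma->dev, blocks[i], 0, len,
					      DMA_TO_DEVICE);
		coefs[i] = raid6_gfexp[i];
		unmap->to_cnt++;
	}

	/* DMAs use destinations as sources, so map P and Q bidirectionally */
	unmap->addr[disks - 2] = dma_map_page(dma->dev, blocks[disks - 2], 0,
					      len, DMA_BIDIRECTIONAL);
	unmap->addr[disks - 1] = dma_map_page(dma->dev, blocks[disks - 1], 0,
					      len, DMA_BIDIRECTIONAL);
	unmap->bidi_cnt = 2;

	dma_dest[0] = unmap->addr[disks - 2];	/* P */
	dma_dest[1] = unmap->addr[disks - 1];	/* Q */

	tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, disks - 2,
				     coefs, len, DMA_PREP_INTERRUPT);
	if (tx)
		dma_set_unmap(tx, unmap);
	dmaengine_unmap_put(unmap);
	return tx;
}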
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index a9f08a6a582e..934a84981495 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | #include <linux/raid/pq.h> | 27 | #include <linux/raid/pq.h> |
28 | #include <linux/async_tx.h> | 28 | #include <linux/async_tx.h> |
29 | #include <linux/dmaengine.h> | ||
29 | 30 | ||
30 | static struct dma_async_tx_descriptor * | 31 | static struct dma_async_tx_descriptor * |
31 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | 32 | async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, |
@@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, | |||
34 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | 35 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, |
35 | &dest, 1, srcs, 2, len); | 36 | &dest, 1, srcs, 2, len); |
36 | struct dma_device *dma = chan ? chan->device : NULL; | 37 | struct dma_device *dma = chan ? chan->device : NULL; |
38 | struct dmaengine_unmap_data *unmap = NULL; | ||
37 | const u8 *amul, *bmul; | 39 | const u8 *amul, *bmul; |
38 | u8 ax, bx; | 40 | u8 ax, bx; |
39 | u8 *a, *b, *c; | 41 | u8 *a, *b, *c; |
40 | 42 | ||
41 | if (dma) { | 43 | if (dma) |
42 | dma_addr_t dma_dest[2]; | 44 | unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); |
43 | dma_addr_t dma_src[2]; | 45 | |
46 | if (unmap) { | ||
44 | struct device *dev = dma->dev; | 47 | struct device *dev = dma->dev; |
48 | dma_addr_t pq[2]; | ||
45 | struct dma_async_tx_descriptor *tx; | 49 | struct dma_async_tx_descriptor *tx; |
46 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | 50 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; |
47 | 51 | ||
48 | if (submit->flags & ASYNC_TX_FENCE) | 52 | if (submit->flags & ASYNC_TX_FENCE) |
49 | dma_flags |= DMA_PREP_FENCE; | 53 | dma_flags |= DMA_PREP_FENCE; |
50 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | 54 | unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); |
51 | dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); | 55 | unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); |
52 | dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); | 56 | unmap->to_cnt = 2; |
53 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, | 57 | |
58 | unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | ||
59 | unmap->bidi_cnt = 1; | ||
60 | /* engine only looks at Q, but expects it to follow P */ | ||
61 | pq[1] = unmap->addr[2]; | ||
62 | |||
63 | unmap->len = len; | ||
64 | tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, | ||
54 | len, dma_flags); | 65 | len, dma_flags); |
55 | if (tx) { | 66 | if (tx) { |
67 | dma_set_unmap(tx, unmap); | ||
56 | async_tx_submit(chan, tx, submit); | 68 | async_tx_submit(chan, tx, submit); |
69 | dmaengine_unmap_put(unmap); | ||
57 | return tx; | 70 | return tx; |
58 | } | 71 | } |
59 | 72 | ||
60 | /* could not get a descriptor, unmap and fall through to | 73 | /* could not get a descriptor, unmap and fall through to |
61 | * the synchronous path | 74 | * the synchronous path |
62 | */ | 75 | */ |
63 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | 76 | dmaengine_unmap_put(unmap); |
64 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
65 | dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE); | ||
66 | } | 77 | } |
67 | 78 | ||
68 | /* run the operation synchronously */ | 79 | /* run the operation synchronously */ |
@@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | |||
89 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, | 100 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, |
90 | &dest, 1, &src, 1, len); | 101 | &dest, 1, &src, 1, len); |
91 | struct dma_device *dma = chan ? chan->device : NULL; | 102 | struct dma_device *dma = chan ? chan->device : NULL; |
103 | struct dmaengine_unmap_data *unmap = NULL; | ||
92 | const u8 *qmul; /* Q multiplier table */ | 104 | const u8 *qmul; /* Q multiplier table */ |
93 | u8 *d, *s; | 105 | u8 *d, *s; |
94 | 106 | ||
95 | if (dma) { | 107 | if (dma) |
108 | unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO); | ||
109 | |||
110 | if (unmap) { | ||
96 | dma_addr_t dma_dest[2]; | 111 | dma_addr_t dma_dest[2]; |
97 | dma_addr_t dma_src[1]; | ||
98 | struct device *dev = dma->dev; | 112 | struct device *dev = dma->dev; |
99 | struct dma_async_tx_descriptor *tx; | 113 | struct dma_async_tx_descriptor *tx; |
100 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; | 114 | enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; |
101 | 115 | ||
102 | if (submit->flags & ASYNC_TX_FENCE) | 116 | if (submit->flags & ASYNC_TX_FENCE) |
103 | dma_flags |= DMA_PREP_FENCE; | 117 | dma_flags |= DMA_PREP_FENCE; |
104 | dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); | 118 | unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); |
105 | dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); | 119 | unmap->to_cnt++; |
106 | tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, | 120 | unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); |
107 | len, dma_flags); | 121 | dma_dest[1] = unmap->addr[1]; |
122 | unmap->bidi_cnt++; | ||
123 | unmap->len = len; | ||
124 | |||
125 | /* this looks funny, but the engine looks for Q at | ||
126 | * dma_dest[1] and ignores dma_dest[0] as a dest | ||
127 | * due to DMA_PREP_PQ_DISABLE_P | ||
128 | */ | ||
129 | tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, | ||
130 | 1, &coef, len, dma_flags); | ||
131 | |||
108 | if (tx) { | 132 | if (tx) { |
133 | dma_set_unmap(tx, unmap); | ||
134 | dmaengine_unmap_put(unmap); | ||
109 | async_tx_submit(chan, tx, submit); | 135 | async_tx_submit(chan, tx, submit); |
110 | return tx; | 136 | return tx; |
111 | } | 137 | } |
@@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, | |||
113 | /* could not get a descriptor, unmap and fall through to | 139 | /* could not get a descriptor, unmap and fall through to |
114 | * the synchronous path | 140 | * the synchronous path |
115 | */ | 141 | */ |
116 | dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL); | 142 | dmaengine_unmap_put(unmap); |
117 | dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE); | ||
118 | } | 143 | } |
119 | 144 | ||
120 | /* no channel available, or failed to allocate a descriptor, so | 145 | /* no channel available, or failed to allocate a descriptor, so |
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 7be34248b450..39ea4791a3c9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, | |||
128 | } | 128 | } |
129 | device->device_issue_pending(chan); | 129 | device->device_issue_pending(chan); |
130 | } else { | 130 | } else { |
131 | if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS) | 131 | if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE) |
132 | panic("%s: DMA error waiting for depend_tx\n", | 132 | panic("%s: DMA error waiting for depend_tx\n", |
133 | __func__); | 133 | __func__); |
134 | tx->tx_submit(tx); | 134 | tx->tx_submit(tx); |
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) | |||
280 | * we are referring to the correct operation | 280 | * we are referring to the correct operation |
281 | */ | 281 | */ |
282 | BUG_ON(async_tx_test_ack(*tx)); | 282 | BUG_ON(async_tx_test_ack(*tx)); |
283 | if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS) | 283 | if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE) |
284 | panic("%s: DMA error waiting for transaction\n", | 284 | panic("%s: DMA error waiting for transaction\n", |
285 | __func__); | 285 | __func__); |
286 | async_tx_ack(*tx); | 286 | async_tx_ack(*tx); |
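These hunks reflect the tree-wide rename of DMA_SUCCESS to DMA_COMPLETE. A dmaengine client that polls for completion now compares against the new name, for example in the following hedged sketch (not code from this patch; the helper name is hypothetical).

#include <linux/dmaengine.h>

static bool sketch_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	/* DMA_COMPLETE is the status value formerly spelled DMA_SUCCESS */
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_COMPLETE;
}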
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 8ade0a0481c6..3c562f5a60bb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,48 +33,31 @@ | |||
33 | 33 | ||
34 | /* do_async_xor - dma map the pages and perform the xor with an engine */ | 34 | /* do_async_xor - dma map the pages and perform the xor with an engine */ |
35 | static __async_inline struct dma_async_tx_descriptor * | 35 | static __async_inline struct dma_async_tx_descriptor * |
36 | do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | 36 | do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, |
37 | unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src, | ||
38 | struct async_submit_ctl *submit) | 37 | struct async_submit_ctl *submit) |
39 | { | 38 | { |
40 | struct dma_device *dma = chan->device; | 39 | struct dma_device *dma = chan->device; |
41 | struct dma_async_tx_descriptor *tx = NULL; | 40 | struct dma_async_tx_descriptor *tx = NULL; |
42 | int src_off = 0; | ||
43 | int i; | ||
44 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; | 41 | dma_async_tx_callback cb_fn_orig = submit->cb_fn; |
45 | void *cb_param_orig = submit->cb_param; | 42 | void *cb_param_orig = submit->cb_param; |
46 | enum async_tx_flags flags_orig = submit->flags; | 43 | enum async_tx_flags flags_orig = submit->flags; |
47 | enum dma_ctrl_flags dma_flags; | 44 | enum dma_ctrl_flags dma_flags = 0; |
48 | int xor_src_cnt = 0; | 45 | int src_cnt = unmap->to_cnt; |
49 | dma_addr_t dma_dest; | 46 | int xor_src_cnt; |
50 | 47 | dma_addr_t dma_dest = unmap->addr[unmap->to_cnt]; | |
51 | /* map the dest bidirectional in case it is re-used as a source */ | ||
52 | dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL); | ||
53 | for (i = 0; i < src_cnt; i++) { | ||
54 | /* only map the dest once */ | ||
55 | if (!src_list[i]) | ||
56 | continue; | ||
57 | if (unlikely(src_list[i] == dest)) { | ||
58 | dma_src[xor_src_cnt++] = dma_dest; | ||
59 | continue; | ||
60 | } | ||
61 | dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset, | ||
62 | len, DMA_TO_DEVICE); | ||
63 | } | ||
64 | src_cnt = xor_src_cnt; | ||
65 | 49 | ||
66 | while (src_cnt) { | 50 | while (src_cnt) { |
51 | dma_addr_t tmp; | ||
52 | |||
67 | submit->flags = flags_orig; | 53 | submit->flags = flags_orig; |
68 | dma_flags = 0; | ||
69 | xor_src_cnt = min(src_cnt, (int)dma->max_xor); | 54 | xor_src_cnt = min(src_cnt, (int)dma->max_xor); |
70 | /* if we are submitting additional xors, leave the chain open, | 55 | /* if we are submitting additional xors, leave the chain open |
71 | * clear the callback parameters, and leave the destination | 56 | * and clear the callback parameters |
72 | * buffer mapped | ||
73 | */ | 57 | */ |
74 | if (src_cnt > xor_src_cnt) { | 58 | if (src_cnt > xor_src_cnt) { |
75 | submit->flags &= ~ASYNC_TX_ACK; | 59 | submit->flags &= ~ASYNC_TX_ACK; |
76 | submit->flags |= ASYNC_TX_FENCE; | 60 | submit->flags |= ASYNC_TX_FENCE; |
77 | dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; | ||
78 | submit->cb_fn = NULL; | 61 | submit->cb_fn = NULL; |
79 | submit->cb_param = NULL; | 62 | submit->cb_param = NULL; |
80 | } else { | 63 | } else { |
@@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
85 | dma_flags |= DMA_PREP_INTERRUPT; | 68 | dma_flags |= DMA_PREP_INTERRUPT; |
86 | if (submit->flags & ASYNC_TX_FENCE) | 69 | if (submit->flags & ASYNC_TX_FENCE) |
87 | dma_flags |= DMA_PREP_FENCE; | 70 | dma_flags |= DMA_PREP_FENCE; |
88 | /* Since we have clobbered the src_list we are committed | 71 | |
89 | * to doing this asynchronously. Drivers force forward progress | 72 | /* Drivers force forward progress in case they can not provide a |
90 | * in case they can not provide a descriptor | 73 | * descriptor |
91 | */ | 74 | */ |
92 | tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off], | 75 | tmp = src_list[0]; |
93 | xor_src_cnt, len, dma_flags); | 76 | if (src_list > unmap->addr) |
77 | src_list[0] = dma_dest; | ||
78 | tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, | ||
79 | xor_src_cnt, unmap->len, | ||
80 | dma_flags); | ||
81 | src_list[0] = tmp; | ||
82 | |||
94 | 83 | ||
95 | if (unlikely(!tx)) | 84 | if (unlikely(!tx)) |
96 | async_tx_quiesce(&submit->depend_tx); | 85 | async_tx_quiesce(&submit->depend_tx); |
@@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, | |||
99 | while (unlikely(!tx)) { | 88 | while (unlikely(!tx)) { |
100 | dma_async_issue_pending(chan); | 89 | dma_async_issue_pending(chan); |
101 | tx = dma->device_prep_dma_xor(chan, dma_dest, | 90 | tx = dma->device_prep_dma_xor(chan, dma_dest, |
102 | &dma_src[src_off], | 91 | src_list, |
103 | xor_src_cnt, len, | 92 | xor_src_cnt, unmap->len, |
104 | dma_flags); | 93 | dma_flags); |
105 | } | 94 | } |
106 | 95 | ||
96 | dma_set_unmap(tx, unmap); | ||
107 | async_tx_submit(chan, tx, submit); | 97 | async_tx_submit(chan, tx, submit); |
108 | submit->depend_tx = tx; | 98 | submit->depend_tx = tx; |
109 | 99 | ||
110 | if (src_cnt > xor_src_cnt) { | 100 | if (src_cnt > xor_src_cnt) { |
111 | /* drop completed sources */ | 101 | /* drop completed sources */ |
112 | src_cnt -= xor_src_cnt; | 102 | src_cnt -= xor_src_cnt; |
113 | src_off += xor_src_cnt; | ||
114 | |||
115 | /* use the intermediate result as a source */ | 103 | /* use the intermediate result as a source */ |
116 | dma_src[--src_off] = dma_dest; | ||
117 | src_cnt++; | 104 | src_cnt++; |
105 | src_list += xor_src_cnt - 1; | ||
118 | } else | 106 | } else |
119 | break; | 107 | break; |
120 | } | 108 | } |
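The rewritten continuation logic in do_async_xor() is easier to follow with concrete numbers. The stand-alone simulation below reproduces the window arithmetic only: max_xor and src_cnt are made-up values, and the kernel code walks unmap->addr rather than integer indices, but the bookkeeping (drop the consumed sources, count the intermediate result as a new source, advance by xor_src_cnt - 1) is the same.

#include <stdio.h>

int main(void)
{
	int max_xor = 8;	/* assumed engine limit (dma->max_xor) */
	int src_cnt = 20;	/* assumed number of xor sources */
	int first = 0;		/* index of the first source used by this op */
	int pass = 0;

	while (src_cnt) {
		int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

		printf("op %d: %d inputs starting at source %d%s\n",
		       pass, xor_src_cnt, first,
		       pass ? " (slot 0 holds the intermediate result)" : "");

		if (src_cnt <= xor_src_cnt)
			break;

		src_cnt -= xor_src_cnt;		/* drop completed sources */
		src_cnt++;			/* intermediate result becomes a source */
		first += xor_src_cnt - 1;	/* advance past the consumed sources */
		pass++;
	}
	return 0;
}

For 20 sources and max_xor = 8 this prints three operations of 8, 8 and 6 inputs, with each continuation re-reading the destination as its first input.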
@@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, | |||
189 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, | 177 | struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR, |
190 | &dest, 1, src_list, | 178 | &dest, 1, src_list, |
191 | src_cnt, len); | 179 | src_cnt, len); |
192 | dma_addr_t *dma_src = NULL; | 180 | struct dma_device *device = chan ? chan->device : NULL; |
181 | struct dmaengine_unmap_data *unmap = NULL; | ||
193 | 182 | ||
194 | BUG_ON(src_cnt <= 1); | 183 | BUG_ON(src_cnt <= 1); |
195 | 184 | ||
196 | if (submit->scribble) | 185 | if (device) |
197 | dma_src = submit->scribble; | 186 | unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO); |
198 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | 187 | |
199 | dma_src = (dma_addr_t *) src_list; | 188 | if (unmap && is_dma_xor_aligned(device, offset, 0, len)) { |
189 | struct dma_async_tx_descriptor *tx; | ||
190 | int i, j; | ||
200 | 191 | ||
201 | if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { | ||
202 | /* run the xor asynchronously */ | 192 | /* run the xor asynchronously */ |
203 | pr_debug("%s (async): len: %zu\n", __func__, len); | 193 | pr_debug("%s (async): len: %zu\n", __func__, len); |
204 | 194 | ||
205 | return do_async_xor(chan, dest, src_list, offset, src_cnt, len, | 195 | unmap->len = len; |
206 | dma_src, submit); | 196 | for (i = 0, j = 0; i < src_cnt; i++) { |
197 | if (!src_list[i]) | ||
198 | continue; | ||
199 | unmap->to_cnt++; | ||
200 | unmap->addr[j++] = dma_map_page(device->dev, src_list[i], | ||
201 | offset, len, DMA_TO_DEVICE); | ||
202 | } | ||
203 | |||
204 | /* map it bidirectional as it may be re-used as a source */ | ||
205 | unmap->addr[j] = dma_map_page(device->dev, dest, offset, len, | ||
206 | DMA_BIDIRECTIONAL); | ||
207 | unmap->bidi_cnt = 1; | ||
208 | |||
209 | tx = do_async_xor(chan, unmap, submit); | ||
210 | dmaengine_unmap_put(unmap); | ||
211 | return tx; | ||
207 | } else { | 212 | } else { |
213 | dmaengine_unmap_put(unmap); | ||
208 | /* run the xor synchronously */ | 214 | /* run the xor synchronously */ |
209 | pr_debug("%s (sync): len: %zu\n", __func__, len); | 215 | pr_debug("%s (sync): len: %zu\n", __func__, len); |
210 | WARN_ONCE(chan, "%s: no space for dma address conversion\n", | 216 | WARN_ONCE(chan, "%s: no space for dma address conversion\n", |
@@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, | |||
268 | struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); | 274 | struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len); |
269 | struct dma_device *device = chan ? chan->device : NULL; | 275 | struct dma_device *device = chan ? chan->device : NULL; |
270 | struct dma_async_tx_descriptor *tx = NULL; | 276 | struct dma_async_tx_descriptor *tx = NULL; |
271 | dma_addr_t *dma_src = NULL; | 277 | struct dmaengine_unmap_data *unmap = NULL; |
272 | 278 | ||
273 | BUG_ON(src_cnt <= 1); | 279 | BUG_ON(src_cnt <= 1); |
274 | 280 | ||
275 | if (submit->scribble) | 281 | if (device) |
276 | dma_src = submit->scribble; | 282 | unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO); |
277 | else if (sizeof(dma_addr_t) <= sizeof(struct page *)) | ||
278 | dma_src = (dma_addr_t *) src_list; | ||
279 | 283 | ||
280 | if (dma_src && device && src_cnt <= device->max_xor && | 284 | if (unmap && src_cnt <= device->max_xor && |
281 | is_dma_xor_aligned(device, offset, 0, len)) { | 285 | is_dma_xor_aligned(device, offset, 0, len)) { |
282 | unsigned long dma_prep_flags = 0; | 286 | unsigned long dma_prep_flags = 0; |
283 | int i; | 287 | int i; |
@@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, | |||
288 | dma_prep_flags |= DMA_PREP_INTERRUPT; | 292 | dma_prep_flags |= DMA_PREP_INTERRUPT; |
289 | if (submit->flags & ASYNC_TX_FENCE) | 293 | if (submit->flags & ASYNC_TX_FENCE) |
290 | dma_prep_flags |= DMA_PREP_FENCE; | 294 | dma_prep_flags |= DMA_PREP_FENCE; |
291 | for (i = 0; i < src_cnt; i++) | ||
292 | dma_src[i] = dma_map_page(device->dev, src_list[i], | ||
293 | offset, len, DMA_TO_DEVICE); | ||
294 | 295 | ||
295 | tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt, | 296 | for (i = 0; i < src_cnt; i++) { |
297 | unmap->addr[i] = dma_map_page(device->dev, src_list[i], | ||
298 | offset, len, DMA_TO_DEVICE); | ||
299 | unmap->to_cnt++; | ||
300 | } | ||
301 | unmap->len = len; | ||
302 | |||
303 | tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, | ||
296 | len, result, | 304 | len, result, |
297 | dma_prep_flags); | 305 | dma_prep_flags); |
298 | if (unlikely(!tx)) { | 306 | if (unlikely(!tx)) { |
@@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, | |||
301 | while (!tx) { | 309 | while (!tx) { |
302 | dma_async_issue_pending(chan); | 310 | dma_async_issue_pending(chan); |
303 | tx = device->device_prep_dma_xor_val(chan, | 311 | tx = device->device_prep_dma_xor_val(chan, |
304 | dma_src, src_cnt, len, result, | 312 | unmap->addr, src_cnt, len, result, |
305 | dma_prep_flags); | 313 | dma_prep_flags); |
306 | } | 314 | } |
307 | } | 315 | } |
308 | 316 | dma_set_unmap(tx, unmap); | |
309 | async_tx_submit(chan, tx, submit); | 317 | async_tx_submit(chan, tx, submit); |
310 | } else { | 318 | } else { |
311 | enum async_tx_flags flags_orig = submit->flags; | 319 | enum async_tx_flags flags_orig = submit->flags; |
@@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, | |||
327 | async_tx_sync_epilog(submit); | 335 | async_tx_sync_epilog(submit); |
328 | submit->flags = flags_orig; | 336 | submit->flags = flags_orig; |
329 | } | 337 | } |
338 | dmaengine_unmap_put(unmap); | ||
330 | 339 | ||
331 | return tx; | 340 | return tx; |
332 | } | 341 | } |
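Both async_xor() and async_xor_val() now follow the same descriptor/unmap lifetime: allocate the unmap data, record the mapped addresses in it, attach it to the descriptor, then drop the submitter's reference. The sketch below condenses that contract into one function; it is a generic dmaengine-client shape rather than the async_tx wrappers verbatim, the function name is made up, and offset handling plus the synchronous fallback are simplified.

/* Hedged sketch of the new submission pattern, not real kernel code. */
static int submit_xor_sketch(struct dma_chan *chan, struct page *dest,
			     struct page **srcs, int src_cnt, size_t len)
{
	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	int i;

	unmap = dmaengine_get_unmap_data(device->dev, src_cnt + 1, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;			/* caller would fall back to the sync path */

	unmap->len = len;
	for (i = 0; i < src_cnt; i++) {
		unmap->addr[i] = dma_map_page(device->dev, srcs[i], 0, len,
					      DMA_TO_DEVICE);
		unmap->to_cnt++;
	}
	/* destination last, bidirectional so it can be reused as a source */
	unmap->addr[i] = dma_map_page(device->dev, dest, 0, len,
				      DMA_BIDIRECTIONAL);
	unmap->bidi_cnt = 1;

	tx = device->device_prep_dma_xor(chan, unmap->addr[src_cnt], unmap->addr,
					 src_cnt, len, DMA_PREP_INTERRUPT);
	if (tx)
		dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */

	dmaengine_unmap_put(unmap);		/* drop the submitter's reference either way */
	return tx ? dmaengine_submit(tx) : -ENOMEM;
}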
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c index 4a92bac744dc..dad95f45b88f 100644 --- a/crypto/async_tx/raid6test.c +++ b/crypto/async_tx/raid6test.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #undef pr | 28 | #undef pr |
29 | #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) | 29 | #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) |
30 | 30 | ||
31 | #define NDISKS 16 /* Including P and Q */ | 31 | #define NDISKS 64 /* Including P and Q */ |
32 | 32 | ||
33 | static struct page *dataptrs[NDISKS]; | 33 | static struct page *dataptrs[NDISKS]; |
34 | static addr_conv_t addr_conv[NDISKS]; | 34 | static addr_conv_t addr_conv[NDISKS]; |
@@ -219,6 +219,14 @@ static int raid6_test(void) | |||
219 | err += test(11, &tests); | 219 | err += test(11, &tests); |
220 | err += test(12, &tests); | 220 | err += test(12, &tests); |
221 | } | 221 | } |
222 | |||
223 | /* the 24 disk case is special for ioatdma as it is the boundary point | ||
224 | * at which it needs to switch from 8-source ops to 16-source | ||
225 | * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set) | ||
226 | */ | ||
227 | if (NDISKS > 24) | ||
228 | err += test(24, &tests); | ||
229 | |||
222 | err += test(NDISKS, &tests); | 230 | err += test(NDISKS, &tests); |
223 | 231 | ||
224 | pr("\n"); | 232 | pr("\n"); |
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 853f610af28f..e88690ebfd82 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c | |||
@@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len) | |||
396 | struct dma_async_tx_descriptor *tx; | 396 | struct dma_async_tx_descriptor *tx; |
397 | struct dma_chan *chan = acdev->dma_chan; | 397 | struct dma_chan *chan = acdev->dma_chan; |
398 | dma_cookie_t cookie; | 398 | dma_cookie_t cookie; |
399 | unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | 399 | unsigned long flags = DMA_PREP_INTERRUPT; |
400 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
401 | int ret = 0; | 400 | int ret = 0; |
402 | 401 | ||
403 | tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); | 402 | tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index dd2874ec1927..446687cc2334 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -89,14 +89,15 @@ config AT_HDMAC | |||
89 | Support the Atmel AHB DMA controller. | 89 | Support the Atmel AHB DMA controller. |
90 | 90 | ||
91 | config FSL_DMA | 91 | config FSL_DMA |
92 | tristate "Freescale Elo and Elo Plus DMA support" | 92 | tristate "Freescale Elo series DMA support" |
93 | depends on FSL_SOC | 93 | depends on FSL_SOC |
94 | select DMA_ENGINE | 94 | select DMA_ENGINE |
95 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 95 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
96 | ---help--- | 96 | ---help--- |
97 | Enable support for the Freescale Elo and Elo Plus DMA controllers. | 97 | Enable support for the Freescale Elo series DMA controllers. |
98 | The Elo is the DMA controller on some 82xx and 83xx parts, and the | 98 | The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the |
99 | Elo Plus is the DMA controller on 85xx and 86xx parts. | 99 | EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on |
100 | some Txxx and Bxxx parts. | ||
100 | 101 | ||
101 | config MPC512X_DMA | 102 | config MPC512X_DMA |
102 | tristate "Freescale MPC512x built-in DMA engine support" | 103 | tristate "Freescale MPC512x built-in DMA engine support" |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e51a9832ef0d..16a2aa28f856 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x, | |||
1164 | kfree(txd); | 1164 | kfree(txd); |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | static void pl08x_unmap_buffers(struct pl08x_txd *txd) | ||
1168 | { | ||
1169 | struct device *dev = txd->vd.tx.chan->device->dev; | ||
1170 | struct pl08x_sg *dsg; | ||
1171 | |||
1172 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1173 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
1174 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1175 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | ||
1176 | DMA_TO_DEVICE); | ||
1177 | else { | ||
1178 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1179 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
1180 | DMA_TO_DEVICE); | ||
1181 | } | ||
1182 | } | ||
1183 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1184 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
1185 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1186 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
1187 | DMA_FROM_DEVICE); | ||
1188 | else | ||
1189 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
1190 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
1191 | DMA_FROM_DEVICE); | ||
1192 | } | ||
1193 | } | ||
1194 | |||
1195 | static void pl08x_desc_free(struct virt_dma_desc *vd) | 1167 | static void pl08x_desc_free(struct virt_dma_desc *vd) |
1196 | { | 1168 | { |
1197 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); | 1169 | struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); |
1198 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); | 1170 | struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan); |
1199 | 1171 | ||
1200 | if (!plchan->slave) | 1172 | dma_descriptor_unmap(txd); |
1201 | pl08x_unmap_buffers(txd); | ||
1202 | |||
1203 | if (!txd->done) | 1173 | if (!txd->done) |
1204 | pl08x_release_mux(plchan); | 1174 | pl08x_release_mux(plchan); |
1205 | 1175 | ||
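pl08x (and at_hdmac further below) can delete their hand-rolled unmap loops because the unmap bookkeeping now travels with the descriptor itself. dma_descriptor_unmap() lives in dmaengine.h rather than in this hunk; conceptually it is expected to amount to roughly the following, shown here only as a sketch rather than the header's verbatim code.

static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);	/* runs the deferred dma_unmap_page() calls */
		tx->unmap = NULL;
	}
}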
@@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | |||
1252 | size_t bytes = 0; | 1222 | size_t bytes = 0; |
1253 | 1223 | ||
1254 | ret = dma_cookie_status(chan, cookie, txstate); | 1224 | ret = dma_cookie_status(chan, cookie, txstate); |
1255 | if (ret == DMA_SUCCESS) | 1225 | if (ret == DMA_COMPLETE) |
1256 | return ret; | 1226 | return ret; |
1257 | 1227 | ||
1258 | /* | 1228 | /* |
@@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, | |||
1267 | 1237 | ||
1268 | spin_lock_irqsave(&plchan->vc.lock, flags); | 1238 | spin_lock_irqsave(&plchan->vc.lock, flags); |
1269 | ret = dma_cookie_status(chan, cookie, txstate); | 1239 | ret = dma_cookie_status(chan, cookie, txstate); |
1270 | if (ret != DMA_SUCCESS) { | 1240 | if (ret != DMA_COMPLETE) { |
1271 | vd = vchan_find_desc(&plchan->vc, cookie); | 1241 | vd = vchan_find_desc(&plchan->vc, cookie); |
1272 | if (vd) { | 1242 | if (vd) { |
1273 | /* On the issued list, so hasn't been processed yet */ | 1243 | /* On the issued list, so hasn't been processed yet */ |
@@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) | |||
2138 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | 2108 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); |
2139 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | 2109 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); |
2140 | 2110 | ||
2141 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | 2111 | ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); |
2142 | DRIVER_NAME, pl08x); | ||
2143 | if (ret) { | 2112 | if (ret) { |
2144 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | 2113 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", |
2145 | __func__, adev->irq[0]); | 2114 | __func__, adev->irq[0]); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c787f38a186a..e2c04dc81e2a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) | |||
344 | /* move myself to free_list */ | 344 | /* move myself to free_list */ |
345 | list_move(&desc->desc_node, &atchan->free_list); | 345 | list_move(&desc->desc_node, &atchan->free_list); |
346 | 346 | ||
347 | /* unmap dma addresses (not on slave channels) */ | 347 | dma_descriptor_unmap(txd); |
348 | if (!atchan->chan_common.private) { | ||
349 | struct device *parent = chan2parent(&atchan->chan_common); | ||
350 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
351 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
352 | dma_unmap_single(parent, | ||
353 | desc->lli.daddr, | ||
354 | desc->len, DMA_FROM_DEVICE); | ||
355 | else | ||
356 | dma_unmap_page(parent, | ||
357 | desc->lli.daddr, | ||
358 | desc->len, DMA_FROM_DEVICE); | ||
359 | } | ||
360 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
361 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
362 | dma_unmap_single(parent, | ||
363 | desc->lli.saddr, | ||
364 | desc->len, DMA_TO_DEVICE); | ||
365 | else | ||
366 | dma_unmap_page(parent, | ||
367 | desc->lli.saddr, | ||
368 | desc->len, DMA_TO_DEVICE); | ||
369 | } | ||
370 | } | ||
371 | |||
372 | /* for cyclic transfers, | 348 | /* for cyclic transfers, |
373 | * no need to replay callback function while stopping */ | 349 | * no need to replay callback function while stopping */ |
374 | if (!atc_chan_is_cyclic(atchan)) { | 350 | if (!atc_chan_is_cyclic(atchan)) { |
@@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, | |||
1102 | int bytes = 0; | 1078 | int bytes = 0; |
1103 | 1079 | ||
1104 | ret = dma_cookie_status(chan, cookie, txstate); | 1080 | ret = dma_cookie_status(chan, cookie, txstate); |
1105 | if (ret == DMA_SUCCESS) | 1081 | if (ret == DMA_COMPLETE) |
1106 | return ret; | 1082 | return ret; |
1107 | /* | 1083 | /* |
1108 | * There's no point calculating the residue if there's | 1084 | * There's no point calculating the residue if there's |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26fc..3c6716e0b78e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
2369 | enum dma_status ret; | 2369 | enum dma_status ret; |
2370 | 2370 | ||
2371 | ret = dma_cookie_status(chan, cookie, txstate); | 2371 | ret = dma_cookie_status(chan, cookie, txstate); |
2372 | if (ret == DMA_SUCCESS) | 2372 | if (ret == DMA_COMPLETE) |
2373 | return ret; | 2373 | return ret; |
2374 | 2374 | ||
2375 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); | 2375 | dma_set_residue(txstate, coh901318_get_bytes_left(chan)); |
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
2694 | if (irq < 0) | 2694 | if (irq < 0) |
2695 | return irq; | 2695 | return irq; |
2696 | 2696 | ||
2697 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, | 2697 | err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, |
2698 | "coh901318", base); | 2698 | "coh901318", base); |
2699 | if (err) | 2699 | if (err) |
2700 | return err; | 2700 | return err; |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
@@ -141,6 +141,9 @@ struct cppi41_dd { | |||
141 | const struct chan_queues *queues_rx; | 141 | const struct chan_queues *queues_rx; |
142 | const struct chan_queues *queues_tx; | 142 | const struct chan_queues *queues_tx; |
143 | struct chan_queues td_queue; | 143 | struct chan_queues td_queue; |
144 | |||
145 | /* context for suspend/resume */ | ||
146 | unsigned int dma_tdfdq; | ||
144 | }; | 147 | }; |
145 | 148 | ||
146 | #define FIST_COMPLETION_QUEUE 93 | 149 | #define FIST_COMPLETION_QUEUE 93 |
@@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) | |||
263 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); | 266 | return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); |
264 | } | 267 | } |
265 | 268 | ||
269 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
270 | { | ||
271 | u32 desc; | ||
272 | |||
273 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
274 | desc &= ~0x1f; | ||
275 | return desc; | ||
276 | } | ||
277 | |||
266 | static irqreturn_t cppi41_irq(int irq, void *data) | 278 | static irqreturn_t cppi41_irq(int irq, void *data) |
267 | { | 279 | { |
268 | struct cppi41_dd *cdd = data; | 280 | struct cppi41_dd *cdd = data; |
@@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
300 | q_num = __fls(val); | 312 | q_num = __fls(val); |
301 | val &= ~(1 << q_num); | 313 | val &= ~(1 << q_num); |
302 | q_num += 32 * i; | 314 | q_num += 32 * i; |
303 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); | 315 | desc = cppi41_pop_desc(cdd, q_num); |
304 | desc &= ~0x1f; | ||
305 | c = desc_to_chan(cdd, desc); | 316 | c = desc_to_chan(cdd, desc); |
306 | if (WARN_ON(!c)) { | 317 | if (WARN_ON(!c)) { |
307 | pr_err("%s() q %d desc %08x\n", __func__, | 318 | pr_err("%s() q %d desc %08x\n", __func__, |
@@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, | |||
353 | 364 | ||
354 | /* lock */ | 365 | /* lock */ |
355 | ret = dma_cookie_status(chan, cookie, txstate); | 366 | ret = dma_cookie_status(chan, cookie, txstate); |
356 | if (txstate && ret == DMA_SUCCESS) | 367 | if (txstate && ret == DMA_COMPLETE) |
357 | txstate->residue = c->residue; | 368 | txstate->residue = c->residue; |
358 | /* unlock */ | 369 | /* unlock */ |
359 | 370 | ||
@@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) | |||
517 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; | 528 | d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; |
518 | } | 529 | } |
519 | 530 | ||
520 | static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) | ||
521 | { | ||
522 | u32 desc; | ||
523 | |||
524 | desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); | ||
525 | desc &= ~0x1f; | ||
526 | return desc; | ||
527 | } | ||
528 | |||
529 | static int cppi41_tear_down_chan(struct cppi41_channel *c) | 531 | static int cppi41_tear_down_chan(struct cppi41_channel *c) |
530 | { | 532 | { |
531 | struct cppi41_dd *cdd = c->cdd; | 533 | struct cppi41_dd *cdd = c->cdd; |
@@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
561 | c->td_retry = 100; | 563 | c->td_retry = 100; |
562 | } | 564 | } |
563 | 565 | ||
564 | if (!c->td_seen) { | 566 | if (!c->td_seen || !c->td_desc_seen) { |
565 | unsigned td_comp_queue; | ||
566 | 567 | ||
567 | if (c->is_tx) | 568 | desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); |
568 | td_comp_queue = cdd->td_queue.complete; | 569 | if (!desc_phys) |
569 | else | 570 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); |
570 | td_comp_queue = c->q_comp_num; | ||
571 | 571 | ||
572 | desc_phys = cppi41_pop_desc(cdd, td_comp_queue); | 572 | if (desc_phys == c->desc_phys) { |
573 | if (desc_phys) { | 573 | c->td_desc_seen = 1; |
574 | __iormb(); | 574 | |
575 | } else if (desc_phys == td_desc_phys) { | ||
576 | u32 pd0; | ||
575 | 577 | ||
576 | if (desc_phys == td_desc_phys) { | ||
577 | u32 pd0; | ||
578 | pd0 = td->pd0; | ||
579 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); | ||
580 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
581 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
582 | } else { | ||
583 | WARN_ON_ONCE(1); | ||
584 | } | ||
585 | c->td_seen = 1; | ||
586 | } | ||
587 | } | ||
588 | if (!c->td_desc_seen) { | ||
589 | desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); | ||
590 | if (desc_phys) { | ||
591 | __iormb(); | 578 | __iormb(); |
592 | WARN_ON(c->desc_phys != desc_phys); | 579 | pd0 = td->pd0; |
593 | c->td_desc_seen = 1; | 580 | WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); |
581 | WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); | ||
582 | WARN_ON((pd0 & 0x1f) != c->port_num); | ||
583 | c->td_seen = 1; | ||
584 | } else if (desc_phys) { | ||
585 | WARN_ON_ONCE(1); | ||
594 | } | 586 | } |
595 | } | 587 | } |
596 | c->td_retry--; | 588 | c->td_retry--; |
@@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) | |||
609 | 601 | ||
610 | WARN_ON(!c->td_retry); | 602 | WARN_ON(!c->td_retry); |
611 | if (!c->td_desc_seen) { | 603 | if (!c->td_desc_seen) { |
612 | desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); | 604 | desc_phys = cppi41_pop_desc(cdd, c->q_num); |
613 | WARN_ON(!desc_phys); | 605 | WARN_ON(!desc_phys); |
614 | } | 606 | } |
615 | 607 | ||
@@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) | |||
674 | } | 666 | } |
675 | } | 667 | } |
676 | 668 | ||
677 | static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) | 669 | static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) |
678 | { | 670 | { |
679 | struct cppi41_channel *cchan; | 671 | struct cppi41_channel *cchan; |
680 | int i; | 672 | int i; |
681 | int ret; | 673 | int ret; |
682 | u32 n_chans; | 674 | u32 n_chans; |
683 | 675 | ||
684 | ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", | 676 | ret = of_property_read_u32(dev->of_node, "#dma-channels", |
685 | &n_chans); | 677 | &n_chans); |
686 | if (ret) | 678 | if (ret) |
687 | return ret; | 679 | return ret; |
@@ -719,7 +711,7 @@ err: | |||
719 | return -ENOMEM; | 711 | return -ENOMEM; |
720 | } | 712 | } |
721 | 713 | ||
722 | static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 714 | static void purge_descs(struct device *dev, struct cppi41_dd *cdd) |
723 | { | 715 | { |
724 | unsigned int mem_decs; | 716 | unsigned int mem_decs; |
725 | int i; | 717 | int i; |
@@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
731 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); | 723 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); |
732 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); | 724 | cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); |
733 | 725 | ||
734 | dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, | 726 | dma_free_coherent(dev, mem_decs, cdd->cd, |
735 | cdd->descs_phys); | 727 | cdd->descs_phys); |
736 | } | 728 | } |
737 | } | 729 | } |
@@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) | |||
741 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); | 733 | cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); |
742 | } | 734 | } |
743 | 735 | ||
744 | static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) | 736 | static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) |
745 | { | 737 | { |
746 | disable_sched(cdd); | 738 | disable_sched(cdd); |
747 | 739 | ||
748 | purge_descs(pdev, cdd); | 740 | purge_descs(dev, cdd); |
749 | 741 | ||
750 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 742 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
751 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); | 743 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); |
752 | dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, | 744 | dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, |
753 | cdd->scratch_phys); | 745 | cdd->scratch_phys); |
754 | } | 746 | } |
755 | 747 | ||
756 | static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | 748 | static int init_descs(struct device *dev, struct cppi41_dd *cdd) |
757 | { | 749 | { |
758 | unsigned int desc_size; | 750 | unsigned int desc_size; |
759 | unsigned int mem_decs; | 751 | unsigned int mem_decs; |
@@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
777 | reg |= ilog2(ALLOC_DECS_NUM) - 5; | 769 | reg |= ilog2(ALLOC_DECS_NUM) - 5; |
778 | 770 | ||
779 | BUILD_BUG_ON(DESCS_AREAS != 1); | 771 | BUILD_BUG_ON(DESCS_AREAS != 1); |
780 | cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, | 772 | cdd->cd = dma_alloc_coherent(dev, mem_decs, |
781 | &cdd->descs_phys, GFP_KERNEL); | 773 | &cdd->descs_phys, GFP_KERNEL); |
782 | if (!cdd->cd) | 774 | if (!cdd->cd) |
783 | return -ENOMEM; | 775 | return -ENOMEM; |
@@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) | |||
813 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); | 805 | cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); |
814 | } | 806 | } |
815 | 807 | ||
816 | static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | 808 | static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) |
817 | { | 809 | { |
818 | int ret; | 810 | int ret; |
819 | 811 | ||
820 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); | 812 | BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); |
821 | cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, | 813 | cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, |
822 | &cdd->scratch_phys, GFP_KERNEL); | 814 | &cdd->scratch_phys, GFP_KERNEL); |
823 | if (!cdd->qmgr_scratch) | 815 | if (!cdd->qmgr_scratch) |
824 | return -ENOMEM; | 816 | return -ENOMEM; |
@@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
827 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | 819 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); |
828 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | 820 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); |
829 | 821 | ||
830 | ret = init_descs(pdev, cdd); | 822 | ret = init_descs(dev, cdd); |
831 | if (ret) | 823 | if (ret) |
832 | goto err_td; | 824 | goto err_td; |
833 | 825 | ||
@@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) | |||
835 | init_sched(cdd); | 827 | init_sched(cdd); |
836 | return 0; | 828 | return 0; |
837 | err_td: | 829 | err_td: |
838 | deinit_cpii41(pdev, cdd); | 830 | deinit_cppi41(dev, cdd); |
839 | return ret; | 831 | return ret; |
840 | } | 832 | } |
841 | 833 | ||
@@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { | |||
914 | }; | 906 | }; |
915 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); | 907 | MODULE_DEVICE_TABLE(of, cppi41_dma_ids); |
916 | 908 | ||
917 | static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | 909 | static const struct cppi_glue_infos *get_glue_info(struct device *dev) |
918 | { | 910 | { |
919 | const struct of_device_id *of_id; | 911 | const struct of_device_id *of_id; |
920 | 912 | ||
921 | of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); | 913 | of_id = of_match_node(cppi41_dma_ids, dev->of_node); |
922 | if (!of_id) | 914 | if (!of_id) |
923 | return NULL; | 915 | return NULL; |
924 | return of_id->data; | 916 | return of_id->data; |
@@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) | |||
927 | static int cppi41_dma_probe(struct platform_device *pdev) | 919 | static int cppi41_dma_probe(struct platform_device *pdev) |
928 | { | 920 | { |
929 | struct cppi41_dd *cdd; | 921 | struct cppi41_dd *cdd; |
922 | struct device *dev = &pdev->dev; | ||
930 | const struct cppi_glue_infos *glue_info; | 923 | const struct cppi_glue_infos *glue_info; |
931 | int irq; | 924 | int irq; |
932 | int ret; | 925 | int ret; |
933 | 926 | ||
934 | glue_info = get_glue_info(pdev); | 927 | glue_info = get_glue_info(dev); |
935 | if (!glue_info) | 928 | if (!glue_info) |
936 | return -EINVAL; | 929 | return -EINVAL; |
937 | 930 | ||
@@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
946 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; | 939 | cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; |
947 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; | 940 | cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; |
948 | cdd->ddev.device_control = cppi41_dma_control; | 941 | cdd->ddev.device_control = cppi41_dma_control; |
949 | cdd->ddev.dev = &pdev->dev; | 942 | cdd->ddev.dev = dev; |
950 | INIT_LIST_HEAD(&cdd->ddev.channels); | 943 | INIT_LIST_HEAD(&cdd->ddev.channels); |
951 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; | 944 | cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; |
952 | 945 | ||
953 | cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); | 946 | cdd->usbss_mem = of_iomap(dev->of_node, 0); |
954 | cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); | 947 | cdd->ctrl_mem = of_iomap(dev->of_node, 1); |
955 | cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); | 948 | cdd->sched_mem = of_iomap(dev->of_node, 2); |
956 | cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); | 949 | cdd->qmgr_mem = of_iomap(dev->of_node, 3); |
957 | 950 | ||
958 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || | 951 | if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || |
959 | !cdd->qmgr_mem) { | 952 | !cdd->qmgr_mem) { |
@@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
961 | goto err_remap; | 954 | goto err_remap; |
962 | } | 955 | } |
963 | 956 | ||
964 | pm_runtime_enable(&pdev->dev); | 957 | pm_runtime_enable(dev); |
965 | ret = pm_runtime_get_sync(&pdev->dev); | 958 | ret = pm_runtime_get_sync(dev); |
966 | if (ret) | 959 | if (ret < 0) |
967 | goto err_get_sync; | 960 | goto err_get_sync; |
968 | 961 | ||
969 | cdd->queues_rx = glue_info->queues_rx; | 962 | cdd->queues_rx = glue_info->queues_rx; |
970 | cdd->queues_tx = glue_info->queues_tx; | 963 | cdd->queues_tx = glue_info->queues_tx; |
971 | cdd->td_queue = glue_info->td_queue; | 964 | cdd->td_queue = glue_info->td_queue; |
972 | 965 | ||
973 | ret = init_cppi41(pdev, cdd); | 966 | ret = init_cppi41(dev, cdd); |
974 | if (ret) | 967 | if (ret) |
975 | goto err_init_cppi; | 968 | goto err_init_cppi; |
976 | 969 | ||
977 | ret = cppi41_add_chans(pdev, cdd); | 970 | ret = cppi41_add_chans(dev, cdd); |
978 | if (ret) | 971 | if (ret) |
979 | goto err_chans; | 972 | goto err_chans; |
980 | 973 | ||
981 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 974 | irq = irq_of_parse_and_map(dev->of_node, 0); |
982 | if (!irq) | 975 | if (!irq) |
983 | goto err_irq; | 976 | goto err_irq; |
984 | 977 | ||
985 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | 978 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); |
986 | 979 | ||
987 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, | 980 | ret = request_irq(irq, glue_info->isr, IRQF_SHARED, |
988 | dev_name(&pdev->dev), cdd); | 981 | dev_name(dev), cdd); |
989 | if (ret) | 982 | if (ret) |
990 | goto err_irq; | 983 | goto err_irq; |
991 | cdd->irq = irq; | 984 | cdd->irq = irq; |
@@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) | |||
994 | if (ret) | 987 | if (ret) |
995 | goto err_dma_reg; | 988 | goto err_dma_reg; |
996 | 989 | ||
997 | ret = of_dma_controller_register(pdev->dev.of_node, | 990 | ret = of_dma_controller_register(dev->of_node, |
998 | cppi41_dma_xlate, &cpp41_dma_info); | 991 | cppi41_dma_xlate, &cpp41_dma_info); |
999 | if (ret) | 992 | if (ret) |
1000 | goto err_of; | 993 | goto err_of; |
@@ -1009,11 +1002,11 @@ err_irq: | |||
1009 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1002 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
1010 | cleanup_chans(cdd); | 1003 | cleanup_chans(cdd); |
1011 | err_chans: | 1004 | err_chans: |
1012 | deinit_cpii41(pdev, cdd); | 1005 | deinit_cppi41(dev, cdd); |
1013 | err_init_cppi: | 1006 | err_init_cppi: |
1014 | pm_runtime_put(&pdev->dev); | 1007 | pm_runtime_put(dev); |
1015 | err_get_sync: | 1008 | err_get_sync: |
1016 | pm_runtime_disable(&pdev->dev); | 1009 | pm_runtime_disable(dev); |
1017 | iounmap(cdd->usbss_mem); | 1010 | iounmap(cdd->usbss_mem); |
1018 | iounmap(cdd->ctrl_mem); | 1011 | iounmap(cdd->ctrl_mem); |
1019 | iounmap(cdd->sched_mem); | 1012 | iounmap(cdd->sched_mem); |
@@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
1033 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | 1026 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); |
1034 | free_irq(cdd->irq, cdd); | 1027 | free_irq(cdd->irq, cdd); |
1035 | cleanup_chans(cdd); | 1028 | cleanup_chans(cdd); |
1036 | deinit_cpii41(pdev, cdd); | 1029 | deinit_cppi41(&pdev->dev, cdd); |
1037 | iounmap(cdd->usbss_mem); | 1030 | iounmap(cdd->usbss_mem); |
1038 | iounmap(cdd->ctrl_mem); | 1031 | iounmap(cdd->ctrl_mem); |
1039 | iounmap(cdd->sched_mem); | 1032 | iounmap(cdd->sched_mem); |
@@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) | |||
1044 | return 0; | 1037 | return 0; |
1045 | } | 1038 | } |
1046 | 1039 | ||
1040 | #ifdef CONFIG_PM_SLEEP | ||
1041 | static int cppi41_suspend(struct device *dev) | ||
1042 | { | ||
1043 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
1044 | |||
1045 | cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); | ||
1046 | cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); | ||
1047 | disable_sched(cdd); | ||
1048 | |||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | static int cppi41_resume(struct device *dev) | ||
1053 | { | ||
1054 | struct cppi41_dd *cdd = dev_get_drvdata(dev); | ||
1055 | struct cppi41_channel *c; | ||
1056 | int i; | ||
1057 | |||
1058 | for (i = 0; i < DESCS_AREAS; i++) | ||
1059 | cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); | ||
1060 | |||
1061 | list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) | ||
1062 | if (!c->is_tx) | ||
1063 | cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); | ||
1064 | |||
1065 | init_sched(cdd); | ||
1066 | |||
1067 | cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); | ||
1068 | cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); | ||
1069 | cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); | ||
1070 | cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); | ||
1071 | |||
1072 | cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); | ||
1073 | |||
1074 | return 0; | ||
1075 | } | ||
1076 | #endif | ||
1077 | |||
1078 | static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); | ||
1079 | |||
1047 | static struct platform_driver cpp41_dma_driver = { | 1080 | static struct platform_driver cpp41_dma_driver = { |
1048 | .probe = cppi41_dma_probe, | 1081 | .probe = cppi41_dma_probe, |
1049 | .remove = cppi41_dma_remove, | 1082 | .remove = cppi41_dma_remove, |
1050 | .driver = { | 1083 | .driver = { |
1051 | .name = "cppi41-dma-engine", | 1084 | .name = "cppi41-dma-engine", |
1052 | .owner = THIS_MODULE, | 1085 | .owner = THIS_MODULE, |
1086 | .pm = &cppi41_pm_ops, | ||
1053 | .of_match_table = of_match_ptr(cppi41_dma_ids), | 1087 | .of_match_table = of_match_ptr(cppi41_dma_ids), |
1054 | }, | 1088 | }, |
1055 | }; | 1089 | }; |
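The new suspend/resume pair is wired up through SIMPLE_DEV_PM_OPS(), so the callbacks are only referenced when CONFIG_PM_SLEEP is set (hence the #ifdef around them). As a rough approximation, the macro fills in a dev_pm_ops that reuses the same pair for every system-sleep transition and leaves the runtime-PM fields NULL; the sketch below shows the effective result, not the macro's literal expansion.

/* Approximate effect of SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume) */
static const struct dev_pm_ops cppi41_pm_ops = {
	.suspend  = cppi41_suspend,
	.resume   = cppi41_resume,
	.freeze   = cppi41_suspend,
	.thaw     = cppi41_resume,
	.poweroff = cppi41_suspend,
	.restore  = cppi41_resume,
};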
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d42..94c380f07538 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, | |||
491 | unsigned long flags; | 491 | unsigned long flags; |
492 | 492 | ||
493 | status = dma_cookie_status(c, cookie, state); | 493 | status = dma_cookie_status(c, cookie, state); |
494 | if (status == DMA_SUCCESS || !state) | 494 | if (status == DMA_COMPLETE || !state) |
495 | return status; | 495 | return status; |
496 | 496 | ||
497 | spin_lock_irqsave(&chan->vchan.lock, flags); | 497 | spin_lock_irqsave(&chan->vchan.lock, flags); |
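The DMA_SUCCESS to DMA_COMPLETE renames in the various tx_status callbacks all converge on the same shape: report completion straight from the cookie bookkeeping and only compute a residue for transfers still in flight. A generic sketch of that pattern follows; the driver prefix and the residue helper are placeholders, not real symbols.

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, state);

	/* nothing left to report once the cookie says the transfer completed */
	if (ret == DMA_COMPLETE || !state)
		return ret;

	/* otherwise report how much is still outstanding */
	dma_set_residue(state, foo_get_bytes_left(chan));	/* hypothetical helper */
	return ret;
}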
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9162ac80c18f..ea806bdc12ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/acpi.h> | 65 | #include <linux/acpi.h> |
66 | #include <linux/acpi_dma.h> | 66 | #include <linux/acpi_dma.h> |
67 | #include <linux/of_dma.h> | 67 | #include <linux/of_dma.h> |
68 | #include <linux/mempool.h> | ||
68 | 69 | ||
69 | static DEFINE_MUTEX(dma_list_mutex); | 70 | static DEFINE_MUTEX(dma_list_mutex); |
70 | static DEFINE_IDR(dma_idr); | 71 | static DEFINE_IDR(dma_idr); |
@@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device) | |||
901 | } | 902 | } |
902 | EXPORT_SYMBOL(dma_async_device_unregister); | 903 | EXPORT_SYMBOL(dma_async_device_unregister); |
903 | 904 | ||
904 | /** | 905 | struct dmaengine_unmap_pool { |
905 | * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses | 906 | struct kmem_cache *cache; |
906 | * @chan: DMA channel to offload copy to | 907 | const char *name; |
907 | * @dest: destination address (virtual) | 908 | mempool_t *pool; |
908 | * @src: source address (virtual) | 909 | size_t size; |
909 | * @len: length | 910 | }; |
910 | * | ||
911 | * Both @dest and @src must be mappable to a bus address according to the | ||
912 | * DMA mapping API rules for streaming mappings. | ||
913 | * Both @dest and @src must stay memory resident (kernel memory or locked | ||
914 | * user space pages). | ||
915 | */ | ||
916 | dma_cookie_t | ||
917 | dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | ||
918 | void *src, size_t len) | ||
919 | { | ||
920 | struct dma_device *dev = chan->device; | ||
921 | struct dma_async_tx_descriptor *tx; | ||
922 | dma_addr_t dma_dest, dma_src; | ||
923 | dma_cookie_t cookie; | ||
924 | unsigned long flags; | ||
925 | 911 | ||
926 | dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); | 912 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
927 | dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); | 913 | static struct dmaengine_unmap_pool unmap_pool[] = { |
928 | flags = DMA_CTRL_ACK | | 914 | __UNMAP_POOL(2), |
929 | DMA_COMPL_SRC_UNMAP_SINGLE | | 915 | #if IS_ENABLED(CONFIG_ASYNC_TX_DMA) |
930 | DMA_COMPL_DEST_UNMAP_SINGLE; | 916 | __UNMAP_POOL(16), |
931 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | 917 | __UNMAP_POOL(128), |
918 | __UNMAP_POOL(256), | ||
919 | #endif | ||
920 | }; | ||
932 | 921 | ||
933 | if (!tx) { | 922 | static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) |
934 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 923 | { |
935 | dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | 924 | int order = get_count_order(nr); |
936 | return -ENOMEM; | 925 | |
926 | switch (order) { | ||
927 | case 0 ... 1: | ||
928 | return &unmap_pool[0]; | ||
929 | case 2 ... 4: | ||
930 | return &unmap_pool[1]; | ||
931 | case 5 ... 7: | ||
932 | return &unmap_pool[2]; | ||
933 | case 8: | ||
934 | return &unmap_pool[3]; | ||
935 | default: | ||
936 | BUG(); | ||
937 | return NULL; | ||
937 | } | 938 | } |
939 | } | ||
938 | 940 | ||
939 | tx->callback = NULL; | 941 | static void dmaengine_unmap(struct kref *kref) |
940 | cookie = tx->tx_submit(tx); | 942 | { |
943 | struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); | ||
944 | struct device *dev = unmap->dev; | ||
945 | int cnt, i; | ||
946 | |||
947 | cnt = unmap->to_cnt; | ||
948 | for (i = 0; i < cnt; i++) | ||
949 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
950 | DMA_TO_DEVICE); | ||
951 | cnt += unmap->from_cnt; | ||
952 | for (; i < cnt; i++) | ||
953 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
954 | DMA_FROM_DEVICE); | ||
955 | cnt += unmap->bidi_cnt; | ||
956 | for (; i < cnt; i++) { | ||
957 | if (unmap->addr[i] == 0) | ||
958 | continue; | ||
959 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | ||
960 | DMA_BIDIRECTIONAL); | ||
961 | } | ||
962 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); | ||
963 | } | ||
941 | 964 | ||
942 | preempt_disable(); | 965 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) |
943 | __this_cpu_add(chan->local->bytes_transferred, len); | 966 | { |
944 | __this_cpu_inc(chan->local->memcpy_count); | 967 | if (unmap) |
945 | preempt_enable(); | 968 | kref_put(&unmap->kref, dmaengine_unmap); |
969 | } | ||
970 | EXPORT_SYMBOL_GPL(dmaengine_unmap_put); | ||
946 | 971 | ||
947 | return cookie; | 972 | static void dmaengine_destroy_unmap_pool(void) |
973 | { | ||
974 | int i; | ||
975 | |||
976 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | ||
977 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | ||
978 | |||
979 | if (p->pool) | ||
980 | mempool_destroy(p->pool); | ||
981 | p->pool = NULL; | ||
982 | if (p->cache) | ||
983 | kmem_cache_destroy(p->cache); | ||
984 | p->cache = NULL; | ||
985 | } | ||
948 | } | 986 | } |
949 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); | ||
950 | 987 | ||
951 | /** | 988 | static int __init dmaengine_init_unmap_pool(void) |
952 | * dma_async_memcpy_buf_to_pg - offloaded copy from address to page | ||
953 | * @chan: DMA channel to offload copy to | ||
954 | * @page: destination page | ||
955 | * @offset: offset in page to copy to | ||
956 | * @kdata: source address (virtual) | ||
957 | * @len: length | ||
958 | * | ||
959 | * Both @page/@offset and @kdata must be mappable to a bus address according | ||
960 | * to the DMA mapping API rules for streaming mappings. | ||
961 | * Both @page/@offset and @kdata must stay memory resident (kernel memory or | ||
962 | * locked user space pages) | ||
963 | */ | ||
964 | dma_cookie_t | ||
965 | dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | ||
966 | unsigned int offset, void *kdata, size_t len) | ||
967 | { | 989 | { |
968 | struct dma_device *dev = chan->device; | 990 | int i; |
969 | struct dma_async_tx_descriptor *tx; | ||
970 | dma_addr_t dma_dest, dma_src; | ||
971 | dma_cookie_t cookie; | ||
972 | unsigned long flags; | ||
973 | 991 | ||
974 | dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); | 992 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
975 | dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); | 993 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; |
976 | flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; | 994 | size_t size; |
977 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | ||
978 | 995 | ||
979 | if (!tx) { | 996 | size = sizeof(struct dmaengine_unmap_data) + |
980 | dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); | 997 | sizeof(dma_addr_t) * p->size; |
981 | dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | 998 | |
982 | return -ENOMEM; | 999 | p->cache = kmem_cache_create(p->name, size, 0, |
1000 | SLAB_HWCACHE_ALIGN, NULL); | ||
1001 | if (!p->cache) | ||
1002 | break; | ||
1003 | p->pool = mempool_create_slab_pool(1, p->cache); | ||
1004 | if (!p->pool) | ||
1005 | break; | ||
983 | } | 1006 | } |
984 | 1007 | ||
985 | tx->callback = NULL; | 1008 | if (i == ARRAY_SIZE(unmap_pool)) |
986 | cookie = tx->tx_submit(tx); | 1009 | return 0; |
987 | 1010 | ||
988 | preempt_disable(); | 1011 | dmaengine_destroy_unmap_pool(); |
989 | __this_cpu_add(chan->local->bytes_transferred, len); | 1012 | return -ENOMEM; |
990 | __this_cpu_inc(chan->local->memcpy_count); | 1013 | } |
991 | preempt_enable(); | ||
992 | 1014 | ||
993 | return cookie; | 1015 | struct dmaengine_unmap_data * |
1016 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) | ||
1017 | { | ||
1018 | struct dmaengine_unmap_data *unmap; | ||
1019 | |||
1020 | unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); | ||
1021 | if (!unmap) | ||
1022 | return NULL; | ||
1023 | |||
1024 | memset(unmap, 0, sizeof(*unmap)); | ||
1025 | kref_init(&unmap->kref); | ||
1026 | unmap->dev = dev; | ||
1027 | |||
1028 | return unmap; | ||
994 | } | 1029 | } |
995 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); | 1030 | EXPORT_SYMBOL(dmaengine_get_unmap_data); |
996 | 1031 | ||
997 | /** | 1032 | /** |
998 | * dma_async_memcpy_pg_to_pg - offloaded copy from page to page | 1033 | * dma_async_memcpy_pg_to_pg - offloaded copy from page to page |
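The pool selection in __get_unmap_pool() rounds the requested address count up to a power of two via get_count_order() and then buckets by that order into the 2, 16, 128 and 256 entry pools declared above. The stand-alone program below reproduces the bucketing with made-up request sizes; get_count_order() is reimplemented in portable C purely for illustration.

#include <stdio.h>

/* illustrative stand-in for the kernel's get_count_order() */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	static const int pool_size[] = { 2, 16, 128, 256 };
	unsigned int nr[] = { 2, 3, 16, 17, 129, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(nr) / sizeof(nr[0]); i++) {
		int order = count_order(nr[i]);
		int bucket = order <= 1 ? 0 : order <= 4 ? 1 : order <= 7 ? 2 : 3;

		printf("%3u addresses -> order %d -> %d-entry pool\n",
		       nr[i], order, pool_size[bucket]);
	}
	return 0;
}

So a two-address memcpy uses the small pool, async_tx requests of up to 16 addresses use the 16-entry pool, and only the large RAID cases fall into the 128 and 256 entry pools.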
@@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
1015 | { | 1050 | { |
1016 | struct dma_device *dev = chan->device; | 1051 | struct dma_device *dev = chan->device; |
1017 | struct dma_async_tx_descriptor *tx; | 1052 | struct dma_async_tx_descriptor *tx; |
1018 | dma_addr_t dma_dest, dma_src; | 1053 | struct dmaengine_unmap_data *unmap; |
1019 | dma_cookie_t cookie; | 1054 | dma_cookie_t cookie; |
1020 | unsigned long flags; | 1055 | unsigned long flags; |
1021 | 1056 | ||
1022 | dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); | 1057 | unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO); |
1023 | dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, | 1058 | if (!unmap) |
1024 | DMA_FROM_DEVICE); | 1059 | return -ENOMEM; |
1060 | |||
1061 | unmap->to_cnt = 1; | ||
1062 | unmap->from_cnt = 1; | ||
1063 | unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, | ||
1064 | DMA_TO_DEVICE); | ||
1065 | unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, | ||
1066 | DMA_FROM_DEVICE); | ||
1067 | unmap->len = len; | ||
1025 | flags = DMA_CTRL_ACK; | 1068 | flags = DMA_CTRL_ACK; |
1026 | tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); | 1069 | tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], |
1070 | len, flags); | ||
1027 | 1071 | ||
1028 | if (!tx) { | 1072 | if (!tx) { |
1029 | dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); | 1073 | dmaengine_unmap_put(unmap); |
1030 | dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); | ||
1031 | return -ENOMEM; | 1074 | return -ENOMEM; |
1032 | } | 1075 | } |
1033 | 1076 | ||
1034 | tx->callback = NULL; | 1077 | dma_set_unmap(tx, unmap); |
1035 | cookie = tx->tx_submit(tx); | 1078 | cookie = tx->tx_submit(tx); |
1079 | dmaengine_unmap_put(unmap); | ||
1036 | 1080 | ||
1037 | preempt_disable(); | 1081 | preempt_disable(); |
1038 | __this_cpu_add(chan->local->bytes_transferred, len); | 1082 | __this_cpu_add(chan->local->bytes_transferred, len); |
@@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, | |||
1043 | } | 1087 | } |
1044 | EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); | 1088 | EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); |
1045 | 1089 | ||
1090 | /** | ||
1091 | * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses | ||
1092 | * @chan: DMA channel to offload copy to | ||
1093 | * @dest: destination address (virtual) | ||
1094 | * @src: source address (virtual) | ||
1095 | * @len: length | ||
1096 | * | ||
1097 | * Both @dest and @src must be mappable to a bus address according to the | ||
1098 | * DMA mapping API rules for streaming mappings. | ||
1099 | * Both @dest and @src must stay memory resident (kernel memory or locked | ||
1100 | * user space pages). | ||
1101 | */ | ||
1102 | dma_cookie_t | ||
1103 | dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, | ||
1104 | void *src, size_t len) | ||
1105 | { | ||
1106 | return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), | ||
1107 | (unsigned long) dest & ~PAGE_MASK, | ||
1108 | virt_to_page(src), | ||
1109 | (unsigned long) src & ~PAGE_MASK, len); | ||
1110 | } | ||
1111 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); | ||
1112 | |||
1113 | /** | ||
1114 | * dma_async_memcpy_buf_to_pg - offloaded copy from address to page | ||
1115 | * @chan: DMA channel to offload copy to | ||
1116 | * @page: destination page | ||
1117 | * @offset: offset in page to copy to | ||
1118 | * @kdata: source address (virtual) | ||
1119 | * @len: length | ||
1120 | * | ||
1121 | * Both @page/@offset and @kdata must be mappable to a bus address according | ||
1122 | * to the DMA mapping API rules for streaming mappings. | ||
1123 | * Both @page/@offset and @kdata must stay memory resident (kernel memory or | ||
1124 | * locked user space pages) | ||
1125 | */ | ||
1126 | dma_cookie_t | ||
1127 | dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, | ||
1128 | unsigned int offset, void *kdata, size_t len) | ||
1129 | { | ||
1130 | return dma_async_memcpy_pg_to_pg(chan, page, offset, | ||
1131 | virt_to_page(kdata), | ||
1132 | (unsigned long) kdata & ~PAGE_MASK, len); | ||
1133 | } | ||
1134 | EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); | ||
1135 | |||
1046 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | 1136 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
1047 | struct dma_chan *chan) | 1137 | struct dma_chan *chan) |
1048 | { | 1138 | { |
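dma_async_memcpy_buf_to_buf() and _buf_to_pg() are now thin wrappers that split a kernel virtual address into a page and an in-page offset before delegating to the pg_to_pg helper. The arithmetic is the usual mask trick; the small program below illustrates it with a 4 KiB page size and a made-up address (the wrappers, like the originals, assume direct-mapped lowmem buffers).

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL		/* assumed page size */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t addr = 0x12345abcUL;	/* made-up kernel virtual address */

	printf("page base %#lx, offset in page %#lx\n",
	       (unsigned long)(addr & PAGE_MASK),	/* what virt_to_page() resolves */
	       (unsigned long)(addr & ~PAGE_MASK));	/* offset passed to pg_to_pg */
	return 0;
}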
@@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |||
1062 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); | 1152 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); |
1063 | 1153 | ||
1064 | if (!tx) | 1154 | if (!tx) |
1065 | return DMA_SUCCESS; | 1155 | return DMA_COMPLETE; |
1066 | 1156 | ||
1067 | while (tx->cookie == -EBUSY) { | 1157 | while (tx->cookie == -EBUSY) { |
1068 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | 1158 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { |
@@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); | |||
1116 | 1206 | ||
1117 | static int __init dma_bus_init(void) | 1207 | static int __init dma_bus_init(void) |
1118 | { | 1208 | { |
1209 | int err = dmaengine_init_unmap_pool(); | ||
1210 | |||
1211 | if (err) | ||
1212 | return err; | ||
1119 | return class_register(&dma_devclass); | 1213 | return class_register(&dma_devclass); |
1120 | } | 1214 | } |
1121 | arch_initcall(dma_bus_init); | 1215 | arch_initcall(dma_bus_init); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 92f796cdc6ab..20f9a3aaf926 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -8,6 +8,8 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
12 | |||
11 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
12 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
13 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
@@ -19,10 +21,6 @@ | |||
19 | #include <linux/random.h> | 21 | #include <linux/random.h> |
20 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
21 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
22 | #include <linux/ctype.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | 24 | ||
27 | static unsigned int test_buf_size = 16384; | 25 | static unsigned int test_buf_size = 16384; |
28 | module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); | 26 | module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); |
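A note on the pr_fmt() line added above: every pr_*() call in this file now picks up the module-name prefix automatically, which is why later hunks drop the hand-written "dmatest: " strings. For example (the channel name and thread count in the sample output are made up):

	pr_info("Started %u threads using %s\n", thread_count, dma_chan_name(chan));
	/* prints: dmatest: Started 2 threads using dma0chan0 */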
@@ -68,92 +66,13 @@ module_param(timeout, uint, S_IRUGO | S_IWUSR); | |||
68 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | 66 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " |
69 | "Pass -1 for infinite timeout"); | 67 | "Pass -1 for infinite timeout"); |
70 | 68 | ||
71 | /* Maximum amount of mismatched bytes in buffer to print */ | 69 | static bool noverify; |
72 | #define MAX_ERROR_COUNT 32 | 70 | module_param(noverify, bool, S_IRUGO | S_IWUSR); |
73 | 71 | MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); | |
74 | /* | ||
75 | * Initialization patterns. All bytes in the source buffer has bit 7 | ||
76 | * set, all bytes in the destination buffer has bit 7 cleared. | ||
77 | * | ||
78 | * Bit 6 is set for all bytes which are to be copied by the DMA | ||
79 | * engine. Bit 5 is set for all bytes which are to be overwritten by | ||
80 | * the DMA engine. | ||
81 | * | ||
82 | * The remaining bits are the inverse of a counter which increments by | ||
83 | * one for each byte address. | ||
84 | */ | ||
85 | #define PATTERN_SRC 0x80 | ||
86 | #define PATTERN_DST 0x00 | ||
87 | #define PATTERN_COPY 0x40 | ||
88 | #define PATTERN_OVERWRITE 0x20 | ||
89 | #define PATTERN_COUNT_MASK 0x1f | ||
90 | |||
91 | enum dmatest_error_type { | ||
92 | DMATEST_ET_OK, | ||
93 | DMATEST_ET_MAP_SRC, | ||
94 | DMATEST_ET_MAP_DST, | ||
95 | DMATEST_ET_PREP, | ||
96 | DMATEST_ET_SUBMIT, | ||
97 | DMATEST_ET_TIMEOUT, | ||
98 | DMATEST_ET_DMA_ERROR, | ||
99 | DMATEST_ET_DMA_IN_PROGRESS, | ||
100 | DMATEST_ET_VERIFY, | ||
101 | DMATEST_ET_VERIFY_BUF, | ||
102 | }; | ||
103 | |||
104 | struct dmatest_verify_buffer { | ||
105 | unsigned int index; | ||
106 | u8 expected; | ||
107 | u8 actual; | ||
108 | }; | ||
109 | |||
110 | struct dmatest_verify_result { | ||
111 | unsigned int error_count; | ||
112 | struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; | ||
113 | u8 pattern; | ||
114 | bool is_srcbuf; | ||
115 | }; | ||
116 | |||
117 | struct dmatest_thread_result { | ||
118 | struct list_head node; | ||
119 | unsigned int n; | ||
120 | unsigned int src_off; | ||
121 | unsigned int dst_off; | ||
122 | unsigned int len; | ||
123 | enum dmatest_error_type type; | ||
124 | union { | ||
125 | unsigned long data; | ||
126 | dma_cookie_t cookie; | ||
127 | enum dma_status status; | ||
128 | int error; | ||
129 | struct dmatest_verify_result *vr; | ||
130 | }; | ||
131 | }; | ||
132 | |||
133 | struct dmatest_result { | ||
134 | struct list_head node; | ||
135 | char *name; | ||
136 | struct list_head results; | ||
137 | }; | ||
138 | |||
139 | struct dmatest_info; | ||
140 | |||
141 | struct dmatest_thread { | ||
142 | struct list_head node; | ||
143 | struct dmatest_info *info; | ||
144 | struct task_struct *task; | ||
145 | struct dma_chan *chan; | ||
146 | u8 **srcs; | ||
147 | u8 **dsts; | ||
148 | enum dma_transaction_type type; | ||
149 | bool done; | ||
150 | }; | ||
151 | 72 | ||
152 | struct dmatest_chan { | 73 | static bool verbose; |
153 | struct list_head node; | 74 | module_param(verbose, bool, S_IRUGO | S_IWUSR); |
154 | struct dma_chan *chan; | 75 | MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); |
155 | struct list_head threads; | ||
156 | }; | ||
157 | 76 | ||
158 | /** | 77 | /** |
159 | * struct dmatest_params - test parameters. | 78 | * struct dmatest_params - test parameters. |
@@ -177,6 +96,7 @@ struct dmatest_params { | |||
177 | unsigned int xor_sources; | 96 | unsigned int xor_sources; |
178 | unsigned int pq_sources; | 97 | unsigned int pq_sources; |
179 | int timeout; | 98 | int timeout; |
99 | bool noverify; | ||
180 | }; | 100 | }; |
181 | 101 | ||
182 | /** | 102 | /** |
@@ -184,7 +104,7 @@ struct dmatest_params { | |||
184 | * @params: test parameters | 104 | * @params: test parameters |
185 | * @lock: access protection to the fields of this structure | 105 | * @lock: access protection to the fields of this structure |
186 | */ | 106 | */ |
187 | struct dmatest_info { | 107 | static struct dmatest_info { |
188 | /* Test parameters */ | 108 | /* Test parameters */ |
189 | struct dmatest_params params; | 109 | struct dmatest_params params; |
190 | 110 | ||
@@ -192,16 +112,95 @@ struct dmatest_info { | |||
192 | struct list_head channels; | 112 | struct list_head channels; |
193 | unsigned int nr_channels; | 113 | unsigned int nr_channels; |
194 | struct mutex lock; | 114 | struct mutex lock; |
115 | bool did_init; | ||
116 | } test_info = { | ||
117 | .channels = LIST_HEAD_INIT(test_info.channels), | ||
118 | .lock = __MUTEX_INITIALIZER(test_info.lock), | ||
119 | }; | ||
120 | |||
121 | static int dmatest_run_set(const char *val, const struct kernel_param *kp); | ||
122 | static int dmatest_run_get(char *val, const struct kernel_param *kp); | ||
123 | static struct kernel_param_ops run_ops = { | ||
124 | .set = dmatest_run_set, | ||
125 | .get = dmatest_run_get, | ||
126 | }; | ||
127 | static bool dmatest_run; | ||
128 | module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); | ||
129 | MODULE_PARM_DESC(run, "Run the test (default: false)"); | ||
130 | |||
131 | /* Maximum amount of mismatched bytes in buffer to print */ | ||
132 | #define MAX_ERROR_COUNT 32 | ||
133 | |||
134 | /* | ||
135 | * Initialization patterns. All bytes in the source buffer has bit 7 | ||
136 | * set, all bytes in the destination buffer has bit 7 cleared. | ||
137 | * | ||
138 | * Bit 6 is set for all bytes which are to be copied by the DMA | ||
139 | * engine. Bit 5 is set for all bytes which are to be overwritten by | ||
140 | * the DMA engine. | ||
141 | * | ||
142 | * The remaining bits are the inverse of a counter which increments by | ||
143 | * one for each byte address. | ||
144 | */ | ||
145 | #define PATTERN_SRC 0x80 | ||
146 | #define PATTERN_DST 0x00 | ||
147 | #define PATTERN_COPY 0x40 | ||
148 | #define PATTERN_OVERWRITE 0x20 | ||
149 | #define PATTERN_COUNT_MASK 0x1f | ||
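A worked example of the pattern scheme above, assuming the existing dmatest_init_srcs() fill logic: a source-buffer byte inside the region to be copied, at buffer index i, is expected to hold

	expected = PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK);
	/* e.g. i = 2: 0x80 | 0x40 | 0x1d == 0xdd */

and dmatest_verify() below recomputes exactly this value when checking for corruption.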
195 | 150 | ||
196 | /* debugfs related stuff */ | 151 | struct dmatest_thread { |
197 | struct dentry *root; | 152 | struct list_head node; |
153 | struct dmatest_info *info; | ||
154 | struct task_struct *task; | ||
155 | struct dma_chan *chan; | ||
156 | u8 **srcs; | ||
157 | u8 **dsts; | ||
158 | enum dma_transaction_type type; | ||
159 | bool done; | ||
160 | }; | ||
198 | 161 | ||
199 | /* Test results */ | 162 | struct dmatest_chan { |
200 | struct list_head results; | 163 | struct list_head node; |
201 | struct mutex results_lock; | 164 | struct dma_chan *chan; |
165 | struct list_head threads; | ||
202 | }; | 166 | }; |
203 | 167 | ||
204 | static struct dmatest_info test_info; | 168 | static DECLARE_WAIT_QUEUE_HEAD(thread_wait); |
169 | static bool wait; | ||
170 | |||
171 | static bool is_threaded_test_run(struct dmatest_info *info) | ||
172 | { | ||
173 | struct dmatest_chan *dtc; | ||
174 | |||
175 | list_for_each_entry(dtc, &info->channels, node) { | ||
176 | struct dmatest_thread *thread; | ||
177 | |||
178 | list_for_each_entry(thread, &dtc->threads, node) { | ||
179 | if (!thread->done) | ||
180 | return true; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | return false; | ||
185 | } | ||
186 | |||
187 | static int dmatest_wait_get(char *val, const struct kernel_param *kp) | ||
188 | { | ||
189 | struct dmatest_info *info = &test_info; | ||
190 | struct dmatest_params *params = &info->params; | ||
191 | |||
192 | if (params->iterations) | ||
193 | wait_event(thread_wait, !is_threaded_test_run(info)); | ||
194 | wait = true; | ||
195 | return param_get_bool(val, kp); | ||
196 | } | ||
197 | |||
198 | static struct kernel_param_ops wait_ops = { | ||
199 | .get = dmatest_wait_get, | ||
200 | .set = param_set_bool, | ||
201 | }; | ||
202 | module_param_cb(wait, &wait_ops, &wait, S_IRUGO); | ||
203 | MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)"); | ||
205 | 204 | ||
206 | static bool dmatest_match_channel(struct dmatest_params *params, | 205 | static bool dmatest_match_channel(struct dmatest_params *params, |
207 | struct dma_chan *chan) | 206 | struct dma_chan *chan) |
@@ -223,7 +222,7 @@ static unsigned long dmatest_random(void) | |||
223 | { | 222 | { |
224 | unsigned long buf; | 223 | unsigned long buf; |
225 | 224 | ||
226 | get_random_bytes(&buf, sizeof(buf)); | 225 | prandom_bytes(&buf, sizeof(buf)); |
227 | return buf; | 226 | return buf; |
228 | } | 227 | } |
229 | 228 | ||
@@ -262,9 +261,31 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, | |||
262 | } | 261 | } |
263 | } | 262 | } |
264 | 263 | ||
265 | static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | 264 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, |
266 | unsigned int start, unsigned int end, unsigned int counter, | 265 | unsigned int counter, bool is_srcbuf) |
267 | u8 pattern, bool is_srcbuf) | 266 | { |
267 | u8 diff = actual ^ pattern; | ||
268 | u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); | ||
269 | const char *thread_name = current->comm; | ||
270 | |||
271 | if (is_srcbuf) | ||
272 | pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n", | ||
273 | thread_name, index, expected, actual); | ||
274 | else if ((pattern & PATTERN_COPY) | ||
275 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
276 | pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n", | ||
277 | thread_name, index, expected, actual); | ||
278 | else if (diff & PATTERN_SRC) | ||
279 | pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n", | ||
280 | thread_name, index, expected, actual); | ||
281 | else | ||
282 | pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n", | ||
283 | thread_name, index, expected, actual); | ||
284 | } | ||
285 | |||
286 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | ||
287 | unsigned int end, unsigned int counter, u8 pattern, | ||
288 | bool is_srcbuf) | ||
268 | { | 289 | { |
269 | unsigned int i; | 290 | unsigned int i; |
270 | unsigned int error_count = 0; | 291 | unsigned int error_count = 0; |
@@ -272,7 +293,6 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
272 | u8 expected; | 293 | u8 expected; |
273 | u8 *buf; | 294 | u8 *buf; |
274 | unsigned int counter_orig = counter; | 295 | unsigned int counter_orig = counter; |
275 | struct dmatest_verify_buffer *vb; | ||
276 | 296 | ||
277 | for (; (buf = *bufs); bufs++) { | 297 | for (; (buf = *bufs); bufs++) { |
278 | counter = counter_orig; | 298 | counter = counter_orig; |
@@ -280,12 +300,9 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
280 | actual = buf[i]; | 300 | actual = buf[i]; |
281 | expected = pattern | (~counter & PATTERN_COUNT_MASK); | 301 | expected = pattern | (~counter & PATTERN_COUNT_MASK); |
282 | if (actual != expected) { | 302 | if (actual != expected) { |
283 | if (error_count < MAX_ERROR_COUNT && vr) { | 303 | if (error_count < MAX_ERROR_COUNT) |
284 | vb = &vr->data[error_count]; | 304 | dmatest_mismatch(actual, pattern, i, |
285 | vb->index = i; | 305 | counter, is_srcbuf); |
286 | vb->expected = expected; | ||
287 | vb->actual = actual; | ||
288 | } | ||
289 | error_count++; | 306 | error_count++; |
290 | } | 307 | } |
291 | counter++; | 308 | counter++; |
@@ -293,7 +310,7 @@ static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, | |||
293 | } | 310 | } |
294 | 311 | ||
295 | if (error_count > MAX_ERROR_COUNT) | 312 | if (error_count > MAX_ERROR_COUNT) |
296 | pr_warning("%s: %u errors suppressed\n", | 313 | pr_warn("%s: %u errors suppressed\n", |
297 | current->comm, error_count - MAX_ERROR_COUNT); | 314 | current->comm, error_count - MAX_ERROR_COUNT); |
298 | 315 | ||
299 | return error_count; | 316 | return error_count; |
@@ -313,20 +330,6 @@ static void dmatest_callback(void *arg) | |||
313 | wake_up_all(done->wait); | 330 | wake_up_all(done->wait); |
314 | } | 331 | } |
315 | 332 | ||
316 | static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, | ||
317 | unsigned int count) | ||
318 | { | ||
319 | while (count--) | ||
320 | dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); | ||
321 | } | ||
322 | |||
323 | static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, | ||
324 | unsigned int count) | ||
325 | { | ||
326 | while (count--) | ||
327 | dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); | ||
328 | } | ||
329 | |||
330 | static unsigned int min_odd(unsigned int x, unsigned int y) | 333 | static unsigned int min_odd(unsigned int x, unsigned int y) |
331 | { | 334 | { |
332 | unsigned int val = min(x, y); | 335 | unsigned int val = min(x, y); |
@@ -334,172 +337,49 @@ static unsigned int min_odd(unsigned int x, unsigned int y) | |||
334 | return val % 2 ? val : val - 1; | 337 | return val % 2 ? val : val - 1; |
335 | } | 338 | } |
336 | 339 | ||
337 | static char *verify_result_get_one(struct dmatest_verify_result *vr, | 340 | static void result(const char *err, unsigned int n, unsigned int src_off, |
338 | unsigned int i) | 341 | unsigned int dst_off, unsigned int len, unsigned long data) |
339 | { | 342 | { |
340 | struct dmatest_verify_buffer *vb = &vr->data[i]; | 343 | pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", |
341 | u8 diff = vb->actual ^ vr->pattern; | 344 | current->comm, n, err, src_off, dst_off, len, data); |
342 | static char buf[512]; | ||
343 | char *msg; | ||
344 | |||
345 | if (vr->is_srcbuf) | ||
346 | msg = "srcbuf overwritten!"; | ||
347 | else if ((vr->pattern & PATTERN_COPY) | ||
348 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
349 | msg = "dstbuf not copied!"; | ||
350 | else if (diff & PATTERN_SRC) | ||
351 | msg = "dstbuf was copied!"; | ||
352 | else | ||
353 | msg = "dstbuf mismatch!"; | ||
354 | |||
355 | snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, | ||
356 | vb->index, vb->expected, vb->actual); | ||
357 | |||
358 | return buf; | ||
359 | } | 345 | } |
360 | 346 | ||
361 | static char *thread_result_get(const char *name, | 347 | static void dbg_result(const char *err, unsigned int n, unsigned int src_off, |
362 | struct dmatest_thread_result *tr) | 348 | unsigned int dst_off, unsigned int len, |
349 | unsigned long data) | ||
363 | { | 350 | { |
364 | static const char * const messages[] = { | 351 | pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", |
365 | [DMATEST_ET_OK] = "No errors", | 352 | current->comm, n, err, src_off, dst_off, len, data); |
366 | [DMATEST_ET_MAP_SRC] = "src mapping error", | ||
367 | [DMATEST_ET_MAP_DST] = "dst mapping error", | ||
368 | [DMATEST_ET_PREP] = "prep error", | ||
369 | [DMATEST_ET_SUBMIT] = "submit error", | ||
370 | [DMATEST_ET_TIMEOUT] = "test timed out", | ||
371 | [DMATEST_ET_DMA_ERROR] = | ||
372 | "got completion callback (DMA_ERROR)", | ||
373 | [DMATEST_ET_DMA_IN_PROGRESS] = | ||
374 | "got completion callback (DMA_IN_PROGRESS)", | ||
375 | [DMATEST_ET_VERIFY] = "errors", | ||
376 | [DMATEST_ET_VERIFY_BUF] = "verify errors", | ||
377 | }; | ||
378 | static char buf[512]; | ||
379 | |||
380 | snprintf(buf, sizeof(buf) - 1, | ||
381 | "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", | ||
382 | name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, | ||
383 | tr->len, tr->data); | ||
384 | |||
385 | return buf; | ||
386 | } | 353 | } |
387 | 354 | ||
388 | static int thread_result_add(struct dmatest_info *info, | 355 | #define verbose_result(err, n, src_off, dst_off, len, data) ({ \ |
389 | struct dmatest_result *r, enum dmatest_error_type type, | 356 | if (verbose) \ |
390 | unsigned int n, unsigned int src_off, unsigned int dst_off, | 357 | result(err, n, src_off, dst_off, len, data); \ |
391 | unsigned int len, unsigned long data) | 358 | else \ |
392 | { | 359 | dbg_result(err, n, src_off, dst_off, len, data); \ |
393 | struct dmatest_thread_result *tr; | 360 | }) |
394 | |||
395 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); | ||
396 | if (!tr) | ||
397 | return -ENOMEM; | ||
398 | |||
399 | tr->type = type; | ||
400 | tr->n = n; | ||
401 | tr->src_off = src_off; | ||
402 | tr->dst_off = dst_off; | ||
403 | tr->len = len; | ||
404 | tr->data = data; | ||
405 | 361 | ||
406 | mutex_lock(&info->results_lock); | 362 | static unsigned long long dmatest_persec(s64 runtime, unsigned int val) |
407 | list_add_tail(&tr->node, &r->results); | ||
408 | mutex_unlock(&info->results_lock); | ||
409 | |||
410 | if (tr->type == DMATEST_ET_OK) | ||
411 | pr_debug("%s\n", thread_result_get(r->name, tr)); | ||
412 | else | ||
413 | pr_warn("%s\n", thread_result_get(r->name, tr)); | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | static unsigned int verify_result_add(struct dmatest_info *info, | ||
419 | struct dmatest_result *r, unsigned int n, | ||
420 | unsigned int src_off, unsigned int dst_off, unsigned int len, | ||
421 | u8 **bufs, int whence, unsigned int counter, u8 pattern, | ||
422 | bool is_srcbuf) | ||
423 | { | 363 | { |
424 | struct dmatest_verify_result *vr; | 364 | unsigned long long per_sec = 1000000; |
425 | unsigned int error_count; | ||
426 | unsigned int buf_off = is_srcbuf ? src_off : dst_off; | ||
427 | unsigned int start, end; | ||
428 | |||
429 | if (whence < 0) { | ||
430 | start = 0; | ||
431 | end = buf_off; | ||
432 | } else if (whence > 0) { | ||
433 | start = buf_off + len; | ||
434 | end = info->params.buf_size; | ||
435 | } else { | ||
436 | start = buf_off; | ||
437 | end = buf_off + len; | ||
438 | } | ||
439 | 365 | ||
440 | vr = kmalloc(sizeof(*vr), GFP_KERNEL); | 366 | if (runtime <= 0) |
441 | if (!vr) { | 367 | return 0; |
442 | pr_warn("dmatest: No memory to store verify result\n"); | ||
443 | return dmatest_verify(NULL, bufs, start, end, counter, pattern, | ||
444 | is_srcbuf); | ||
445 | } | ||
446 | |||
447 | vr->pattern = pattern; | ||
448 | vr->is_srcbuf = is_srcbuf; | ||
449 | |||
450 | error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, | ||
451 | is_srcbuf); | ||
452 | if (error_count) { | ||
453 | vr->error_count = error_count; | ||
454 | thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, | ||
455 | dst_off, len, (unsigned long)vr); | ||
456 | return error_count; | ||
457 | } | ||
458 | |||
459 | kfree(vr); | ||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static void result_free(struct dmatest_info *info, const char *name) | ||
464 | { | ||
465 | struct dmatest_result *r, *_r; | ||
466 | |||
467 | mutex_lock(&info->results_lock); | ||
468 | list_for_each_entry_safe(r, _r, &info->results, node) { | ||
469 | struct dmatest_thread_result *tr, *_tr; | ||
470 | |||
471 | if (name && strcmp(r->name, name)) | ||
472 | continue; | ||
473 | |||
474 | list_for_each_entry_safe(tr, _tr, &r->results, node) { | ||
475 | if (tr->type == DMATEST_ET_VERIFY_BUF) | ||
476 | kfree(tr->vr); | ||
477 | list_del(&tr->node); | ||
478 | kfree(tr); | ||
479 | } | ||
480 | 368 | ||
481 | kfree(r->name); | 369 | /* drop precision until runtime is 32-bits */ |
482 | list_del(&r->node); | 370 | while (runtime > UINT_MAX) { |
483 | kfree(r); | 371 | runtime >>= 1; |
372 | per_sec <<= 1; | ||
484 | } | 373 | } |
485 | 374 | ||
486 | mutex_unlock(&info->results_lock); | 375 | per_sec *= val; |
376 | do_div(per_sec, runtime); | ||
377 | return per_sec; | ||
487 | } | 378 | } |
488 | 379 | ||
489 | static struct dmatest_result *result_init(struct dmatest_info *info, | 380 | static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) |
490 | const char *name) | ||
491 | { | 381 | { |
492 | struct dmatest_result *r; | 382 | return dmatest_persec(runtime, len >> 10); |
493 | |||
494 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
495 | if (r) { | ||
496 | r->name = kstrdup(name, GFP_KERNEL); | ||
497 | INIT_LIST_HEAD(&r->results); | ||
498 | mutex_lock(&info->results_lock); | ||
499 | list_add_tail(&r->node, &info->results); | ||
500 | mutex_unlock(&info->results_lock); | ||
501 | } | ||
502 | return r; | ||
503 | } | 383 | } |
504 | 384 | ||
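A worked example of the new throughput accounting, with made-up numbers: 5000 completed tests over runtime = 2,500,000 us give dmatest_persec() = 5000 * 1,000,000 / 2,500,000 = 2000 iops, and with total_len = 1 GiB dmatest_KBs() yields (1073741824 >> 10) * 1,000,000 / 2,500,000, roughly 419,430 KB/s. The halving loop only matters once runtime exceeds UINT_MAX microseconds (about 71 minutes); it trades precision for a divisor that fits do_div().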
505 | /* | 385 | /* |
@@ -525,7 +405,6 @@ static int dmatest_func(void *data) | |||
525 | struct dmatest_params *params; | 405 | struct dmatest_params *params; |
526 | struct dma_chan *chan; | 406 | struct dma_chan *chan; |
527 | struct dma_device *dev; | 407 | struct dma_device *dev; |
528 | const char *thread_name; | ||
529 | unsigned int src_off, dst_off, len; | 408 | unsigned int src_off, dst_off, len; |
530 | unsigned int error_count; | 409 | unsigned int error_count; |
531 | unsigned int failed_tests = 0; | 410 | unsigned int failed_tests = 0; |
@@ -538,9 +417,10 @@ static int dmatest_func(void *data) | |||
538 | int src_cnt; | 417 | int src_cnt; |
539 | int dst_cnt; | 418 | int dst_cnt; |
540 | int i; | 419 | int i; |
541 | struct dmatest_result *result; | 420 | ktime_t ktime; |
421 | s64 runtime = 0; | ||
422 | unsigned long long total_len = 0; | ||
542 | 423 | ||
543 | thread_name = current->comm; | ||
544 | set_freezable(); | 424 | set_freezable(); |
545 | 425 | ||
546 | ret = -ENOMEM; | 426 | ret = -ENOMEM; |
@@ -570,10 +450,6 @@ static int dmatest_func(void *data) | |||
570 | } else | 450 | } else |
571 | goto err_thread_type; | 451 | goto err_thread_type; |
572 | 452 | ||
573 | result = result_init(info, thread_name); | ||
574 | if (!result) | ||
575 | goto err_srcs; | ||
576 | |||
577 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); | 453 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); |
578 | if (!thread->srcs) | 454 | if (!thread->srcs) |
579 | goto err_srcs; | 455 | goto err_srcs; |
@@ -597,17 +473,17 @@ static int dmatest_func(void *data) | |||
597 | set_user_nice(current, 10); | 473 | set_user_nice(current, 10); |
598 | 474 | ||
599 | /* | 475 | /* |
600 | * src buffers are freed by the DMAEngine code with dma_unmap_single() | 476 | * src and dst buffers are freed by ourselves below |
601 | * dst buffers are freed by ourselves below | ||
602 | */ | 477 | */ |
603 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | 478 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
604 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; | ||
605 | 479 | ||
480 | ktime = ktime_get(); | ||
606 | while (!kthread_should_stop() | 481 | while (!kthread_should_stop() |
607 | && !(params->iterations && total_tests >= params->iterations)) { | 482 | && !(params->iterations && total_tests >= params->iterations)) { |
608 | struct dma_async_tx_descriptor *tx = NULL; | 483 | struct dma_async_tx_descriptor *tx = NULL; |
609 | dma_addr_t dma_srcs[src_cnt]; | 484 | struct dmaengine_unmap_data *um; |
610 | dma_addr_t dma_dsts[dst_cnt]; | 485 | dma_addr_t srcs[src_cnt]; |
486 | dma_addr_t *dsts; | ||
611 | u8 align = 0; | 487 | u8 align = 0; |
612 | 488 | ||
613 | total_tests++; | 489 | total_tests++; |
@@ -626,81 +502,103 @@ static int dmatest_func(void *data) | |||
626 | break; | 502 | break; |
627 | } | 503 | } |
628 | 504 | ||
629 | len = dmatest_random() % params->buf_size + 1; | 505 | if (params->noverify) { |
506 | len = params->buf_size; | ||
507 | src_off = 0; | ||
508 | dst_off = 0; | ||
509 | } else { | ||
510 | len = dmatest_random() % params->buf_size + 1; | ||
511 | len = (len >> align) << align; | ||
512 | if (!len) | ||
513 | len = 1 << align; | ||
514 | src_off = dmatest_random() % (params->buf_size - len + 1); | ||
515 | dst_off = dmatest_random() % (params->buf_size - len + 1); | ||
516 | |||
517 | src_off = (src_off >> align) << align; | ||
518 | dst_off = (dst_off >> align) << align; | ||
519 | |||
520 | dmatest_init_srcs(thread->srcs, src_off, len, | ||
521 | params->buf_size); | ||
522 | dmatest_init_dsts(thread->dsts, dst_off, len, | ||
523 | params->buf_size); | ||
524 | } | ||
525 | |||
630 | len = (len >> align) << align; | 526 | len = (len >> align) << align; |
631 | if (!len) | 527 | if (!len) |
632 | len = 1 << align; | 528 | len = 1 << align; |
633 | src_off = dmatest_random() % (params->buf_size - len + 1); | 529 | total_len += len; |
634 | dst_off = dmatest_random() % (params->buf_size - len + 1); | ||
635 | 530 | ||
636 | src_off = (src_off >> align) << align; | 531 | um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, |
637 | dst_off = (dst_off >> align) << align; | 532 | GFP_KERNEL); |
638 | 533 | if (!um) { | |
639 | dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); | 534 | failed_tests++; |
640 | dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); | 535 | result("unmap data NULL", total_tests, |
536 | src_off, dst_off, len, ret); | ||
537 | continue; | ||
538 | } | ||
641 | 539 | ||
540 | um->len = params->buf_size; | ||
642 | for (i = 0; i < src_cnt; i++) { | 541 | for (i = 0; i < src_cnt; i++) { |
643 | u8 *buf = thread->srcs[i] + src_off; | 542 | unsigned long buf = (unsigned long) thread->srcs[i]; |
644 | 543 | struct page *pg = virt_to_page(buf); | |
645 | dma_srcs[i] = dma_map_single(dev->dev, buf, len, | 544 | unsigned pg_off = buf & ~PAGE_MASK; |
646 | DMA_TO_DEVICE); | 545 | |
647 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); | 546 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
547 | um->len, DMA_TO_DEVICE); | ||
548 | srcs[i] = um->addr[i] + src_off; | ||
549 | ret = dma_mapping_error(dev->dev, um->addr[i]); | ||
648 | if (ret) { | 550 | if (ret) { |
649 | unmap_src(dev->dev, dma_srcs, len, i); | 551 | dmaengine_unmap_put(um); |
650 | thread_result_add(info, result, | 552 | result("src mapping error", total_tests, |
651 | DMATEST_ET_MAP_SRC, | 553 | src_off, dst_off, len, ret); |
652 | total_tests, src_off, dst_off, | ||
653 | len, ret); | ||
654 | failed_tests++; | 554 | failed_tests++; |
655 | continue; | 555 | continue; |
656 | } | 556 | } |
557 | um->to_cnt++; | ||
657 | } | 558 | } |
658 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 559 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
560 | dsts = &um->addr[src_cnt]; | ||
659 | for (i = 0; i < dst_cnt; i++) { | 561 | for (i = 0; i < dst_cnt; i++) { |
660 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], | 562 | unsigned long buf = (unsigned long) thread->dsts[i]; |
661 | params->buf_size, | 563 | struct page *pg = virt_to_page(buf); |
662 | DMA_BIDIRECTIONAL); | 564 | unsigned pg_off = buf & ~PAGE_MASK; |
663 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); | 565 | |
566 | dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len, | ||
567 | DMA_BIDIRECTIONAL); | ||
568 | ret = dma_mapping_error(dev->dev, dsts[i]); | ||
664 | if (ret) { | 569 | if (ret) { |
665 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 570 | dmaengine_unmap_put(um); |
666 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 571 | result("dst mapping error", total_tests, |
667 | i); | 572 | src_off, dst_off, len, ret); |
668 | thread_result_add(info, result, | ||
669 | DMATEST_ET_MAP_DST, | ||
670 | total_tests, src_off, dst_off, | ||
671 | len, ret); | ||
672 | failed_tests++; | 573 | failed_tests++; |
673 | continue; | 574 | continue; |
674 | } | 575 | } |
576 | um->bidi_cnt++; | ||
675 | } | 577 | } |
676 | 578 | ||
677 | if (thread->type == DMA_MEMCPY) | 579 | if (thread->type == DMA_MEMCPY) |
678 | tx = dev->device_prep_dma_memcpy(chan, | 580 | tx = dev->device_prep_dma_memcpy(chan, |
679 | dma_dsts[0] + dst_off, | 581 | dsts[0] + dst_off, |
680 | dma_srcs[0], len, | 582 | srcs[0], len, flags); |
681 | flags); | ||
682 | else if (thread->type == DMA_XOR) | 583 | else if (thread->type == DMA_XOR) |
683 | tx = dev->device_prep_dma_xor(chan, | 584 | tx = dev->device_prep_dma_xor(chan, |
684 | dma_dsts[0] + dst_off, | 585 | dsts[0] + dst_off, |
685 | dma_srcs, src_cnt, | 586 | srcs, src_cnt, |
686 | len, flags); | 587 | len, flags); |
687 | else if (thread->type == DMA_PQ) { | 588 | else if (thread->type == DMA_PQ) { |
688 | dma_addr_t dma_pq[dst_cnt]; | 589 | dma_addr_t dma_pq[dst_cnt]; |
689 | 590 | ||
690 | for (i = 0; i < dst_cnt; i++) | 591 | for (i = 0; i < dst_cnt; i++) |
691 | dma_pq[i] = dma_dsts[i] + dst_off; | 592 | dma_pq[i] = dsts[i] + dst_off; |
692 | tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, | 593 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, |
693 | src_cnt, pq_coefs, | 594 | src_cnt, pq_coefs, |
694 | len, flags); | 595 | len, flags); |
695 | } | 596 | } |
696 | 597 | ||
697 | if (!tx) { | 598 | if (!tx) { |
698 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 599 | dmaengine_unmap_put(um); |
699 | unmap_dst(dev->dev, dma_dsts, params->buf_size, | 600 | result("prep error", total_tests, src_off, |
700 | dst_cnt); | 601 | dst_off, len, ret); |
701 | thread_result_add(info, result, DMATEST_ET_PREP, | ||
702 | total_tests, src_off, dst_off, | ||
703 | len, 0); | ||
704 | msleep(100); | 602 | msleep(100); |
705 | failed_tests++; | 603 | failed_tests++; |
706 | continue; | 604 | continue; |
@@ -712,9 +610,9 @@ static int dmatest_func(void *data) | |||
712 | cookie = tx->tx_submit(tx); | 610 | cookie = tx->tx_submit(tx); |
713 | 611 | ||
714 | if (dma_submit_error(cookie)) { | 612 | if (dma_submit_error(cookie)) { |
715 | thread_result_add(info, result, DMATEST_ET_SUBMIT, | 613 | dmaengine_unmap_put(um); |
716 | total_tests, src_off, dst_off, | 614 | result("submit error", total_tests, src_off, |
717 | len, cookie); | 615 | dst_off, len, ret); |
718 | msleep(100); | 616 | msleep(100); |
719 | failed_tests++; | 617 | failed_tests++; |
720 | continue; | 618 | continue; |
@@ -735,59 +633,59 @@ static int dmatest_func(void *data) | |||
735 | * free it this time?" dancing. For now, just | 633 | * free it this time?" dancing. For now, just |
736 | * leave it dangling. | 634 | * leave it dangling. |
737 | */ | 635 | */ |
738 | thread_result_add(info, result, DMATEST_ET_TIMEOUT, | 636 | dmaengine_unmap_put(um); |
739 | total_tests, src_off, dst_off, | 637 | result("test timed out", total_tests, src_off, dst_off, |
740 | len, 0); | 638 | len, 0); |
741 | failed_tests++; | 639 | failed_tests++; |
742 | continue; | 640 | continue; |
743 | } else if (status != DMA_SUCCESS) { | 641 | } else if (status != DMA_COMPLETE) { |
744 | enum dmatest_error_type type = (status == DMA_ERROR) ? | 642 | dmaengine_unmap_put(um); |
745 | DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; | 643 | result(status == DMA_ERROR ? |
746 | thread_result_add(info, result, type, | 644 | "completion error status" : |
747 | total_tests, src_off, dst_off, | 645 | "completion busy status", total_tests, src_off, |
748 | len, status); | 646 | dst_off, len, ret); |
749 | failed_tests++; | 647 | failed_tests++; |
750 | continue; | 648 | continue; |
751 | } | 649 | } |
752 | 650 | ||
753 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ | 651 | dmaengine_unmap_put(um); |
754 | unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); | ||
755 | 652 | ||
756 | error_count = 0; | 653 | if (params->noverify) { |
654 | verbose_result("test passed", total_tests, src_off, | ||
655 | dst_off, len, 0); | ||
656 | continue; | ||
657 | } | ||
757 | 658 | ||
758 | pr_debug("%s: verifying source buffer...\n", thread_name); | 659 | pr_debug("%s: verifying source buffer...\n", current->comm); |
759 | error_count += verify_result_add(info, result, total_tests, | 660 | error_count = dmatest_verify(thread->srcs, 0, src_off, |
760 | src_off, dst_off, len, thread->srcs, -1, | ||
761 | 0, PATTERN_SRC, true); | 661 | 0, PATTERN_SRC, true); |
762 | error_count += verify_result_add(info, result, total_tests, | 662 | error_count += dmatest_verify(thread->srcs, src_off, |
763 | src_off, dst_off, len, thread->srcs, 0, | 663 | src_off + len, src_off, |
764 | src_off, PATTERN_SRC | PATTERN_COPY, true); | 664 | PATTERN_SRC | PATTERN_COPY, true); |
765 | error_count += verify_result_add(info, result, total_tests, | 665 | error_count += dmatest_verify(thread->srcs, src_off + len, |
766 | src_off, dst_off, len, thread->srcs, 1, | 666 | params->buf_size, src_off + len, |
767 | src_off + len, PATTERN_SRC, true); | 667 | PATTERN_SRC, true); |
768 | 668 | ||
769 | pr_debug("%s: verifying dest buffer...\n", thread_name); | 669 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
770 | error_count += verify_result_add(info, result, total_tests, | 670 | error_count += dmatest_verify(thread->dsts, 0, dst_off, |
771 | src_off, dst_off, len, thread->dsts, -1, | ||
772 | 0, PATTERN_DST, false); | 671 | 0, PATTERN_DST, false); |
773 | error_count += verify_result_add(info, result, total_tests, | 672 | error_count += dmatest_verify(thread->dsts, dst_off, |
774 | src_off, dst_off, len, thread->dsts, 0, | 673 | dst_off + len, src_off, |
775 | src_off, PATTERN_SRC | PATTERN_COPY, false); | 674 | PATTERN_SRC | PATTERN_COPY, false); |
776 | error_count += verify_result_add(info, result, total_tests, | 675 | error_count += dmatest_verify(thread->dsts, dst_off + len, |
777 | src_off, dst_off, len, thread->dsts, 1, | 676 | params->buf_size, dst_off + len, |
778 | dst_off + len, PATTERN_DST, false); | 677 | PATTERN_DST, false); |
779 | 678 | ||
780 | if (error_count) { | 679 | if (error_count) { |
781 | thread_result_add(info, result, DMATEST_ET_VERIFY, | 680 | result("data error", total_tests, src_off, dst_off, |
782 | total_tests, src_off, dst_off, | 681 | len, error_count); |
783 | len, error_count); | ||
784 | failed_tests++; | 682 | failed_tests++; |
785 | } else { | 683 | } else { |
786 | thread_result_add(info, result, DMATEST_ET_OK, | 684 | verbose_result("test passed", total_tests, src_off, |
787 | total_tests, src_off, dst_off, | 685 | dst_off, len, 0); |
788 | len, 0); | ||
789 | } | 686 | } |
790 | } | 687 | } |
688 | runtime = ktime_us_delta(ktime_get(), ktime); | ||
791 | 689 | ||
792 | ret = 0; | 690 | ret = 0; |
793 | for (i = 0; thread->dsts[i]; i++) | 691 | for (i = 0; thread->dsts[i]; i++) |
@@ -802,20 +700,17 @@ err_srcbuf: | |||
802 | err_srcs: | 700 | err_srcs: |
803 | kfree(pq_coefs); | 701 | kfree(pq_coefs); |
804 | err_thread_type: | 702 | err_thread_type: |
805 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", | 703 | pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", |
806 | thread_name, total_tests, failed_tests, ret); | 704 | current->comm, total_tests, failed_tests, |
705 | dmatest_persec(runtime, total_tests), | ||
706 | dmatest_KBs(runtime, total_len), ret); | ||
807 | 707 | ||
808 | /* terminate all transfers on specified channels */ | 708 | /* terminate all transfers on specified channels */ |
809 | if (ret) | 709 | if (ret) |
810 | dmaengine_terminate_all(chan); | 710 | dmaengine_terminate_all(chan); |
811 | 711 | ||
812 | thread->done = true; | 712 | thread->done = true; |
813 | 713 | wake_up(&thread_wait); | |
814 | if (params->iterations > 0) | ||
815 | while (!kthread_should_stop()) { | ||
816 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | ||
817 | interruptible_sleep_on(&wait_dmatest_exit); | ||
818 | } | ||
819 | 714 | ||
820 | return ret; | 715 | return ret; |
821 | } | 716 | } |
@@ -828,9 +723,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
828 | 723 | ||
829 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { | 724 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { |
830 | ret = kthread_stop(thread->task); | 725 | ret = kthread_stop(thread->task); |
831 | pr_debug("dmatest: thread %s exited with status %d\n", | 726 | pr_debug("thread %s exited with status %d\n", |
832 | thread->task->comm, ret); | 727 | thread->task->comm, ret); |
833 | list_del(&thread->node); | 728 | list_del(&thread->node); |
729 | put_task_struct(thread->task); | ||
834 | kfree(thread); | 730 | kfree(thread); |
835 | } | 731 | } |
836 | 732 | ||
@@ -861,27 +757,27 @@ static int dmatest_add_threads(struct dmatest_info *info, | |||
861 | for (i = 0; i < params->threads_per_chan; i++) { | 757 | for (i = 0; i < params->threads_per_chan; i++) { |
862 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 758 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
863 | if (!thread) { | 759 | if (!thread) { |
864 | pr_warning("dmatest: No memory for %s-%s%u\n", | 760 | pr_warn("No memory for %s-%s%u\n", |
865 | dma_chan_name(chan), op, i); | 761 | dma_chan_name(chan), op, i); |
866 | |||
867 | break; | 762 | break; |
868 | } | 763 | } |
869 | thread->info = info; | 764 | thread->info = info; |
870 | thread->chan = dtc->chan; | 765 | thread->chan = dtc->chan; |
871 | thread->type = type; | 766 | thread->type = type; |
872 | smp_wmb(); | 767 | smp_wmb(); |
873 | thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", | 768 | thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", |
874 | dma_chan_name(chan), op, i); | 769 | dma_chan_name(chan), op, i); |
875 | if (IS_ERR(thread->task)) { | 770 | if (IS_ERR(thread->task)) { |
876 | pr_warning("dmatest: Failed to run thread %s-%s%u\n", | 771 | pr_warn("Failed to create thread %s-%s%u\n", |
877 | dma_chan_name(chan), op, i); | 772 | dma_chan_name(chan), op, i); |
878 | kfree(thread); | 773 | kfree(thread); |
879 | break; | 774 | break; |
880 | } | 775 | } |
881 | 776 | ||
882 | /* srcbuf and dstbuf are allocated by the thread itself */ | 777 | /* srcbuf and dstbuf are allocated by the thread itself */ |
883 | 778 | get_task_struct(thread->task); | |
884 | list_add_tail(&thread->node, &dtc->threads); | 779 | list_add_tail(&thread->node, &dtc->threads); |
780 | wake_up_process(thread->task); | ||
885 | } | 781 | } |
886 | 782 | ||
887 | return i; | 783 | return i; |
@@ -897,7 +793,7 @@ static int dmatest_add_channel(struct dmatest_info *info, | |||
897 | 793 | ||
898 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); | 794 | dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); |
899 | if (!dtc) { | 795 | if (!dtc) { |
900 | pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); | 796 | pr_warn("No memory for %s\n", dma_chan_name(chan)); |
901 | return -ENOMEM; | 797 | return -ENOMEM; |
902 | } | 798 | } |
903 | 799 | ||
@@ -917,7 +813,7 @@ static int dmatest_add_channel(struct dmatest_info *info, | |||
917 | thread_count += cnt > 0 ? cnt : 0; | 813 | thread_count += cnt > 0 ? cnt : 0; |
918 | } | 814 | } |
919 | 815 | ||
920 | pr_info("dmatest: Started %u threads using %s\n", | 816 | pr_info("Started %u threads using %s\n", |
921 | thread_count, dma_chan_name(chan)); | 817 | thread_count, dma_chan_name(chan)); |
922 | 818 | ||
923 | list_add_tail(&dtc->node, &info->channels); | 819 | list_add_tail(&dtc->node, &info->channels); |
@@ -937,20 +833,20 @@ static bool filter(struct dma_chan *chan, void *param) | |||
937 | return true; | 833 | return true; |
938 | } | 834 | } |
939 | 835 | ||
940 | static int __run_threaded_test(struct dmatest_info *info) | 836 | static void request_channels(struct dmatest_info *info, |
837 | enum dma_transaction_type type) | ||
941 | { | 838 | { |
942 | dma_cap_mask_t mask; | 839 | dma_cap_mask_t mask; |
943 | struct dma_chan *chan; | ||
944 | struct dmatest_params *params = &info->params; | ||
945 | int err = 0; | ||
946 | 840 | ||
947 | dma_cap_zero(mask); | 841 | dma_cap_zero(mask); |
948 | dma_cap_set(DMA_MEMCPY, mask); | 842 | dma_cap_set(type, mask); |
949 | for (;;) { | 843 | for (;;) { |
844 | struct dmatest_params *params = &info->params; | ||
845 | struct dma_chan *chan; | ||
846 | |||
950 | chan = dma_request_channel(mask, filter, params); | 847 | chan = dma_request_channel(mask, filter, params); |
951 | if (chan) { | 848 | if (chan) { |
952 | err = dmatest_add_channel(info, chan); | 849 | if (dmatest_add_channel(info, chan)) { |
953 | if (err) { | ||
954 | dma_release_channel(chan); | 850 | dma_release_channel(chan); |
955 | break; /* add_channel failed, punt */ | 851 | break; /* add_channel failed, punt */ |
956 | } | 852 | } |
@@ -960,22 +856,30 @@ static int __run_threaded_test(struct dmatest_info *info) | |||
960 | info->nr_channels >= params->max_channels) | 856 | info->nr_channels >= params->max_channels) |
961 | break; /* we have all we need */ | 857 | break; /* we have all we need */ |
962 | } | 858 | } |
963 | return err; | ||
964 | } | 859 | } |
965 | 860 | ||
966 | #ifndef MODULE | 861 | static void run_threaded_test(struct dmatest_info *info) |
967 | static int run_threaded_test(struct dmatest_info *info) | ||
968 | { | 862 | { |
969 | int ret; | 863 | struct dmatest_params *params = &info->params; |
970 | 864 | ||
971 | mutex_lock(&info->lock); | 865 | /* Copy test parameters */ |
972 | ret = __run_threaded_test(info); | 866 | params->buf_size = test_buf_size; |
973 | mutex_unlock(&info->lock); | 867 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); |
974 | return ret; | 868 | strlcpy(params->device, strim(test_device), sizeof(params->device)); |
869 | params->threads_per_chan = threads_per_chan; | ||
870 | params->max_channels = max_channels; | ||
871 | params->iterations = iterations; | ||
872 | params->xor_sources = xor_sources; | ||
873 | params->pq_sources = pq_sources; | ||
874 | params->timeout = timeout; | ||
875 | params->noverify = noverify; | ||
876 | |||
877 | request_channels(info, DMA_MEMCPY); | ||
878 | request_channels(info, DMA_XOR); | ||
879 | request_channels(info, DMA_PQ); | ||
975 | } | 880 | } |
976 | #endif | ||
977 | 881 | ||
978 | static void __stop_threaded_test(struct dmatest_info *info) | 882 | static void stop_threaded_test(struct dmatest_info *info) |
979 | { | 883 | { |
980 | struct dmatest_chan *dtc, *_dtc; | 884 | struct dmatest_chan *dtc, *_dtc; |
981 | struct dma_chan *chan; | 885 | struct dma_chan *chan; |
@@ -984,203 +888,86 @@ static void __stop_threaded_test(struct dmatest_info *info) | |||
984 | list_del(&dtc->node); | 888 | list_del(&dtc->node); |
985 | chan = dtc->chan; | 889 | chan = dtc->chan; |
986 | dmatest_cleanup_channel(dtc); | 890 | dmatest_cleanup_channel(dtc); |
987 | pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); | 891 | pr_debug("dropped channel %s\n", dma_chan_name(chan)); |
988 | dma_release_channel(chan); | 892 | dma_release_channel(chan); |
989 | } | 893 | } |
990 | 894 | ||
991 | info->nr_channels = 0; | 895 | info->nr_channels = 0; |
992 | } | 896 | } |
993 | 897 | ||
994 | static void stop_threaded_test(struct dmatest_info *info) | 898 | static void restart_threaded_test(struct dmatest_info *info, bool run) |
995 | { | 899 | { |
996 | mutex_lock(&info->lock); | 900 | /* we might be called early to set run=, defer running until all |
997 | __stop_threaded_test(info); | 901 | * parameters have been evaluated |
998 | mutex_unlock(&info->lock); | 902 | */ |
999 | } | 903 | if (!info->did_init) |
1000 | 904 | return; | |
1001 | static int __restart_threaded_test(struct dmatest_info *info, bool run) | ||
1002 | { | ||
1003 | struct dmatest_params *params = &info->params; | ||
1004 | 905 | ||
1005 | /* Stop any running test first */ | 906 | /* Stop any running test first */ |
1006 | __stop_threaded_test(info); | 907 | stop_threaded_test(info); |
1007 | |||
1008 | if (run == false) | ||
1009 | return 0; | ||
1010 | |||
1011 | /* Clear results from previous run */ | ||
1012 | result_free(info, NULL); | ||
1013 | |||
1014 | /* Copy test parameters */ | ||
1015 | params->buf_size = test_buf_size; | ||
1016 | strlcpy(params->channel, strim(test_channel), sizeof(params->channel)); | ||
1017 | strlcpy(params->device, strim(test_device), sizeof(params->device)); | ||
1018 | params->threads_per_chan = threads_per_chan; | ||
1019 | params->max_channels = max_channels; | ||
1020 | params->iterations = iterations; | ||
1021 | params->xor_sources = xor_sources; | ||
1022 | params->pq_sources = pq_sources; | ||
1023 | params->timeout = timeout; | ||
1024 | 908 | ||
1025 | /* Run test with new parameters */ | 909 | /* Run test with new parameters */ |
1026 | return __run_threaded_test(info); | 910 | run_threaded_test(info); |
1027 | } | ||
1028 | |||
1029 | static bool __is_threaded_test_run(struct dmatest_info *info) | ||
1030 | { | ||
1031 | struct dmatest_chan *dtc; | ||
1032 | |||
1033 | list_for_each_entry(dtc, &info->channels, node) { | ||
1034 | struct dmatest_thread *thread; | ||
1035 | |||
1036 | list_for_each_entry(thread, &dtc->threads, node) { | ||
1037 | if (!thread->done) | ||
1038 | return true; | ||
1039 | } | ||
1040 | } | ||
1041 | |||
1042 | return false; | ||
1043 | } | 911 | } |
1044 | 912 | ||
1045 | static ssize_t dtf_read_run(struct file *file, char __user *user_buf, | 913 | static int dmatest_run_get(char *val, const struct kernel_param *kp) |
1046 | size_t count, loff_t *ppos) | ||
1047 | { | 914 | { |
1048 | struct dmatest_info *info = file->private_data; | 915 | struct dmatest_info *info = &test_info; |
1049 | char buf[3]; | ||
1050 | 916 | ||
1051 | mutex_lock(&info->lock); | 917 | mutex_lock(&info->lock); |
1052 | 918 | if (is_threaded_test_run(info)) { | |
1053 | if (__is_threaded_test_run(info)) { | 919 | dmatest_run = true; |
1054 | buf[0] = 'Y'; | ||
1055 | } else { | 920 | } else { |
1056 | __stop_threaded_test(info); | 921 | stop_threaded_test(info); |
1057 | buf[0] = 'N'; | 922 | dmatest_run = false; |
1058 | } | 923 | } |
1059 | |||
1060 | mutex_unlock(&info->lock); | 924 | mutex_unlock(&info->lock); |
1061 | buf[1] = '\n'; | ||
1062 | buf[2] = 0x00; | ||
1063 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); | ||
1064 | } | ||
1065 | |||
1066 | static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, | ||
1067 | size_t count, loff_t *ppos) | ||
1068 | { | ||
1069 | struct dmatest_info *info = file->private_data; | ||
1070 | char buf[16]; | ||
1071 | bool bv; | ||
1072 | int ret = 0; | ||
1073 | 925 | ||
1074 | if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) | 926 | return param_get_bool(val, kp); |
1075 | return -EFAULT; | ||
1076 | |||
1077 | if (strtobool(buf, &bv) == 0) { | ||
1078 | mutex_lock(&info->lock); | ||
1079 | |||
1080 | if (__is_threaded_test_run(info)) | ||
1081 | ret = -EBUSY; | ||
1082 | else | ||
1083 | ret = __restart_threaded_test(info, bv); | ||
1084 | |||
1085 | mutex_unlock(&info->lock); | ||
1086 | } | ||
1087 | |||
1088 | return ret ? ret : count; | ||
1089 | } | 927 | } |
1090 | 928 | ||
1091 | static const struct file_operations dtf_run_fops = { | 929 | static int dmatest_run_set(const char *val, const struct kernel_param *kp) |
1092 | .read = dtf_read_run, | ||
1093 | .write = dtf_write_run, | ||
1094 | .open = simple_open, | ||
1095 | .llseek = default_llseek, | ||
1096 | }; | ||
1097 | |||
1098 | static int dtf_results_show(struct seq_file *sf, void *data) | ||
1099 | { | 930 | { |
1100 | struct dmatest_info *info = sf->private; | 931 | struct dmatest_info *info = &test_info; |
1101 | struct dmatest_result *result; | 932 | int ret; |
1102 | struct dmatest_thread_result *tr; | ||
1103 | unsigned int i; | ||
1104 | 933 | ||
1105 | mutex_lock(&info->results_lock); | 934 | mutex_lock(&info->lock); |
1106 | list_for_each_entry(result, &info->results, node) { | 935 | ret = param_set_bool(val, kp); |
1107 | list_for_each_entry(tr, &result->results, node) { | 936 | if (ret) { |
1108 | seq_printf(sf, "%s\n", | 937 | mutex_unlock(&info->lock); |
1109 | thread_result_get(result->name, tr)); | 938 | return ret; |
1110 | if (tr->type == DMATEST_ET_VERIFY_BUF) { | ||
1111 | for (i = 0; i < tr->vr->error_count; i++) { | ||
1112 | seq_printf(sf, "\t%s\n", | ||
1113 | verify_result_get_one(tr->vr, i)); | ||
1114 | } | ||
1115 | } | ||
1116 | } | ||
1117 | } | 939 | } |
1118 | 940 | ||
1119 | mutex_unlock(&info->results_lock); | 941 | if (is_threaded_test_run(info)) |
1120 | return 0; | 942 | ret = -EBUSY; |
1121 | } | 943 | else if (dmatest_run) |
1122 | 944 | restart_threaded_test(info, dmatest_run); | |
1123 | static int dtf_results_open(struct inode *inode, struct file *file) | ||
1124 | { | ||
1125 | return single_open(file, dtf_results_show, inode->i_private); | ||
1126 | } | ||
1127 | |||
1128 | static const struct file_operations dtf_results_fops = { | ||
1129 | .open = dtf_results_open, | ||
1130 | .read = seq_read, | ||
1131 | .llseek = seq_lseek, | ||
1132 | .release = single_release, | ||
1133 | }; | ||
1134 | |||
1135 | static int dmatest_register_dbgfs(struct dmatest_info *info) | ||
1136 | { | ||
1137 | struct dentry *d; | ||
1138 | |||
1139 | d = debugfs_create_dir("dmatest", NULL); | ||
1140 | if (IS_ERR(d)) | ||
1141 | return PTR_ERR(d); | ||
1142 | if (!d) | ||
1143 | goto err_root; | ||
1144 | 945 | ||
1145 | info->root = d; | 946 | mutex_unlock(&info->lock); |
1146 | |||
1147 | /* Run or stop threaded test */ | ||
1148 | debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, | ||
1149 | &dtf_run_fops); | ||
1150 | |||
1151 | /* Results of test in progress */ | ||
1152 | debugfs_create_file("results", S_IRUGO, info->root, info, | ||
1153 | &dtf_results_fops); | ||
1154 | |||
1155 | return 0; | ||
1156 | 947 | ||
1157 | err_root: | 948 | return ret; |
1158 | pr_err("dmatest: Failed to initialize debugfs\n"); | ||
1159 | return -ENOMEM; | ||
1160 | } | 949 | } |
1161 | 950 | ||
1162 | static int __init dmatest_init(void) | 951 | static int __init dmatest_init(void) |
1163 | { | 952 | { |
1164 | struct dmatest_info *info = &test_info; | 953 | struct dmatest_info *info = &test_info; |
1165 | int ret; | 954 | struct dmatest_params *params = &info->params; |
1166 | |||
1167 | memset(info, 0, sizeof(*info)); | ||
1168 | 955 | ||
1169 | mutex_init(&info->lock); | 956 | if (dmatest_run) { |
1170 | INIT_LIST_HEAD(&info->channels); | 957 | mutex_lock(&info->lock); |
958 | run_threaded_test(info); | ||
959 | mutex_unlock(&info->lock); | ||
960 | } | ||
1171 | 961 | ||
1172 | mutex_init(&info->results_lock); | 962 | if (params->iterations && wait) |
1173 | INIT_LIST_HEAD(&info->results); | 963 | wait_event(thread_wait, !is_threaded_test_run(info)); |
1174 | 964 | ||
1175 | ret = dmatest_register_dbgfs(info); | 965 | /* module parameters are stable, inittime tests are started, |
1176 | if (ret) | 966 | * let userspace take over 'run' control |
1177 | return ret; | 967 | */ |
968 | info->did_init = true; | ||
1178 | 969 | ||
1179 | #ifdef MODULE | ||
1180 | return 0; | 970 | return 0; |
1181 | #else | ||
1182 | return run_threaded_test(info); | ||
1183 | #endif | ||
1184 | } | 971 | } |
1185 | /* when compiled-in wait for drivers to load first */ | 972 | /* when compiled-in wait for drivers to load first */ |
1186 | late_initcall(dmatest_init); | 973 | late_initcall(dmatest_init); |
@@ -1189,9 +976,9 @@ static void __exit dmatest_exit(void) | |||
1189 | { | 976 | { |
1190 | struct dmatest_info *info = &test_info; | 977 | struct dmatest_info *info = &test_info; |
1191 | 978 | ||
1192 | debugfs_remove_recursive(info->root); | 979 | mutex_lock(&info->lock); |
1193 | stop_threaded_test(info); | 980 | stop_threaded_test(info); |
1194 | result_free(info, NULL); | 981 | mutex_unlock(&info->lock); |
1195 | } | 982 | } |
1196 | module_exit(dmatest_exit); | 983 | module_exit(dmatest_exit); |
1197 | 984 | ||
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 89eb89f22284..7516be4677cf 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan) | |||
85 | { | 85 | { |
86 | return &chan->dev->device; | 86 | return &chan->dev->device; |
87 | } | 87 | } |
88 | static struct device *chan2parent(struct dma_chan *chan) | ||
89 | { | ||
90 | return chan->dev->device.parent; | ||
91 | } | ||
92 | 88 | ||
93 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 89 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
94 | { | 90 | { |
@@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, | |||
311 | list_splice_init(&desc->tx_list, &dwc->free_list); | 307 | list_splice_init(&desc->tx_list, &dwc->free_list); |
312 | list_move(&desc->desc_node, &dwc->free_list); | 308 | list_move(&desc->desc_node, &dwc->free_list); |
313 | 309 | ||
314 | if (!is_slave_direction(dwc->direction)) { | 310 | dma_descriptor_unmap(txd); |
315 | struct device *parent = chan2parent(&dwc->chan); | ||
316 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
317 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
318 | dma_unmap_single(parent, desc->lli.dar, | ||
319 | desc->total_len, DMA_FROM_DEVICE); | ||
320 | else | ||
321 | dma_unmap_page(parent, desc->lli.dar, | ||
322 | desc->total_len, DMA_FROM_DEVICE); | ||
323 | } | ||
324 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
325 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
326 | dma_unmap_single(parent, desc->lli.sar, | ||
327 | desc->total_len, DMA_TO_DEVICE); | ||
328 | else | ||
329 | dma_unmap_page(parent, desc->lli.sar, | ||
330 | desc->total_len, DMA_TO_DEVICE); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | spin_unlock_irqrestore(&dwc->lock, flags); | 311 | spin_unlock_irqrestore(&dwc->lock, flags); |
335 | 312 | ||
336 | if (callback) | 313 | if (callback) |
@@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, | |||
1098 | enum dma_status ret; | 1075 | enum dma_status ret; |
1099 | 1076 | ||
1100 | ret = dma_cookie_status(chan, cookie, txstate); | 1077 | ret = dma_cookie_status(chan, cookie, txstate); |
1101 | if (ret == DMA_SUCCESS) | 1078 | if (ret == DMA_COMPLETE) |
1102 | return ret; | 1079 | return ret; |
1103 | 1080 | ||
1104 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 1081 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
1105 | 1082 | ||
1106 | ret = dma_cookie_status(chan, cookie, txstate); | 1083 | ret = dma_cookie_status(chan, cookie, txstate); |
1107 | if (ret != DMA_SUCCESS) | 1084 | if (ret != DMA_COMPLETE) |
1108 | dma_set_residue(txstate, dwc_get_residue(dwc)); | 1085 | dma_set_residue(txstate, dwc_get_residue(dwc)); |
1109 | 1086 | ||
1110 | if (dwc->paused && ret == DMA_IN_PROGRESS) | 1087 | if (dwc->paused && ret == DMA_IN_PROGRESS) |
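The DMA_SUCCESS to DMA_COMPLETE rename seen here and in the dmatest hunks is mechanical; a typical post-series status check (illustrative, variable names are placeholders) is:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_COMPLETE)
		dev_dbg(dev, "still in flight, residue %u\n", state.residue);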
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index bef8a368c8dd..2539ea0cbc63 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -46,14 +46,21 @@ | |||
46 | #define EDMA_CHANS 64 | 46 | #define EDMA_CHANS 64 |
47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | 47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ |
48 | 48 | ||
49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | 49 | /* |
50 | #define MAX_NR_SG 16 | 50 | * Max of 20 segments per channel to conserve PaRAM slots |
51 | * Also note that MAX_NR_SG should be at least the number of periods | ||
52 | * that are required for ASoC, otherwise DMA prep calls will | ||
53 | * fail. Today davinci-pcm is the only user of this driver and | ||
54 | * requires at least 17 slots, so we set the default to 20. | ||
55 | */ | ||
56 | #define MAX_NR_SG 20 | ||
51 | #define EDMA_MAX_SLOTS MAX_NR_SG | 57 | #define EDMA_MAX_SLOTS MAX_NR_SG |
52 | #define EDMA_DESCRIPTORS 16 | 58 | #define EDMA_DESCRIPTORS 16 |
53 | 59 | ||
54 | struct edma_desc { | 60 | struct edma_desc { |
55 | struct virt_dma_desc vdesc; | 61 | struct virt_dma_desc vdesc; |
56 | struct list_head node; | 62 | struct list_head node; |
63 | int cyclic; | ||
57 | int absync; | 64 | int absync; |
58 | int pset_nr; | 65 | int pset_nr; |
59 | int processed; | 66 | int processed; |
@@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) | |||
167 | * then setup a link to the dummy slot, this results in all future | 174 | * then setup a link to the dummy slot, this results in all future |
168 | * events being absorbed and that's OK because we're done | 175 | * events being absorbed and that's OK because we're done |
169 | */ | 176 | */ |
170 | if (edesc->processed == edesc->pset_nr) | 177 | if (edesc->processed == edesc->pset_nr) { |
171 | edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); | 178 | if (edesc->cyclic) |
179 | edma_link(echan->slot[nslots-1], echan->slot[1]); | ||
180 | else | ||
181 | edma_link(echan->slot[nslots-1], | ||
182 | echan->ecc->dummy_slot); | ||
183 | } | ||
172 | 184 | ||
173 | edma_resume(echan->ch_num); | 185 | edma_resume(echan->ch_num); |
174 | 186 | ||
@@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
250 | return ret; | 262 | return ret; |
251 | } | 263 | } |
252 | 264 | ||
265 | /* | ||
266 | * A PaRAM set configuration abstraction used by other modes | ||
267 | * @chan: Channel whose PaRAM set we're configuring | ||
268 | * @pset: PaRAM set to initialize and setup. | ||
269 | * @src_addr: Source address of the DMA | ||
270 | * @dst_addr: Destination address of the DMA | ||
271 | * @burst: In units of dev_width, how much to send | ||
272 | * @dev_width: Width of the device (slave) bus accesses | ||
273 | * @dma_length: Total length of the DMA transfer | ||
274 | * @direction: Direction of the transfer | ||
275 | */ | ||
276 | static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, | ||
277 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, | ||
278 | enum dma_slave_buswidth dev_width, unsigned int dma_length, | ||
279 | enum dma_transfer_direction direction) | ||
280 | { | ||
281 | struct edma_chan *echan = to_edma_chan(chan); | ||
282 | struct device *dev = chan->device->dev; | ||
283 | int acnt, bcnt, ccnt, cidx; | ||
284 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
285 | int absync; | ||
286 | |||
287 | acnt = dev_width; | ||
288 | /* | ||
289 | * If the maxburst is equal to the fifo width, use | ||
290 | * A-synced transfers. This allows for large contiguous | ||
291 | * buffer transfers using only one PaRAM set. | ||
292 | */ | ||
293 | if (burst == 1) { | ||
294 | /* | ||
295 | * For the A-sync case, bcnt and ccnt are the remainder | ||
296 | * and quotient respectively of the division of: | ||
297 | * (dma_length / acnt) by (SZ_64K -1). This is so | ||
298 | * that in case bcnt over flows, we have ccnt to use. | ||
299 | * Note: In A-sync tranfer only, bcntrld is used, but it | ||
300 | * only applies for sg_dma_len(sg) >= SZ_64K. | ||
301 | * In this case, the best way adopted is- bccnt for the | ||
302 | * first frame will be the remainder below. Then for | ||
303 | * every successive frame, bcnt will be SZ_64K-1. This | ||
304 | * is assured as bcntrld = 0xffff in end of function. | ||
305 | */ | ||
306 | absync = false; | ||
307 | ccnt = dma_length / acnt / (SZ_64K - 1); | ||
308 | bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); | ||
309 | /* | ||
310 | * If bcnt is non-zero, we have a remainder and hence an | ||
311 | * extra frame to transfer, so increment ccnt. | ||
312 | */ | ||
313 | if (bcnt) | ||
314 | ccnt++; | ||
315 | else | ||
316 | bcnt = SZ_64K - 1; | ||
317 | cidx = acnt; | ||
318 | } else { | ||
319 | /* | ||
320 | * If maxburst is greater than the fifo address_width, | ||
321 | * use AB-synced transfers where A count is the fifo | ||
322 | * address_width and B count is the maxburst. In this | ||
323 | * case, we are limited to transfers of C count frames | ||
324 | * of (address_width * maxburst) where C count is limited | ||
325 | * to SZ_64K-1. This places an upper bound on the length | ||
326 | * of an SG segment that can be handled. | ||
327 | */ | ||
328 | absync = true; | ||
329 | bcnt = burst; | ||
330 | ccnt = dma_length / (acnt * bcnt); | ||
331 | if (ccnt > (SZ_64K - 1)) { | ||
332 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
333 | return -EINVAL; | ||
334 | } | ||
335 | cidx = acnt * bcnt; | ||
336 | } | ||
337 | |||
338 | if (direction == DMA_MEM_TO_DEV) { | ||
339 | src_bidx = acnt; | ||
340 | src_cidx = cidx; | ||
341 | dst_bidx = 0; | ||
342 | dst_cidx = 0; | ||
343 | } else if (direction == DMA_DEV_TO_MEM) { | ||
344 | src_bidx = 0; | ||
345 | src_cidx = 0; | ||
346 | dst_bidx = acnt; | ||
347 | dst_cidx = cidx; | ||
348 | } else { | ||
349 | dev_err(dev, "%s: direction not implemented yet\n", __func__); | ||
350 | return -EINVAL; | ||
351 | } | ||
352 | |||
353 | pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
354 | /* Configure A or AB synchronized transfers */ | ||
355 | if (absync) | ||
356 | pset->opt |= SYNCDIM; | ||
357 | |||
358 | pset->src = src_addr; | ||
359 | pset->dst = dst_addr; | ||
360 | |||
361 | pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
362 | pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
363 | |||
364 | pset->a_b_cnt = bcnt << 16 | acnt; | ||
365 | pset->ccnt = ccnt; | ||
366 | /* | ||
367 | * The only time (bcntrld) auto reload is required is the | ||
368 | * A-sync case, and then the only reload value ever needed | ||
369 | * is SZ_64K-1. 'link' is initially set to NULL | ||
370 | * and will be populated later by edma_execute. | ||
371 | */ | ||
372 | pset->link_bcntrld = 0xffffffff; | ||
373 | return absync; | ||
374 | } | ||
375 | |||
253 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | 376 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( |
254 | struct dma_chan *chan, struct scatterlist *sgl, | 377 | struct dma_chan *chan, struct scatterlist *sgl, |
255 | unsigned int sg_len, enum dma_transfer_direction direction, | 378 | unsigned int sg_len, enum dma_transfer_direction direction, |
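The A-sync/AB-sync split in edma_config_pset() reduces to a small piece of integer arithmetic. A standalone sketch of the same computation, with made-up values for the device width, maxburst and segment length (all three are assumptions for illustration):

	#include <stdio.h>

	#define SZ_64K 0x10000

	int main(void)
	{
		unsigned int dev_width = 4;		/* assumed 32-bit FIFO */
		unsigned int burst = 1;			/* maxburst 1 -> A-synchronized */
		unsigned int dma_length = 1000000;	/* assumed segment length */
		unsigned int acnt = dev_width, bcnt, ccnt;

		if (burst == 1) {
			/* A-sync: split (dma_length / acnt) into ccnt frames */
			ccnt = dma_length / acnt / (SZ_64K - 1);
			bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
		} else {
			/* AB-sync: acnt * bcnt bytes per frame, ccnt frames */
			bcnt = burst;
			ccnt = dma_length / (acnt * bcnt);
		}
		printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
		return 0;
	}

For these values the output is acnt=4 bcnt=53395 ccnt=4; the first frame carries the remainder and bcntrld reloads SZ_64K-1 for the rest, exactly as the driver comment describes.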
@@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
258 | struct edma_chan *echan = to_edma_chan(chan); | 381 | struct edma_chan *echan = to_edma_chan(chan); |
259 | struct device *dev = chan->device->dev; | 382 | struct device *dev = chan->device->dev; |
260 | struct edma_desc *edesc; | 383 | struct edma_desc *edesc; |
261 | dma_addr_t dev_addr; | 384 | dma_addr_t src_addr = 0, dst_addr = 0; |
262 | enum dma_slave_buswidth dev_width; | 385 | enum dma_slave_buswidth dev_width; |
263 | u32 burst; | 386 | u32 burst; |
264 | struct scatterlist *sg; | 387 | struct scatterlist *sg; |
265 | int acnt, bcnt, ccnt, src, dst, cidx; | 388 | int i, nslots, ret; |
266 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
267 | int i, nslots; | ||
268 | 389 | ||
269 | if (unlikely(!echan || !sgl || !sg_len)) | 390 | if (unlikely(!echan || !sgl || !sg_len)) |
270 | return NULL; | 391 | return NULL; |
271 | 392 | ||
272 | if (direction == DMA_DEV_TO_MEM) { | 393 | if (direction == DMA_DEV_TO_MEM) { |
273 | dev_addr = echan->cfg.src_addr; | 394 | src_addr = echan->cfg.src_addr; |
274 | dev_width = echan->cfg.src_addr_width; | 395 | dev_width = echan->cfg.src_addr_width; |
275 | burst = echan->cfg.src_maxburst; | 396 | burst = echan->cfg.src_maxburst; |
276 | } else if (direction == DMA_MEM_TO_DEV) { | 397 | } else if (direction == DMA_MEM_TO_DEV) { |
277 | dev_addr = echan->cfg.dst_addr; | 398 | dst_addr = echan->cfg.dst_addr; |
278 | dev_width = echan->cfg.dst_addr_width; | 399 | dev_width = echan->cfg.dst_addr_width; |
279 | burst = echan->cfg.dst_maxburst; | 400 | burst = echan->cfg.dst_maxburst; |
280 | } else { | 401 | } else { |
@@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
307 | if (echan->slot[i] < 0) { | 428 | if (echan->slot[i] < 0) { |
308 | kfree(edesc); | 429 | kfree(edesc); |
309 | dev_err(dev, "Failed to allocate slot\n"); | 430 | dev_err(dev, "Failed to allocate slot\n"); |
310 | kfree(edesc); | ||
311 | return NULL; | 431 | return NULL; |
312 | } | 432 | } |
313 | } | 433 | } |
@@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
315 | 435 | ||
316 | /* Configure PaRAM sets for each SG */ | 436 | /* Configure PaRAM sets for each SG */ |
317 | for_each_sg(sgl, sg, sg_len, i) { | 437 | for_each_sg(sgl, sg, sg_len, i) { |
318 | 438 | /* Get address for each SG */ | |
319 | acnt = dev_width; | 439 | if (direction == DMA_DEV_TO_MEM) |
320 | 440 | dst_addr = sg_dma_address(sg); | |
321 | /* | 441 | else |
322 | * If the maxburst is equal to the fifo width, use | 442 | src_addr = sg_dma_address(sg); |
323 | * A-synced transfers. This allows for large contiguous | 443 | |
324 | * buffer transfers using only one PaRAM set. | 444 | ret = edma_config_pset(chan, &edesc->pset[i], src_addr, |
325 | */ | 445 | dst_addr, burst, dev_width, |
326 | if (burst == 1) { | 446 | sg_dma_len(sg), direction); |
327 | edesc->absync = false; | 447 | if (ret < 0) { |
328 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | 448 | kfree(edesc); |
329 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | 449 | return NULL; |
330 | if (bcnt) | ||
331 | ccnt++; | ||
332 | else | ||
333 | bcnt = SZ_64K - 1; | ||
334 | cidx = acnt; | ||
335 | /* | ||
336 | * If maxburst is greater than the fifo address_width, | ||
337 | * use AB-synced transfers where A count is the fifo | ||
338 | * address_width and B count is the maxburst. In this | ||
339 | * case, we are limited to transfers of C count frames | ||
340 | * of (address_width * maxburst) where C count is limited | ||
341 | * to SZ_64K-1. This places an upper bound on the length | ||
342 | * of an SG segment that can be handled. | ||
343 | */ | ||
344 | } else { | ||
345 | edesc->absync = true; | ||
346 | bcnt = burst; | ||
347 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
348 | if (ccnt > (SZ_64K - 1)) { | ||
349 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
350 | kfree(edesc); | ||
351 | return NULL; | ||
352 | } | ||
353 | cidx = acnt * bcnt; | ||
354 | } | 450 | } |
355 | 451 | ||
356 | if (direction == DMA_MEM_TO_DEV) { | 452 | edesc->absync = ret; |
357 | src = sg_dma_address(sg); | ||
358 | dst = dev_addr; | ||
359 | src_bidx = acnt; | ||
360 | src_cidx = cidx; | ||
361 | dst_bidx = 0; | ||
362 | dst_cidx = 0; | ||
363 | } else { | ||
364 | src = dev_addr; | ||
365 | dst = sg_dma_address(sg); | ||
366 | src_bidx = 0; | ||
367 | src_cidx = 0; | ||
368 | dst_bidx = acnt; | ||
369 | dst_cidx = cidx; | ||
370 | } | ||
371 | |||
372 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
373 | /* Configure A or AB synchronized transfers */ | ||
374 | if (edesc->absync) | ||
375 | edesc->pset[i].opt |= SYNCDIM; | ||
376 | 453 | ||
377 | /* If this is the last in a current SG set of transactions, | 454 | /* If this is the last in a current SG set of transactions, |
378 | enable interrupts so that next set is processed */ | 455 | enable interrupts so that next set is processed */ |
@@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
382 | /* If this is the last set, enable completion interrupt flag */ | 459 | /* If this is the last set, enable completion interrupt flag */ |
383 | if (i == sg_len - 1) | 460 | if (i == sg_len - 1) |
384 | edesc->pset[i].opt |= TCINTEN; | 461 | edesc->pset[i].opt |= TCINTEN; |
462 | } | ||
385 | 463 | ||
386 | edesc->pset[i].src = src; | 464 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
387 | edesc->pset[i].dst = dst; | 465 | } |
388 | 466 | ||
389 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | 467 | static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( |
390 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | 468 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
469 | size_t period_len, enum dma_transfer_direction direction, | ||
470 | unsigned long tx_flags, void *context) | ||
471 | { | ||
472 | struct edma_chan *echan = to_edma_chan(chan); | ||
473 | struct device *dev = chan->device->dev; | ||
474 | struct edma_desc *edesc; | ||
475 | dma_addr_t src_addr, dst_addr; | ||
476 | enum dma_slave_buswidth dev_width; | ||
477 | u32 burst; | ||
478 | int i, ret, nslots; | ||
479 | |||
480 | if (unlikely(!echan || !buf_len || !period_len)) | ||
481 | return NULL; | ||
482 | |||
483 | if (direction == DMA_DEV_TO_MEM) { | ||
484 | src_addr = echan->cfg.src_addr; | ||
485 | dst_addr = buf_addr; | ||
486 | dev_width = echan->cfg.src_addr_width; | ||
487 | burst = echan->cfg.src_maxburst; | ||
488 | } else if (direction == DMA_MEM_TO_DEV) { | ||
489 | src_addr = buf_addr; | ||
490 | dst_addr = echan->cfg.dst_addr; | ||
491 | dev_width = echan->cfg.dst_addr_width; | ||
492 | burst = echan->cfg.dst_maxburst; | ||
493 | } else { | ||
494 | dev_err(dev, "%s: bad direction?\n", __func__); | ||
495 | return NULL; | ||
496 | } | ||
497 | |||
498 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
499 | dev_err(dev, "Undefined slave buswidth\n"); | ||
500 | return NULL; | ||
501 | } | ||
502 | |||
503 | if (unlikely(buf_len % period_len)) { | ||
504 | dev_err(dev, "Buffer length should be a multiple of the period length\n"); | ||
505 | return NULL; | ||
506 | } | ||
507 | |||
508 | nslots = (buf_len / period_len) + 1; | ||
509 | |||
510 | /* | ||
511 | * Cyclic DMA users such as audio cannot tolerate delays introduced | ||
512 | * by cases where the number of periods is more than the maximum | ||
513 | * number of SGs the EDMA driver can handle at a time. For DMA types | ||
514 | * such as Slave SGs, such delays are tolerable and synchronized, | ||
515 | * but the synchronization is difficult to achieve with Cyclic and | ||
516 | * cannot be guaranteed, so we error out early. | ||
517 | */ | ||
518 | if (nslots > MAX_NR_SG) | ||
519 | return NULL; | ||
520 | |||
521 | edesc = kzalloc(sizeof(*edesc) + nslots * | ||
522 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
523 | if (!edesc) { | ||
524 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
525 | return NULL; | ||
526 | } | ||
527 | |||
528 | edesc->cyclic = 1; | ||
529 | edesc->pset_nr = nslots; | ||
530 | |||
531 | dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); | ||
532 | dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); | ||
533 | dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); | ||
534 | |||
535 | for (i = 0; i < nslots; i++) { | ||
536 | /* Allocate a PaRAM slot, if needed */ | ||
537 | if (echan->slot[i] < 0) { | ||
538 | echan->slot[i] = | ||
539 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
540 | EDMA_SLOT_ANY); | ||
541 | if (echan->slot[i] < 0) { | ||
542 | dev_err(dev, "Failed to allocate slot\n"); | ||
543 | return NULL; | ||
544 | } | ||
545 | } | ||
546 | |||
547 | if (i == nslots - 1) { | ||
548 | memcpy(&edesc->pset[i], &edesc->pset[0], | ||
549 | sizeof(edesc->pset[0])); | ||
550 | break; | ||
551 | } | ||
552 | |||
553 | ret = edma_config_pset(chan, &edesc->pset[i], src_addr, | ||
554 | dst_addr, burst, dev_width, period_len, | ||
555 | direction); | ||
556 | if (ret < 0) | ||
557 | return NULL; | ||
391 | 558 | ||
392 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | 559 | if (direction == DMA_DEV_TO_MEM) |
393 | edesc->pset[i].ccnt = ccnt; | 560 | dst_addr += period_len; |
394 | edesc->pset[i].link_bcntrld = 0xffffffff; | 561 | else |
562 | src_addr += period_len; | ||
395 | 563 | ||
564 | dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); | ||
565 | dev_dbg(dev, | ||
566 | "\n pset[%d]:\n" | ||
567 | " chnum\t%d\n" | ||
568 | " slot\t%d\n" | ||
569 | " opt\t%08x\n" | ||
570 | " src\t%08x\n" | ||
571 | " dst\t%08x\n" | ||
572 | " abcnt\t%08x\n" | ||
573 | " ccnt\t%08x\n" | ||
574 | " bidx\t%08x\n" | ||
575 | " cidx\t%08x\n" | ||
576 | " lkrld\t%08x\n", | ||
577 | i, echan->ch_num, echan->slot[i], | ||
578 | edesc->pset[i].opt, | ||
579 | edesc->pset[i].src, | ||
580 | edesc->pset[i].dst, | ||
581 | edesc->pset[i].a_b_cnt, | ||
582 | edesc->pset[i].ccnt, | ||
583 | edesc->pset[i].src_dst_bidx, | ||
584 | edesc->pset[i].src_dst_cidx, | ||
585 | edesc->pset[i].link_bcntrld); | ||
586 | |||
587 | edesc->absync = ret; | ||
588 | |||
589 | /* | ||
590 | * Enable interrupts for every period because callback | ||
591 | * has to be called for every period. | ||
592 | */ | ||
593 | edesc->pset[i].opt |= TCINTEN; | ||
396 | } | 594 | } |
397 | 595 | ||
398 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 596 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
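From a client's point of view the new cyclic prep looks like any other dmaengine slave transfer. A hedged sketch, assuming a slave channel has already been requested and configured and that buf is a DMA-mapped buffer; the function and callback names are placeholders, and the flags-taking dmaengine_prep_dma_cyclic() wrapper of this kernel generation is assumed:

	#include <linux/dmaengine.h>

	/* hypothetical per-period callback, reached via vchan_cyclic_callback() */
	static void my_period_done(void *param)
	{
	}

	static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EINVAL;

		desc->callback = my_period_done;
		desc->callback_param = NULL;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}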
@@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
406 | unsigned long flags; | 604 | unsigned long flags; |
407 | struct edmacc_param p; | 605 | struct edmacc_param p; |
408 | 606 | ||
409 | /* Pause the channel */ | 607 | edesc = echan->edesc; |
410 | edma_pause(echan->ch_num); | 608 | |
609 | /* Pause the channel for non-cyclic */ | ||
610 | if (!edesc || (edesc && !edesc->cyclic)) | ||
611 | edma_pause(echan->ch_num); | ||
411 | 612 | ||
412 | switch (ch_status) { | 613 | switch (ch_status) { |
413 | case DMA_COMPLETE: | 614 | case EDMA_DMA_COMPLETE: |
414 | spin_lock_irqsave(&echan->vchan.lock, flags); | 615 | spin_lock_irqsave(&echan->vchan.lock, flags); |
415 | 616 | ||
416 | edesc = echan->edesc; | ||
417 | if (edesc) { | 617 | if (edesc) { |
418 | if (edesc->processed == edesc->pset_nr) { | 618 | if (edesc->cyclic) { |
619 | vchan_cyclic_callback(&edesc->vdesc); | ||
620 | } else if (edesc->processed == edesc->pset_nr) { | ||
419 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | 621 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); |
420 | edma_stop(echan->ch_num); | 622 | edma_stop(echan->ch_num); |
421 | vchan_cookie_complete(&edesc->vdesc); | 623 | vchan_cookie_complete(&edesc->vdesc); |
624 | edma_execute(echan); | ||
422 | } else { | 625 | } else { |
423 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | 626 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); |
627 | edma_execute(echan); | ||
424 | } | 628 | } |
425 | |||
426 | edma_execute(echan); | ||
427 | } | 629 | } |
428 | 630 | ||
429 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 631 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
430 | 632 | ||
431 | break; | 633 | break; |
432 | case DMA_CC_ERROR: | 634 | case EDMA_DMA_CC_ERROR: |
433 | spin_lock_irqsave(&echan->vchan.lock, flags); | 635 | spin_lock_irqsave(&echan->vchan.lock, flags); |
434 | 636 | ||
435 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | 637 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); |
@@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
579 | unsigned long flags; | 781 | unsigned long flags; |
580 | 782 | ||
581 | ret = dma_cookie_status(chan, cookie, txstate); | 783 | ret = dma_cookie_status(chan, cookie, txstate); |
582 | if (ret == DMA_SUCCESS || !txstate) | 784 | if (ret == DMA_COMPLETE || !txstate) |
583 | return ret; | 785 | return ret; |
584 | 786 | ||
585 | spin_lock_irqsave(&echan->vchan.lock, flags); | 787 | spin_lock_irqsave(&echan->vchan.lock, flags); |
@@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | |||
619 | struct device *dev) | 821 | struct device *dev) |
620 | { | 822 | { |
621 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 823 | dma->device_prep_slave_sg = edma_prep_slave_sg; |
824 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | ||
622 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 825 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; |
623 | dma->device_free_chan_resources = edma_free_chan_resources; | 826 | dma->device_free_chan_resources = edma_free_chan_resources; |
624 | dma->device_issue_pending = edma_issue_pending; | 827 | dma->device_issue_pending = edma_issue_pending; |
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 591cd8c63abb..cb4bf682a708 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac) | |||
733 | spin_unlock_irqrestore(&edmac->lock, flags); | 733 | spin_unlock_irqrestore(&edmac->lock, flags); |
734 | } | 734 | } |
735 | 735 | ||
736 | static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc) | ||
737 | { | ||
738 | struct device *dev = desc->txd.chan->device->dev; | ||
739 | |||
740 | if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
741 | if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
742 | dma_unmap_single(dev, desc->src_addr, desc->size, | ||
743 | DMA_TO_DEVICE); | ||
744 | else | ||
745 | dma_unmap_page(dev, desc->src_addr, desc->size, | ||
746 | DMA_TO_DEVICE); | ||
747 | } | ||
748 | if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
749 | if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
750 | dma_unmap_single(dev, desc->dst_addr, desc->size, | ||
751 | DMA_FROM_DEVICE); | ||
752 | else | ||
753 | dma_unmap_page(dev, desc->dst_addr, desc->size, | ||
754 | DMA_FROM_DEVICE); | ||
755 | } | ||
756 | } | ||
757 | |||
758 | static void ep93xx_dma_tasklet(unsigned long data) | 736 | static void ep93xx_dma_tasklet(unsigned long data) |
759 | { | 737 | { |
760 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | 738 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; |
@@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
787 | 765 | ||
788 | /* Now we can release all the chained descriptors */ | 766 | /* Now we can release all the chained descriptors */ |
789 | list_for_each_entry_safe(desc, d, &list, node) { | 767 | list_for_each_entry_safe(desc, d, &list, node) { |
790 | /* | 768 | dma_descriptor_unmap(&desc->txd); |
791 | * For the memcpy channels the API requires us to unmap the | ||
792 | * buffers unless requested otherwise. | ||
793 | */ | ||
794 | if (!edmac->chan.private) | ||
795 | ep93xx_dma_unmap_buffers(desc); | ||
796 | |||
797 | ep93xx_dma_desc_put(edmac, desc); | 769 | ep93xx_dma_desc_put(edmac, desc); |
798 | } | 770 | } |
799 | 771 | ||
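Several drivers in this series (ep93xx above, fsldma and ioat below) drop their hand-rolled unmap helpers in favour of the core's dma_descriptor_unmap(). The completion path then has the same shape everywhere; a hedged sketch of that common pattern, with complete_one() standing in for whatever a given driver's cleanup routine is called:

	#include <linux/dmaengine.h>
	#include "dmaengine.h"		/* driver-internal cookie helpers */

	static void complete_one(struct dma_async_tx_descriptor *tx)
	{
		if (tx->cookie)
			dma_cookie_complete(tx);

		/* the core now owns buffer unmapping for this descriptor */
		dma_descriptor_unmap(tx);

		if (tx->callback) {
			tx->callback(tx->callback_param);
			tx->callback = NULL;
		}
	}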
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 61517dd0d0b7..7086a16a55f2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | |||
870 | /* Run any dependencies */ | 870 | /* Run any dependencies */ |
871 | dma_run_dependencies(txd); | 871 | dma_run_dependencies(txd); |
872 | 872 | ||
873 | /* Unmap the dst buffer, if requested */ | 873 | dma_descriptor_unmap(txd); |
874 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
875 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
876 | dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); | ||
877 | else | ||
878 | dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); | ||
879 | } | ||
880 | |||
881 | /* Unmap the src buffer, if requested */ | ||
882 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
883 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
884 | dma_unmap_single(dev, src, len, DMA_TO_DEVICE); | ||
885 | else | ||
886 | dma_unmap_page(dev, src, len, DMA_TO_DEVICE); | ||
887 | } | ||
888 | |||
889 | #ifdef FSL_DMA_LD_DEBUG | 874 | #ifdef FSL_DMA_LD_DEBUG |
890 | chan_dbg(chan, "LD %p free\n", desc); | 875 | chan_dbg(chan, "LD %p free\n", desc); |
891 | #endif | 876 | #endif |
@@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1255 | WARN_ON(fdev->feature != chan->feature); | 1240 | WARN_ON(fdev->feature != chan->feature); |
1256 | 1241 | ||
1257 | chan->dev = fdev->dev; | 1242 | chan->dev = fdev->dev; |
1258 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; | 1243 | chan->id = (res.start & 0xfff) < 0x300 ? |
1244 | ((res.start - 0x100) & 0xfff) >> 7 : | ||
1245 | ((res.start - 0x200) & 0xfff) >> 7; | ||
1259 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | 1246 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { |
1260 | dev_err(fdev->dev, "too many channels for device\n"); | 1247 | dev_err(fdev->dev, "too many channels for device\n"); |
1261 | err = -EINVAL; | 1248 | err = -EINVAL; |
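Worked through with assumed offsets: on Elo/Elo Plus the four channel register blocks sit at 0x100, 0x180, 0x200 and 0x280 within the controller, while on Elo3 (per the binding added earlier in this series) a second bank of four follows at 0x400 through 0x580, which is why the second arm subtracts 0x200. A standalone check of that arithmetic, with the offset list an assumption taken from the binding example rather than from this hunk:

	#include <stdio.h>

	/* assumed low-12-bit channel offsets: first bank, then the Elo3 bank */
	static const unsigned int offs[] = {
		0x100, 0x180, 0x200, 0x280, 0x400, 0x480, 0x500, 0x580
	};

	int main(void)
	{
		unsigned int i, off, id;

		for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
			off = offs[i];
			id = off < 0x300 ? (off - 0x100) >> 7
					 : (off - 0x200) >> 7;
			printf("offset 0x%03x -> channel id %u\n", off, id);
		}
		return 0;
	}

This prints ids 0 through 7, which is also why FSL_DMA_MAX_CHANS_PER_DEVICE grows from 4 to 8 in fsldma.h below.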
@@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op) | |||
1428 | } | 1415 | } |
1429 | 1416 | ||
1430 | static const struct of_device_id fsldma_of_ids[] = { | 1417 | static const struct of_device_id fsldma_of_ids[] = { |
1418 | { .compatible = "fsl,elo3-dma", }, | ||
1431 | { .compatible = "fsl,eloplus-dma", }, | 1419 | { .compatible = "fsl,eloplus-dma", }, |
1432 | { .compatible = "fsl,elo-dma", }, | 1420 | { .compatible = "fsl,elo-dma", }, |
1433 | {} | 1421 | {} |
@@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = { | |||
1449 | 1437 | ||
1450 | static __init int fsldma_init(void) | 1438 | static __init int fsldma_init(void) |
1451 | { | 1439 | { |
1452 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1440 | pr_info("Freescale Elo series DMA driver\n"); |
1453 | return platform_driver_register(&fsldma_of_driver); | 1441 | return platform_driver_register(&fsldma_of_driver); |
1454 | } | 1442 | } |
1455 | 1443 | ||
@@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void) | |||
1461 | subsys_initcall(fsldma_init); | 1449 | subsys_initcall(fsldma_init); |
1462 | module_exit(fsldma_exit); | 1450 | module_exit(fsldma_exit); |
1463 | 1451 | ||
1464 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); | 1452 | MODULE_DESCRIPTION("Freescale Elo series DMA driver"); |
1465 | MODULE_LICENSE("GPL"); | 1453 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc74..1ffc24484d23 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -112,7 +112,7 @@ struct fsldma_chan_regs { | |||
112 | }; | 112 | }; |
113 | 113 | ||
114 | struct fsldma_chan; | 114 | struct fsldma_chan; |
115 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | 115 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 8 |
116 | 116 | ||
117 | struct fsldma_device { | 117 | struct fsldma_device { |
118 | void __iomem *regs; /* DGSR register base */ | 118 | void __iomem *regs; /* DGSR register base */ |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c026791..6f9ac2022abd 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
572 | 572 | ||
573 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); | 573 | imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); |
574 | 574 | ||
575 | dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " | 575 | dev_dbg(imxdma->dev, |
576 | "dma_length=%d\n", __func__, imxdmac->channel, | 576 | "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", |
577 | d->dest, d->src, d->len); | 577 | __func__, imxdmac->channel, |
578 | (unsigned long long)d->dest, | ||
579 | (unsigned long long)d->src, d->len); | ||
578 | 580 | ||
579 | break; | 581 | break; |
580 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ | 582 | /* Cyclic transfer is the same as slave_sg with special sg configuration. */ |
@@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) | |||
586 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, | 588 | imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, |
587 | DMA_CCR(imxdmac->channel)); | 589 | DMA_CCR(imxdmac->channel)); |
588 | 590 | ||
589 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 591 | dev_dbg(imxdma->dev, |
590 | "total length=%d dev_addr=0x%08x (dev2mem)\n", | 592 | "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", |
591 | __func__, imxdmac->channel, d->sg, d->sgcount, | 593 | __func__, imxdmac->channel, |
592 | d->len, imxdmac->per_address); | 594 | d->sg, d->sgcount, d->len, |
595 | (unsigned long long)imxdmac->per_address); | ||
593 | } else if (d->direction == DMA_MEM_TO_DEV) { | 596 | } else if (d->direction == DMA_MEM_TO_DEV) { |
594 | imx_dmav1_writel(imxdma, imxdmac->per_address, | 597 | imx_dmav1_writel(imxdma, imxdmac->per_address, |
595 | DMA_DAR(imxdmac->channel)); | 598 | DMA_DAR(imxdmac->channel)); |
596 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, | 599 | imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, |
597 | DMA_CCR(imxdmac->channel)); | 600 | DMA_CCR(imxdmac->channel)); |
598 | 601 | ||
599 | dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " | 602 | dev_dbg(imxdma->dev, |
600 | "total length=%d dev_addr=0x%08x (mem2dev)\n", | 603 | "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", |
601 | __func__, imxdmac->channel, d->sg, d->sgcount, | 604 | __func__, imxdmac->channel, |
602 | d->len, imxdmac->per_address); | 605 | d->sg, d->sgcount, d->len, |
606 | (unsigned long long)imxdmac->per_address); | ||
603 | } else { | 607 | } else { |
604 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", | 608 | dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", |
605 | __func__, imxdmac->channel); | 609 | __func__, imxdmac->channel); |
@@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) | |||
771 | desc->desc.tx_submit = imxdma_tx_submit; | 775 | desc->desc.tx_submit = imxdma_tx_submit; |
772 | /* txd.flags will be overwritten in prep funcs */ | 776 | /* txd.flags will be overwritten in prep funcs */ |
773 | desc->desc.flags = DMA_CTRL_ACK; | 777 | desc->desc.flags = DMA_CTRL_ACK; |
774 | desc->status = DMA_SUCCESS; | 778 | desc->status = DMA_COMPLETE; |
775 | 779 | ||
776 | list_add_tail(&desc->node, &imxdmac->ld_free); | 780 | list_add_tail(&desc->node, &imxdmac->ld_free); |
777 | imxdmac->descs_allocated++; | 781 | imxdmac->descs_allocated++; |
@@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
870 | int i; | 874 | int i; |
871 | unsigned int periods = buf_len / period_len; | 875 | unsigned int periods = buf_len / period_len; |
872 | 876 | ||
873 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", | 877 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", |
874 | __func__, imxdmac->channel, buf_len, period_len); | 878 | __func__, imxdmac->channel, buf_len, period_len); |
875 | 879 | ||
876 | if (list_empty(&imxdmac->ld_free) || | 880 | if (list_empty(&imxdmac->ld_free) || |
@@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( | |||
926 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 930 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
927 | struct imxdma_desc *desc; | 931 | struct imxdma_desc *desc; |
928 | 932 | ||
929 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", | 933 | dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", |
930 | __func__, imxdmac->channel, src, dest, len); | 934 | __func__, imxdmac->channel, (unsigned long long)src, |
935 | (unsigned long long)dest, len); | ||
931 | 936 | ||
932 | if (list_empty(&imxdmac->ld_free) || | 937 | if (list_empty(&imxdmac->ld_free) || |
933 | imxdma_chan_is_doing_cyclic(imxdmac)) | 938 | imxdma_chan_is_doing_cyclic(imxdmac)) |
@@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( | |||
956 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 961 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
957 | struct imxdma_desc *desc; | 962 | struct imxdma_desc *desc; |
958 | 963 | ||
959 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" | 964 | dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" |
960 | " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, | 965 | " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, |
961 | imxdmac->channel, xt->src_start, xt->dst_start, | 966 | imxdmac->channel, (unsigned long long)xt->src_start, |
967 | (unsigned long long) xt->dst_start, | ||
962 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", | 968 | xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", |
963 | xt->numf, xt->frame_size); | 969 | xt->numf, xt->frame_size); |
964 | 970 | ||
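The imx-dma (and following imx-sdma) hunks are format-string portability fixes: dma_addr_t can be 32 or 64 bits wide depending on the configuration, so it is cast to unsigned long long and printed with %llx, while size_t takes %zu. A hedged sketch of that convention in isolation:

	#include <linux/device.h>
	#include <linux/types.h>

	static void show_xfer(struct device *dev, dma_addr_t src, dma_addr_t dst,
			      size_t len)
	{
		/* cast dma_addr_t explicitly; its width is config-dependent */
		dev_dbg(dev, "src=0x%08llx dst=0x%08llx len=%zu\n",
			(unsigned long long)src, (unsigned long long)dst, len);
	}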
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c1fd504cae28..c75679d42028 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
638 | if (error) | 638 | if (error) |
639 | sdmac->status = DMA_ERROR; | 639 | sdmac->status = DMA_ERROR; |
640 | else | 640 | else |
641 | sdmac->status = DMA_SUCCESS; | 641 | sdmac->status = DMA_COMPLETE; |
642 | 642 | ||
643 | dma_cookie_complete(&sdmac->desc); | 643 | dma_cookie_complete(&sdmac->desc); |
644 | if (sdmac->desc.callback) | 644 | if (sdmac->desc.callback) |
@@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
1089 | param &= ~BD_CONT; | 1089 | param &= ~BD_CONT; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 1092 | dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", |
1093 | i, count, sg->dma_address, | 1093 | i, count, (u64)sg->dma_address, |
1094 | param & BD_WRAP ? "wrap" : "", | 1094 | param & BD_WRAP ? "wrap" : "", |
1095 | param & BD_INTR ? " intr" : ""); | 1095 | param & BD_INTR ? " intr" : ""); |
1096 | 1096 | ||
@@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
1163 | if (i + 1 == num_periods) | 1163 | if (i + 1 == num_periods) |
1164 | param |= BD_WRAP; | 1164 | param |= BD_WRAP; |
1165 | 1165 | ||
1166 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 1166 | dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", |
1167 | i, period_len, dma_addr, | 1167 | i, period_len, (u64)dma_addr, |
1168 | param & BD_WRAP ? "wrap" : "", | 1168 | param & BD_WRAP ? "wrap" : "", |
1169 | param & BD_INTR ? " intr" : ""); | 1169 | param & BD_INTR ? " intr" : ""); |
1170 | 1170 | ||
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8a..1aab8130efa1 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
309 | callback_txd(param_txd); | 309 | callback_txd(param_txd); |
310 | } | 310 | } |
311 | if (midc->raw_tfr) { | 311 | if (midc->raw_tfr) { |
312 | desc->status = DMA_SUCCESS; | 312 | desc->status = DMA_COMPLETE; |
313 | if (desc->lli != NULL) { | 313 | if (desc->lli != NULL) { |
314 | pci_pool_free(desc->lli_pool, desc->lli, | 314 | pci_pool_free(desc->lli_pool, desc->lli, |
315 | desc->lli_phys); | 315 | desc->lli_phys); |
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
481 | enum dma_status ret; | 481 | enum dma_status ret; |
482 | 482 | ||
483 | ret = dma_cookie_status(chan, cookie, txstate); | 483 | ret = dma_cookie_status(chan, cookie, txstate); |
484 | if (ret != DMA_SUCCESS) { | 484 | if (ret != DMA_COMPLETE) { |
485 | spin_lock_bh(&midc->lock); | 485 | spin_lock_bh(&midc->lock); |
486 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 486 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
487 | spin_unlock_bh(&midc->lock); | 487 | spin_unlock_bh(&midc->lock); |
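The DMA_SUCCESS to DMA_COMPLETE rename runs through every driver in this series; both the tx_status callbacks above and dmaengine clients compare against the new name. A hedged client-side sketch of waiting on a submitted descriptor (dma_sync_wait() is the existing core helper; the surrounding function is illustrative only):

	#include <linux/dmaengine.h>

	static int wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
	{
		enum dma_status status;

		dma_async_issue_pending(chan);
		status = dma_sync_wait(chan, cookie);	/* polls with a timeout */

		return status == DMA_COMPLETE ? 0 : -EIO;
	}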
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5ff6fc1819dc..1a49c777607c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data) | |||
531 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); | 531 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
532 | } | 532 | } |
533 | 533 | ||
534 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
535 | size_t len, struct ioat_dma_descriptor *hw) | ||
536 | { | ||
537 | struct pci_dev *pdev = chan->device->pdev; | ||
538 | size_t offset = len - hw->size; | ||
539 | |||
540 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
541 | ioat_unmap(pdev, hw->dst_addr - offset, len, | ||
542 | PCI_DMA_FROMDEVICE, flags, 1); | ||
543 | |||
544 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) | ||
545 | ioat_unmap(pdev, hw->src_addr - offset, len, | ||
546 | PCI_DMA_TODEVICE, flags, 0); | ||
547 | } | ||
548 | |||
549 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) | 534 | dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan) |
550 | { | 535 | { |
551 | dma_addr_t phys_complete; | 536 | dma_addr_t phys_complete; |
@@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete) | |||
602 | dump_desc_dbg(ioat, desc); | 587 | dump_desc_dbg(ioat, desc); |
603 | if (tx->cookie) { | 588 | if (tx->cookie) { |
604 | dma_cookie_complete(tx); | 589 | dma_cookie_complete(tx); |
605 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 590 | dma_descriptor_unmap(tx); |
606 | ioat->active -= desc->hw->tx_cnt; | 591 | ioat->active -= desc->hw->tx_cnt; |
607 | if (tx->callback) { | 592 | if (tx->callback) { |
608 | tx->callback(tx->callback_param); | 593 | tx->callback(tx->callback_param); |
@@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
733 | enum dma_status ret; | 718 | enum dma_status ret; |
734 | 719 | ||
735 | ret = dma_cookie_status(c, cookie, txstate); | 720 | ret = dma_cookie_status(c, cookie, txstate); |
736 | if (ret == DMA_SUCCESS) | 721 | if (ret == DMA_COMPLETE) |
737 | return ret; | 722 | return ret; |
738 | 723 | ||
739 | device->cleanup_fn((unsigned long) c); | 724 | device->cleanup_fn((unsigned long) c); |
@@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
833 | 818 | ||
834 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); | 819 | dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); |
835 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); | 820 | dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); |
836 | flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP | | 821 | flags = DMA_PREP_INTERRUPT; |
837 | DMA_PREP_INTERRUPT; | ||
838 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, | 822 | tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, |
839 | IOAT_TEST_SIZE, flags); | 823 | IOAT_TEST_SIZE, flags); |
840 | if (!tx) { | 824 | if (!tx) { |
@@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) | |||
859 | 843 | ||
860 | if (tmo == 0 || | 844 | if (tmo == 0 || |
861 | dma->device_tx_status(dma_chan, cookie, NULL) | 845 | dma->device_tx_status(dma_chan, cookie, NULL) |
862 | != DMA_SUCCESS) { | 846 | != DMA_COMPLETE) { |
863 | dev_err(dev, "Self-test copy timed out, disabling\n"); | 847 | dev_err(dev, "Self-test copy timed out, disabling\n"); |
864 | err = -ENODEV; | 848 | err = -ENODEV; |
865 | goto unmap_dma; | 849 | goto unmap_dma; |
@@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix"; | |||
885 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | 869 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, |
886 | sizeof(ioat_interrupt_style), 0644); | 870 | sizeof(ioat_interrupt_style), 0644); |
887 | MODULE_PARM_DESC(ioat_interrupt_style, | 871 | MODULE_PARM_DESC(ioat_interrupt_style, |
888 | "set ioat interrupt style: msix (default), " | 872 | "set ioat interrupt style: msix (default), msi, intx"); |
889 | "msix-single-vector, msi, intx)"); | ||
890 | 873 | ||
891 | /** | 874 | /** |
892 | * ioat_dma_setup_interrupts - setup interrupt handler | 875 | * ioat_dma_setup_interrupts - setup interrupt handler |
@@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device) | |||
904 | 887 | ||
905 | if (!strcmp(ioat_interrupt_style, "msix")) | 888 | if (!strcmp(ioat_interrupt_style, "msix")) |
906 | goto msix; | 889 | goto msix; |
907 | if (!strcmp(ioat_interrupt_style, "msix-single-vector")) | ||
908 | goto msix_single_vector; | ||
909 | if (!strcmp(ioat_interrupt_style, "msi")) | 890 | if (!strcmp(ioat_interrupt_style, "msi")) |
910 | goto msi; | 891 | goto msi; |
911 | if (!strcmp(ioat_interrupt_style, "intx")) | 892 | if (!strcmp(ioat_interrupt_style, "intx")) |
@@ -920,10 +901,8 @@ msix: | |||
920 | device->msix_entries[i].entry = i; | 901 | device->msix_entries[i].entry = i; |
921 | 902 | ||
922 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); | 903 | err = pci_enable_msix(pdev, device->msix_entries, msixcnt); |
923 | if (err < 0) | 904 | if (err) |
924 | goto msi; | 905 | goto msi; |
925 | if (err > 0) | ||
926 | goto msix_single_vector; | ||
927 | 906 | ||
928 | for (i = 0; i < msixcnt; i++) { | 907 | for (i = 0; i < msixcnt; i++) { |
929 | msix = &device->msix_entries[i]; | 908 | msix = &device->msix_entries[i]; |
@@ -937,29 +916,13 @@ msix: | |||
937 | chan = ioat_chan_by_index(device, j); | 916 | chan = ioat_chan_by_index(device, j); |
938 | devm_free_irq(dev, msix->vector, chan); | 917 | devm_free_irq(dev, msix->vector, chan); |
939 | } | 918 | } |
940 | goto msix_single_vector; | 919 | goto msi; |
941 | } | 920 | } |
942 | } | 921 | } |
943 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | 922 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; |
944 | device->irq_mode = IOAT_MSIX; | 923 | device->irq_mode = IOAT_MSIX; |
945 | goto done; | 924 | goto done; |
946 | 925 | ||
947 | msix_single_vector: | ||
948 | msix = &device->msix_entries[0]; | ||
949 | msix->entry = 0; | ||
950 | err = pci_enable_msix(pdev, device->msix_entries, 1); | ||
951 | if (err) | ||
952 | goto msi; | ||
953 | |||
954 | err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, | ||
955 | "ioat-msix", device); | ||
956 | if (err) { | ||
957 | pci_disable_msix(pdev); | ||
958 | goto msi; | ||
959 | } | ||
960 | device->irq_mode = IOAT_MSIX_SINGLE; | ||
961 | goto done; | ||
962 | |||
963 | msi: | 926 | msi: |
964 | err = pci_enable_msi(pdev); | 927 | err = pci_enable_msi(pdev); |
965 | if (err) | 928 | if (err) |
@@ -971,7 +934,7 @@ msi: | |||
971 | pci_disable_msi(pdev); | 934 | pci_disable_msi(pdev); |
972 | goto intx; | 935 | goto intx; |
973 | } | 936 | } |
974 | device->irq_mode = IOAT_MSIX; | 937 | device->irq_mode = IOAT_MSI; |
975 | goto done; | 938 | goto done; |
976 | 939 | ||
977 | intx: | 940 | intx: |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 54fb7b9ff9aa..11fb877ddca9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -52,7 +52,6 @@ | |||
52 | enum ioat_irq_mode { | 52 | enum ioat_irq_mode { |
53 | IOAT_NOIRQ = 0, | 53 | IOAT_NOIRQ = 0, |
54 | IOAT_MSIX, | 54 | IOAT_MSIX, |
55 | IOAT_MSIX_SINGLE, | ||
56 | IOAT_MSI, | 55 | IOAT_MSI, |
57 | IOAT_INTX | 56 | IOAT_INTX |
58 | }; | 57 | }; |
@@ -83,7 +82,6 @@ struct ioatdma_device { | |||
83 | struct pci_pool *completion_pool; | 82 | struct pci_pool *completion_pool; |
84 | #define MAX_SED_POOLS 5 | 83 | #define MAX_SED_POOLS 5 |
85 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | 84 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; |
86 | struct kmem_cache *sed_pool; | ||
87 | struct dma_device common; | 85 | struct dma_device common; |
88 | u8 version; | 86 | u8 version; |
89 | struct msix_entry msix_entries[4]; | 87 | struct msix_entry msix_entries[4]; |
@@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err) | |||
342 | return !!err; | 340 | return !!err; |
343 | } | 341 | } |
344 | 342 | ||
345 | static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, | ||
346 | int direction, enum dma_ctrl_flags flags, bool dst) | ||
347 | { | ||
348 | if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || | ||
349 | (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) | ||
350 | pci_unmap_single(pdev, addr, len, direction); | ||
351 | else | ||
352 | pci_unmap_page(pdev, addr, len, direction); | ||
353 | } | ||
354 | |||
355 | int ioat_probe(struct ioatdma_device *device); | 343 | int ioat_probe(struct ioatdma_device *device); |
356 | int ioat_register(struct ioatdma_device *device); | 344 | int ioat_register(struct ioatdma_device *device); |
357 | int ioat1_dma_probe(struct ioatdma_device *dev, int dca); | 345 | int ioat1_dma_probe(struct ioatdma_device *dev, int dca); |
@@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device, | |||
363 | struct ioat_chan_common *chan, int idx); | 351 | struct ioat_chan_common *chan, int idx); |
364 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 352 | enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
365 | struct dma_tx_state *txstate); | 353 | struct dma_tx_state *txstate); |
366 | void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, | ||
367 | size_t len, struct ioat_dma_descriptor *hw); | ||
368 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | 354 | bool ioat_cleanup_preamble(struct ioat_chan_common *chan, |
369 | dma_addr_t *phys_complete); | 355 | dma_addr_t *phys_complete); |
370 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | 356 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b925e1b1d139..5d3affe7e976 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
@@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
148 | tx = &desc->txd; | 148 | tx = &desc->txd; |
149 | dump_desc_dbg(ioat, desc); | 149 | dump_desc_dbg(ioat, desc); |
150 | if (tx->cookie) { | 150 | if (tx->cookie) { |
151 | ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); | 151 | dma_descriptor_unmap(tx); |
152 | dma_cookie_complete(tx); | 152 | dma_cookie_complete(tx); |
153 | if (tx->callback) { | 153 | if (tx->callback) { |
154 | tx->callback(tx->callback_param); | 154 | tx->callback(tx->callback_param); |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 212d584fe427..470292767e68 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
@@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | |||
157 | 157 | ||
158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); |
160 | void ioat3_dma_remove(struct ioatdma_device *dev); | ||
161 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 160 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
162 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 161 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
163 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); | 162 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d8ececaf1b57..820817e97e62 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -67,6 +67,8 @@ | |||
67 | #include "dma.h" | 67 | #include "dma.h" |
68 | #include "dma_v2.h" | 68 | #include "dma_v2.h" |
69 | 69 | ||
70 | extern struct kmem_cache *ioat3_sed_cache; | ||
71 | |||
70 | /* ioat hardware assumes at least two sources for raid operations */ | 72 | /* ioat hardware assumes at least two sources for raid operations */ |
71 | #define src_cnt_to_sw(x) ((x) + 2) | 73 | #define src_cnt_to_sw(x) ((x) + 2) |
72 | #define src_cnt_to_hw(x) ((x) - 2) | 74 | #define src_cnt_to_hw(x) ((x) - 2) |
@@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; | |||
87 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, | 89 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, |
88 | 0, 1, 2, 3, 4, 5, 6 }; | 90 | 0, 1, 2, 3, 4, 5, 6 }; |
89 | 91 | ||
90 | /* | ||
91 | * technically sources 1 and 2 do not require SED, but the op will have | ||
92 | * at least 9 descriptors so that's irrelevant. | ||
93 | */ | ||
94 | static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
95 | 1, 1, 1, 1, 1, 1, 1 }; | ||
96 | |||
97 | static void ioat3_eh(struct ioat2_dma_chan *ioat); | 92 | static void ioat3_eh(struct ioat2_dma_chan *ioat); |
98 | 93 | ||
99 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) | ||
100 | { | ||
101 | struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; | ||
102 | |||
103 | return raw->field[xor_idx_to_field[idx]]; | ||
104 | } | ||
105 | |||
106 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], | 94 | static void xor_set_src(struct ioat_raw_descriptor *descs[2], |
107 | dma_addr_t addr, u32 offset, int idx) | 95 | dma_addr_t addr, u32 offset, int idx) |
108 | { | 96 | { |
@@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], | |||
135 | pq->coef[idx] = coef; | 123 | pq->coef[idx] = coef; |
136 | } | 124 | } |
137 | 125 | ||
138 | static int sed_get_pq16_pool_idx(int src_cnt) | ||
139 | { | ||
140 | |||
141 | return pq16_idx_to_sed[src_cnt]; | ||
142 | } | ||
143 | |||
144 | static bool is_jf_ioat(struct pci_dev *pdev) | 126 | static bool is_jf_ioat(struct pci_dev *pdev) |
145 | { | 127 | { |
146 | switch (pdev->device) { | 128 | switch (pdev->device) { |
@@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
272 | struct ioat_sed_ent *sed; | 254 | struct ioat_sed_ent *sed; |
273 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | 255 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; |
274 | 256 | ||
275 | sed = kmem_cache_alloc(device->sed_pool, flags); | 257 | sed = kmem_cache_alloc(ioat3_sed_cache, flags); |
276 | if (!sed) | 258 | if (!sed) |
277 | return NULL; | 259 | return NULL; |
278 | 260 | ||
@@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | |||
280 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], | 262 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], |
281 | flags, &sed->dma); | 263 | flags, &sed->dma); |
282 | if (!sed->hw) { | 264 | if (!sed->hw) { |
283 | kmem_cache_free(device->sed_pool, sed); | 265 | kmem_cache_free(ioat3_sed_cache, sed); |
284 | return NULL; | 266 | return NULL; |
285 | } | 267 | } |
286 | 268 | ||
@@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s | |||
293 | return; | 275 | return; |
294 | 276 | ||
295 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | 277 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); |
296 | kmem_cache_free(device->sed_pool, sed); | 278 | kmem_cache_free(ioat3_sed_cache, sed); |
297 | } | ||
298 | |||
299 | static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | ||
300 | struct ioat_ring_ent *desc, int idx) | ||
301 | { | ||
302 | struct ioat_chan_common *chan = &ioat->base; | ||
303 | struct pci_dev *pdev = chan->device->pdev; | ||
304 | size_t len = desc->len; | ||
305 | size_t offset = len - desc->hw->size; | ||
306 | struct dma_async_tx_descriptor *tx = &desc->txd; | ||
307 | enum dma_ctrl_flags flags = tx->flags; | ||
308 | |||
309 | switch (desc->hw->ctl_f.op) { | ||
310 | case IOAT_OP_COPY: | ||
311 | if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ | ||
312 | ioat_dma_unmap(chan, flags, len, desc->hw); | ||
313 | break; | ||
314 | case IOAT_OP_XOR_VAL: | ||
315 | case IOAT_OP_XOR: { | ||
316 | struct ioat_xor_descriptor *xor = desc->xor; | ||
317 | struct ioat_ring_ent *ext; | ||
318 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | ||
319 | int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); | ||
320 | struct ioat_raw_descriptor *descs[2]; | ||
321 | int i; | ||
322 | |||
323 | if (src_cnt > 5) { | ||
324 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
325 | xor_ex = ext->xor_ex; | ||
326 | } | ||
327 | |||
328 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
329 | descs[0] = (struct ioat_raw_descriptor *) xor; | ||
330 | descs[1] = (struct ioat_raw_descriptor *) xor_ex; | ||
331 | for (i = 0; i < src_cnt; i++) { | ||
332 | dma_addr_t src = xor_get_src(descs, i); | ||
333 | |||
334 | ioat_unmap(pdev, src - offset, len, | ||
335 | PCI_DMA_TODEVICE, flags, 0); | ||
336 | } | ||
337 | |||
338 | /* dest is a source in xor validate operations */ | ||
339 | if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
340 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
341 | PCI_DMA_TODEVICE, flags, 1); | ||
342 | break; | ||
343 | } | ||
344 | } | ||
345 | |||
346 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) | ||
347 | ioat_unmap(pdev, xor->dst_addr - offset, len, | ||
348 | PCI_DMA_FROMDEVICE, flags, 1); | ||
349 | break; | ||
350 | } | ||
351 | case IOAT_OP_PQ_VAL: | ||
352 | case IOAT_OP_PQ: { | ||
353 | struct ioat_pq_descriptor *pq = desc->pq; | ||
354 | struct ioat_ring_ent *ext; | ||
355 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | ||
356 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | ||
357 | struct ioat_raw_descriptor *descs[2]; | ||
358 | int i; | ||
359 | |||
360 | if (src_cnt > 3) { | ||
361 | ext = ioat2_get_ring_ent(ioat, idx + 1); | ||
362 | pq_ex = ext->pq_ex; | ||
363 | } | ||
364 | |||
365 | /* in the 'continue' case don't unmap the dests as sources */ | ||
366 | if (dmaf_p_disabled_continue(flags)) | ||
367 | src_cnt--; | ||
368 | else if (dmaf_continue(flags)) | ||
369 | src_cnt -= 3; | ||
370 | |||
371 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
372 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
373 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | ||
374 | for (i = 0; i < src_cnt; i++) { | ||
375 | dma_addr_t src = pq_get_src(descs, i); | ||
376 | |||
377 | ioat_unmap(pdev, src - offset, len, | ||
378 | PCI_DMA_TODEVICE, flags, 0); | ||
379 | } | ||
380 | |||
381 | /* the dests are sources in pq validate operations */ | ||
382 | if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
383 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
384 | ioat_unmap(pdev, pq->p_addr - offset, | ||
385 | len, PCI_DMA_TODEVICE, flags, 0); | ||
386 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
387 | ioat_unmap(pdev, pq->q_addr - offset, | ||
388 | len, PCI_DMA_TODEVICE, flags, 0); | ||
389 | break; | ||
390 | } | ||
391 | } | ||
392 | |||
393 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
394 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
395 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
396 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
397 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
398 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
399 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
400 | } | ||
401 | break; | ||
402 | } | ||
403 | case IOAT_OP_PQ_16S: | ||
404 | case IOAT_OP_PQ_VAL_16S: { | ||
405 | struct ioat_pq_descriptor *pq = desc->pq; | ||
406 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
407 | struct ioat_raw_descriptor *descs[4]; | ||
408 | int i; | ||
409 | |||
410 | /* in the 'continue' case don't unmap the dests as sources */ | ||
411 | if (dmaf_p_disabled_continue(flags)) | ||
412 | src_cnt--; | ||
413 | else if (dmaf_continue(flags)) | ||
414 | src_cnt -= 3; | ||
415 | |||
416 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
417 | descs[0] = (struct ioat_raw_descriptor *)pq; | ||
418 | descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); | ||
419 | descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); | ||
420 | for (i = 0; i < src_cnt; i++) { | ||
421 | dma_addr_t src = pq16_get_src(descs, i); | ||
422 | |||
423 | ioat_unmap(pdev, src - offset, len, | ||
424 | PCI_DMA_TODEVICE, flags, 0); | ||
425 | } | ||
426 | |||
427 | /* the dests are sources in pq validate operations */ | ||
428 | if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { | ||
429 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
430 | ioat_unmap(pdev, pq->p_addr - offset, | ||
431 | len, PCI_DMA_TODEVICE, | ||
432 | flags, 0); | ||
433 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
434 | ioat_unmap(pdev, pq->q_addr - offset, | ||
435 | len, PCI_DMA_TODEVICE, | ||
436 | flags, 0); | ||
437 | break; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
442 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
443 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
444 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
445 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
446 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
447 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
448 | } | ||
449 | break; | ||
450 | } | ||
451 | default: | ||
452 | dev_err(&pdev->dev, "%s: unknown op type: %#x\n", | ||
453 | __func__, desc->hw->ctl_f.op); | ||
454 | } | ||
455 | } | 279 | } |
456 | 280 | ||
457 | static bool desc_has_ext(struct ioat_ring_ent *desc) | 281 | static bool desc_has_ext(struct ioat_ring_ent *desc) |
@@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
577 | tx = &desc->txd; | 401 | tx = &desc->txd; |
578 | if (tx->cookie) { | 402 | if (tx->cookie) { |
579 | dma_cookie_complete(tx); | 403 | dma_cookie_complete(tx); |
580 | ioat3_dma_unmap(ioat, desc, idx + i); | 404 | dma_descriptor_unmap(tx); |
581 | if (tx->callback) { | 405 | if (tx->callback) { |
582 | tx->callback(tx->callback_param); | 406 | tx->callback(tx->callback_param); |
583 | tx->callback = NULL; | 407 | tx->callback = NULL; |
@@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, | |||
807 | enum dma_status ret; | 631 | enum dma_status ret; |
808 | 632 | ||
809 | ret = dma_cookie_status(c, cookie, txstate); | 633 | ret = dma_cookie_status(c, cookie, txstate); |
810 | if (ret == DMA_SUCCESS) | 634 | if (ret == DMA_COMPLETE) |
811 | return ret; | 635 | return ret; |
812 | 636 | ||
813 | ioat3_cleanup(ioat); | 637 | ioat3_cleanup(ioat); |
@@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
1129 | u8 op; | 953 | u8 op; |
1130 | int i, s, idx, num_descs; | 954 | int i, s, idx, num_descs; |
1131 | 955 | ||
1132 | /* this function only handles src_cnt 9 - 16 */ | ||
1133 | BUG_ON(src_cnt < 9); | ||
1134 | |||
1135 | /* this function is only called with 9-16 sources */ | 956 | /* this function is only called with 9-16 sources */ |
1136 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; | 957 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; |
1137 | 958 | ||
@@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
1159 | 980 | ||
1160 | descs[0] = (struct ioat_raw_descriptor *) pq; | 981 | descs[0] = (struct ioat_raw_descriptor *) pq; |
1161 | 982 | ||
1162 | desc->sed = ioat3_alloc_sed(device, | 983 | desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3); |
1163 | sed_get_pq16_pool_idx(src_cnt)); | ||
1164 | if (!desc->sed) { | 984 | if (!desc->sed) { |
1165 | dev_err(to_dev(chan), | 985 | dev_err(to_dev(chan), |
1166 | "%s: no free sed entries\n", __func__); | 986 | "%s: no free sed entries\n", __func__); |
@@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
1218 | return &desc->txd; | 1038 | return &desc->txd; |
1219 | } | 1039 | } |
1220 | 1040 | ||
1041 | static int src_cnt_flags(unsigned int src_cnt, unsigned long flags) | ||
1042 | { | ||
1043 | if (dmaf_p_disabled_continue(flags)) | ||
1044 | return src_cnt + 1; | ||
1045 | else if (dmaf_continue(flags)) | ||
1046 | return src_cnt + 3; | ||
1047 | else | ||
1048 | return src_cnt; | ||
1049 | } | ||
1050 | |||
1221 | static struct dma_async_tx_descriptor * | 1051 | static struct dma_async_tx_descriptor * |
1222 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | 1052 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, |
1223 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1053 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
1224 | unsigned long flags) | 1054 | unsigned long flags) |
1225 | { | 1055 | { |
1226 | struct dma_device *dma = chan->device; | ||
1227 | |||
1228 | /* specify valid address for disabled result */ | 1056 | /* specify valid address for disabled result */ |
1229 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1057 | if (flags & DMA_PREP_PQ_DISABLE_P) |
1230 | dst[0] = dst[1]; | 1058 | dst[0] = dst[1]; |
@@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
1244 | single_source_coef[0] = scf[0]; | 1072 | single_source_coef[0] = scf[0]; |
1245 | single_source_coef[1] = 0; | 1073 | single_source_coef[1] = 0; |
1246 | 1074 | ||
1247 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1075 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1248 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, | 1076 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, |
1249 | 2, single_source_coef, len, | 1077 | 2, single_source_coef, len, |
1250 | flags) : | 1078 | flags) : |
@@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
1252 | single_source_coef, len, flags); | 1080 | single_source_coef, len, flags); |
1253 | 1081 | ||
1254 | } else { | 1082 | } else { |
1255 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1083 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1256 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | 1084 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, |
1257 | scf, len, flags) : | 1085 | scf, len, flags) : |
1258 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, | 1086 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, |
@@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
1265 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1093 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
1266 | enum sum_check_flags *pqres, unsigned long flags) | 1094 | enum sum_check_flags *pqres, unsigned long flags) |
1267 | { | 1095 | { |
1268 | struct dma_device *dma = chan->device; | ||
1269 | |||
1270 | /* specify valid address for disabled result */ | 1096 | /* specify valid address for disabled result */ |
1271 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1097 | if (flags & DMA_PREP_PQ_DISABLE_P) |
1272 | pq[0] = pq[1]; | 1098 | pq[0] = pq[1]; |
@@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
1278 | */ | 1104 | */ |
1279 | *pqres = 0; | 1105 | *pqres = 0; |
1280 | 1106 | ||
1281 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1107 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1282 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1108 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, |
1283 | flags) : | 1109 | flags) : |
1284 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1110 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, |
@@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor * | |||
1289 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | 1115 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
1290 | unsigned int src_cnt, size_t len, unsigned long flags) | 1116 | unsigned int src_cnt, size_t len, unsigned long flags) |
1291 | { | 1117 | { |
1292 | struct dma_device *dma = chan->device; | ||
1293 | unsigned char scf[src_cnt]; | 1118 | unsigned char scf[src_cnt]; |
1294 | dma_addr_t pq[2]; | 1119 | dma_addr_t pq[2]; |
1295 | 1120 | ||
@@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |||
1298 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1123 | flags |= DMA_PREP_PQ_DISABLE_Q; |
1299 | pq[1] = dst; /* specify valid address for disabled result */ | 1124 | pq[1] = dst; /* specify valid address for disabled result */ |
1300 | 1125 | ||
1301 | return (src_cnt > 8) && (dma->max_pq > 8) ? | 1126 | return src_cnt_flags(src_cnt, flags) > 8 ? |
1302 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1127 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, |
1303 | flags) : | 1128 | flags) : |
1304 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1129 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, |
@@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
1310 | unsigned int src_cnt, size_t len, | 1135 | unsigned int src_cnt, size_t len, |
1311 | enum sum_check_flags *result, unsigned long flags) | 1136 | enum sum_check_flags *result, unsigned long flags) |
1312 | { | 1137 | { |
1313 | struct dma_device *dma = chan->device; | ||
1314 | unsigned char scf[src_cnt]; | 1138 | unsigned char scf[src_cnt]; |
1315 | dma_addr_t pq[2]; | 1139 | dma_addr_t pq[2]; |
1316 | 1140 | ||
@@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
1324 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1148 | flags |= DMA_PREP_PQ_DISABLE_Q; |
1325 | pq[1] = pq[0]; /* specify valid address for disabled result */ | 1149 | pq[1] = pq[0]; /* specify valid address for disabled result */ |
1326 | 1150 | ||
1327 | 1151 | return src_cnt_flags(src_cnt, flags) > 8 ? | |
1328 | return (src_cnt > 8) && (dma->max_pq > 8) ? | ||
1329 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | 1152 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, |
1330 | scf, len, flags) : | 1153 | scf, len, flags) : |
1331 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | 1154 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, |
@@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1444 | DMA_TO_DEVICE); | 1267 | DMA_TO_DEVICE); |
1445 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | 1268 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, |
1446 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | 1269 | IOAT_NUM_SRC_TEST, PAGE_SIZE, |
1447 | DMA_PREP_INTERRUPT | | 1270 | DMA_PREP_INTERRUPT); |
1448 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1449 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1450 | 1271 | ||
1451 | if (!tx) { | 1272 | if (!tx) { |
1452 | dev_err(dev, "Self-test xor prep failed\n"); | 1273 | dev_err(dev, "Self-test xor prep failed\n"); |
@@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1468 | 1289 | ||
1469 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1290 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
1470 | 1291 | ||
1471 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1292 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
1472 | dev_err(dev, "Self-test xor timed out\n"); | 1293 | dev_err(dev, "Self-test xor timed out\n"); |
1473 | err = -ENODEV; | 1294 | err = -ENODEV; |
1474 | goto dma_unmap; | 1295 | goto dma_unmap; |
@@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1507 | DMA_TO_DEVICE); | 1328 | DMA_TO_DEVICE); |
1508 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 1329 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
1509 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 1330 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
1510 | &xor_val_result, DMA_PREP_INTERRUPT | | 1331 | &xor_val_result, DMA_PREP_INTERRUPT); |
1511 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1512 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1513 | if (!tx) { | 1332 | if (!tx) { |
1514 | dev_err(dev, "Self-test zero prep failed\n"); | 1333 | dev_err(dev, "Self-test zero prep failed\n"); |
1515 | err = -ENODEV; | 1334 | err = -ENODEV; |
@@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1530 | 1349 | ||
1531 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1350 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
1532 | 1351 | ||
1533 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1352 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
1534 | dev_err(dev, "Self-test validate timed out\n"); | 1353 | dev_err(dev, "Self-test validate timed out\n"); |
1535 | err = -ENODEV; | 1354 | err = -ENODEV; |
1536 | goto dma_unmap; | 1355 | goto dma_unmap; |
@@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1545 | goto free_resources; | 1364 | goto free_resources; |
1546 | } | 1365 | } |
1547 | 1366 | ||
1367 | memset(page_address(dest), 0, PAGE_SIZE); | ||
1368 | |||
1548 | /* test for non-zero parity sum */ | 1369 | /* test for non-zero parity sum */ |
1549 | op = IOAT_OP_XOR_VAL; | 1370 | op = IOAT_OP_XOR_VAL; |
1550 | 1371 | ||
@@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1554 | DMA_TO_DEVICE); | 1375 | DMA_TO_DEVICE); |
1555 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | 1376 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, |
1556 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | 1377 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, |
1557 | &xor_val_result, DMA_PREP_INTERRUPT | | 1378 | &xor_val_result, DMA_PREP_INTERRUPT); |
1558 | DMA_COMPL_SKIP_SRC_UNMAP | | ||
1559 | DMA_COMPL_SKIP_DEST_UNMAP); | ||
1560 | if (!tx) { | 1379 | if (!tx) { |
1561 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | 1380 | dev_err(dev, "Self-test 2nd zero prep failed\n"); |
1562 | err = -ENODEV; | 1381 | err = -ENODEV; |
@@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) | |||
1577 | 1396 | ||
1578 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | 1397 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); |
1579 | 1398 | ||
1580 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1399 | if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
1581 | dev_err(dev, "Self-test 2nd validate timed out\n"); | 1400 | dev_err(dev, "Self-test 2nd validate timed out\n"); |
1582 | err = -ENODEV; | 1401 | err = -ENODEV; |
1583 | goto dma_unmap; | 1402 | goto dma_unmap; |
@@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) | |||
1630 | 1449 | ||
1631 | static int ioat3_irq_reinit(struct ioatdma_device *device) | 1450 | static int ioat3_irq_reinit(struct ioatdma_device *device) |
1632 | { | 1451 | { |
1633 | int msixcnt = device->common.chancnt; | ||
1634 | struct pci_dev *pdev = device->pdev; | 1452 | struct pci_dev *pdev = device->pdev; |
1635 | int i; | 1453 | int irq = pdev->irq, i; |
1636 | struct msix_entry *msix; | 1454 | |
1637 | struct ioat_chan_common *chan; | 1455 | if (!is_bwd_ioat(pdev)) |
1638 | int err = 0; | 1456 | return 0; |
1639 | 1457 | ||
1640 | switch (device->irq_mode) { | 1458 | switch (device->irq_mode) { |
1641 | case IOAT_MSIX: | 1459 | case IOAT_MSIX: |
1460 | for (i = 0; i < device->common.chancnt; i++) { | ||
1461 | struct msix_entry *msix = &device->msix_entries[i]; | ||
1462 | struct ioat_chan_common *chan; | ||
1642 | 1463 | ||
1643 | for (i = 0; i < msixcnt; i++) { | ||
1644 | msix = &device->msix_entries[i]; | ||
1645 | chan = ioat_chan_by_index(device, i); | 1464 | chan = ioat_chan_by_index(device, i); |
1646 | devm_free_irq(&pdev->dev, msix->vector, chan); | 1465 | devm_free_irq(&pdev->dev, msix->vector, chan); |
1647 | } | 1466 | } |
1648 | 1467 | ||
1649 | pci_disable_msix(pdev); | 1468 | pci_disable_msix(pdev); |
1650 | break; | 1469 | break; |
1651 | |||
1652 | case IOAT_MSIX_SINGLE: | ||
1653 | msix = &device->msix_entries[0]; | ||
1654 | chan = ioat_chan_by_index(device, 0); | ||
1655 | devm_free_irq(&pdev->dev, msix->vector, chan); | ||
1656 | pci_disable_msix(pdev); | ||
1657 | break; | ||
1658 | |||
1659 | case IOAT_MSI: | 1470 | case IOAT_MSI: |
1660 | chan = ioat_chan_by_index(device, 0); | ||
1661 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
1662 | pci_disable_msi(pdev); | 1471 | pci_disable_msi(pdev); |
1663 | break; | 1472 | /* fall through */ |
1664 | |||
1665 | case IOAT_INTX: | 1473 | case IOAT_INTX: |
1666 | chan = ioat_chan_by_index(device, 0); | 1474 | devm_free_irq(&pdev->dev, irq, device); |
1667 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
1668 | break; | 1475 | break; |
1669 | |||
1670 | default: | 1476 | default: |
1671 | return 0; | 1477 | return 0; |
1672 | } | 1478 | } |
1673 | |||
1674 | device->irq_mode = IOAT_NOIRQ; | 1479 | device->irq_mode = IOAT_NOIRQ; |
1675 | 1480 | ||
1676 | err = ioat_dma_setup_interrupts(device); | 1481 | return ioat_dma_setup_interrupts(device); |
1677 | |||
1678 | return err; | ||
1679 | } | 1482 | } |
1680 | 1483 | ||
1681 | static int ioat3_reset_hw(struct ioat_chan_common *chan) | 1484 | static int ioat3_reset_hw(struct ioat_chan_common *chan) |
@@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) | |||
1718 | } | 1521 | } |
1719 | 1522 | ||
1720 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); | 1523 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); |
1721 | if (err) { | 1524 | if (!err) |
1722 | dev_err(&pdev->dev, "Failed to reset!\n"); | ||
1723 | return err; | ||
1724 | } | ||
1725 | |||
1726 | if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) | ||
1727 | err = ioat3_irq_reinit(device); | 1525 | err = ioat3_irq_reinit(device); |
1728 | 1526 | ||
1527 | if (err) | ||
1528 | dev_err(&pdev->dev, "Failed to reset: %d\n", err); | ||
1529 | |||
1729 | return err; | 1530 | return err; |
1730 | } | 1531 | } |
1731 | 1532 | ||
@@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1835 | char pool_name[14]; | 1636 | char pool_name[14]; |
1836 | int i; | 1637 | int i; |
1837 | 1638 | ||
1838 | /* allocate sw descriptor pool for SED */ | ||
1839 | device->sed_pool = kmem_cache_create("ioat_sed", | ||
1840 | sizeof(struct ioat_sed_ent), 0, 0, NULL); | ||
1841 | if (!device->sed_pool) | ||
1842 | return -ENOMEM; | ||
1843 | |||
1844 | for (i = 0; i < MAX_SED_POOLS; i++) { | 1639 | for (i = 0; i < MAX_SED_POOLS; i++) { |
1845 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | 1640 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); |
1846 | 1641 | ||
1847 | /* allocate SED DMA pool */ | 1642 | /* allocate SED DMA pool */ |
1848 | device->sed_hw_pool[i] = dma_pool_create(pool_name, | 1643 | device->sed_hw_pool[i] = dmam_pool_create(pool_name, |
1849 | &pdev->dev, | 1644 | &pdev->dev, |
1850 | SED_SIZE * (i + 1), 64, 0); | 1645 | SED_SIZE * (i + 1), 64, 0); |
1851 | if (!device->sed_hw_pool[i]) | 1646 | if (!device->sed_hw_pool[i]) |
1852 | goto sed_pool_cleanup; | 1647 | return -ENOMEM; |
1853 | 1648 | ||
1854 | } | 1649 | } |
1855 | } | 1650 | } |
@@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1875 | device->dca = ioat3_dca_init(pdev, device->reg_base); | 1670 | device->dca = ioat3_dca_init(pdev, device->reg_base); |
1876 | 1671 | ||
1877 | return 0; | 1672 | return 0; |
1878 | |||
1879 | sed_pool_cleanup: | ||
1880 | if (device->sed_pool) { | ||
1881 | int i; | ||
1882 | kmem_cache_destroy(device->sed_pool); | ||
1883 | |||
1884 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
1885 | if (device->sed_hw_pool[i]) | ||
1886 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
1887 | } | ||
1888 | |||
1889 | return -ENOMEM; | ||
1890 | } | ||
1891 | |||
1892 | void ioat3_dma_remove(struct ioatdma_device *device) | ||
1893 | { | ||
1894 | if (device->sed_pool) { | ||
1895 | int i; | ||
1896 | kmem_cache_destroy(device->sed_pool); | ||
1897 | |||
1898 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
1899 | if (device->sed_hw_pool[i]) | ||
1900 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
1901 | } | ||
1902 | } | 1673 | } |
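The ioat3_prep_pq*() hunks above replace the old (src_cnt > 8) && (dma->max_pq > 8) test with src_cnt_flags(), which folds the sources implied by the RAID continuation flags into the count before choosing between the 8-source and 16-source descriptor formats. A minimal sketch of the resulting dispatch; the helper body is taken from the patch, the surrounding call is illustrative only:

        static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
        {
                if (dmaf_p_disabled_continue(flags))
                        return src_cnt + 1;     /* continuation with P disabled implies one extra source */
                else if (dmaf_continue(flags))
                        return src_cnt + 3;     /* full continuation implies three extra sources */
                else
                        return src_cnt;
        }

        /* illustrative: how ioat3_prep_pq() picks a descriptor format */
        if (src_cnt_flags(src_cnt, flags) > 8)
                tx = __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
                                            scf, len, flags);
        else
                tx = __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
                                          scf, len, flags);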
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 2c8d560e6334..1d051cd045db 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644); | |||
123 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 123 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
124 | 124 | ||
125 | struct kmem_cache *ioat2_cache; | 125 | struct kmem_cache *ioat2_cache; |
126 | struct kmem_cache *ioat3_sed_cache; | ||
126 | 127 | ||
127 | #define DRV_NAME "ioatdma" | 128 | #define DRV_NAME "ioatdma" |
128 | 129 | ||
@@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev) | |||
207 | if (!device) | 208 | if (!device) |
208 | return; | 209 | return; |
209 | 210 | ||
210 | if (device->version >= IOAT_VER_3_0) | ||
211 | ioat3_dma_remove(device); | ||
212 | |||
213 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | 211 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
214 | if (device->dca) { | 212 | if (device->dca) { |
215 | unregister_dca_provider(device->dca, &pdev->dev); | 213 | unregister_dca_provider(device->dca, &pdev->dev); |
@@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev) | |||
221 | 219 | ||
222 | static int __init ioat_init_module(void) | 220 | static int __init ioat_init_module(void) |
223 | { | 221 | { |
224 | int err; | 222 | int err = -ENOMEM; |
225 | 223 | ||
226 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", | 224 | pr_info("%s: Intel(R) QuickData Technology Driver %s\n", |
227 | DRV_NAME, IOAT_DMA_VERSION); | 225 | DRV_NAME, IOAT_DMA_VERSION); |
@@ -231,9 +229,21 @@ static int __init ioat_init_module(void) | |||
231 | if (!ioat2_cache) | 229 | if (!ioat2_cache) |
232 | return -ENOMEM; | 230 | return -ENOMEM; |
233 | 231 | ||
232 | ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); | ||
233 | if (!ioat3_sed_cache) | ||
234 | goto err_ioat2_cache; | ||
235 | |||
234 | err = pci_register_driver(&ioat_pci_driver); | 236 | err = pci_register_driver(&ioat_pci_driver); |
235 | if (err) | 237 | if (err) |
236 | kmem_cache_destroy(ioat2_cache); | 238 | goto err_ioat3_cache; |
239 | |||
240 | return 0; | ||
241 | |||
242 | err_ioat3_cache: | ||
243 | kmem_cache_destroy(ioat3_sed_cache); | ||
244 | |||
245 | err_ioat2_cache: | ||
246 | kmem_cache_destroy(ioat2_cache); | ||
237 | 247 | ||
238 | return err; | 248 | return err; |
239 | } | 249 | } |
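ioat_init_module() above now allocates a second slab cache with KMEM_CACHE() and unwinds both caches if PCI registration fails. KMEM_CACHE() derives the cache name, object size and alignment from the struct type, so the new call is roughly equivalent to the following sketch (not the literal macro expansion):

        ioat3_sed_cache = kmem_cache_create("ioat_sed_ent",
                                            sizeof(struct ioat_sed_ent),
                                            __alignof__(struct ioat_sed_ent),
                                            0 /* flags */, NULL);
        if (!ioat3_sed_cache)
                goto err_ioat2_cache;   /* unwind the cache created before it */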
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index dd8b44a56e5d..c56137bc3868 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) | |||
61 | } | 61 | } |
62 | } | 62 | } |
63 | 63 | ||
64 | static void | ||
65 | iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
66 | { | ||
67 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
68 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
69 | struct device *dev = &iop_chan->device->pdev->dev; | ||
70 | u32 len = unmap->unmap_len; | ||
71 | enum dma_ctrl_flags flags = tx->flags; | ||
72 | u32 src_cnt; | ||
73 | dma_addr_t addr; | ||
74 | dma_addr_t dest; | ||
75 | |||
76 | src_cnt = unmap->unmap_src_cnt; | ||
77 | dest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
78 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
79 | enum dma_data_direction dir; | ||
80 | |||
81 | if (src_cnt > 1) /* is xor? */ | ||
82 | dir = DMA_BIDIRECTIONAL; | ||
83 | else | ||
84 | dir = DMA_FROM_DEVICE; | ||
85 | |||
86 | dma_unmap_page(dev, dest, len, dir); | ||
87 | } | ||
88 | |||
89 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
90 | while (src_cnt--) { | ||
91 | addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); | ||
92 | if (addr == dest) | ||
93 | continue; | ||
94 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
95 | } | ||
96 | } | ||
97 | desc->group_head = NULL; | ||
98 | } | ||
99 | |||
100 | static void | ||
101 | iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) | ||
102 | { | ||
103 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | ||
104 | struct iop_adma_desc_slot *unmap = desc->group_head; | ||
105 | struct device *dev = &iop_chan->device->pdev->dev; | ||
106 | u32 len = unmap->unmap_len; | ||
107 | enum dma_ctrl_flags flags = tx->flags; | ||
108 | u32 src_cnt = unmap->unmap_src_cnt; | ||
109 | dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); | ||
110 | dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); | ||
111 | int i; | ||
112 | |||
113 | if (tx->flags & DMA_PREP_CONTINUE) | ||
114 | src_cnt -= 3; | ||
115 | |||
116 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { | ||
117 | dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); | ||
118 | dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); | ||
119 | } | ||
120 | |||
121 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
122 | dma_addr_t addr; | ||
123 | |||
124 | for (i = 0; i < src_cnt; i++) { | ||
125 | addr = iop_desc_get_src_addr(unmap, iop_chan, i); | ||
126 | dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); | ||
127 | } | ||
128 | if (desc->pq_check_result) { | ||
129 | dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); | ||
130 | dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); | ||
131 | } | ||
132 | } | ||
133 | |||
134 | desc->group_head = NULL; | ||
135 | } | ||
136 | |||
137 | |||
138 | static dma_cookie_t | 64 | static dma_cookie_t |
139 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | 65 | iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, |
140 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) | 66 | struct iop_adma_chan *iop_chan, dma_cookie_t cookie) |
@@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, | |||
152 | if (tx->callback) | 78 | if (tx->callback) |
153 | tx->callback(tx->callback_param); | 79 | tx->callback(tx->callback_param); |
154 | 80 | ||
155 | /* unmap dma addresses | 81 | dma_descriptor_unmap(tx); |
156 | * (unmap_single vs unmap_page?) | 82 | if (desc->group_head) |
157 | */ | 83 | desc->group_head = NULL; |
158 | if (desc->group_head && desc->unmap_len) { | ||
159 | if (iop_desc_is_pq(desc)) | ||
160 | iop_desc_unmap_pq(iop_chan, desc); | ||
161 | else | ||
162 | iop_desc_unmap(iop_chan, desc); | ||
163 | } | ||
164 | } | 84 | } |
165 | 85 | ||
166 | /* run dependent operations */ | 86 | /* run dependent operations */ |
@@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
591 | if (sw_desc) { | 511 | if (sw_desc) { |
592 | grp_start = sw_desc->group_head; | 512 | grp_start = sw_desc->group_head; |
593 | iop_desc_init_interrupt(grp_start, iop_chan); | 513 | iop_desc_init_interrupt(grp_start, iop_chan); |
594 | grp_start->unmap_len = 0; | ||
595 | sw_desc->async_tx.flags = flags; | 514 | sw_desc->async_tx.flags = flags; |
596 | } | 515 | } |
597 | spin_unlock_bh(&iop_chan->lock); | 516 | spin_unlock_bh(&iop_chan->lock); |
@@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
623 | iop_desc_set_byte_count(grp_start, iop_chan, len); | 542 | iop_desc_set_byte_count(grp_start, iop_chan, len); |
624 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 543 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
625 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); | 544 | iop_desc_set_memcpy_src_addr(grp_start, dma_src); |
626 | sw_desc->unmap_src_cnt = 1; | ||
627 | sw_desc->unmap_len = len; | ||
628 | sw_desc->async_tx.flags = flags; | 545 | sw_desc->async_tx.flags = flags; |
629 | } | 546 | } |
630 | spin_unlock_bh(&iop_chan->lock); | 547 | spin_unlock_bh(&iop_chan->lock); |
@@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
657 | iop_desc_init_xor(grp_start, src_cnt, flags); | 574 | iop_desc_init_xor(grp_start, src_cnt, flags); |
658 | iop_desc_set_byte_count(grp_start, iop_chan, len); | 575 | iop_desc_set_byte_count(grp_start, iop_chan, len); |
659 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); | 576 | iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest); |
660 | sw_desc->unmap_src_cnt = src_cnt; | ||
661 | sw_desc->unmap_len = len; | ||
662 | sw_desc->async_tx.flags = flags; | 577 | sw_desc->async_tx.flags = flags; |
663 | while (src_cnt--) | 578 | while (src_cnt--) |
664 | iop_desc_set_xor_src_addr(grp_start, src_cnt, | 579 | iop_desc_set_xor_src_addr(grp_start, src_cnt, |
@@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, | |||
694 | grp_start->xor_check_result = result; | 609 | grp_start->xor_check_result = result; |
695 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", | 610 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", |
696 | __func__, grp_start->xor_check_result); | 611 | __func__, grp_start->xor_check_result); |
697 | sw_desc->unmap_src_cnt = src_cnt; | ||
698 | sw_desc->unmap_len = len; | ||
699 | sw_desc->async_tx.flags = flags; | 612 | sw_desc->async_tx.flags = flags; |
700 | while (src_cnt--) | 613 | while (src_cnt--) |
701 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, | 614 | iop_desc_set_zero_sum_src_addr(grp_start, src_cnt, |
@@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
748 | dst[0] = dst[1] & 0x7; | 661 | dst[0] = dst[1] & 0x7; |
749 | 662 | ||
750 | iop_desc_set_pq_addr(g, dst); | 663 | iop_desc_set_pq_addr(g, dst); |
751 | sw_desc->unmap_src_cnt = src_cnt; | ||
752 | sw_desc->unmap_len = len; | ||
753 | sw_desc->async_tx.flags = flags; | 664 | sw_desc->async_tx.flags = flags; |
754 | for (i = 0; i < src_cnt; i++) | 665 | for (i = 0; i < src_cnt; i++) |
755 | iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); | 666 | iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); |
@@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
804 | g->pq_check_result = pqres; | 715 | g->pq_check_result = pqres; |
805 | pr_debug("\t%s: g->pq_check_result: %p\n", | 716 | pr_debug("\t%s: g->pq_check_result: %p\n", |
806 | __func__, g->pq_check_result); | 717 | __func__, g->pq_check_result); |
807 | sw_desc->unmap_src_cnt = src_cnt+2; | ||
808 | sw_desc->unmap_len = len; | ||
809 | sw_desc->async_tx.flags = flags; | 718 | sw_desc->async_tx.flags = flags; |
810 | while (src_cnt--) | 719 | while (src_cnt--) |
811 | iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, | 720 | iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, |
@@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, | |||
864 | int ret; | 773 | int ret; |
865 | 774 | ||
866 | ret = dma_cookie_status(chan, cookie, txstate); | 775 | ret = dma_cookie_status(chan, cookie, txstate); |
867 | if (ret == DMA_SUCCESS) | 776 | if (ret == DMA_COMPLETE) |
868 | return ret; | 777 | return ret; |
869 | 778 | ||
870 | iop_adma_slot_cleanup(iop_chan); | 779 | iop_adma_slot_cleanup(iop_chan); |
@@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
983 | msleep(1); | 892 | msleep(1); |
984 | 893 | ||
985 | if (iop_adma_status(dma_chan, cookie, NULL) != | 894 | if (iop_adma_status(dma_chan, cookie, NULL) != |
986 | DMA_SUCCESS) { | 895 | DMA_COMPLETE) { |
987 | dev_err(dma_chan->device->dev, | 896 | dev_err(dma_chan->device->dev, |
988 | "Self-test copy timed out, disabling\n"); | 897 | "Self-test copy timed out, disabling\n"); |
989 | err = -ENODEV; | 898 | err = -ENODEV; |
@@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1083 | msleep(8); | 992 | msleep(8); |
1084 | 993 | ||
1085 | if (iop_adma_status(dma_chan, cookie, NULL) != | 994 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1086 | DMA_SUCCESS) { | 995 | DMA_COMPLETE) { |
1087 | dev_err(dma_chan->device->dev, | 996 | dev_err(dma_chan->device->dev, |
1088 | "Self-test xor timed out, disabling\n"); | 997 | "Self-test xor timed out, disabling\n"); |
1089 | err = -ENODEV; | 998 | err = -ENODEV; |
@@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1129 | iop_adma_issue_pending(dma_chan); | 1038 | iop_adma_issue_pending(dma_chan); |
1130 | msleep(8); | 1039 | msleep(8); |
1131 | 1040 | ||
1132 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1041 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
1133 | dev_err(dma_chan->device->dev, | 1042 | dev_err(dma_chan->device->dev, |
1134 | "Self-test zero sum timed out, disabling\n"); | 1043 | "Self-test zero sum timed out, disabling\n"); |
1135 | err = -ENODEV; | 1044 | err = -ENODEV; |
@@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) | |||
1158 | iop_adma_issue_pending(dma_chan); | 1067 | iop_adma_issue_pending(dma_chan); |
1159 | msleep(8); | 1068 | msleep(8); |
1160 | 1069 | ||
1161 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { | 1070 | if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { |
1162 | dev_err(dma_chan->device->dev, | 1071 | dev_err(dma_chan->device->dev, |
1163 | "Self-test non-zero sum timed out, disabling\n"); | 1072 | "Self-test non-zero sum timed out, disabling\n"); |
1164 | err = -ENODEV; | 1073 | err = -ENODEV; |
@@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
1254 | msleep(8); | 1163 | msleep(8); |
1255 | 1164 | ||
1256 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1165 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1257 | DMA_SUCCESS) { | 1166 | DMA_COMPLETE) { |
1258 | dev_err(dev, "Self-test pq timed out, disabling\n"); | 1167 | dev_err(dev, "Self-test pq timed out, disabling\n"); |
1259 | err = -ENODEV; | 1168 | err = -ENODEV; |
1260 | goto free_resources; | 1169 | goto free_resources; |
@@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
1291 | msleep(8); | 1200 | msleep(8); |
1292 | 1201 | ||
1293 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1202 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1294 | DMA_SUCCESS) { | 1203 | DMA_COMPLETE) { |
1295 | dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); | 1204 | dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); |
1296 | err = -ENODEV; | 1205 | err = -ENODEV; |
1297 | goto free_resources; | 1206 | goto free_resources; |
@@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) | |||
1323 | msleep(8); | 1232 | msleep(8); |
1324 | 1233 | ||
1325 | if (iop_adma_status(dma_chan, cookie, NULL) != | 1234 | if (iop_adma_status(dma_chan, cookie, NULL) != |
1326 | DMA_SUCCESS) { | 1235 | DMA_COMPLETE) { |
1327 | dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); | 1236 | dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); |
1328 | err = -ENODEV; | 1237 | err = -ENODEV; |
1329 | goto free_resources; | 1238 | goto free_resources; |
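As with the ioat3 cleanup earlier in this series, iop-adma no longer unmaps DMA addresses itself: the iop_desc_unmap*() helpers are deleted and the completion path calls dma_descriptor_unmap(), letting the dmaengine core release whatever unmap data the submitter attached. The completion action then reduces to the pattern below (a condensed sketch of the converted paths):

        if (tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);       /* core drops the attached unmap data, if any */
                if (tx->callback) {
                        tx->callback(tx->callback_param);
                        tx->callback = NULL;
                }
        }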
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e8..128ca143486d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1232 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); | 1232 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); |
1233 | descnew = desc; | 1233 | descnew = desc; |
1234 | 1234 | ||
1235 | dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", | 1235 | dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", |
1236 | irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); | 1236 | irq, (u64)sg_dma_address(*sg), |
1237 | sgnext ? (u64)sg_dma_address(sgnext) : 0, | ||
1238 | ichan->active_buffer, curbuf); | ||
1237 | 1239 | ||
1238 | /* Find the descriptor of sgnext */ | 1240 | /* Find the descriptor of sgnext */ |
1239 | sgnew = idmac_sg_next(ichan, &descnew, *sg); | 1241 | sgnew = idmac_sg_next(ichan, &descnew, *sg); |
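The idmac_interrupt() change above is needed because dma_addr_t is 32 bits wide on some configurations and 64 bits on others, so it cannot be printed with a fixed 0x%08x format. The portable pattern, as used in the new dev_dbg(), is to cast to u64 and print with %llx:

        dma_addr_t addr = sg_dma_address(*sg);

        /* dma_addr_t may be 32 or 64 bits wide; cast before formatting */
        dev_dbg(dev, "IDMAC dma %#llx\n", (u64)addr);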
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f952..e26075408e9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, | |||
344 | size_t bytes = 0; | 344 | size_t bytes = 0; |
345 | 345 | ||
346 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | 346 | ret = dma_cookie_status(&c->vc.chan, cookie, state); |
347 | if (ret == DMA_SUCCESS) | 347 | if (ret == DMA_COMPLETE) |
348 | return ret; | 348 | return ret; |
349 | 349 | ||
350 | spin_lock_irqsave(&c->vc.lock, flags); | 350 | spin_lock_irqsave(&c->vc.lock, flags); |
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) | |||
693 | 693 | ||
694 | irq = platform_get_irq(op, 0); | 694 | irq = platform_get_irq(op, 0); |
695 | ret = devm_request_irq(&op->dev, irq, | 695 | ret = devm_request_irq(&op->dev, irq, |
696 | k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); | 696 | k3_dma_int_handler, 0, DRIVER_NAME, d); |
697 | if (ret) | 697 | if (ret) |
698 | return ret; | 698 | return ret; |
699 | 699 | ||
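k3dma is one of many drivers in this series switched from DMA_SUCCESS to DMA_COMPLETE (the probe hunk also drops IRQF_DISABLED, which has long been a no-op). Callers that poll device_tx_status() follow the same pattern throughout the series, sketched here from the self-test hunks above:

        enum dma_status status;

        status = dma->device_tx_status(dma_chan, cookie, NULL);
        if (status != DMA_COMPLETE) {   /* spelled DMA_SUCCESS before this series */
                dev_err(dev, "transfer timed out\n");
                return -ENODEV;
        }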
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8cb..dcb1e05149a7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) | |||
798 | * move the descriptors to a temporary list so we can drop | 798 | * move the descriptors to a temporary list so we can drop |
799 | * the lock during the entire cleanup operation | 799 | * the lock during the entire cleanup operation |
800 | */ | 800 | */ |
801 | list_del(&desc->node); | 801 | list_move(&desc->node, &chain_cleanup); |
802 | list_add(&desc->node, &chain_cleanup); | ||
803 | 802 | ||
804 | /* | 803 | /* |
805 | * Look for the first list entry which has the ENDIRQEN flag | 804 | * Look for the first list entry which has the ENDIRQEN flag |
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | |||
863 | 862 | ||
864 | if (irq) { | 863 | if (irq) { |
865 | ret = devm_request_irq(pdev->dev, irq, | 864 | ret = devm_request_irq(pdev->dev, irq, |
866 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | 865 | mmp_pdma_chan_handler, 0, "pdma", phy); |
867 | if (ret) { | 866 | if (ret) { |
868 | dev_err(pdev->dev, "channel request irq fail!\n"); | 867 | dev_err(pdev->dev, "channel request irq fail!\n"); |
869 | return ret; | 868 | return ret; |
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
970 | /* all chan share one irq, demux inside */ | 969 | /* all chan share one irq, demux inside */ |
971 | irq = platform_get_irq(op, 0); | 970 | irq = platform_get_irq(op, 0); |
972 | ret = devm_request_irq(pdev->dev, irq, | 971 | ret = devm_request_irq(pdev->dev, irq, |
973 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | 972 | mmp_pdma_int_handler, 0, "pdma", pdev); |
974 | if (ret) | 973 | if (ret) |
975 | return ret; | 974 | return ret; |
976 | } | 975 | } |
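The dma_do_tasklet() change above is a pure simplification: list_move() unlinks the entry from its current list and adds it to the head of the new one in a single call. The equivalence, for reference:

        /* open-coded form removed above */
        list_del(&desc->node);
        list_add(&desc->node, &chain_cleanup);

        /* single-call form with the same effect */
        list_move(&desc->node, &chain_cleanup);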
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index d3b6358e5a27..3ddacc14a736 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -62,6 +62,11 @@ | |||
62 | #define TDCR_BURSTSZ_16B (0x3 << 6) | 62 | #define TDCR_BURSTSZ_16B (0x3 << 6) |
63 | #define TDCR_BURSTSZ_32B (0x6 << 6) | 63 | #define TDCR_BURSTSZ_32B (0x6 << 6) |
64 | #define TDCR_BURSTSZ_64B (0x7 << 6) | 64 | #define TDCR_BURSTSZ_64B (0x7 << 6) |
65 | #define TDCR_BURSTSZ_SQU_1B (0x5 << 6) | ||
66 | #define TDCR_BURSTSZ_SQU_2B (0x6 << 6) | ||
67 | #define TDCR_BURSTSZ_SQU_4B (0x0 << 6) | ||
68 | #define TDCR_BURSTSZ_SQU_8B (0x1 << 6) | ||
69 | #define TDCR_BURSTSZ_SQU_16B (0x3 << 6) | ||
65 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) | 70 | #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) |
66 | #define TDCR_BURSTSZ_128B (0x5 << 6) | 71 | #define TDCR_BURSTSZ_128B (0x5 << 6) |
67 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ | 72 | #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ |
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) | |||
158 | /* disable irq */ | 163 | /* disable irq */ |
159 | writel(0, tdmac->reg_base + TDIMR); | 164 | writel(0, tdmac->reg_base + TDIMR); |
160 | 165 | ||
161 | tdmac->status = DMA_SUCCESS; | 166 | tdmac->status = DMA_COMPLETE; |
162 | } | 167 | } |
163 | 168 | ||
164 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) | 169 | static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) |
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) | |||
228 | return -EINVAL; | 233 | return -EINVAL; |
229 | } | 234 | } |
230 | } else if (tdmac->type == PXA910_SQU) { | 235 | } else if (tdmac->type == PXA910_SQU) { |
231 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
232 | tdcr |= TDCR_SSPMOD; | 236 | tdcr |= TDCR_SSPMOD; |
237 | |||
238 | switch (tdmac->burst_sz) { | ||
239 | case 1: | ||
240 | tdcr |= TDCR_BURSTSZ_SQU_1B; | ||
241 | break; | ||
242 | case 2: | ||
243 | tdcr |= TDCR_BURSTSZ_SQU_2B; | ||
244 | break; | ||
245 | case 4: | ||
246 | tdcr |= TDCR_BURSTSZ_SQU_4B; | ||
247 | break; | ||
248 | case 8: | ||
249 | tdcr |= TDCR_BURSTSZ_SQU_8B; | ||
250 | break; | ||
251 | case 16: | ||
252 | tdcr |= TDCR_BURSTSZ_SQU_16B; | ||
253 | break; | ||
254 | case 32: | ||
255 | tdcr |= TDCR_BURSTSZ_SQU_32B; | ||
256 | break; | ||
257 | default: | ||
258 | dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); | ||
259 | return -EINVAL; | ||
260 | } | ||
233 | } | 261 | } |
234 | 262 | ||
235 | writel(tdcr, tdmac->reg_base + TDCR); | 263 | writel(tdcr, tdmac->reg_base + TDCR); |
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) | |||
324 | 352 | ||
325 | if (tdmac->irq) { | 353 | if (tdmac->irq) { |
326 | ret = devm_request_irq(tdmac->dev, tdmac->irq, | 354 | ret = devm_request_irq(tdmac->dev, tdmac->irq, |
327 | mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); | 355 | mmp_tdma_chan_handler, 0, "tdma", tdmac); |
328 | if (ret) | 356 | if (ret) |
329 | return ret; | 357 | return ret; |
330 | } | 358 | } |
@@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | |||
365 | int num_periods = buf_len / period_len; | 393 | int num_periods = buf_len / period_len; |
366 | int i = 0, buf = 0; | 394 | int i = 0, buf = 0; |
367 | 395 | ||
368 | if (tdmac->status != DMA_SUCCESS) | 396 | if (tdmac->status != DMA_COMPLETE) |
369 | return NULL; | 397 | return NULL; |
370 | 398 | ||
371 | if (period_len > TDMA_MAX_XFER_BYTES) { | 399 | if (period_len > TDMA_MAX_XFER_BYTES) { |
@@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
499 | tdmac->idx = idx; | 527 | tdmac->idx = idx; |
500 | tdmac->type = type; | 528 | tdmac->type = type; |
501 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; | 529 | tdmac->reg_base = (unsigned long)tdev->base + idx * 4; |
502 | tdmac->status = DMA_SUCCESS; | 530 | tdmac->status = DMA_COMPLETE; |
503 | tdev->tdmac[tdmac->idx] = tdmac; | 531 | tdev->tdmac[tdmac->idx] = tdmac; |
504 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); | 532 | tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); |
505 | 533 | ||
@@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) | |||
554 | if (irq_num != chan_num) { | 582 | if (irq_num != chan_num) { |
555 | irq = platform_get_irq(pdev, 0); | 583 | irq = platform_get_irq(pdev, 0); |
556 | ret = devm_request_irq(&pdev->dev, irq, | 584 | ret = devm_request_irq(&pdev->dev, irq, |
557 | mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); | 585 | mmp_tdma_int_handler, 0, "tdma", tdev); |
558 | if (ret) | 586 | if (ret) |
559 | return ret; | 587 | return ret; |
560 | } | 588 | } |
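For PXA910 SQU channels, mmp_tdma now programs the burst size requested through the slave configuration instead of hard-coding 32 bytes. Condensed into a helper, the mapping added above looks like this (illustrative only; the driver open-codes the switch in mmp_tdma_config_chan()):

        static int mmp_tdma_squ_burst_bits(u32 burst_sz)
        {
                switch (burst_sz) {
                case 1:
                        return TDCR_BURSTSZ_SQU_1B;
                case 2:
                        return TDCR_BURSTSZ_SQU_2B;
                case 4:
                        return TDCR_BURSTSZ_SQU_4B;
                case 8:
                        return TDCR_BURSTSZ_SQU_8B;
                case 16:
                        return TDCR_BURSTSZ_SQU_16B;
                case 32:
                        return TDCR_BURSTSZ_SQU_32B;
                default:
                        return -EINVAL;
                }
        }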
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 536dcb8ba5fd..7807f0ef4e20 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
@@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) | |||
60 | return hw_desc->phy_dest_addr; | 60 | return hw_desc->phy_dest_addr; |
61 | } | 61 | } |
62 | 62 | ||
63 | static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, | ||
64 | int src_idx) | ||
65 | { | ||
66 | struct mv_xor_desc *hw_desc = desc->hw_desc; | ||
67 | return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; | ||
68 | } | ||
69 | |||
70 | |||
71 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, | 63 | static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, |
72 | u32 byte_count) | 64 | u32 byte_count) |
73 | { | 65 | { |
@@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, | |||
278 | desc->async_tx.callback( | 270 | desc->async_tx.callback( |
279 | desc->async_tx.callback_param); | 271 | desc->async_tx.callback_param); |
280 | 272 | ||
281 | /* unmap dma addresses | 273 | dma_descriptor_unmap(&desc->async_tx); |
282 | * (unmap_single vs unmap_page?) | 274 | if (desc->group_head) |
283 | */ | ||
284 | if (desc->group_head && desc->unmap_len) { | ||
285 | struct mv_xor_desc_slot *unmap = desc->group_head; | ||
286 | struct device *dev = mv_chan_to_devp(mv_chan); | ||
287 | u32 len = unmap->unmap_len; | ||
288 | enum dma_ctrl_flags flags = desc->async_tx.flags; | ||
289 | u32 src_cnt; | ||
290 | dma_addr_t addr; | ||
291 | dma_addr_t dest; | ||
292 | |||
293 | src_cnt = unmap->unmap_src_cnt; | ||
294 | dest = mv_desc_get_dest_addr(unmap); | ||
295 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
296 | enum dma_data_direction dir; | ||
297 | |||
298 | if (src_cnt > 1) /* is xor ? */ | ||
299 | dir = DMA_BIDIRECTIONAL; | ||
300 | else | ||
301 | dir = DMA_FROM_DEVICE; | ||
302 | dma_unmap_page(dev, dest, len, dir); | ||
303 | } | ||
304 | |||
305 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
306 | while (src_cnt--) { | ||
307 | addr = mv_desc_get_src_addr(unmap, | ||
308 | src_cnt); | ||
309 | if (addr == dest) | ||
310 | continue; | ||
311 | dma_unmap_page(dev, addr, len, | ||
312 | DMA_TO_DEVICE); | ||
313 | } | ||
314 | } | ||
315 | desc->group_head = NULL; | 275 | desc->group_head = NULL; |
316 | } | ||
317 | } | 276 | } |
318 | 277 | ||
319 | /* run dependent operations */ | 278 | /* run dependent operations */ |
@@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, | |||
749 | enum dma_status ret; | 708 | enum dma_status ret; |
750 | 709 | ||
751 | ret = dma_cookie_status(chan, cookie, txstate); | 710 | ret = dma_cookie_status(chan, cookie, txstate); |
752 | if (ret == DMA_SUCCESS) { | 711 | if (ret == DMA_COMPLETE) { |
753 | mv_xor_clean_completed_slots(mv_chan); | 712 | mv_xor_clean_completed_slots(mv_chan); |
754 | return ret; | 713 | return ret; |
755 | } | 714 | } |
@@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
874 | msleep(1); | 833 | msleep(1); |
875 | 834 | ||
876 | if (mv_xor_status(dma_chan, cookie, NULL) != | 835 | if (mv_xor_status(dma_chan, cookie, NULL) != |
877 | DMA_SUCCESS) { | 836 | DMA_COMPLETE) { |
878 | dev_err(dma_chan->device->dev, | 837 | dev_err(dma_chan->device->dev, |
879 | "Self-test copy timed out, disabling\n"); | 838 | "Self-test copy timed out, disabling\n"); |
880 | err = -ENODEV; | 839 | err = -ENODEV; |
@@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) | |||
968 | msleep(8); | 927 | msleep(8); |
969 | 928 | ||
970 | if (mv_xor_status(dma_chan, cookie, NULL) != | 929 | if (mv_xor_status(dma_chan, cookie, NULL) != |
971 | DMA_SUCCESS) { | 930 | DMA_COMPLETE) { |
972 | dev_err(dma_chan->device->dev, | 931 | dev_err(dma_chan->device->dev, |
973 | "Self-test xor timed out, disabling\n"); | 932 | "Self-test xor timed out, disabling\n"); |
974 | err = -ENODEV; | 933 | err = -ENODEV; |
@@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
1076 | } | 1035 | } |
1077 | 1036 | ||
1078 | mv_chan->mmr_base = xordev->xor_base; | 1037 | mv_chan->mmr_base = xordev->xor_base; |
1079 | if (!mv_chan->mmr_base) { | 1038 | mv_chan->mmr_high_base = xordev->xor_high_base; |
1080 | ret = -ENOMEM; | ||
1081 | goto err_free_dma; | ||
1082 | } | ||
1083 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) | 1039 | tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) |
1084 | mv_chan); | 1040 | mv_chan); |
1085 | 1041 | ||
@@ -1138,7 +1094,7 @@ static void | |||
1138 | mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, | 1094 | mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, |
1139 | const struct mbus_dram_target_info *dram) | 1095 | const struct mbus_dram_target_info *dram) |
1140 | { | 1096 | { |
1141 | void __iomem *base = xordev->xor_base; | 1097 | void __iomem *base = xordev->xor_high_base; |
1142 | u32 win_enable = 0; | 1098 | u32 win_enable = 0; |
1143 | int i; | 1099 | int i; |
1144 | 1100 | ||
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06b067f24c9b..d0749229c875 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
@@ -34,13 +34,13 @@ | |||
34 | #define XOR_OPERATION_MODE_MEMCPY 2 | 34 | #define XOR_OPERATION_MODE_MEMCPY 2 |
35 | #define XOR_DESCRIPTOR_SWAP BIT(14) | 35 | #define XOR_DESCRIPTOR_SWAP BIT(14) |
36 | 36 | ||
37 | #define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4)) | 37 | #define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4)) |
38 | #define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4)) | 38 | #define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4)) |
39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4)) | 39 | #define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4)) |
40 | #define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4)) | 40 | #define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4)) |
41 | #define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4)) | 41 | #define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4)) |
42 | #define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0) | 42 | #define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0) |
43 | #define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4) | 43 | #define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4) |
44 | 44 | ||
45 | #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) | 45 | #define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4)) |
46 | #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) | 46 | #define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4)) |
@@ -50,11 +50,11 @@ | |||
50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) | 50 | #define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60) |
51 | #define XOR_INTR_MASK_VALUE 0x3F5 | 51 | #define XOR_INTR_MASK_VALUE 0x3F5 |
52 | 52 | ||
53 | #define WINDOW_BASE(w) (0x250 + ((w) << 2)) | 53 | #define WINDOW_BASE(w) (0x50 + ((w) << 2)) |
54 | #define WINDOW_SIZE(w) (0x270 + ((w) << 2)) | 54 | #define WINDOW_SIZE(w) (0x70 + ((w) << 2)) |
55 | #define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2)) | 55 | #define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2)) |
56 | #define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2)) | 56 | #define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) |
57 | #define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2)) | 57 | #define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) |
58 | 58 | ||
59 | struct mv_xor_device { | 59 | struct mv_xor_device { |
60 | void __iomem *xor_base; | 60 | void __iomem *xor_base; |
@@ -82,6 +82,7 @@ struct mv_xor_chan { | |||
82 | int pending; | 82 | int pending; |
83 | spinlock_t lock; /* protects the descriptor slot pool */ | 83 | spinlock_t lock; /* protects the descriptor slot pool */ |
84 | void __iomem *mmr_base; | 84 | void __iomem *mmr_base; |
85 | void __iomem *mmr_high_base; | ||
85 | unsigned int idx; | 86 | unsigned int idx; |
86 | int irq; | 87 | int irq; |
87 | enum dma_transaction_type current_type; | 88 | enum dma_transaction_type current_type; |
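mv_xor now keeps a separate mmr_high_base pointer for the second register bank, so the per-channel macros and the mbus window offsets above lose their old 0x2xx bias; the same physical registers are reached, just relative to the new base. For example, window programming in mv_xor_conf_mbus_windows() becomes (sketch):

        void __iomem *base = xordev->xor_high_base;

        /* WINDOW_BAR_ENABLE(0) is now offset 0x40 from the high bank,
         * where it used to be 0x240 from xor_base */
        writel(win_enable, base + WINDOW_BAR_ENABLE(0));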
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841db..ead491346da7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
29 | #include <linux/of_dma.h> | 29 | #include <linux/of_dma.h> |
30 | #include <linux/list.h> | ||
30 | 31 | ||
31 | #include <asm/irq.h> | 32 | #include <asm/irq.h> |
32 | 33 | ||
@@ -57,6 +58,9 @@ | |||
57 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) | 58 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) |
58 | #define HW_APBHX_CHn_SEMA(d, n) \ | 59 | #define HW_APBHX_CHn_SEMA(d, n) \ |
59 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) | 60 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) |
61 | #define HW_APBHX_CHn_BAR(d, n) \ | ||
62 | (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) | ||
63 | #define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) | ||
60 | 64 | ||
61 | /* | 65 | /* |
62 | * ccw bits definitions | 66 | * ccw bits definitions |
@@ -115,7 +119,9 @@ struct mxs_dma_chan { | |||
115 | int desc_count; | 119 | int desc_count; |
116 | enum dma_status status; | 120 | enum dma_status status; |
117 | unsigned int flags; | 121 | unsigned int flags; |
122 | bool reset; | ||
118 | #define MXS_DMA_SG_LOOP (1 << 0) | 123 | #define MXS_DMA_SG_LOOP (1 << 0) |
124 | #define MXS_DMA_USE_SEMAPHORE (1 << 1) | ||
119 | }; | 125 | }; |
120 | 126 | ||
121 | #define MXS_DMA_CHANNELS 16 | 127 | #define MXS_DMA_CHANNELS 16 |
@@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | |||
201 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 207 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
202 | int chan_id = mxs_chan->chan.chan_id; | 208 | int chan_id = mxs_chan->chan.chan_id; |
203 | 209 | ||
204 | if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) | 210 | /* |
211 | * mxs dma channel resets can cause a channel stall. To recover from a | ||
212 | * channel stall, we have to reset the whole DMA engine. To avoid this, | ||
213 | * we use cyclic DMA with semaphores that are incremented in | ||
214 | * mxs_dma_int_handler. To reset the channel, we can simply stop writing | ||
215 | * into the semaphore counter. | ||
216 | */ | ||
217 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && | ||
218 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
219 | mxs_chan->reset = true; | ||
220 | } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { | ||
205 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), | 221 | writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), |
206 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); | 222 | mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); |
207 | else | 223 | } else { |
224 | unsigned long elapsed = 0; | ||
225 | const unsigned long max_wait = 50000; /* 50ms */ | ||
226 | void __iomem *reg_dbg1 = mxs_dma->base + | ||
227 | HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); | ||
228 | |||
229 | /* | ||
230 | * On i.MX28 APBX, the DMA channel can stop working if we reset | ||
231 | * the channel while it is in READ_FLUSH (0x08) state. | ||
232 | * We wait here until we leave the state. Then we trigger the | ||
233 | * reset. Waiting a maximum of 50ms, the kernel shouldn't crash | ||
234 | * because of this. | ||
235 | */ | ||
236 | while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { | ||
237 | udelay(100); | ||
238 | elapsed += 100; | ||
239 | } | ||
240 | |||
241 | if (elapsed >= max_wait) | ||
242 | dev_err(&mxs_chan->mxs_dma->pdev->dev, | ||
243 | "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", | ||
244 | chan_id); | ||
245 | |||
208 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), | 246 | writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), |
209 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); | 247 | mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); |
248 | } | ||
249 | |||
250 | mxs_chan->status = DMA_COMPLETE; | ||
210 | } | 251 | } |
211 | 252 | ||
212 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | 253 | static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) |
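The comment block above describes the key idea behind the new reset path: a hard channel reset can stall the whole APBH/APBX engine, so a cyclic channel is instead "reset" by no longer replenishing its semaphore, letting it drain and halt on its own. Condensed from the interrupt-handler hunks further down, the technique looks like this (sketch):

        /* cyclic channel using the semaphore trick */
        if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
            mxs_chan->flags & MXS_DMA_SG_LOOP) {
                if (mxs_chan->reset)
                        return IRQ_HANDLED;     /* stop feeding the semaphore: the channel drains */
                /* keep the cyclic transfer running */
                writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan));
        }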
@@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
219 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); | 260 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); |
220 | 261 | ||
221 | /* write 1 to SEMA to kick off the channel */ | 262 | /* write 1 to SEMA to kick off the channel */ |
222 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | 263 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && |
264 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
265 | /* A cyclic DMA consists of at least 2 segments, so initialize | ||
266 | * the semaphore with 2; this gives us enough time to add 1 to the | ||
267 | * semaphore if we need to */ | ||
268 | writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | ||
269 | } else { | ||
270 | writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); | ||
271 | } | ||
272 | mxs_chan->reset = false; | ||
223 | } | 273 | } |
224 | 274 | ||
225 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 275 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) |
226 | { | 276 | { |
227 | mxs_chan->status = DMA_SUCCESS; | 277 | mxs_chan->status = DMA_COMPLETE; |
228 | } | 278 | } |
229 | 279 | ||
230 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) | 280 | static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) |
@@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) | |||
272 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); | 322 | mxs_chan->desc.callback(mxs_chan->desc.callback_param); |
273 | } | 323 | } |
274 | 324 | ||
325 | static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) | ||
326 | { | ||
327 | int i; | ||
328 | |||
329 | for (i = 0; i != mxs_dma->nr_channels; ++i) | ||
330 | if (mxs_dma->mxs_chans[i].chan_irq == irq) | ||
331 | return i; | ||
332 | |||
333 | return -EINVAL; | ||
334 | } | ||
335 | |||
275 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | 336 | static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) |
276 | { | 337 | { |
277 | struct mxs_dma_engine *mxs_dma = dev_id; | 338 | struct mxs_dma_engine *mxs_dma = dev_id; |
278 | u32 stat1, stat2; | 339 | struct mxs_dma_chan *mxs_chan; |
340 | u32 completed; | ||
341 | u32 err; | ||
342 | int chan = mxs_dma_irq_to_chan(mxs_dma, irq); | ||
343 | |||
344 | if (chan < 0) | ||
345 | return IRQ_NONE; | ||
279 | 346 | ||
280 | /* completion status */ | 347 | /* completion status */ |
281 | stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); | 348 | completed = readl(mxs_dma->base + HW_APBHX_CTRL1); |
282 | stat1 &= MXS_DMA_CHANNELS_MASK; | 349 | completed = (completed >> chan) & 0x1; |
283 | writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); | 350 | |
351 | /* Clear interrupt */ | ||
352 | writel((1 << chan), | ||
353 | mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); | ||
284 | 354 | ||
285 | /* error status */ | 355 | /* error status */ |
286 | stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); | 356 | err = readl(mxs_dma->base + HW_APBHX_CTRL2); |
287 | writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); | 357 | err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); |
358 | |||
359 | /* | ||
360 | * error status bit is in the upper 16 bits, error irq bit in the lower | ||
361 | * 16 bits. We transform it into a simpler error code: | ||
362 | * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR | ||
363 | */ | ||
364 | err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); | ||
365 | |||
366 | /* Clear error irq */ | ||
367 | writel((1 << chan), | ||
368 | mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); | ||
288 | 369 | ||
289 | /* | 370 | /* |
290 | * When both completion and error of termination bits set at the | 371 | * When both completion and error of termination bits set at the |
291 | * same time, we do not take it as an error. IOW, it only becomes | 372 | * same time, we do not take it as an error. IOW, it only becomes |
292 | * an error we need to handle here in case of either it's (1) a bus | 373 | * an error we need to handle here in case of either it's a bus |
293 | * error or (2) a termination error with no completion. | 374 | * error or a termination error with no completion. 0x01 is termination |
375 | * error, so we can subtract err & completed to get the real error case. | ||
294 | */ | 376 | */ |
295 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | 377 | err -= err & completed; |
296 | (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ | ||
297 | |||
298 | /* combine error and completion status for checking */ | ||
299 | stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; | ||
300 | while (stat1) { | ||
301 | int channel = fls(stat1) - 1; | ||
302 | struct mxs_dma_chan *mxs_chan = | ||
303 | &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; | ||
304 | |||
305 | if (channel >= MXS_DMA_CHANNELS) { | ||
306 | dev_dbg(mxs_dma->dma_device.dev, | ||
307 | "%s: error in channel %d\n", __func__, | ||
308 | channel - MXS_DMA_CHANNELS); | ||
309 | mxs_chan->status = DMA_ERROR; | ||
310 | mxs_dma_reset_chan(mxs_chan); | ||
311 | } else { | ||
312 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) | ||
313 | mxs_chan->status = DMA_IN_PROGRESS; | ||
314 | else | ||
315 | mxs_chan->status = DMA_SUCCESS; | ||
316 | } | ||
317 | 378 | ||
318 | stat1 &= ~(1 << channel); | 379 | mxs_chan = &mxs_dma->mxs_chans[chan]; |
319 | 380 | ||
320 | if (mxs_chan->status == DMA_SUCCESS) | 381 | if (err) { |
321 | dma_cookie_complete(&mxs_chan->desc); | 382 | dev_dbg(mxs_dma->dma_device.dev, |
383 | "%s: error in channel %d\n", __func__, | ||
384 | chan); | ||
385 | mxs_chan->status = DMA_ERROR; | ||
386 | mxs_dma_reset_chan(mxs_chan); | ||
387 | } else if (mxs_chan->status != DMA_COMPLETE) { | ||
388 | if (mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
389 | mxs_chan->status = DMA_IN_PROGRESS; | ||
390 | if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) | ||
391 | writel(1, mxs_dma->base + | ||
392 | HW_APBHX_CHn_SEMA(mxs_dma, chan)); | ||
393 | } else { | ||
394 | mxs_chan->status = DMA_COMPLETE; | ||
395 | } | ||
396 | } | ||
322 | 397 | ||
323 | /* schedule tasklet on this channel */ | 398 | if (mxs_chan->status == DMA_COMPLETE) { |
324 | tasklet_schedule(&mxs_chan->tasklet); | 399 | if (mxs_chan->reset) |
400 | return IRQ_HANDLED; | ||
401 | dma_cookie_complete(&mxs_chan->desc); | ||
325 | } | 402 | } |
326 | 403 | ||
404 | /* schedule tasklet on this channel */ | ||
405 | tasklet_schedule(&mxs_chan->tasklet); | ||
406 | |||
327 | return IRQ_HANDLED; | 407 | return IRQ_HANDLED; |
328 | } | 408 | } |
329 | 409 | ||
@@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
523 | 603 | ||
524 | mxs_chan->status = DMA_IN_PROGRESS; | 604 | mxs_chan->status = DMA_IN_PROGRESS; |
525 | mxs_chan->flags |= MXS_DMA_SG_LOOP; | 605 | mxs_chan->flags |= MXS_DMA_SG_LOOP; |
606 | mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; | ||
526 | 607 | ||
527 | if (num_periods > NUM_CCW) { | 608 | if (num_periods > NUM_CCW) { |
528 | dev_err(mxs_dma->dma_device.dev, | 609 | dev_err(mxs_dma->dma_device.dev, |
@@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
554 | ccw->bits |= CCW_IRQ; | 635 | ccw->bits |= CCW_IRQ; |
555 | ccw->bits |= CCW_HALT_ON_TERM; | 636 | ccw->bits |= CCW_HALT_ON_TERM; |
556 | ccw->bits |= CCW_TERM_FLUSH; | 637 | ccw->bits |= CCW_TERM_FLUSH; |
638 | ccw->bits |= CCW_DEC_SEM; | ||
557 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? | 639 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
558 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | 640 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); |
559 | 641 | ||
@@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, | |||
599 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 681 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
600 | { | 682 | { |
601 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 683 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
684 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
685 | u32 residue = 0; | ||
686 | |||
687 | if (mxs_chan->status == DMA_IN_PROGRESS && | ||
688 | mxs_chan->flags & MXS_DMA_SG_LOOP) { | ||
689 | struct mxs_dma_ccw *last_ccw; | ||
690 | u32 bar; | ||
691 | |||
692 | last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1]; | ||
693 | residue = last_ccw->xfer_bytes + last_ccw->bufaddr; | ||
694 | |||
695 | bar = readl(mxs_dma->base + | ||
696 | HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id)); | ||
697 | residue -= bar; | ||
698 | } | ||
602 | 699 | ||
603 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); | 700 | dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, |
701 | residue); | ||
604 | 702 | ||
605 | return mxs_chan->status; | 703 | return mxs_chan->status; |
606 | } | 704 | } |
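The mxs-dma hunks above add residue reporting for cyclic (looping) transfers: mxs_dma_tx_status() now reads the channel's current byte-address register (BAR) and subtracts it from the end address of the descriptor ring. Restated as a standalone helper so the arithmetic is easier to follow (a sketch only; the driver keeps this inline, and the field and register names are taken from the diff):

	/*
	 * Residue of an in-flight cyclic transfer: the last CCW in the ring
	 * ends at bufaddr + xfer_bytes, and HW_APBHX_CHn_BAR holds the
	 * address the hardware is currently working on, so the difference
	 * is the number of bytes still outstanding in this pass.
	 */
	static u32 mxs_cyclic_residue(struct mxs_dma_chan *mxs_chan,
				      struct mxs_dma_engine *mxs_dma, int chan_id)
	{
		struct mxs_dma_ccw *last = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		u32 end = last->bufaddr + last->xfer_bytes;
		u32 bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan_id));

		return end - bar;
	}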
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ec3fc4fd9160..2f66cf4e54fe 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, | |||
248 | unsigned long flags; | 248 | unsigned long flags; |
249 | 249 | ||
250 | ret = dma_cookie_status(chan, cookie, txstate); | 250 | ret = dma_cookie_status(chan, cookie, txstate); |
251 | if (ret == DMA_SUCCESS || !txstate) | 251 | if (ret == DMA_COMPLETE || !txstate) |
252 | return ret; | 252 | return ret; |
253 | 253 | ||
254 | spin_lock_irqsave(&c->vc.lock, flags); | 254 | spin_lock_irqsave(&c->vc.lock, flags); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index df8b10fd1726..cdf0483b8f2d 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data) | |||
2268 | list_move_tail(&desc->node, &pch->dmac->desc_pool); | 2268 | list_move_tail(&desc->node, &pch->dmac->desc_pool); |
2269 | } | 2269 | } |
2270 | 2270 | ||
2271 | dma_descriptor_unmap(&desc->txd); | ||
2272 | |||
2271 | if (callback) { | 2273 | if (callback) { |
2272 | spin_unlock_irqrestore(&pch->lock, flags); | 2274 | spin_unlock_irqrestore(&pch->lock, flags); |
2273 | callback(callback_param); | 2275 | callback(callback_param); |
@@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param) | |||
2314 | return false; | 2316 | return false; |
2315 | 2317 | ||
2316 | peri_id = chan->private; | 2318 | peri_id = chan->private; |
2317 | return *peri_id == (unsigned)param; | 2319 | return *peri_id == (unsigned long)param; |
2318 | } | 2320 | } |
2319 | EXPORT_SYMBOL(pl330_filter); | 2321 | EXPORT_SYMBOL(pl330_filter); |
2320 | 2322 | ||
@@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2926 | 2928 | ||
2927 | amba_set_drvdata(adev, pdmac); | 2929 | amba_set_drvdata(adev, pdmac); |
2928 | 2930 | ||
2929 | irq = adev->irq[0]; | 2931 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
2930 | ret = request_irq(irq, pl330_irq_handler, 0, | 2932 | irq = adev->irq[i]; |
2931 | dev_name(&adev->dev), pi); | 2933 | if (irq) { |
2932 | if (ret) | 2934 | ret = devm_request_irq(&adev->dev, irq, |
2933 | return ret; | 2935 | pl330_irq_handler, 0, |
2936 | dev_name(&adev->dev), pi); | ||
2937 | if (ret) | ||
2938 | return ret; | ||
2939 | } else { | ||
2940 | break; | ||
2941 | } | ||
2942 | } | ||
2934 | 2943 | ||
2935 | pi->pcfg.periph_id = adev->periphid; | 2944 | pi->pcfg.periph_id = adev->periphid; |
2936 | ret = pl330_add(pi); | 2945 | ret = pl330_add(pi); |
2937 | if (ret) | 2946 | if (ret) |
2938 | goto probe_err1; | 2947 | return ret; |
2939 | 2948 | ||
2940 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2949 | INIT_LIST_HEAD(&pdmac->desc_pool); |
2941 | spin_lock_init(&pdmac->pool_lock); | 2950 | spin_lock_init(&pdmac->pool_lock); |
@@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3033 | 3042 | ||
3034 | return 0; | 3043 | return 0; |
3035 | probe_err3: | 3044 | probe_err3: |
3036 | amba_set_drvdata(adev, NULL); | ||
3037 | |||
3038 | /* Idle the DMAC */ | 3045 | /* Idle the DMAC */ |
3039 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 3046 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, |
3040 | chan.device_node) { | 3047 | chan.device_node) { |
@@ -3048,8 +3055,6 @@ probe_err3: | |||
3048 | } | 3055 | } |
3049 | probe_err2: | 3056 | probe_err2: |
3050 | pl330_del(pi); | 3057 | pl330_del(pi); |
3051 | probe_err1: | ||
3052 | free_irq(irq, pi); | ||
3053 | 3058 | ||
3054 | return ret; | 3059 | return ret; |
3055 | } | 3060 | } |
@@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev) | |||
3059 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | 3064 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); |
3060 | struct dma_pl330_chan *pch, *_p; | 3065 | struct dma_pl330_chan *pch, *_p; |
3061 | struct pl330_info *pi; | 3066 | struct pl330_info *pi; |
3062 | int irq; | ||
3063 | 3067 | ||
3064 | if (!pdmac) | 3068 | if (!pdmac) |
3065 | return 0; | 3069 | return 0; |
@@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev) | |||
3068 | of_dma_controller_free(adev->dev.of_node); | 3072 | of_dma_controller_free(adev->dev.of_node); |
3069 | 3073 | ||
3070 | dma_async_device_unregister(&pdmac->ddma); | 3074 | dma_async_device_unregister(&pdmac->ddma); |
3071 | amba_set_drvdata(adev, NULL); | ||
3072 | 3075 | ||
3073 | /* Idle the DMAC */ | 3076 | /* Idle the DMAC */ |
3074 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 3077 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, |
@@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev) | |||
3086 | 3089 | ||
3087 | pl330_del(pi); | 3090 | pl330_del(pi); |
3088 | 3091 | ||
3089 | irq = adev->irq[0]; | ||
3090 | free_irq(irq, pi); | ||
3091 | |||
3092 | return 0; | 3092 | return 0; |
3093 | } | 3093 | } |
3094 | 3094 | ||
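pl330_probe() now requests every populated entry in adev->irq[] with devm_request_irq(), so the handlers are released automatically when the device is unbound and the hand-rolled free_irq() calls in the error and remove paths can be dropped. The pattern, pulled out into a hypothetical helper for clarity (names invented here, error handling as in the diff):

	/* Request all IRQ lines an AMBA device provides, stopping at the
	 * first unpopulated slot.  devm_-managed resources need no matching
	 * free_irq() in the remove or error paths. */
	static int request_all_amba_irqs(struct amba_device *adev,
					 irq_handler_t handler, void *data)
	{
		int i, ret;

		for (i = 0; i < AMBA_NR_IRQS; i++) {
			if (!adev->irq[i])
				break;
			ret = devm_request_irq(&adev->dev, adev->irq[i],
					       handler, 0,
					       dev_name(&adev->dev), data);
			if (ret)
				return ret;
		}
		return 0;
	}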
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index e24b5ef486b5..8da48c6b2a38 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
@@ -804,218 +804,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan, | |||
804 | } | 804 | } |
805 | 805 | ||
806 | /** | 806 | /** |
807 | * ppc440spe_desc_get_src_addr - extract the source address from the descriptor | ||
808 | */ | ||
809 | static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc, | ||
810 | struct ppc440spe_adma_chan *chan, int src_idx) | ||
811 | { | ||
812 | struct dma_cdb *dma_hw_desc; | ||
813 | struct xor_cb *xor_hw_desc; | ||
814 | |||
815 | switch (chan->device->id) { | ||
816 | case PPC440SPE_DMA0_ID: | ||
817 | case PPC440SPE_DMA1_ID: | ||
818 | dma_hw_desc = desc->hw_desc; | ||
819 | /* May have 0, 1, 2, or 3 sources */ | ||
820 | switch (dma_hw_desc->opc) { | ||
821 | case DMA_CDB_OPC_NO_OP: | ||
822 | case DMA_CDB_OPC_DFILL128: | ||
823 | return 0; | ||
824 | case DMA_CDB_OPC_DCHECK128: | ||
825 | if (unlikely(src_idx)) { | ||
826 | printk(KERN_ERR "%s: try to get %d source for" | ||
827 | " DCHECK128\n", __func__, src_idx); | ||
828 | BUG(); | ||
829 | } | ||
830 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
831 | case DMA_CDB_OPC_MULTICAST: | ||
832 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
833 | if (unlikely(src_idx > 2)) { | ||
834 | printk(KERN_ERR "%s: try to get %d source from" | ||
835 | " DMA descr\n", __func__, src_idx); | ||
836 | BUG(); | ||
837 | } | ||
838 | if (src_idx) { | ||
839 | if (le32_to_cpu(dma_hw_desc->sg1u) & | ||
840 | DMA_CUED_XOR_WIN_MSK) { | ||
841 | u8 region; | ||
842 | |||
843 | if (src_idx == 1) | ||
844 | return le32_to_cpu( | ||
845 | dma_hw_desc->sg1l) + | ||
846 | desc->unmap_len; | ||
847 | |||
848 | region = (le32_to_cpu( | ||
849 | dma_hw_desc->sg1u)) >> | ||
850 | DMA_CUED_REGION_OFF; | ||
851 | |||
852 | region &= DMA_CUED_REGION_MSK; | ||
853 | switch (region) { | ||
854 | case DMA_RXOR123: | ||
855 | return le32_to_cpu( | ||
856 | dma_hw_desc->sg1l) + | ||
857 | (desc->unmap_len << 1); | ||
858 | case DMA_RXOR124: | ||
859 | return le32_to_cpu( | ||
860 | dma_hw_desc->sg1l) + | ||
861 | (desc->unmap_len * 3); | ||
862 | case DMA_RXOR125: | ||
863 | return le32_to_cpu( | ||
864 | dma_hw_desc->sg1l) + | ||
865 | (desc->unmap_len << 2); | ||
866 | default: | ||
867 | printk(KERN_ERR | ||
868 | "%s: try to" | ||
869 | " get src3 for region %02x" | ||
870 | "PPC440SPE_DESC_RXOR12?\n", | ||
871 | __func__, region); | ||
872 | BUG(); | ||
873 | } | ||
874 | } else { | ||
875 | printk(KERN_ERR | ||
876 | "%s: try to get %d" | ||
877 | " source for non-cued descr\n", | ||
878 | __func__, src_idx); | ||
879 | BUG(); | ||
880 | } | ||
881 | } | ||
882 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
883 | default: | ||
884 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
885 | __func__, dma_hw_desc->opc); | ||
886 | BUG(); | ||
887 | } | ||
888 | return le32_to_cpu(dma_hw_desc->sg1l); | ||
889 | case PPC440SPE_XOR_ID: | ||
890 | /* May have up to 16 sources */ | ||
891 | xor_hw_desc = desc->hw_desc; | ||
892 | return xor_hw_desc->ops[src_idx].l; | ||
893 | } | ||
894 | return 0; | ||
895 | } | ||
896 | |||
897 | /** | ||
898 | * ppc440spe_desc_get_dest_addr - extract the destination address from the | ||
899 | * descriptor | ||
900 | */ | ||
901 | static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc, | ||
902 | struct ppc440spe_adma_chan *chan, int idx) | ||
903 | { | ||
904 | struct dma_cdb *dma_hw_desc; | ||
905 | struct xor_cb *xor_hw_desc; | ||
906 | |||
907 | switch (chan->device->id) { | ||
908 | case PPC440SPE_DMA0_ID: | ||
909 | case PPC440SPE_DMA1_ID: | ||
910 | dma_hw_desc = desc->hw_desc; | ||
911 | |||
912 | if (likely(!idx)) | ||
913 | return le32_to_cpu(dma_hw_desc->sg2l); | ||
914 | return le32_to_cpu(dma_hw_desc->sg3l); | ||
915 | case PPC440SPE_XOR_ID: | ||
916 | xor_hw_desc = desc->hw_desc; | ||
917 | return xor_hw_desc->cbtal; | ||
918 | } | ||
919 | return 0; | ||
920 | } | ||
921 | |||
922 | /** | ||
923 | * ppc440spe_desc_get_src_num - extract the number of source addresses from | ||
924 | * the descriptor | ||
925 | */ | ||
926 | static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc, | ||
927 | struct ppc440spe_adma_chan *chan) | ||
928 | { | ||
929 | struct dma_cdb *dma_hw_desc; | ||
930 | struct xor_cb *xor_hw_desc; | ||
931 | |||
932 | switch (chan->device->id) { | ||
933 | case PPC440SPE_DMA0_ID: | ||
934 | case PPC440SPE_DMA1_ID: | ||
935 | dma_hw_desc = desc->hw_desc; | ||
936 | |||
937 | switch (dma_hw_desc->opc) { | ||
938 | case DMA_CDB_OPC_NO_OP: | ||
939 | case DMA_CDB_OPC_DFILL128: | ||
940 | return 0; | ||
941 | case DMA_CDB_OPC_DCHECK128: | ||
942 | return 1; | ||
943 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
944 | case DMA_CDB_OPC_MULTICAST: | ||
945 | /* | ||
946 | * Only for RXOR operations we have more than | ||
947 | * one source | ||
948 | */ | ||
949 | if (le32_to_cpu(dma_hw_desc->sg1u) & | ||
950 | DMA_CUED_XOR_WIN_MSK) { | ||
951 | /* RXOR op, there are 2 or 3 sources */ | ||
952 | if (((le32_to_cpu(dma_hw_desc->sg1u) >> | ||
953 | DMA_CUED_REGION_OFF) & | ||
954 | DMA_CUED_REGION_MSK) == DMA_RXOR12) { | ||
955 | /* RXOR 1-2 */ | ||
956 | return 2; | ||
957 | } else { | ||
958 | /* RXOR 1-2-3/1-2-4/1-2-5 */ | ||
959 | return 3; | ||
960 | } | ||
961 | } | ||
962 | return 1; | ||
963 | default: | ||
964 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
965 | __func__, dma_hw_desc->opc); | ||
966 | BUG(); | ||
967 | } | ||
968 | case PPC440SPE_XOR_ID: | ||
969 | /* up to 16 sources */ | ||
970 | xor_hw_desc = desc->hw_desc; | ||
971 | return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK; | ||
972 | default: | ||
973 | BUG(); | ||
974 | } | ||
975 | return 0; | ||
976 | } | ||
977 | |||
978 | /** | ||
979 | * ppc440spe_desc_get_dst_num - get the number of destination addresses in | ||
980 | * this descriptor | ||
981 | */ | ||
982 | static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc, | ||
983 | struct ppc440spe_adma_chan *chan) | ||
984 | { | ||
985 | struct dma_cdb *dma_hw_desc; | ||
986 | |||
987 | switch (chan->device->id) { | ||
988 | case PPC440SPE_DMA0_ID: | ||
989 | case PPC440SPE_DMA1_ID: | ||
990 | /* May be 1 or 2 destinations */ | ||
991 | dma_hw_desc = desc->hw_desc; | ||
992 | switch (dma_hw_desc->opc) { | ||
993 | case DMA_CDB_OPC_NO_OP: | ||
994 | case DMA_CDB_OPC_DCHECK128: | ||
995 | return 0; | ||
996 | case DMA_CDB_OPC_MV_SG1_SG2: | ||
997 | case DMA_CDB_OPC_DFILL128: | ||
998 | return 1; | ||
999 | case DMA_CDB_OPC_MULTICAST: | ||
1000 | if (desc->dst_cnt == 2) | ||
1001 | return 2; | ||
1002 | else | ||
1003 | return 1; | ||
1004 | default: | ||
1005 | printk(KERN_ERR "%s: unknown OPC 0x%02x\n", | ||
1006 | __func__, dma_hw_desc->opc); | ||
1007 | BUG(); | ||
1008 | } | ||
1009 | case PPC440SPE_XOR_ID: | ||
1010 | /* Always only 1 destination */ | ||
1011 | return 1; | ||
1012 | default: | ||
1013 | BUG(); | ||
1014 | } | ||
1015 | return 0; | ||
1016 | } | ||
1017 | |||
1018 | /** | ||
1019 | * ppc440spe_desc_get_link - get the address of the descriptor that | 807 | * ppc440spe_desc_get_link - get the address of the descriptor that |
1020 | * follows this one | 808 | * follows this one |
1021 | */ | 809 | */ |
@@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, | |||
1707 | } | 1495 | } |
1708 | } | 1496 | } |
1709 | 1497 | ||
1710 | static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, | ||
1711 | struct ppc440spe_adma_desc_slot *desc) | ||
1712 | { | ||
1713 | u32 src_cnt, dst_cnt; | ||
1714 | dma_addr_t addr; | ||
1715 | |||
1716 | /* | ||
1717 | * get the number of sources & destination | ||
1718 | * included in this descriptor and unmap | ||
1719 | * them all | ||
1720 | */ | ||
1721 | src_cnt = ppc440spe_desc_get_src_num(desc, chan); | ||
1722 | dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); | ||
1723 | |||
1724 | /* unmap destinations */ | ||
1725 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
1726 | while (dst_cnt--) { | ||
1727 | addr = ppc440spe_desc_get_dest_addr( | ||
1728 | desc, chan, dst_cnt); | ||
1729 | dma_unmap_page(chan->device->dev, | ||
1730 | addr, desc->unmap_len, | ||
1731 | DMA_FROM_DEVICE); | ||
1732 | } | ||
1733 | } | ||
1734 | |||
1735 | /* unmap sources */ | ||
1736 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
1737 | while (src_cnt--) { | ||
1738 | addr = ppc440spe_desc_get_src_addr( | ||
1739 | desc, chan, src_cnt); | ||
1740 | dma_unmap_page(chan->device->dev, | ||
1741 | addr, desc->unmap_len, | ||
1742 | DMA_TO_DEVICE); | ||
1743 | } | ||
1744 | } | ||
1745 | } | ||
1746 | |||
1747 | /** | 1498 | /** |
1748 | * ppc440spe_adma_run_tx_complete_actions - call functions to be called | 1499 | * ppc440spe_adma_run_tx_complete_actions - call functions to be called |
1749 | * upon completion | 1500 | * upon completion |
@@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( | |||
1767 | desc->async_tx.callback( | 1518 | desc->async_tx.callback( |
1768 | desc->async_tx.callback_param); | 1519 | desc->async_tx.callback_param); |
1769 | 1520 | ||
1770 | /* unmap dma addresses | 1521 | dma_descriptor_unmap(&desc->async_tx); |
1771 | * (unmap_single vs unmap_page?) | ||
1772 | * | ||
1773 | * actually, ppc's dma_unmap_page() functions are empty, so | ||
1774 | * the following code is just for the sake of completeness | ||
1775 | */ | ||
1776 | if (chan && chan->needs_unmap && desc->group_head && | ||
1777 | desc->unmap_len) { | ||
1778 | struct ppc440spe_adma_desc_slot *unmap = | ||
1779 | desc->group_head; | ||
1780 | /* assume 1 slot per op always */ | ||
1781 | u32 slot_count = unmap->slot_cnt; | ||
1782 | |||
1783 | /* Run through the group list and unmap addresses */ | ||
1784 | for (i = 0; i < slot_count; i++) { | ||
1785 | BUG_ON(!unmap); | ||
1786 | ppc440spe_adma_unmap(chan, unmap); | ||
1787 | unmap = unmap->hw_next; | ||
1788 | } | ||
1789 | } | ||
1790 | } | 1522 | } |
1791 | 1523 | ||
1792 | /* run dependent operations */ | 1524 | /* run dependent operations */ |
@@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, | |||
3893 | 3625 | ||
3894 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | 3626 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); |
3895 | ret = dma_cookie_status(chan, cookie, txstate); | 3627 | ret = dma_cookie_status(chan, cookie, txstate); |
3896 | if (ret == DMA_SUCCESS) | 3628 | if (ret == DMA_COMPLETE) |
3897 | return ret; | 3629 | return ret; |
3898 | 3630 | ||
3899 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | 3631 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); |
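The ppc440spe driver loses several hundred lines of descriptor parsing (ppc440spe_desc_get_src_addr() and friends) that existed only so the completion path could undo its own DMA mappings. With the generic unmap data introduced in this series, the completion path simply drops whatever unmap object the submitter attached. One possible shape of that path, sketched as a minimal illustration rather than the driver's exact ordering:

	/* Completion-side pattern after conversion: run the client callback,
	 * then release the unmap data (if any) that was attached with
	 * dma_set_unmap().  dma_descriptor_unmap() is a no-op when
	 * txd->unmap is NULL. */
	static void complete_descriptor(struct dma_async_tx_descriptor *txd)
	{
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_descriptor_unmap(txd);
	}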
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 461a91ab70bb..ab26d46bbe15 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, | |||
436 | enum dma_status ret; | 436 | enum dma_status ret; |
437 | 437 | ||
438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); | 438 | ret = dma_cookie_status(&c->vc.chan, cookie, state); |
439 | if (ret == DMA_SUCCESS) | 439 | if (ret == DMA_COMPLETE) |
440 | return ret; | 440 | return ret; |
441 | 441 | ||
442 | if (!state) | 442 | if (!state) |
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index d94ab592cc1b..2e7b394def80 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan, | |||
724 | * If we don't find cookie on the queue, it has been aborted and we have | 724 | * If we don't find cookie on the queue, it has been aborted and we have |
725 | * to report error | 725 | * to report error |
726 | */ | 726 | */ |
727 | if (status != DMA_SUCCESS) { | 727 | if (status != DMA_COMPLETE) { |
728 | struct shdma_desc *sdesc; | 728 | struct shdma_desc *sdesc; |
729 | status = DMA_ERROR; | 729 | status = DMA_ERROR; |
730 | list_for_each_entry(sdesc, &schan->ld_queue, node) | 730 | list_for_each_entry(sdesc, &schan->ld_queue, node) |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1069e8869f20..0d765c0e21ec 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | |||
685 | static int sh_dmae_probe(struct platform_device *pdev) | 685 | static int sh_dmae_probe(struct platform_device *pdev) |
686 | { | 686 | { |
687 | const struct sh_dmae_pdata *pdata; | 687 | const struct sh_dmae_pdata *pdata; |
688 | unsigned long irqflags = IRQF_DISABLED, | 688 | unsigned long irqflags = 0, |
689 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | 689 | chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
690 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; | 690 | int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; |
691 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; | 691 | int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; |
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
838 | IORESOURCE_IRQ_SHAREABLE) | 838 | IORESOURCE_IRQ_SHAREABLE) |
839 | chan_flag[irq_cnt] = IRQF_SHARED; | 839 | chan_flag[irq_cnt] = IRQF_SHARED; |
840 | else | 840 | else |
841 | chan_flag[irq_cnt] = IRQF_DISABLED; | 841 | chan_flag[irq_cnt] = 0; |
842 | dev_dbg(&pdev->dev, | 842 | dev_dbg(&pdev->dev, |
843 | "Found IRQ %d for channel %d\n", | 843 | "Found IRQ %d for channel %d\n", |
844 | i, irq_cnt); | 844 | i, irq_cnt); |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82d2b97ad942..b8c031b7de4e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/log2.h> | ||
17 | #include <linux/pm.h> | 18 | #include <linux/pm.h> |
18 | #include <linux/pm_runtime.h> | 19 | #include <linux/pm_runtime.h> |
19 | #include <linux/err.h> | 20 | #include <linux/err.h> |
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, | |||
2626 | } | 2627 | } |
2627 | 2628 | ||
2628 | ret = dma_cookie_status(chan, cookie, txstate); | 2629 | ret = dma_cookie_status(chan, cookie, txstate); |
2629 | if (ret != DMA_SUCCESS) | 2630 | if (ret != DMA_COMPLETE) |
2630 | dma_set_residue(txstate, stedma40_residue(chan)); | 2631 | dma_set_residue(txstate, stedma40_residue(chan)); |
2631 | 2632 | ||
2632 | if (d40_is_paused(d40c)) | 2633 | if (d40_is_paused(d40c)) |
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2796 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | 2797 | src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
2797 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || | 2798 | dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || |
2798 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || | 2799 | dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || |
2799 | ((src_addr_width > 1) && (src_addr_width & 1)) || | 2800 | !is_power_of_2(src_addr_width) || |
2800 | ((dst_addr_width > 1) && (dst_addr_width & 1))) | 2801 | !is_power_of_2(dst_addr_width)) |
2801 | return -EINVAL; | 2802 | return -EINVAL; |
2802 | 2803 | ||
2803 | cfg->src_info.data_width = src_addr_width; | 2804 | cfg->src_info.data_width = src_addr_width; |
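In d40_set_runtime_config() the bus-width check is tightened: the old test only rejected odd widths greater than one (it would, for instance, have let 6 through), whereas is_power_of_2() from the newly included <linux/log2.h> accepts exactly 1, 2, 4, 8, ... and rejects 0. A quick stand-alone illustration of the difference (not driver code):

	#include <linux/log2.h>

	static bool old_width_ok(u32 w)
	{
		return !(w > 1 && (w & 1));	/* also accepts 6, 10, ... */
	}

	static bool new_width_ok(u32 w)
	{
		return is_power_of_2(w);	/* 1, 2, 4, 8, ...; false for 0 */
	}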
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5d4986e5f5fa..73654e33f13b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, | |||
570 | 570 | ||
571 | list_del(&sgreq->node); | 571 | list_del(&sgreq->node); |
572 | if (sgreq->last_sg) { | 572 | if (sgreq->last_sg) { |
573 | dma_desc->dma_status = DMA_SUCCESS; | 573 | dma_desc->dma_status = DMA_COMPLETE; |
574 | dma_cookie_complete(&dma_desc->txd); | 574 | dma_cookie_complete(&dma_desc->txd); |
575 | if (!dma_desc->cb_count) | 575 | if (!dma_desc->cb_count) |
576 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); | 576 | list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); |
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, | |||
768 | unsigned int residual; | 768 | unsigned int residual; |
769 | 769 | ||
770 | ret = dma_cookie_status(dc, cookie, txstate); | 770 | ret = dma_cookie_status(dc, cookie, txstate); |
771 | if (ret == DMA_SUCCESS) | 771 | if (ret == DMA_COMPLETE) |
772 | return ret; | 772 | return ret; |
773 | 773 | ||
774 | spin_lock_irqsave(&tdc->lock, flags); | 774 | spin_lock_irqsave(&tdc->lock, flags); |
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
1018 | return &dma_desc->txd; | 1018 | return &dma_desc->txd; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | 1021 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( |
1022 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | 1022 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, |
1023 | size_t period_len, enum dma_transfer_direction direction, | 1023 | size_t period_len, enum dma_transfer_direction direction, |
1024 | unsigned long flags, void *context) | 1024 | unsigned long flags, void *context) |
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 28af214fce04..4506a7b4f972 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) | |||
154 | return done; | 154 | return done; |
155 | } | 155 | } |
156 | 156 | ||
157 | static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | ||
158 | bool single) | ||
159 | { | ||
160 | dma_addr_t addr; | ||
161 | int len; | ||
162 | |||
163 | addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | | ||
164 | dma_desc[4]; | ||
165 | |||
166 | len = (dma_desc[3] << 8) | dma_desc[2]; | ||
167 | |||
168 | if (single) | ||
169 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | ||
170 | DMA_TO_DEVICE); | ||
171 | else | ||
172 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | ||
173 | DMA_TO_DEVICE); | ||
174 | } | ||
175 | |||
176 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | ||
177 | { | ||
178 | struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, | ||
179 | struct timb_dma_chan, chan); | ||
180 | u8 *descs; | ||
181 | |||
182 | for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { | ||
183 | __td_unmap_desc(td_chan, descs, single); | ||
184 | if (descs[0] & 0x02) | ||
185 | break; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, | 157 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, |
190 | struct scatterlist *sg, bool last) | 158 | struct scatterlist *sg, bool last) |
191 | { | 159 | { |
@@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
293 | 261 | ||
294 | list_move(&td_desc->desc_node, &td_chan->free_list); | 262 | list_move(&td_desc->desc_node, &td_chan->free_list); |
295 | 263 | ||
296 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 264 | dma_descriptor_unmap(txd); |
297 | __td_unmap_descs(td_desc, | ||
298 | txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); | ||
299 | |||
300 | /* | 265 | /* |
301 | * The API requires that no submissions are done from a | 266 | * The API requires that no submissions are done from a |
302 | * callback, so we don't need to drop the lock here | 267 | * callback, so we don't need to drop the lock here |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 71e8e775189e..bae6c29f5502 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | |||
419 | list_splice_init(&desc->tx_list, &dc->free_list); | 419 | list_splice_init(&desc->tx_list, &dc->free_list); |
420 | list_move(&desc->desc_node, &dc->free_list); | 420 | list_move(&desc->desc_node, &dc->free_list); |
421 | 421 | ||
422 | if (!ds) { | 422 | dma_descriptor_unmap(txd); |
423 | dma_addr_t dmaaddr; | ||
424 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
425 | dmaaddr = is_dmac64(dc) ? | ||
426 | desc->hwdesc.DAR : desc->hwdesc32.DAR; | ||
427 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
428 | dma_unmap_single(chan2parent(&dc->chan), | ||
429 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
430 | else | ||
431 | dma_unmap_page(chan2parent(&dc->chan), | ||
432 | dmaaddr, desc->len, DMA_FROM_DEVICE); | ||
433 | } | ||
434 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
435 | dmaaddr = is_dmac64(dc) ? | ||
436 | desc->hwdesc.SAR : desc->hwdesc32.SAR; | ||
437 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
438 | dma_unmap_single(chan2parent(&dc->chan), | ||
439 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
440 | else | ||
441 | dma_unmap_page(chan2parent(&dc->chan), | ||
442 | dmaaddr, desc->len, DMA_TO_DEVICE); | ||
443 | } | ||
444 | } | ||
445 | |||
446 | /* | 423 | /* |
447 | * The API requires that no submissions are done from a | 424 | * The API requires that no submissions are done from a |
448 | * callback, so we don't need to drop the lock here | 425 | * callback, so we don't need to drop the lock here |
@@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
962 | enum dma_status ret; | 939 | enum dma_status ret; |
963 | 940 | ||
964 | ret = dma_cookie_status(chan, cookie, txstate); | 941 | ret = dma_cookie_status(chan, cookie, txstate); |
965 | if (ret == DMA_SUCCESS) | 942 | if (ret == DMA_COMPLETE) |
966 | return DMA_SUCCESS; | 943 | return DMA_COMPLETE; |
967 | 944 | ||
968 | spin_lock_bh(&dc->lock); | 945 | spin_lock_bh(&dc->lock); |
969 | txx9dmac_scan_descriptors(dc); | 946 | txx9dmac_scan_descriptors(dc); |
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c index 36513e896413..65cab70fefcb 100644 --- a/drivers/media/platform/m2m-deinterlace.c +++ b/drivers/media/platform/m2m-deinterlace.c | |||
@@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, | |||
341 | ctx->xt->dir = DMA_MEM_TO_MEM; | 341 | ctx->xt->dir = DMA_MEM_TO_MEM; |
342 | ctx->xt->src_sgl = false; | 342 | ctx->xt->src_sgl = false; |
343 | ctx->xt->dst_sgl = true; | 343 | ctx->xt->dst_sgl = true; |
344 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | | 344 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
345 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; | ||
346 | 345 | ||
347 | tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); | 346 | tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); |
348 | if (tx == NULL) { | 347 | if (tx == NULL) { |
diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c index 6a74ce040d28..ccdadd623a3a 100644 --- a/drivers/media/platform/timblogiw.c +++ b/drivers/media/platform/timblogiw.c | |||
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) | |||
565 | 565 | ||
566 | desc = dmaengine_prep_slave_sg(fh->chan, | 566 | desc = dmaengine_prep_slave_sg(fh->chan, |
567 | buf->sg, sg_elems, DMA_DEV_TO_MEM, | 567 | buf->sg, sg_elems, DMA_DEV_TO_MEM, |
568 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 568 | DMA_PREP_INTERRUPT); |
569 | if (!desc) { | 569 | if (!desc) { |
570 | spin_lock_irq(&fh->queue_lock); | 570 | spin_lock_irq(&fh->queue_lock); |
571 | list_del_init(&vb->queue); | 571 | list_del_init(&vb->queue); |
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 08b18f3f5264..9e2b985293fc 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c | |||
@@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) | |||
633 | struct dma_async_tx_descriptor *tx; | 633 | struct dma_async_tx_descriptor *tx; |
634 | dma_cookie_t cookie; | 634 | dma_cookie_t cookie; |
635 | dma_addr_t dst, src; | 635 | dma_addr_t dst, src; |
636 | unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | | 636 | unsigned long dma_flags = 0; |
637 | DMA_COMPL_SKIP_SRC_UNMAP; | ||
638 | 637 | ||
639 | dst_sg = buf->vb.sglist; | 638 | dst_sg = buf->vb.sglist; |
640 | dst_nents = buf->vb.sglen; | 639 | dst_nents = buf->vb.sglen; |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index d78a97d4153a..59f08c44abdb 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | |||
375 | 375 | ||
376 | dma_dev = host->dma_chan->device; | 376 | dma_dev = host->dma_chan->device; |
377 | 377 | ||
378 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | 378 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
379 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
380 | 379 | ||
381 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); | 380 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); |
382 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { | 381 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 3dc1a7564d87..8b2752263db9 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | |||
573 | dma_dev = chan->device; | 573 | dma_dev = chan->device; |
574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); | 574 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); |
575 | 575 | ||
576 | flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP; | ||
577 | |||
578 | if (direction == DMA_TO_DEVICE) { | 576 | if (direction == DMA_TO_DEVICE) { |
579 | dma_src = dma_addr; | 577 | dma_src = dma_addr; |
580 | dma_dst = host->data_pa; | 578 | dma_dst = host->data_pa; |
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 0951f7aca1ef..822616e3c375 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
@@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | |||
459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; |
460 | 460 | ||
461 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, | 461 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
462 | &ctl->sg, 1, DMA_MEM_TO_DEV, | 462 | &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
464 | if (!ctl->adesc) | 463 | if (!ctl->adesc) |
465 | return NETDEV_TX_BUSY; | 464 | return NETDEV_TX_BUSY; |
466 | 465 | ||
@@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) | |||
571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | 570 | sg_dma_len(sg) = DMA_BUFFER_SIZE; |
572 | 571 | ||
573 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, | 572 | ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, |
574 | sg, 1, DMA_DEV_TO_MEM, | 573 | sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); |
575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | ||
576 | 574 | ||
577 | if (!ctl->adesc) | 575 | if (!ctl->adesc) |
578 | goto out; | 576 | goto out; |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 12a9e83c008b..d0222f13d154 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
@@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
1034 | struct dma_chan *chan = qp->dma_chan; | 1034 | struct dma_chan *chan = qp->dma_chan; |
1035 | struct dma_device *device; | 1035 | struct dma_device *device; |
1036 | size_t pay_off, buff_off; | 1036 | size_t pay_off, buff_off; |
1037 | dma_addr_t src, dest; | 1037 | struct dmaengine_unmap_data *unmap; |
1038 | dma_cookie_t cookie; | 1038 | dma_cookie_t cookie; |
1039 | void *buf = entry->buf; | 1039 | void *buf = entry->buf; |
1040 | unsigned long flags; | ||
1041 | 1040 | ||
1042 | entry->len = len; | 1041 | entry->len = len; |
1043 | 1042 | ||
@@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
1045 | goto err; | 1044 | goto err; |
1046 | 1045 | ||
1047 | if (len < copy_bytes) | 1046 | if (len < copy_bytes) |
1048 | goto err1; | 1047 | goto err_wait; |
1049 | 1048 | ||
1050 | device = chan->device; | 1049 | device = chan->device; |
1051 | pay_off = (size_t) offset & ~PAGE_MASK; | 1050 | pay_off = (size_t) offset & ~PAGE_MASK; |
1052 | buff_off = (size_t) buf & ~PAGE_MASK; | 1051 | buff_off = (size_t) buf & ~PAGE_MASK; |
1053 | 1052 | ||
1054 | if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) | 1053 | if (!is_dma_copy_aligned(device, pay_off, buff_off, len)) |
1055 | goto err1; | 1054 | goto err_wait; |
1056 | 1055 | ||
1057 | dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE); | 1056 | unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT); |
1058 | if (dma_mapping_error(device->dev, dest)) | 1057 | if (!unmap) |
1059 | goto err1; | 1058 | goto err_wait; |
1060 | 1059 | ||
1061 | src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE); | 1060 | unmap->len = len; |
1062 | if (dma_mapping_error(device->dev, src)) | 1061 | unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset), |
1063 | goto err2; | 1062 | pay_off, len, DMA_TO_DEVICE); |
1063 | if (dma_mapping_error(device->dev, unmap->addr[0])) | ||
1064 | goto err_get_unmap; | ||
1065 | |||
1066 | unmap->to_cnt = 1; | ||
1064 | 1067 | ||
1065 | flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE | | 1068 | unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf), |
1066 | DMA_PREP_INTERRUPT; | 1069 | buff_off, len, DMA_FROM_DEVICE); |
1067 | txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); | 1070 | if (dma_mapping_error(device->dev, unmap->addr[1])) |
1071 | goto err_get_unmap; | ||
1072 | |||
1073 | unmap->from_cnt = 1; | ||
1074 | |||
1075 | txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], | ||
1076 | unmap->addr[0], len, | ||
1077 | DMA_PREP_INTERRUPT); | ||
1068 | if (!txd) | 1078 | if (!txd) |
1069 | goto err3; | 1079 | goto err_get_unmap; |
1070 | 1080 | ||
1071 | txd->callback = ntb_rx_copy_callback; | 1081 | txd->callback = ntb_rx_copy_callback; |
1072 | txd->callback_param = entry; | 1082 | txd->callback_param = entry; |
1083 | dma_set_unmap(txd, unmap); | ||
1073 | 1084 | ||
1074 | cookie = dmaengine_submit(txd); | 1085 | cookie = dmaengine_submit(txd); |
1075 | if (dma_submit_error(cookie)) | 1086 | if (dma_submit_error(cookie)) |
1076 | goto err3; | 1087 | goto err_set_unmap; |
1088 | |||
1089 | dmaengine_unmap_put(unmap); | ||
1077 | 1090 | ||
1078 | qp->last_cookie = cookie; | 1091 | qp->last_cookie = cookie; |
1079 | 1092 | ||
@@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, | |||
1081 | 1094 | ||
1082 | return; | 1095 | return; |
1083 | 1096 | ||
1084 | err3: | 1097 | err_set_unmap: |
1085 | dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); | 1098 | dmaengine_unmap_put(unmap); |
1086 | err2: | 1099 | err_get_unmap: |
1087 | dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE); | 1100 | dmaengine_unmap_put(unmap); |
1088 | err1: | 1101 | err_wait: |
1089 | /* If the callbacks come out of order, the writing of the index to the | 1102 | /* If the callbacks come out of order, the writing of the index to the |
1090 | * last completed will be out of order. This may result in the | 1103 | * last completed will be out of order. This may result in the |
1091 | * receive stalling forever. | 1104 | * receive stalling forever. |
@@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, | |||
1245 | struct dma_chan *chan = qp->dma_chan; | 1258 | struct dma_chan *chan = qp->dma_chan; |
1246 | struct dma_device *device; | 1259 | struct dma_device *device; |
1247 | size_t dest_off, buff_off; | 1260 | size_t dest_off, buff_off; |
1248 | dma_addr_t src, dest; | 1261 | struct dmaengine_unmap_data *unmap; |
1262 | dma_addr_t dest; | ||
1249 | dma_cookie_t cookie; | 1263 | dma_cookie_t cookie; |
1250 | void __iomem *offset; | 1264 | void __iomem *offset; |
1251 | size_t len = entry->len; | 1265 | size_t len = entry->len; |
1252 | void *buf = entry->buf; | 1266 | void *buf = entry->buf; |
1253 | unsigned long flags; | ||
1254 | 1267 | ||
1255 | offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; | 1268 | offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; |
1256 | hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); | 1269 | hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); |
@@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, | |||
1273 | if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) | 1286 | if (!is_dma_copy_aligned(device, buff_off, dest_off, len)) |
1274 | goto err; | 1287 | goto err; |
1275 | 1288 | ||
1276 | src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE); | 1289 | unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT); |
1277 | if (dma_mapping_error(device->dev, src)) | 1290 | if (!unmap) |
1278 | goto err; | 1291 | goto err; |
1279 | 1292 | ||
1280 | flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT; | 1293 | unmap->len = len; |
1281 | txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags); | 1294 | unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf), |
1295 | buff_off, len, DMA_TO_DEVICE); | ||
1296 | if (dma_mapping_error(device->dev, unmap->addr[0])) | ||
1297 | goto err_get_unmap; | ||
1298 | |||
1299 | unmap->to_cnt = 1; | ||
1300 | |||
1301 | txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, | ||
1302 | DMA_PREP_INTERRUPT); | ||
1282 | if (!txd) | 1303 | if (!txd) |
1283 | goto err1; | 1304 | goto err_get_unmap; |
1284 | 1305 | ||
1285 | txd->callback = ntb_tx_copy_callback; | 1306 | txd->callback = ntb_tx_copy_callback; |
1286 | txd->callback_param = entry; | 1307 | txd->callback_param = entry; |
1308 | dma_set_unmap(txd, unmap); | ||
1287 | 1309 | ||
1288 | cookie = dmaengine_submit(txd); | 1310 | cookie = dmaengine_submit(txd); |
1289 | if (dma_submit_error(cookie)) | 1311 | if (dma_submit_error(cookie)) |
1290 | goto err1; | 1312 | goto err_set_unmap; |
1313 | |||
1314 | dmaengine_unmap_put(unmap); | ||
1291 | 1315 | ||
1292 | dma_async_issue_pending(chan); | 1316 | dma_async_issue_pending(chan); |
1293 | qp->tx_async++; | 1317 | qp->tx_async++; |
1294 | 1318 | ||
1295 | return; | 1319 | return; |
1296 | err1: | 1320 | err_set_unmap: |
1297 | dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE); | 1321 | dmaengine_unmap_put(unmap); |
1322 | err_get_unmap: | ||
1323 | dmaengine_unmap_put(unmap); | ||
1298 | err: | 1324 | err: |
1299 | ntb_memcpy_tx(entry, offset); | 1325 | ntb_memcpy_tx(entry, offset); |
1300 | qp->tx_memcpy++; | 1326 | qp->tx_memcpy++; |
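On the client side, ntb_transport shows the submission pattern that replaces the DMA_COMPL_* flags: map the buffers into a dmaengine_unmap_data object, attach it to the descriptor, and drop the local reference once the descriptor holds its own. Condensed from the ntb_async_rx() hunk above, with error handling omitted and src/dst/src_off/dst_off/len standing in for the real buffers:

	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(dst),
				      dst_off, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	dma_set_unmap(txd, unmap);	/* descriptor takes its own reference */
	cookie = dmaengine_submit(txd);
	dmaengine_unmap_put(unmap);	/* drop the submitter's reference */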
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index b9f0192758d6..6d207afec8cb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
150 | &dws->tx_sgl, | 150 | &dws->tx_sgl, |
151 | 1, | 151 | 1, |
152 | DMA_MEM_TO_DEV, | 152 | DMA_MEM_TO_DEV, |
153 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 153 | DMA_PREP_INTERRUPT); |
154 | txdesc->callback = dw_spi_dma_done; | 154 | txdesc->callback = dw_spi_dma_done; |
155 | txdesc->callback_param = dws; | 155 | txdesc->callback_param = dws; |
156 | 156 | ||
@@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
173 | &dws->rx_sgl, | 173 | &dws->rx_sgl, |
174 | 1, | 174 | 1, |
175 | DMA_DEV_TO_MEM, | 175 | DMA_DEV_TO_MEM, |
176 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 176 | DMA_PREP_INTERRUPT); |
177 | rxdesc->callback = dw_spi_dma_done; | 177 | rxdesc->callback = dw_spi_dma_done; |
178 | rxdesc->callback_param = dws; | 178 | rxdesc->callback_param = dws; |
179 | 179 | ||
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 537750261aaa..7d8103cd3e2e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work) | |||
1433 | desc = s->desc_rx[new]; | 1433 | desc = s->desc_rx[new]; |
1434 | 1434 | ||
1435 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != | 1435 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != |
1436 | DMA_SUCCESS) { | 1436 | DMA_COMPLETE) { |
1437 | /* Handle incomplete DMA receive */ | 1437 | /* Handle incomplete DMA receive */ |
1438 | struct dma_chan *chan = s->chan_rx; | 1438 | struct dma_chan *chan = s->chan_rx; |
1439 | struct shdma_desc *sh_desc = container_of(desc, | 1439 | struct shdma_desc *sh_desc = container_of(desc, |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0bc727534108..41cf0c399288 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie) | |||
45 | 45 | ||
46 | /** | 46 | /** |
47 | * enum dma_status - DMA transaction status | 47 | * enum dma_status - DMA transaction status |
48 | * @DMA_SUCCESS: transaction completed successfully | 48 | * @DMA_COMPLETE: transaction completed |
49 | * @DMA_IN_PROGRESS: transaction not yet processed | 49 | * @DMA_IN_PROGRESS: transaction not yet processed |
50 | * @DMA_PAUSED: transaction is paused | 50 | * @DMA_PAUSED: transaction is paused |
51 | * @DMA_ERROR: transaction failed | 51 | * @DMA_ERROR: transaction failed |
52 | */ | 52 | */ |
53 | enum dma_status { | 53 | enum dma_status { |
54 | DMA_SUCCESS, | 54 | DMA_COMPLETE, |
55 | DMA_IN_PROGRESS, | 55 | DMA_IN_PROGRESS, |
56 | DMA_PAUSED, | 56 | DMA_PAUSED, |
57 | DMA_ERROR, | 57 | DMA_ERROR, |
@@ -171,12 +171,6 @@ struct dma_interleaved_template { | |||
171 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client | 171 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client |
172 | * acknowledges receipt, i.e. has has a chance to establish any dependency | 172 | * acknowledges receipt, i.e. has has a chance to establish any dependency |
173 | * chains | 173 | * chains |
174 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) | ||
175 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) | ||
176 | * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single | ||
177 | * (if not set, do the source dma-unmapping as page) | ||
178 | * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single | ||
179 | * (if not set, do the destination dma-unmapping as page) | ||
180 | * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q | 174 | * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q |
181 | * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P | 175 | * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P |
182 | * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as | 176 | * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as |
@@ -188,14 +182,10 @@ struct dma_interleaved_template { | |||
188 | enum dma_ctrl_flags { | 182 | enum dma_ctrl_flags { |
189 | DMA_PREP_INTERRUPT = (1 << 0), | 183 | DMA_PREP_INTERRUPT = (1 << 0), |
190 | DMA_CTRL_ACK = (1 << 1), | 184 | DMA_CTRL_ACK = (1 << 1), |
191 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), | 185 | DMA_PREP_PQ_DISABLE_P = (1 << 2), |
192 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), | 186 | DMA_PREP_PQ_DISABLE_Q = (1 << 3), |
193 | DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), | 187 | DMA_PREP_CONTINUE = (1 << 4), |
194 | DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), | 188 | DMA_PREP_FENCE = (1 << 5), |
195 | DMA_PREP_PQ_DISABLE_P = (1 << 6), | ||
196 | DMA_PREP_PQ_DISABLE_Q = (1 << 7), | ||
197 | DMA_PREP_CONTINUE = (1 << 8), | ||
198 | DMA_PREP_FENCE = (1 << 9), | ||
199 | }; | 189 | }; |
200 | 190 | ||
201 | /** | 191 | /** |
@@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref); | |||
413 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | 403 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
414 | 404 | ||
415 | typedef void (*dma_async_tx_callback)(void *dma_async_param); | 405 | typedef void (*dma_async_tx_callback)(void *dma_async_param); |
406 | |||
407 | struct dmaengine_unmap_data { | ||
408 | u8 to_cnt; | ||
409 | u8 from_cnt; | ||
410 | u8 bidi_cnt; | ||
411 | struct device *dev; | ||
412 | struct kref kref; | ||
413 | size_t len; | ||
414 | dma_addr_t addr[0]; | ||
415 | }; | ||
416 | |||
416 | /** | 417 | /** |
417 | * struct dma_async_tx_descriptor - async transaction descriptor | 418 | * struct dma_async_tx_descriptor - async transaction descriptor |
418 | * ---dma generic offload fields--- | 419 | * ---dma generic offload fields--- |
@@ -438,6 +439,7 @@ struct dma_async_tx_descriptor { | |||
438 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 439 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
439 | dma_async_tx_callback callback; | 440 | dma_async_tx_callback callback; |
440 | void *callback_param; | 441 | void *callback_param; |
442 | struct dmaengine_unmap_data *unmap; | ||
441 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH | 443 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
442 | struct dma_async_tx_descriptor *next; | 444 | struct dma_async_tx_descriptor *next; |
443 | struct dma_async_tx_descriptor *parent; | 445 | struct dma_async_tx_descriptor *parent; |
@@ -445,6 +447,40 @@ struct dma_async_tx_descriptor { | |||
445 | #endif | 447 | #endif |
446 | }; | 448 | }; |
447 | 449 | ||
450 | #ifdef CONFIG_DMA_ENGINE | ||
451 | static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, | ||
452 | struct dmaengine_unmap_data *unmap) | ||
453 | { | ||
454 | kref_get(&unmap->kref); | ||
455 | tx->unmap = unmap; | ||
456 | } | ||
457 | |||
458 | struct dmaengine_unmap_data * | ||
459 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); | ||
460 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); | ||
461 | #else | ||
462 | static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, | ||
463 | struct dmaengine_unmap_data *unmap) | ||
464 | { | ||
465 | } | ||
466 | static inline struct dmaengine_unmap_data * | ||
467 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) | ||
468 | { | ||
469 | return NULL; | ||
470 | } | ||
471 | static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) | ||
472 | { | ||
473 | } | ||
474 | #endif | ||
475 | |||
476 | static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) | ||
477 | { | ||
478 | if (tx->unmap) { | ||
479 | dmaengine_unmap_put(tx->unmap); | ||
480 | tx->unmap = NULL; | ||
481 | } | ||
482 | } | ||
483 | |||
448 | #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH | 484 | #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
449 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) | 485 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) |
450 | { | 486 | { |
@@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | |||
979 | { | 1015 | { |
980 | if (last_complete <= last_used) { | 1016 | if (last_complete <= last_used) { |
981 | if ((cookie <= last_complete) || (cookie > last_used)) | 1017 | if ((cookie <= last_complete) || (cookie > last_used)) |
982 | return DMA_SUCCESS; | 1018 | return DMA_COMPLETE; |
983 | } else { | 1019 | } else { |
984 | if ((cookie <= last_complete) && (cookie > last_used)) | 1020 | if ((cookie <= last_complete) && (cookie > last_used)) |
985 | return DMA_SUCCESS; | 1021 | return DMA_COMPLETE; |
986 | } | 1022 | } |
987 | return DMA_IN_PROGRESS; | 1023 | return DMA_IN_PROGRESS; |
988 | } | 1024 | } |
@@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ | |||
1013 | } | 1049 | } |
1014 | static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) | 1050 | static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
1015 | { | 1051 | { |
1016 | return DMA_SUCCESS; | 1052 | return DMA_COMPLETE; |
1017 | } | 1053 | } |
1018 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 1054 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
1019 | { | 1055 | { |
1020 | return DMA_SUCCESS; | 1056 | return DMA_COMPLETE; |
1021 | } | 1057 | } |
1022 | static inline void dma_issue_pending_all(void) | 1058 | static inline void dma_issue_pending_all(void) |
1023 | { | 1059 | { |
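The dmaengine.h change drives most of this patch: DMA_SUCCESS becomes DMA_COMPLETE and the four DMA_COMPL_* unmap flags disappear, which is why every driver's tx_status path above is edited the same way. For reference, the shape those paths converge on, written here for an imaginary foo driver (foo_residue() is a placeholder):

	static enum dma_status foo_tx_status(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
	{
		enum dma_status ret;

		ret = dma_cookie_status(chan, cookie, txstate);
		if (ret == DMA_COMPLETE)	/* formerly DMA_SUCCESS */
			return ret;

		/* still in flight: report how many bytes remain */
		dma_set_residue(txstate, foo_residue(chan));
		return ret;
	}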
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 179fb91bb5f2..f50821cb64be 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -67,10 +67,10 @@ struct edmacc_param { | |||
67 | #define ITCCHEN BIT(23) | 67 | #define ITCCHEN BIT(23) |
68 | 68 | ||
69 | /*ch_status paramater of callback function possible values*/ | 69 | /*ch_status paramater of callback function possible values*/ |
70 | #define DMA_COMPLETE 1 | 70 | #define EDMA_DMA_COMPLETE 1 |
71 | #define DMA_CC_ERROR 2 | 71 | #define EDMA_DMA_CC_ERROR 2 |
72 | #define DMA_TC1_ERROR 3 | 72 | #define EDMA_DMA_TC1_ERROR 3 |
73 | #define DMA_TC2_ERROR 4 | 73 | #define EDMA_DMA_TC2_ERROR 4 |
74 | 74 | ||
75 | enum address_mode { | 75 | enum address_mode { |
76 | INCR = 0, | 76 | INCR = 0, |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3dc0c6cf02a8..c4638e6f0238 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1425,7 +1425,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) | |||
1425 | do { | 1425 | do { |
1426 | if (dma_async_is_tx_complete(tp->ucopy.dma_chan, | 1426 | if (dma_async_is_tx_complete(tp->ucopy.dma_chan, |
1427 | last_issued, &done, | 1427 | last_issued, &done, |
1428 | &used) == DMA_SUCCESS) { | 1428 | &used) == DMA_COMPLETE) { |
1429 | /* Safe to free early-copied skbs now */ | 1429 | /* Safe to free early-copied skbs now */ |
1430 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1430 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1431 | break; | 1431 | break; |
@@ -1433,7 +1433,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait) | |||
1433 | struct sk_buff *skb; | 1433 | struct sk_buff *skb; |
1434 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | 1434 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && |
1435 | (dma_async_is_complete(skb->dma_cookie, done, | 1435 | (dma_async_is_complete(skb->dma_cookie, done, |
1436 | used) == DMA_SUCCESS)) { | 1436 | used) == DMA_COMPLETE)) { |
1437 | __skb_dequeue(&sk->sk_async_wait_queue); | 1437 | __skb_dequeue(&sk->sk_async_wait_queue); |
1438 | kfree_skb(skb); | 1438 | kfree_skb(skb); |
1439 | } | 1439 | } |
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index fa64cd85204f..fb5d107f5603 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c | |||
@@ -238,7 +238,7 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data) | |||
238 | print_buf_info(prtd->ram_channel, "i ram_channel"); | 238 | print_buf_info(prtd->ram_channel, "i ram_channel"); |
239 | pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status); | 239 | pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status); |
240 | 240 | ||
241 | if (unlikely(ch_status != DMA_COMPLETE)) | 241 | if (unlikely(ch_status != EDMA_DMA_COMPLETE)) |
242 | return; | 242 | return; |
243 | 243 | ||
244 | if (snd_pcm_running(substream)) { | 244 | if (snd_pcm_running(substream)) { |
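Finally, the EDMA platform header prefixes its channel-status codes with EDMA_ so they no longer collide with the renamed enum dma_status value DMA_COMPLETE; callbacks that receive the EDMA-private code, such as the davinci PCM interrupt handler above, simply compare against the new name:

	/* ch_status carries the EDMA-private code, not enum dma_status */
	if (unlikely(ch_status != EDMA_DMA_COMPLETE))
		return;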