Diffstat (limited to 'drivers/dma')

 drivers/dma/Kconfig  |    8
 drivers/dma/Makefile |    1
 drivers/dma/mv_xor.c | 1364
 drivers/dma/mv_xor.h |  183
 4 files changed, 1556 insertions, 0 deletions

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e4dd0065da33..5af8b1cfc1e9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -47,6 +47,14 @@ config FSL_DMA
 	  MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
 	  The MPC8349, MPC8360 is also supported.
 
+config MV_XOR
+	bool "Marvell XOR engine support"
+	depends on PLAT_ORION
+	select ASYNC_CORE
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Marvell XOR engine.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d945902..ee272fd329c9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
 ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 000000000000..f0c123ce8ae0
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1364 @@
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <asm/plat-orion/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on the new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			u32 src_cnt = unmap->unmap_src_cnt;
			dma_addr_t addr = mv_desc_get_dest_addr(unmap);

			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			while (src_cnt--) {
				addr = mv_desc_get_src_addr(unmap, src_cnt);
				dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found, start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
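
/*
 * A minimal sketch of the two-pass search above, on a plain bitmap
 * instead of the slot list (illustration only, not part of this
 * patch): pass 0 resumes from the last allocation, pass 1 restarts
 * from slot 0 and gives up at the first busy slot it meets.
 */
static int example_find_free_slot(const u8 *busy, int nr_slots, int last_used)
{
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		for (i = (pass ? 0 : last_used); i < nr_slots; i++) {
			if (!busy[i])
				return i;
			if (pass)
				break;	/* second pass: first busy slot ends it */
		}
	}
	return -1;	/* caller schedules the cleanup tasklet and retries */
}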

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}
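
/*
 * Cookie lifecycle, illustrated: mv_xor_alloc_slots() marks a freshly
 * allocated transaction with cookie == -EBUSY, submission replaces it
 * with a monotonically increasing positive value, and the increment
 * above wraps INT_MAX back to 1, so a valid cookie is always > 0
 * (negative cookies never reach mv_xor_run_tx_complete_actions()).
 */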

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
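
/*
 * The append path above, pictured (illustration only): with hardware
 * already walking D1 -> D2 -> D3, submitting D4 links it into the
 * software chain first and then patches the hardware chain:
 *
 *	before:	D1 -> D2 -> D3 (phy_next_desc == 0)
 *	after:	D1 -> D2 -> D3 -> D4
 *
 * If the channel idled exactly on D3 before the link was written, no
 * fetch will follow it, so the channel is restarted on D4 instead.
 */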

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
			       " %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
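
/*
 * Typical dmaengine client use of the prep routine above; a minimal
 * sketch (the self-tests below follow the same sequence), assuming
 * the caller has already DMA-mapped dest and src:
 */
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
					 dma_addr_t dest, dma_addr_t src,
					 size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);		  /* mv_xor_tx_submit() */
	chan->device->device_issue_pending(chan); /* mv_xor_issue_pending() */
	async_tx_ack(tx);
	return cookie;
}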

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in the memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
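
/*
 * dma_async_is_complete() treats the cookies as a monotonically
 * increasing sequence, e.g. (illustration only): with last_complete
 * == 5 and last_used == 9, cookie 4 reports DMA_SUCCESS while cookie
 * 7 reports DMA_IN_PROGRESS; the second pass above re-samples both
 * values after cleanup so a just-finished transaction is not missed.
 */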

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_is_tx_complete = mv_xor_is_complete;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	INIT_RCU_HEAD(&mv_chan->common.rcu);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
out:
	return ret;
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}
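
/*
 * Worked example for the window setup above (illustration only):
 * with two DRAM chip selects the loop leaves
 *
 *	win_enable = (1 << 0) | (1 << 1)	- windows 0 and 1 enabled
 *		   | (3 << 16) | (3 << 18)	- read/write access allowed
 *		   = 0x000f0003
 *
 * and the same value is written to both channels' BAR-enable registers.
 */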

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);
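
/*
 * Board code is expected to register one MV_XOR_SHARED_NAME device
 * for the register window plus one MV_XOR_NAME device per channel;
 * a minimal sketch, assuming plat-orion supplies the platform data
 * consumed above (the resource layout and values are hypothetical):
 */
static struct mv_xor_platform_shared_data example_xor_shared_data = {
	.dram		= NULL,	/* board's mbus_dram_target_info goes here */
};

static struct platform_device example_xor_shared = {
	.name		= MV_XOR_SHARED_NAME,
	.id		= 0,
	.dev		= {
		.platform_data = &example_xor_shared_data,
	},
	/* plus two IORESOURCE_MEM entries: xor_base and xor_high_base */
};

static struct mv_xor_platform_data example_xor0_data = {
	.shared		= &example_xor_shared,
	.hw_id		= 0,
	.pool_size	= PAGE_SIZE,	/* PAGE_SIZE / 64 = 64 descriptor slots */
};

/* the per-channel MV_XOR_NAME device carries example_xor0_data as its
 * platform_data plus one IORESOURCE_IRQ, and has DMA_MEMCPY/DMA_XOR
 * set in cap_mask via dma_cap_set() before registration. */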

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");

diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 000000000000..06cafe1ef521
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define USE_TIMER
#define MV_XOR_SLOT_SIZE	64
#define MV_XOR_THRESHOLD	1
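/* e.g. (illustration only) a PAGE_SIZE pool gives 4096 / 64 = 64 slots,
 * matching num_descs_in_pool in mv_xor_alloc_chan_resources() */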

#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_OPERATION_MODE_MEMSET	4

#define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_base + 0x220 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_base + 0x2B0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_base + 0x2C0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_base + 0x2E0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_base + 0x2E4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE	0x3F5

#define WINDOW_BASE(w)		(0x250 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))
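
/*
 * Example (illustration only): per-channel registers sit 4 bytes
 * apart, so for chan->idx == 1, XOR_CURR_DESC is mmr_base + 0x214 and
 * XOR_ACTIVATION is mmr_base + 0x24; the cause/mask registers are
 * shared between the two channels, channel 1 owning the upper 16 bits
 * (hence the idx * 16 shifts in mv_xor.c).
 */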

struct mv_xor_shared_private {
	void __iomem	*xor_base;
	void __iomem	*xor_high_base;
};

/**
 * struct mv_xor_device - internal representation of a XOR device
 * @pdev: Platform device
 * @id: HW XOR Device selector
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
 * @common: embedded struct dma_device
 */
struct mv_xor_device {
	struct platform_device		*pdev;
	int				id;
	dma_addr_t			dma_desc_pool;
	void				*dma_desc_pool_virt;
	struct dma_device		common;
	struct mv_xor_shared_private	*shared;
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @completed_cookie: identifier for the most recently completed operation
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW but still need to be acked
 * @device: parent device
 * @common: common dmaengine channel object members
 * @last_used: place holder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 */
struct mv_xor_chan {
	int			pending;
	dma_cookie_t		completed_cookie;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	unsigned int		idx;
	enum dma_transaction_type	current_type;
	struct list_head	chain;
	struct list_head	completed_slots;
	struct mv_xor_device	*device;
	struct dma_chan		common;
	struct mv_xor_desc_slot	*last_used;
	struct list_head	all_slots;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
#ifdef USE_TIMER
	unsigned long		cleanup_time;
	u32			current_on_last_cleanup;
	dma_cookie_t		is_complete_cookie;
#endif
};

/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @hw_desc: virtual address of the hardware descriptor chain
 * @phys: hardware address of the hardware descriptor chain
 * @group_head: first operation in a transaction
 * @slot_cnt: total slots used in a transaction (group of operations)
 * @slots_per_op: number of slots per operation
 * @idx: pool index
 * @unmap_src_cnt: number of xor sources
 * @unmap_len: transaction bytecount
 * @async_tx: support for the async_tx api
 * @group_list: list of slots that make up a multi-descriptor transaction
 *	for example transfer lengths larger than the supported hw max
 * @xor_check_result: result of zero sum
 * @crc32_result: result of CRC-32 calculation
 */
struct mv_xor_desc_slot {
	struct list_head	slot_node;
	struct list_head	chain_node;
	struct list_head	completed_node;
	enum dma_transaction_type	type;
	void			*hw_desc;
	struct mv_xor_desc_slot	*group_head;
	u16			slot_cnt;
	u16			slots_per_op;
	u16			idx;
	u16			unmap_src_cnt;
	u32			value;
	size_t			unmap_len;
	struct dma_async_tx_descriptor	async_tx;
	union {
		u32		*xor_check_result;
		u32		*crc32_result;
	};
#ifdef USE_TIMER
	unsigned long		arrival_time;
	struct timer_list	timeout;
#endif
};

/* This structure describes the 64-byte hardware XOR descriptor */
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
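
/*
 * Example hardware descriptor for a two-source XOR, as mv_desc_init()
 * and the mv_desc_set_* helpers in mv_xor.c would fill it in
 * (illustration only; the addresses are hypothetical):
 *
 *	status		= 1 << 31;	- set by mv_desc_init()
 *	desc_command	= (1 << 31)	- set by mv_desc_init()
 *			| (1 << 0)	- source 0 enabled
 *			| (1 << 1);	- source 1 enabled
 *	byte_count	= 4096;
 *	phy_dest_addr	= 0x10000000;
 *	phy_src_addr[0]	= 0x10001000;
 *	phy_src_addr[1]	= 0x10002000;
 *	phy_next_desc	= 0;		- end of chain
 */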

#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#define MV_XOR_MIN_BYTE_COUNT	(128)
#define XOR_MAX_BYTE_COUNT	((16 * 1024 * 1024) - 1)
#define MV_XOR_MAX_BYTE_COUNT	XOR_MAX_BYTE_COUNT

#endif