 drivers/dma/mv_xor_v2.c | 147 ++++++++++++++++++++++++++++++++---------------
 1 file changed, 91 insertions(+), 56 deletions(-)
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index a28a01fcba67..f652a0e0f5a2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -42,6 +42,7 @@
 #define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
 #define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
 #define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
+#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
 #define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
   /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
 #define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
@@ -55,6 +56,9 @@
 #define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
 #define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
 #define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
+#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
+#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
+#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
 
 /* XOR Global registers */
 #define MV_XOR_V2_GLOB_BW_CTRL 0x4
@@ -90,6 +94,13 @@
  */
 #define MV_XOR_V2_DESC_NUM 1024
 
+/*
+ * Threshold values for descriptors and timeout, determined by
+ * experimentation as giving a good level of performance.
+ */
+#define MV_XOR_V2_DONE_IMSG_THRD 0x14
+#define MV_XOR_V2_TIMER_THRD 0xB0
+
 /**
  * struct mv_xor_v2_descriptor - DMA HW descriptor
  * @desc_id: used by S/W and is not affected by H/W.
@@ -161,6 +172,7 @@ struct mv_xor_v2_device {
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
+       unsigned int hw_queue_idx;
 };
 
 /**
@@ -214,18 +226,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
 }
 
 /*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-       /* read the index for the next available descriptor in the DESQ */
-       u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-       return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-               & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
-/*
  * notify the engine of new descriptors, and update the available index.
  */
 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
@@ -261,16 +261,23 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
  * Set the IMSG threshold
  */
 static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
 {
        u32 reg;
 
+       /* Configure threshold of number of descriptors, and enable timer */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
        reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-       reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
+       reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+       reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+       /* Configure Timer Threshold */
+       reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
+       reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
+               MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+       reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+       writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
 }
 
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
@@ -288,12 +295,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
        if (!ndescs)
                return IRQ_NONE;
 
-       /*
-        * Update IMSG threshold, to disable new IMSG interrupts until
-        * end of the tasklet
-        */
-       mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +307,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       int desq_ptr;
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +322,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);
 
-       /* get the next available slot in the DESQ */
-       desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
        /* copy the HW descriptor from the SW descriptor to the DESQ */
-       dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+       dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
        xor_dev->npendings++;
+       xor_dev->hw_queue_idx++;
+       if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+               xor_dev->hw_queue_idx = 0;
 
        spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +344,7 @@ static struct mv_xor_v2_sw_desc *
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
        struct mv_xor_v2_sw_desc *sw_desc;
+       bool found = false;
 
        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);
@@ -355,19 +356,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
                return NULL;
        }
 
-       /* get a free SW descriptor from the SW DESQ */
-       sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-                                  struct mv_xor_v2_sw_desc, free_list);
+       list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+               if (async_tx_test_ack(&sw_desc->async_tx)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               spin_unlock_bh(&xor_dev->lock);
+               return NULL;
+       }
+
        list_del(&sw_desc->free_list);
 
        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);
 
-       /* set the async tx descriptor */
-       dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-       sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-       async_tx_ack(&sw_desc->async_tx);
-
        return sw_desc;
 }
 
@@ -389,6 +394,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                __func__, len, &src, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -443,6 +450,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                __func__, src_cnt, len, &dest, flags);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        sw_desc->async_tx.flags = flags;
 
@@ -491,6 +500,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
                container_of(chan, struct mv_xor_v2_device, dmachan);
 
        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+       if (!sw_desc)
+               return NULL;
 
        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;
@@ -524,9 +535,6 @@ static void mv_xor_v2_issue_pending(struct dma_chan *chan)
        mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
        xor_dev->npendings = 0;
 
-       /* Activate the channel */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        spin_unlock_bh(&xor_dev->lock);
 }
 
@@ -554,7 +562,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
 {
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
-       struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +569,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-       /* next HW descriptor */
-       next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
-
-               if (pending_ptr > MV_XOR_V2_DESC_NUM)
-                       pending_ptr = 0;
-
-               if (next_pending_sw_desc != NULL)
-                       next_pending_hw_desc++;
+               struct mv_xor_v2_descriptor *next_pending_hw_desc =
+                       xor_dev->hw_desq_virt + pending_ptr;
 
                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
@@ -608,15 +608,14 @@
 
                /* increment the next descriptor */
                pending_ptr++;
+               if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+                       pending_ptr = 0;
        }
 
        if (num_of_pending != 0) {
                /* free the descriptores */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
-
-       /* Update IMSG threshold, to enable new IMSG interrupts */
-       mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +647,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-       /* enable the DMA engine */
-       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +690,30 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+       /* enable the DMA engine */
+       writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+       return 0;
+}
+
+static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
+{
+       struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+       /* Set this bit to disable to stop the XOR unit. */
+       writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+       return 0;
+}
+
+static int mv_xor_v2_resume(struct platform_device *dev)
+{
+       struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+       mv_xor_v2_set_desc_size(xor_dev);
+       mv_xor_v2_enable_imsg_thrd(xor_dev);
+       mv_xor_v2_descq_init(xor_dev);
+
        return 0;
 }
 
@@ -725,6 +745,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, xor_dev);
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+       if (ret)
+               return ret;
+
        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -785,8 +809,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 
        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-               xor_dev->sw_desq[i].idx = i;
-               list_add(&xor_dev->sw_desq[i].free_list,
+               struct mv_xor_v2_sw_desc *sw_desc =
+                       xor_dev->sw_desq + i;
+               sw_desc->idx = i;
+               dma_async_tx_descriptor_init(&sw_desc->async_tx,
+                                            &xor_dev->dmachan);
+               sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+               async_tx_ack(&sw_desc->async_tx);
+
+               list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }
 
@@ -816,6 +847,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
        list_add_tail(&xor_dev->dmachan.device_node,
                      &dma_dev->channels);
 
+       mv_xor_v2_enable_imsg_thrd(xor_dev);
+
        mv_xor_v2_descq_init(xor_dev);
 
        ret = dma_async_device_register(dma_dev);
@@ -865,6 +898,8 @@ MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
 
 static struct platform_driver mv_xor_v2_driver = {
        .probe          = mv_xor_v2_probe,
+       .suspend        = mv_xor_v2_suspend,
+       .resume         = mv_xor_v2_resume,
        .remove         = mv_xor_v2_remove,
        .driver         = {
                .name   = "mv_xor_v2",