author		Stefan Roese <sr@denx.de>		2016-10-26 04:10:25 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-11-25 00:46:36 -0500
commit		c5db858bdfeff00c219c64a95338c1eb5460555c (patch)
tree		541f568b74a65616770dad3c8f04ceba42f6a9b2
parent		e9bb8a9df316a2480d316af7b242f40cba3b69b6 (diff)
dmaengine: mv_xor: Add support for scatter-gather DMA mode
This patch adds memory-to-memory scatter-gather support to the Marvell
mv_xor DMA driver.
Signed-off-by: Stefan Roese <sr@denx.de>
Cc: Gregory CLEMENT <gregory.clement@free-electrons.com>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Marcin Wojtas <mw@semihalf.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
 drivers/dma/mv_xor.c | 183
 drivers/dma/mv_xor.h |   1
 2 files changed, 180 insertions(+), 4 deletions(-)
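For context, a consumer reaches the new device_prep_dma_sg callback through the generic dmaengine client API. The following is a minimal, hypothetical client sketch, not part of this patch: it assumes the dmaengine_prep_dma_sg() wrapper that <linux/dmaengine.h> provided in kernels of this era, the function name example_sg_copy() is invented for illustration, and the polling loop stands in for a real completion callback.

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static int example_sg_copy(struct scatterlist *dst_sg, unsigned int dst_nents,
				   struct scatterlist *src_sg, unsigned int src_nents)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		dma_cookie_t cookie;

		/* Request any channel advertising the DMA_SG capability */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SG, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		/* On a Marvell XOR channel this maps to mv_xor_prep_dma_sg() */
		tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
					   src_sg, src_nents, DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);

		/* Busy-wait for completion; a real client would use a callback */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
		       DMA_IN_PROGRESS)
			cpu_relax();

		dma_release_channel(chan);
		return 0;
	}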
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..b0d09d97535f 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
 	hw_desc->byte_count = byte_count;
 }
 
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+				     dma_addr_t dma_src, dma_addr_t dma_dst,
+				     u32 len, struct mv_xor_desc_slot *prev)
+{
+	struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+	hw_desc->status = XOR_DESC_DMA_OWNED;
+	hw_desc->phy_next_desc = 0;
+	/* Configure for XOR with only one src address -> MEMCPY */
+	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+	hw_desc->phy_dest_addr = dma_dst;
+	hw_desc->phy_src_addr[0] = dma_src;
+	hw_desc->byte_count = len;
+
+	if (prev) {
+		struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+		hw_prev->phy_next_desc = desc->async_tx.phys;
+	}
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+	struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+	/* Enable end-of-descriptor interrupt */
+	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 				 node) {
 
-		if (async_tx_test_ack(&iter->async_tx))
+		if (async_tx_test_ack(&iter->async_tx)) {
 			list_move_tail(&iter->node, &mv_chan->free_slots);
+			if (!list_empty(&iter->sg_tx_list)) {
+				list_splice_tail_init(&iter->sg_tx_list,
+						      &mv_chan->free_slots);
+			}
+		}
 	}
 	return 0;
 }
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!async_tx_test_ack(&desc->async_tx))
+	if (!async_tx_test_ack(&desc->async_tx)) {
 		/* move this slot to the completed_slots */
 		list_move_tail(&desc->node, &mv_chan->completed_slots);
-	else
+		if (!list_empty(&desc->sg_tx_list)) {
+			list_splice_tail_init(&desc->sg_tx_list,
+					      &mv_chan->completed_slots);
+		}
+	} else {
 		list_move_tail(&desc->node, &mv_chan->free_slots);
+		if (!list_empty(&desc->sg_tx_list)) {
+			list_splice_tail_init(&desc->sg_tx_list,
+					      &mv_chan->free_slots);
+		}
+	}
 
 	return 0;
 }
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
 		INIT_LIST_HEAD(&slot->node);
+		INIT_LIST_HEAD(&slot->sg_tx_list);
 		dma_desc = mv_chan->dma_desc_pool;
 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
 		slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+		   unsigned int dst_sg_len, struct scatterlist *src_sg,
+		   unsigned int src_sg_len, unsigned long flags)
+{
+	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+	struct mv_xor_desc_slot *new;
+	struct mv_xor_desc_slot *first = NULL;
+	struct mv_xor_desc_slot *prev = NULL;
+	size_t len, dst_avail, src_avail;
+	dma_addr_t dma_dst, dma_src;
+	int desc_cnt = 0;
+	int ret;
+
+	dev_dbg(mv_chan_to_devp(mv_chan),
+		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+		__func__, dst_sg_len, src_sg_len, flags);
+
+	dst_avail = sg_dma_len(dst_sg);
+	src_avail = sg_dma_len(src_sg);
+
+	/* Run until we are out of scatterlist entries */
+	while (true) {
+		/* Allocate and populate the descriptor */
+		desc_cnt++;
+		new = mv_chan_alloc_slot(mv_chan);
+		if (!new) {
+			dev_err(mv_chan_to_devp(mv_chan),
+				"Out of descriptors (desc_cnt=%d)!\n",
+				desc_cnt);
+			goto err;
+		}
+
+		len = min_t(size_t, src_avail, dst_avail);
+		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+		if (len == 0)
+			goto fetch;
+
+		if (len < MV_XOR_MIN_BYTE_COUNT) {
+			dev_err(mv_chan_to_devp(mv_chan),
+				"Transfer size of %zu too small!\n", len);
+			goto err;
+		}
+
+		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+			dst_avail;
+		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+			src_avail;
+
+		/* Check if a new window needs to get added for 'dst' */
+		ret = mv_xor_add_io_win(mv_chan, dma_dst);
+		if (ret)
+			goto err;
+
+		/* Check if a new window needs to get added for 'src' */
+		ret = mv_xor_add_io_win(mv_chan, dma_src);
+		if (ret)
+			goto err;
+
+		/* Populate the descriptor */
+		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+		prev = new;
+		dst_avail -= len;
+		src_avail -= len;
+
+		if (!first)
+			first = new;
+		else
+			list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+		/* Fetch the next dst scatterlist entry */
+		if (dst_avail == 0) {
+			if (dst_sg_len == 0)
+				break;
+
+			/* Fetch the next entry: if there are no more: done */
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				break;
+
+			dst_sg_len--;
+			dst_avail = sg_dma_len(dst_sg);
+		}
+
+		/* Fetch the next src scatterlist entry */
+		if (src_avail == 0) {
+			if (src_sg_len == 0)
+				break;
+
+			/* Fetch the next entry: if there are no more: done */
+			src_sg = sg_next(src_sg);
+			if (src_sg == NULL)
+				break;
+
+			src_sg_len--;
+			src_avail = sg_dma_len(src_sg);
+		}
+	}
+
+	/* Set the EOD flag in the last descriptor */
+	mv_xor_desc_config_eod(new);
+	first->async_tx.flags = flags;
+
+	return &first->async_tx;
+
+err:
+	/* Cleanup: Move all descriptors back into the free list */
+	spin_lock_bh(&mv_chan->lock);
+	mv_desc_clean_slot(first, mv_chan);
+	spin_unlock_bh(&mv_chan->lock);
+
+	return NULL;
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 
 	dma_cap_zero(cap_mask);
 	dma_cap_set(DMA_MEMCPY, cap_mask);
+	dma_cap_set(DMA_SG, cap_mask);
 	dma_cap_set(DMA_XOR, cap_mask);
 	dma_cap_set(DMA_INTERRUPT, cap_mask);
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab222a23..cf921dd6af73 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
  */
 struct mv_xor_desc_slot {
 	struct list_head	node;
+	struct list_head	sg_tx_list;
 	enum dma_transaction_type	type;
 	void			*hw_desc;
 	u16			idx;
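
The core of mv_xor_prep_dma_sg() above is the chunking walk over the two scatterlists: each hardware descriptor covers min(remaining dst, remaining src, hardware max) bytes, and whichever list entry is exhausted is advanced. The standalone sketch below is only an illustration, not kernel code: the segment lengths are invented, MAX_BYTE_COUNT is a stand-in for MV_XOR_MAX_BYTE_COUNT, and the driver's zero-length and minimum-size checks are omitted. It can be compiled in user space to inspect how a transfer gets split.

	#include <stddef.h>
	#include <stdio.h>

	#define MAX_BYTE_COUNT (16 * 1024 * 1024) /* stand-in for MV_XOR_MAX_BYTE_COUNT */

	struct seg { size_t len; };

	static size_t min3(size_t a, size_t b, size_t c)
	{
		size_t m = a < b ? a : b;
		return m < c ? m : c;
	}

	int main(void)
	{
		/* Invented example segments: 4k+8k dst vs. one 12k src */
		struct seg dst[] = { { 4096 }, { 8192 } };
		struct seg src[] = { { 12288 } };
		size_t di = 0, si = 0;
		size_t dst_avail = dst[0].len, src_avail = src[0].len;

		while (1) {
			/* One hardware descriptor per chunk */
			size_t len = min3(src_avail, dst_avail, MAX_BYTE_COUNT);

			printf("descriptor: %zu bytes\n", len);
			dst_avail -= len;
			src_avail -= len;

			/* Advance to the next dst entry once this one is drained */
			if (dst_avail == 0) {
				if (++di == sizeof(dst) / sizeof(dst[0]))
					break;
				dst_avail = dst[di].len;
			}

			/* Likewise for src */
			if (src_avail == 0) {
				if (++si == sizeof(src) / sizeof(src[0]))
					break;
				src_avail = src[si].len;
			}
		}
		return 0;
	}

With these inputs the walk emits two descriptors, 4096 and 8192 bytes, mirroring how the driver chains one mv_xor_config_sg_ll_desc() slot per chunk onto the first slot's sg_tx_list.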