author     Stefan Roese <sr@denx.de>              2016-09-15 01:37:31 -0400
committer  Vinod Koul <vinod.koul@intel.com>      2016-09-15 11:43:59 -0400
commit     77ff7a706f014a56d38f07acf220f381a8fe0fd8 (patch)
tree       060c52eedaba57d63912fc10e89bdefbfcb8ded1
parent     434cec62a6d73b8c8080cd992bc97a564fdd5a5a (diff)
dmaengine: mv_xor: Add support for IO (PCIe) src/dst areas
To access a specific memory area, the MVEBU XOR controller needs to
have this area enabled / mapped via an address window. Right now, only
the DRAM memory area is enabled via such memory windows, so using this
driver to DMA to / from e.g. a PCIe memory region is currently not
supported.
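
For reference, the window setup the driver already performs for DRAM
(see mv_xor_conf_mbus_windows() further down in this diff) boils down
to the following sketch. This is illustrative only and not part of the
patch: the helper name is made up, the register macros come from
drivers/dma/mv_xor.h, and 'cs' is assumed to be one window of the
mbus_dram_target_info describing DRAM (<linux/io.h> and <linux/mbus.h>
provide the rest):

  /* Illustrative sketch: program one XOR address window for a DRAM CS */
  static void xor_program_dram_window(void __iomem *base, int i,
                                      const struct mbus_dram_window *cs,
                                      u8 target_id)
  {
          u32 win_enable = readl(base + WINDOW_BAR_ENABLE(0));

          /* Base register: 64 KiB aligned base address, attribute, target */
          writel((cs->base & 0xffff0000) | (cs->mbus_attr << 8) | target_id,
                 base + WINDOW_BASE(i));
          /* Size register: (size - 1), 64 KiB granularity */
          writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

          /* Enable the window and grant both channels access to it */
          win_enable |= (1 << i);
          win_enable |= 3 << (16 + (2 * i));
          writel(win_enable, base + WINDOW_BAR_ENABLE(0));
          writel(win_enable, base + WINDOW_BAR_ENABLE(1));
  }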
This patch now adds support for such PCIe / IO regions by checking
if the src / dst address is located in an IO memory area in contrast
to being located in DRAM. This is done by using the newly introduced
MBus function mvebu_mbus_get_io_win_info(). If the src / dst address
is located in such an IO area, a new address window is created in
the XOR DMA controller. Enabling the controller to access this area.
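
As a purely hypothetical illustration of what this enables on the
consumer side (none of this is part of the patch; the function name,
the BAR handling and the dma_map_resource() use are assumptions, and
error paths / unmapping are trimmed), a client could now push data into
a PCIe BAR through the engine's memcpy capability roughly like this:

  #include <linux/dmaengine.h>
  #include <linux/dma-mapping.h>

  /* Hypothetical example: DMA a mapped DRAM buffer into a PCIe BAR */
  static int copy_to_pcie_bar(dma_addr_t src_dma, phys_addr_t bar_phys,
                              size_t len)
  {
          struct dma_async_tx_descriptor *tx;
          struct dma_chan *chan;
          dma_addr_t dst_dma;
          dma_cap_mask_t mask;

          dma_cap_zero(mask);
          dma_cap_set(DMA_MEMCPY, mask);
          chan = dma_request_channel(mask, NULL, NULL);
          if (!chan)
                  return -ENODEV;

          /* Hand the PCIe window's bus address to the engine as destination */
          dst_dma = dma_map_resource(chan->device->dev, bar_phys, len,
                                     DMA_TO_DEVICE, 0);
          if (dma_mapping_error(chan->device->dev, dst_dma))
                  return -EIO;

          tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
                                         DMA_PREP_INTERRUPT);
          if (!tx)
                  return -ENOMEM;

          dmaengine_submit(tx);
          dma_async_issue_pending(chan);
          return 0;
  }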
Signed-off-by: Stefan Roese <sr@denx.de>
Cc: Gregory CLEMENT <gregory.clement@free-electrons.com>
Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Marcin Wojtas <mw@semihalf.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrew Lunn <andrew@lunn.ch>
Cc: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/mv_xor.c  95
-rw-r--r--  drivers/dma/mv_xor.h   7
2 files changed, 101 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f4c9f98ec35e..ff4a094cd582 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -470,12 +470,90 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 	return mv_chan->slots_allocated ? : -ENOMEM;
 }
 
+/*
+ * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
+ * a new MBus window if necessary. Use a cache for these checks so that
+ * the MMIO mapped registers don't have to be accessed for this check
+ * to speed up this process.
+ */
+static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
+{
+	struct mv_xor_device *xordev = mv_chan->xordev;
+	void __iomem *base = mv_chan->mmr_high_base;
+	u32 win_enable;
+	u32 size;
+	u8 target, attr;
+	int ret;
+	int i;
+
+	/* Nothing needs to get done for the Armada 3700 */
+	if (xordev->xor_type == XOR_ARMADA_37XX)
+		return 0;
+
+	/*
+	 * Loop over the cached windows to check, if the requested area
+	 * is already mapped. If this is the case, nothing needs to be done
+	 * and we can return.
+	 */
+	for (i = 0; i < WINDOW_COUNT; i++) {
+		if (addr >= xordev->win_start[i] &&
+		    addr <= xordev->win_end[i]) {
+			/* Window is already mapped */
+			return 0;
+		}
+	}
+
+	/*
+	 * The window is not mapped, so we need to create the new mapping
+	 */
+
+	/* If no IO window is found, the addr has to be located in SDRAM */
+	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
+	if (ret < 0)
+		return 0;
+
+	/*
+	 * Mask the base addr 'addr' according to 'size' read back from the
+	 * MBus window. Otherwise we might end up with an address located
+	 * somewhere in the middle of this area here.
+	 */
+	size -= 1;
+	addr &= ~size;
+
+	/*
+	 * Reading either of the two enable registers is enough, as they are
+	 * always programmed to identical values
+	 */
+	win_enable = readl(base + WINDOW_BAR_ENABLE(0));
+
+	/* Set 'i' to the first free window to write the new values to */
+	i = ffs(~win_enable) - 1;
+	if (i >= WINDOW_COUNT)
+		return -ENOMEM;
+
+	writel((addr & 0xffff0000) | (attr << 8) | target,
+	       base + WINDOW_BASE(i));
+	writel(size & 0xffff0000, base + WINDOW_SIZE(i));
+
+	/* Fill the caching variables for later use */
+	xordev->win_start[i] = addr;
+	xordev->win_end[i] = addr + size;
+
+	win_enable |= (1 << i);
+	win_enable |= 3 << (16 + (2 * i));
+	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+
+	return 0;
+}
+
 static struct dma_async_tx_descriptor *
 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *sw_desc;
+	int ret;
 
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
@@ -486,6 +564,11 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 		__func__, src_cnt, len, &dest, flags);
 
+	/* Check if a new window needs to get added for 'dest' */
+	ret = mv_xor_add_io_win(mv_chan, dest);
+	if (ret)
+		return NULL;
+
 	sw_desc = mv_chan_alloc_slot(mv_chan);
 	if (sw_desc) {
 		sw_desc->type = DMA_XOR;
@@ -493,8 +576,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		mv_desc_init(sw_desc, dest, len, flags);
 		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
 			mv_desc_set_mode(sw_desc);
-		while (src_cnt--)
+		while (src_cnt--) {
+			/* Check if a new window needs to get added for 'src' */
+			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
+			if (ret)
+				return NULL;
 			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
+		}
 	}
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
@@ -959,6 +1047,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	mv_chan->op_in_desc = XOR_MODE_IN_DESC;
 
 	dma_dev = &mv_chan->dmadev;
+	mv_chan->xordev = xordev;
 
 	/*
 	 * These source and destination dummy buffers are used to implement
@@ -1086,6 +1175,10 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
 
+		/* Fill the caching variables for later use */
+		xordev->win_start[i] = cs->base;
+		xordev->win_end[i] = cs->base + cs->size - 1;
+
 		win_enable |= (1 << i);
 		win_enable |= 3 << (16 + (2 * i));
 	}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index bf56e082e7cd..88eeab222a23 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -80,12 +80,17 @@
 #define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
 #define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))
 
+#define WINDOW_COUNT	8
+
 struct mv_xor_device {
 	void __iomem *xor_base;
 	void __iomem *xor_high_base;
 	struct clk *clk;
 	struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
 	int xor_type;
+
+	u32 win_start[WINDOW_COUNT];
+	u32 win_end[WINDOW_COUNT];
 };
 
 /**
@@ -127,6 +132,8 @@ struct mv_xor_chan {
 	char dummy_dst[MV_XOR_MIN_BYTE_COUNT];
 	dma_addr_t dummy_src_addr, dummy_dst_addr;
 	u32 saved_config_reg, saved_int_mask_reg;
+
+	struct mv_xor_device *xordev;
 };
 
 /**