author	Jason Cooper <jason@lakedaemon.net>	2012-11-21 14:55:20 -0500
committer	Jason Cooper <jason@lakedaemon.net>	2012-11-21 14:55:20 -0500
commit	c9dc03ddbf4e71f20b78277d661388c1bc466258 (patch)
tree	91cf0a398dce3c8cb484a2d8e7dc185e907e4349
parent	61528f4e921f23e2a095aa11f27006b347e4ee70 (diff)
parent	34c93c8657935d30649e777c4aa05f74f16aa418 (diff)
Merge tag 'marvell-xor-cleanup-dt-binding-3.8' of git://github.com/MISL-EBU-System-SW/mainline-public into mvebu/everything
Marvell XOR driver cleanup and DT binding for 3.8
 Documentation/devicetree/bindings/dma/mv-xor.txt |  40
 arch/arm/mach-dove/common.c                      |   9
 arch/arm/mach-kirkwood/board-dt.c                |   5
 arch/arm/mach-kirkwood/common.c                  |   4
 arch/arm/plat-orion/common.c                     | 192
 drivers/dma/mv_xor.c                             | 417
 drivers/dma/mv_xor.h                             |  35
 include/linux/platform_data/dma-mv_xor.h         |  11
 8 files changed, 366 insertions(+), 347 deletions(-)
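The core of the series: the old split between one MV_XOR_SHARED_NAME device
(registers, clock) and one MV_XOR_NAME platform device per channel collapses
into a single "mv_xor" device whose platform data lists the channels. For
orientation, here is a minimal, hypothetical sketch of how a non-DT board
file would register an engine under the new scheme. The register addresses
(0xf1060900/0xf1060b00) and IRQs (51/52) are invented for illustration; on
Orion platforms this wiring actually comes from orion_xor0_init() and
orion_xor1_init() in the plat-orion changes below.

	/*
	 * Illustrative sketch only -- not part of this series.
	 * All example_* names and the addresses/IRQs are placeholders.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/dma-mv_xor.h>

	static u64 example_xor_dmamask = DMA_BIT_MASK(32);

	/* One capability mask per channel; filled in at init time. */
	static struct mv_xor_channel_data example_xor_channels[2];

	static struct mv_xor_platform_data example_xor_pdata = {
		.channels	= example_xor_channels,
	};

	/* Low registers, high registers, then one IRQ per channel. */
	static struct resource example_xor_resources[] = {
		{ .start = 0xf1060900, .end = 0xf10609ff, .flags = IORESOURCE_MEM },
		{ .start = 0xf1060b00, .end = 0xf1060bff, .flags = IORESOURCE_MEM },
		{ .start = 51, .end = 51, .flags = IORESOURCE_IRQ },
		{ .start = 52, .end = 52, .flags = IORESOURCE_IRQ },
	};

	static struct platform_device example_xor_device = {
		.name		= MV_XOR_NAME,	/* "mv_xor" */
		.id		= 0,
		.num_resources	= ARRAY_SIZE(example_xor_resources),
		.resource	= example_xor_resources,
		.dev		= {
			.dma_mask		= &example_xor_dmamask,
			.coherent_dma_mask	= DMA_BIT_MASK(64),
			.platform_data		= &example_xor_pdata,
		},
	};

	static void __init example_xor_init(void)
	{
		/* Mirror plat-orion: only one channel gets DMA_MEMSET. */
		dma_cap_set(DMA_MEMCPY, example_xor_channels[0].cap_mask);
		dma_cap_set(DMA_XOR, example_xor_channels[0].cap_mask);

		dma_cap_set(DMA_MEMCPY, example_xor_channels[1].cap_mask);
		dma_cap_set(DMA_MEMSET, example_xor_channels[1].cap_mask);
		dma_cap_set(DMA_XOR, example_xor_channels[1].cap_mask);

		platform_device_register(&example_xor_device);
	}

The driver's new mv_xor_probe() walks either the DT sub-nodes or this
channels array and calls mv_xor_channel_add() once per channel.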
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
new file mode 100644
index 000000000000..7c6cb7fcecd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -0,0 +1,40 @@
+* Marvell XOR engines
+
+Required properties:
+- compatible: Should be "marvell,orion-xor"
+- reg: Should contain registers location and length (two sets)
+    the first set is the low registers, the second set the high
+    registers for the XOR engine.
+- clocks: pointer to the reference clock
+
+The DT node must also contain sub-nodes for each XOR channel that the
+XOR engine has. Those sub-nodes have the following required
+properties:
+- interrupts: interrupt of the XOR channel
+
+And the following optional properties:
+- dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
+- dmacap,memset to indicate that the XOR channel is capable of memset operations
+- dmacap,xor to indicate that the XOR channel is capable of xor operations
+
+Example:
+
+xor@d0060900 {
+	compatible = "marvell,orion-xor";
+	reg = <0xd0060900 0x100
+	       0xd0060b00 0x100>;
+	clocks = <&coreclk 0>;
+	status = "okay";
+
+	xor00 {
+		interrupts = <51>;
+		dmacap,memcpy;
+		dmacap,xor;
+	};
+	xor01 {
+		interrupts = <52>;
+		dmacap,memcpy;
+		dmacap,xor;
+		dmacap,memset;
+	};
+};
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 6a2c4dc413a8..f4ac5b06014b 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <plat/time.h>
 #include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/platform_data/dma-mv_xor.h>
 #include <plat/irq.h>
 #include <plat/common.h>
 #include <plat/addr-map.h>
@@ -124,8 +125,8 @@ static void __init dove_clk_init(void)
 	orion_clkdev_add(NULL, "mv_crypto", crypto);
 	orion_clkdev_add(NULL, "dove-ac97", ac97);
 	orion_clkdev_add(NULL, "dove-pdma", pdma);
-	orion_clkdev_add(NULL, "mv_xor_shared.0", xor0);
-	orion_clkdev_add(NULL, "mv_xor_shared.1", xor1);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
 }
 
 /*****************************************************************************
@@ -410,11 +411,11 @@ static void __init dove_legacy_clk_init(void)
 			    of_clk_get_from_provider(&clkspec));
 
 	clkspec.args[0] = CLOCK_GATING_BIT_XOR0;
-	orion_clkdev_add(NULL, "mv_xor_shared.0",
+	orion_clkdev_add(NULL, MV_XOR_NAME ".0",
 			 of_clk_get_from_provider(&clkspec));
 
 	clkspec.args[0] = CLOCK_GATING_BIT_XOR1;
-	orion_clkdev_add(NULL, "mv_xor_shared.1",
+	orion_clkdev_add(NULL, MV_XOR_NAME ".1",
 			 of_clk_get_from_provider(&clkspec));
 }
 
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 8bdfaa4db091..294ad5a4fd98 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -21,6 +21,7 @@
 #include <asm/mach/map.h>
 #include <mach/bridge-regs.h>
 #include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/platform_data/dma-mv_xor.h>
 #include <plat/irq.h>
 #include <plat/common.h>
 #include "common.h"
@@ -60,11 +61,11 @@ static void __init kirkwood_legacy_clk_init(void)
 			    of_clk_get_from_provider(&clkspec));
 
 	clkspec.args[0] = CGC_BIT_XOR0;
-	orion_clkdev_add(NULL, "mv_xor_shared.0",
+	orion_clkdev_add(NULL, MV_XOR_NAME ".0",
 			 of_clk_get_from_provider(&clkspec));
 
 	clkspec.args[0] = CGC_BIT_XOR1;
-	orion_clkdev_add(NULL, "mv_xor_shared.1",
+	orion_clkdev_add(NULL, MV_XOR_NAME ".1",
 			 of_clk_get_from_provider(&clkspec));
 
 	clkspec.args[0] = CGC_BIT_PEX1;
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 2c6c218fb79e..401dac1a8d80 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -260,8 +260,8 @@ void __init kirkwood_clk_init(void)
 	orion_clkdev_add(NULL, "orion_nand", runit);
 	orion_clkdev_add(NULL, "mvsdio", sdio);
 	orion_clkdev_add(NULL, "mv_crypto", crypto);
-	orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
-	orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
 	orion_clkdev_add("0", "pcie", pex0);
 	orion_clkdev_add("1", "pcie", pex1);
 	orion_clkdev_add(NULL, "kirkwood-i2s", audio);
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index b8a688cad4c2..2d4b6414609f 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -606,26 +606,6 @@ void __init orion_wdt_init(void)
  ****************************************************************************/
 static u64 orion_xor_dmamask = DMA_BIT_MASK(32);
 
-void __init orion_xor_init_channels(
-		struct mv_xor_platform_data *orion_xor0_data,
-		struct platform_device *orion_xor0_channel,
-		struct mv_xor_platform_data *orion_xor1_data,
-		struct platform_device *orion_xor1_channel)
-{
-	/*
-	 * two engines can't do memset simultaneously, this limitation
-	 * satisfied by removing memset support from one of the engines.
-	 */
-	dma_cap_set(DMA_MEMCPY, orion_xor0_data->cap_mask);
-	dma_cap_set(DMA_XOR, orion_xor0_data->cap_mask);
-	platform_device_register(orion_xor0_channel);
-
-	dma_cap_set(DMA_MEMCPY, orion_xor1_data->cap_mask);
-	dma_cap_set(DMA_MEMSET, orion_xor1_data->cap_mask);
-	dma_cap_set(DMA_XOR, orion_xor1_data->cap_mask);
-	platform_device_register(orion_xor1_channel);
-}
-
 /*****************************************************************************
  * XOR0
  ****************************************************************************/
@@ -636,61 +616,30 @@ static struct resource orion_xor0_shared_resources[] = {
 	}, {
 		.name	= "xor 0 high",
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.name	= "irq channel 0",
+		.flags	= IORESOURCE_IRQ,
+	}, {
+		.name	= "irq channel 1",
+		.flags	= IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device orion_xor0_shared = {
-	.name		= MV_XOR_SHARED_NAME,
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(orion_xor0_shared_resources),
-	.resource	= orion_xor0_shared_resources,
-};
+static struct mv_xor_channel_data orion_xor0_channels_data[2];
 
-static struct resource orion_xor00_resources[] = {
-	[0] = {
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct mv_xor_platform_data orion_xor00_data = {
-	.shared		= &orion_xor0_shared,
-	.hw_id		= 0,
-	.pool_size	= PAGE_SIZE,
+static struct mv_xor_platform_data orion_xor0_pdata = {
+	.channels	= orion_xor0_channels_data,
 };
 
-static struct platform_device orion_xor00_channel = {
+static struct platform_device orion_xor0_shared = {
 	.name		= MV_XOR_NAME,
 	.id		= 0,
-	.num_resources	= ARRAY_SIZE(orion_xor00_resources),
-	.resource	= orion_xor00_resources,
+	.num_resources	= ARRAY_SIZE(orion_xor0_shared_resources),
+	.resource	= orion_xor0_shared_resources,
 	.dev		= {
 		.dma_mask		= &orion_xor_dmamask,
 		.coherent_dma_mask	= DMA_BIT_MASK(64),
-		.platform_data		= &orion_xor00_data,
-	},
-};
-
-static struct resource orion_xor01_resources[] = {
-	[0] = {
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct mv_xor_platform_data orion_xor01_data = {
-	.shared		= &orion_xor0_shared,
-	.hw_id		= 1,
-	.pool_size	= PAGE_SIZE,
-};
-
-static struct platform_device orion_xor01_channel = {
-	.name		= MV_XOR_NAME,
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(orion_xor01_resources),
-	.resource	= orion_xor01_resources,
-	.dev		= {
-		.dma_mask		= &orion_xor_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(64),
-		.platform_data		= &orion_xor01_data,
+		.platform_data		= &orion_xor0_pdata,
 	},
 };
 
@@ -704,15 +653,23 @@ void __init orion_xor0_init(unsigned long mapbase_low,
 	orion_xor0_shared_resources[1].start = mapbase_high;
 	orion_xor0_shared_resources[1].end = mapbase_high + 0xff;
 
-	orion_xor00_resources[0].start = irq_0;
-	orion_xor00_resources[0].end = irq_0;
-	orion_xor01_resources[0].start = irq_1;
-	orion_xor01_resources[0].end = irq_1;
+	orion_xor0_shared_resources[2].start = irq_0;
+	orion_xor0_shared_resources[2].end = irq_0;
+	orion_xor0_shared_resources[3].start = irq_1;
+	orion_xor0_shared_resources[3].end = irq_1;
 
-	platform_device_register(&orion_xor0_shared);
+	/*
+	 * two engines can't do memset simultaneously, this limitation
+	 * satisfied by removing memset support from one of the engines.
+	 */
+	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
+	dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
 
-	orion_xor_init_channels(&orion_xor00_data, &orion_xor00_channel,
-				&orion_xor01_data, &orion_xor01_channel);
+	dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
+	dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
+	dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
+
+	platform_device_register(&orion_xor0_shared);
 }
 
 /*****************************************************************************
@@ -725,61 +682,30 @@ static struct resource orion_xor1_shared_resources[] = {
 	}, {
 		.name	= "xor 1 high",
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.name	= "irq channel 0",
+		.flags	= IORESOURCE_IRQ,
+	}, {
+		.name	= "irq channel 1",
+		.flags	= IORESOURCE_IRQ,
 	},
 };
 
-static struct platform_device orion_xor1_shared = {
-	.name		= MV_XOR_SHARED_NAME,
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(orion_xor1_shared_resources),
-	.resource	= orion_xor1_shared_resources,
-};
-
-static struct resource orion_xor10_resources[] = {
-	[0] = {
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct mv_xor_platform_data orion_xor10_data = {
-	.shared		= &orion_xor1_shared,
-	.hw_id		= 0,
-	.pool_size	= PAGE_SIZE,
-};
-
-static struct platform_device orion_xor10_channel = {
-	.name		= MV_XOR_NAME,
-	.id		= 2,
-	.num_resources	= ARRAY_SIZE(orion_xor10_resources),
-	.resource	= orion_xor10_resources,
-	.dev		= {
-		.dma_mask		= &orion_xor_dmamask,
-		.coherent_dma_mask	= DMA_BIT_MASK(64),
-		.platform_data		= &orion_xor10_data,
-	},
-};
-
-static struct resource orion_xor11_resources[] = {
-	[0] = {
-		.flags	= IORESOURCE_IRQ,
-	},
-};
+static struct mv_xor_channel_data orion_xor1_channels_data[2];
 
-static struct mv_xor_platform_data orion_xor11_data = {
-	.shared		= &orion_xor1_shared,
-	.hw_id		= 1,
-	.pool_size	= PAGE_SIZE,
+static struct mv_xor_platform_data orion_xor1_pdata = {
+	.channels	= orion_xor1_channels_data,
 };
 
-static struct platform_device orion_xor11_channel = {
+static struct platform_device orion_xor1_shared = {
 	.name		= MV_XOR_NAME,
-	.id		= 3,
-	.num_resources	= ARRAY_SIZE(orion_xor11_resources),
-	.resource	= orion_xor11_resources,
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(orion_xor1_shared_resources),
+	.resource	= orion_xor1_shared_resources,
 	.dev		= {
 		.dma_mask		= &orion_xor_dmamask,
 		.coherent_dma_mask	= DMA_BIT_MASK(64),
-		.platform_data		= &orion_xor11_data,
+		.platform_data		= &orion_xor1_pdata,
 	},
 };
 
@@ -793,15 +719,23 @@ void __init orion_xor1_init(unsigned long mapbase_low,
 	orion_xor1_shared_resources[1].start = mapbase_high;
 	orion_xor1_shared_resources[1].end = mapbase_high + 0xff;
 
-	orion_xor10_resources[0].start = irq_0;
-	orion_xor10_resources[0].end = irq_0;
-	orion_xor11_resources[0].start = irq_1;
-	orion_xor11_resources[0].end = irq_1;
+	orion_xor1_shared_resources[2].start = irq_0;
+	orion_xor1_shared_resources[2].end = irq_0;
+	orion_xor1_shared_resources[3].start = irq_1;
+	orion_xor1_shared_resources[3].end = irq_1;
 
-	platform_device_register(&orion_xor1_shared);
+	/*
+	 * two engines can't do memset simultaneously, this limitation
+	 * satisfied by removing memset support from one of the engines.
+	 */
+	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
+	dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
 
-	orion_xor_init_channels(&orion_xor10_data, &orion_xor10_channel,
-				&orion_xor11_data, &orion_xor11_channel);
+	dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
+	dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
+	dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
+
+	platform_device_register(&orion_xor1_shared);
 }
 
 /*****************************************************************************
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e362e2b80efb..f450fe8cbd61 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
 #include <linux/platform_device.h>
 #include <linux/memory.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
 #include <linux/platform_data/dma-mv_xor.h>
 
 #include "dmaengine.h"
@@ -34,14 +37,14 @@
 static void mv_xor_issue_pending(struct dma_chan *chan);
 
 #define to_mv_xor_chan(chan)		\
-	container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev)		\
-	container_of(dev, struct mv_xor_device, common)
+	container_of(chan, struct mv_xor_chan, dmachan)
 
 #define to_mv_xor_slot(tx)		\
 	container_of(tx, struct mv_xor_desc_slot, async_tx)
 
+#define mv_chan_to_devp(chan)		\
+	((chan)->dmadev.dev)
+
 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
 	u32 val = ~(1 << (chan->idx * 16));
-	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 	__raw_writel(val, XOR_INTR_CAUSE(chan));
 }
 
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 		op_mode = XOR_OPERATION_MODE_MEMSET;
 		break;
 	default:
-		dev_printk(KERN_ERR, chan->device->common.dev,
+		dev_err(mv_chan_to_devp(chan),
 			   "error: unsupported operation %d.\n",
 			   type);
 		BUG();
 		return;
 	}
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
 {
 	u32 activation;
 
-	dev_dbg(chan->device->common.dev, " activate chan.\n");
+	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 	activation = __raw_readl(XOR_ACTIVATION(chan));
 	activation |= 0x1;
 	__raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 			      struct mv_xor_desc_slot *slot)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
 		__func__, __LINE__, slot);
 
 	slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 				   struct mv_xor_desc_slot *sw_desc)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
 		__func__, __LINE__, sw_desc);
 	if (sw_desc->type != mv_chan->current_type)
 		mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
 	}
 	mv_chan->pending += sw_desc->slot_cnt;
-	mv_xor_issue_pending(&mv_chan->common);
+	mv_xor_issue_pending(&mv_chan->dmachan);
 }
 
 static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 	 */
 	if (desc->group_head && desc->unmap_len) {
 		struct mv_xor_desc_slot *unmap = desc->group_head;
-		struct device *dev =
-			&mv_chan->device->pdev->dev;
+		struct device *dev = mv_chan_to_devp(mv_chan);
 		u32 len = unmap->unmap_len;
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
 {
 	struct mv_xor_desc_slot *iter, *_iter;
 
-	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 				 completed_node) {
 
@@ -369,7 +371,7 @@ static int
 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 		  struct mv_xor_chan *mv_chan)
 {
-	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 		__func__, __LINE__, desc, desc->async_tx.flags);
 	list_del(&desc->chain_node);
 	/* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
 	int seen_current = 0;
 
-	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
-	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
 	mv_xor_clean_completed_slots(mv_chan);
 
 	/* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	}
 
 	if (cookie > 0)
-		mv_chan->common.completed_cookie = cookie;
+		mv_chan->dmachan.completed_cookie = cookie;
 }
 
 static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	dma_cookie_t cookie;
 	int new_hw_chain = 1;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p: async_tx %p\n",
 		__func__, sw_desc, &sw_desc->async_tx);
 
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (!mv_can_chain(grp_start))
 		goto submit_done;
 
-	dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
 		old_chain_tail->async_tx.phys);
 
 	/* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 	int idx;
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	struct mv_xor_desc_slot *slot = NULL;
-	struct mv_xor_platform_data *plat_data =
-		mv_chan->device->pdev->dev.platform_data;
-	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
 
 	/* Allocate descriptor slots */
 	idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
617 " %d descriptor slots", idx); 617 " %d descriptor slots", idx);
618 break; 618 break;
619 } 619 }
620 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; 620 hw_desc = (char *) mv_chan->dma_desc_pool_virt;
621 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 621 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
622 622
623 dma_async_tx_descriptor_init(&slot->async_tx, chan); 623 dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
 		INIT_LIST_HEAD(&slot->tx_list);
-		hw_desc = (char *) mv_chan->device->dma_desc_pool;
+		hw_desc = (char *) mv_chan->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
 		slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 					struct mv_xor_desc_slot,
 					slot_node);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"allocated %d descriptor slots last_used: %p\n",
 		mv_chan->slots_allocated, mv_chan->last_used);
 
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	struct mv_xor_desc_slot *sw_desc, *grp_start;
 	int slot_cnt;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s dest: %x src %x len: %u flags: %ld\n",
 		__func__, dest, src, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	}
 	spin_unlock_bh(&mv_chan->lock);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p\n",
 		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
 
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 	struct mv_xor_desc_slot *sw_desc, *grp_start;
 	int slot_cnt;
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s dest: %x len: %u flags: %ld\n",
 		__func__, dest, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
 		sw_desc->unmap_len = len;
 	}
 	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p \n",
 		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
 		__func__, src_cnt, len, dest, flags);
 
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
 	}
 	spin_unlock_bh(&mv_chan->lock);
-	dev_dbg(mv_chan->device->common.dev,
+	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p \n",
 		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 	}
 	mv_chan->last_used = NULL;
 
-	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 		__func__, mv_chan->slots_allocated);
 	spin_unlock_bh(&mv_chan->lock);
 
 	if (in_use_descs)
-		dev_err(mv_chan->device->common.dev,
+		dev_err(mv_chan_to_devp(mv_chan),
 			"freeing %d in use descriptors!\n", in_use_descs);
 }
 
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 	u32 val;
 
 	val = __raw_readl(XOR_CONFIG(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "config 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ACTIVATION(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "activation 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_INTR_CAUSE(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "intr cause 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_INTR_MASK(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "intr mask 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ERROR_CAUSE(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "error cause 0x%08x.\n", val);
 
 	val = __raw_readl(XOR_ERROR_ADDR(chan));
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "error addr 0x%08x.\n", val);
 }
 
 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
 					 u32 intr_cause)
 {
 	if (intr_cause & (1 << 4)) {
-		dev_dbg(chan->device->common.dev,
+		dev_dbg(mv_chan_to_devp(chan),
 			"ignore this error\n");
 		return;
 	}
 
-	dev_printk(KERN_ERR, chan->device->common.dev,
+	dev_err(mv_chan_to_devp(chan),
 		   "error on chan %d. intr cause 0x%08x.\n",
 		   chan->idx, intr_cause);
 
 	mv_dump_xor_regs(chan);
 	BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
 	struct mv_xor_chan *chan = data;
 	u32 intr_cause = mv_chan_get_intr_cause(chan);
 
-	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
 
 	if (mv_is_err_intr(intr_cause))
 		mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
  */
 #define MV_XOR_TEST_SIZE 2000
 
-static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int __devinit mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
 	int i;
 	void *src, *dest;
@@ -910,7 +910,6 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -926,10 +925,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
+	dma_chan = &mv_chan->dmachan;
 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
@@ -950,18 +946,17 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			   "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
 	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			   "Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
@@ -976,7 +971,7 @@ out:
 
 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 static int __devinit
-mv_xor_xor_self_test(struct mv_xor_device *device)
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
 	int i, src_idx;
 	struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
-	struct mv_xor_chan *mv_chan;
 
 	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
 	memset(page_address(dest), 0, PAGE_SIZE);
 
-	dma_chan = container_of(device->common.channels.next,
-				struct dma_chan,
-				device_node);
+	dma_chan = &mv_chan->dmachan;
 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
 		err = -ENODEV;
 		goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 
 	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
-		dev_printk(KERN_ERR, dma_chan->device->dev,
+		dev_err(dma_chan->device->dev,
 			   "Self-test xor timed out, disabling\n");
 		err = -ENODEV;
 		goto free_resources;
 	}
 
-	mv_chan = to_mv_xor_chan(dma_chan);
-	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
 				PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i] != cmp_word) {
-			dev_printk(KERN_ERR, dma_chan->device->dev,
+			dev_err(dma_chan->device->dev,
 				   "Self-test xor failed compare, disabling."
 				   " index %d, data %x, expected %x\n", i,
 				   ptr[i], cmp_word);
 			err = -ENODEV;
 			goto free_resources;
 		}
@@ -1079,62 +1070,66 @@ out:
 	return err;
 }
 
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	       unsigned long arg)
+{
+	return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
 {
-	struct mv_xor_device *device = platform_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
-	struct mv_xor_chan *mv_chan;
-	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+	struct device *dev = mv_chan->dmadev.dev;
 
-	dma_async_device_unregister(&device->common);
+	dma_async_device_unregister(&mv_chan->dmadev);
 
-	dma_free_coherent(&dev->dev, plat_data->pool_size,
-			device->dma_desc_pool_virt, device->dma_desc_pool);
+	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
 
-	list_for_each_entry_safe(chan, _chan, &device->common.channels,
+	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
 				 device_node) {
-		mv_chan = to_mv_xor_chan(chan);
 		list_del(&chan->device_node);
 	}
 
+	free_irq(mv_chan->irq, mv_chan);
+
 	return 0;
 }
 
-static int __devinit mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+		   struct platform_device *pdev,
+		   int idx, dma_cap_mask_t cap_mask, int irq)
 {
 	int ret = 0;
-	int irq;
-	struct mv_xor_device *adev;
 	struct mv_xor_chan *mv_chan;
 	struct dma_device *dma_dev;
-	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
 
+	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+	if (!mv_chan) {
+		ret = -ENOMEM;
+		goto err_free_dma;
+	}
 
-	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return -ENOMEM;
+	mv_chan->idx = idx;
+	mv_chan->irq = irq;
 
-	dma_dev = &adev->common;
+	dma_dev = &mv_chan->dmadev;
 
 	/* allocate coherent memory for hardware descriptors
 	 * note: writecombine gives slightly better performance, but
 	 * requires that we explicitly flush the writes
 	 */
-	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
-							  plat_data->pool_size,
-							  &adev->dma_desc_pool,
-							  GFP_KERNEL);
-	if (!adev->dma_desc_pool_virt)
-		return -ENOMEM;
-
-	adev->id = plat_data->hw_id;
+	mv_chan->dma_desc_pool_virt =
+	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+				 &mv_chan->dma_desc_pool, GFP_KERNEL);
+	if (!mv_chan->dma_desc_pool_virt)
+		return ERR_PTR(-ENOMEM);
 
 	/* discover transaction capabilites from the platform data */
-	dma_dev->cap_mask = plat_data->cap_mask;
-	adev->pdev = pdev;
-	platform_set_drvdata(pdev, adev);
-
-	adev->shared = platform_get_drvdata(plat_data->shared);
+	dma_dev->cap_mask = cap_mask;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1143,6 +1138,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
 	dma_dev->device_tx_status = mv_xor_status;
 	dma_dev->device_issue_pending = mv_xor_issue_pending;
+	dma_dev->device_control = mv_xor_control;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
 	}
 
-	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
-	mv_chan->device = adev;
-	mv_chan->idx = plat_data->hw_id;
-	mv_chan->mmr_base = adev->shared->xor_base;
-
+	mv_chan->mmr_base = xordev->xor_base;
 	if (!mv_chan->mmr_base) {
 		ret = -ENOMEM;
 		goto err_free_dma;
@@ -1174,12 +1162,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	/* clear errors before enabling interrupts */
 	mv_xor_device_clear_err_status(mv_chan);
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto err_free_dma;
-	}
-	ret = devm_request_irq(&pdev->dev, irq,
+	ret = devm_request_irq(&pdev->dev, mv_chan->irq,
 			       mv_xor_interrupt_handler,
 			       0, dev_name(&pdev->dev), mv_chan);
 	if (ret)
@@ -1193,26 +1176,26 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
 	INIT_LIST_HEAD(&mv_chan->all_slots);
-	mv_chan->common.device = dma_dev;
-	dma_cookie_init(&mv_chan->common);
+	mv_chan->dmachan.device = dma_dev;
+	dma_cookie_init(&mv_chan->dmachan);
 
-	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
-		ret = mv_xor_memcpy_self_test(adev);
+		ret = mv_xor_memcpy_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
 		if (ret)
 			goto err_free_dma;
 	}
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
-		ret = mv_xor_xor_self_test(adev);
+		ret = mv_xor_xor_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
 			goto err_free_dma;
 	}
 
-	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+	dev_info(&pdev->dev, "Marvell XOR: "
 		   "( %s%s%s%s)\n",
 		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1203,19 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
-	goto out;
+	return mv_chan;
 
  err_free_dma:
-	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
-			adev->dma_desc_pool_virt, adev->dma_desc_pool);
- out:
-	return ret;
+	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+	return ERR_PTR(ret);
 }
 
 static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 			 const struct mbus_dram_target_info *dram)
 {
-	void __iomem *base = msp->xor_base;
+	void __iomem *base = xordev->xor_base;
 	u32 win_enable = 0;
 	int i;
 
@@ -1260,97 +1242,171 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
 }
 
-static struct platform_driver mv_xor_driver = {
-	.probe		= mv_xor_probe,
-	.remove		= __devexit_p(mv_xor_remove),
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= MV_XOR_NAME,
-	},
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int __devinit mv_xor_probe(struct platform_device *pdev)
 {
 	const struct mbus_dram_target_info *dram;
-	struct mv_xor_shared_private *msp;
+	struct mv_xor_device *xordev;
+	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *res;
+	int i, ret;
 
-	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+	dev_notice(&pdev->dev, "Marvell XOR driver\n");
 
-	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
-	if (!msp)
+	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+	if (!xordev)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
 				     resource_size(res));
-	if (!msp->xor_base)
+	if (!xordev->xor_base)
 		return -EBUSY;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
 					  resource_size(res));
-	if (!msp->xor_high_base)
+	if (!xordev->xor_high_base)
 		return -EBUSY;
 
-	platform_set_drvdata(pdev, msp);
+	platform_set_drvdata(pdev, xordev);
 
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
 	dram = mv_mbus_dram_info();
 	if (dram)
-		mv_xor_conf_mbus_windows(msp, dram);
+		mv_xor_conf_mbus_windows(xordev, dram);
 
 	/* Not all platforms can gate the clock, so it is not
 	 * an error if the clock does not exists.
 	 */
-	msp->clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(msp->clk))
-		clk_prepare_enable(msp->clk);
+	xordev->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(xordev->clk))
+		clk_prepare_enable(xordev->clk);
+
+	if (pdev->dev.of_node) {
+		struct device_node *np;
+		int i = 0;
+
+		for_each_child_of_node(pdev->dev.of_node, np) {
+			dma_cap_mask_t cap_mask;
+			int irq;
+
+			dma_cap_zero(cap_mask);
+			if (of_property_read_bool(np, "dmacap,memcpy"))
+				dma_cap_set(DMA_MEMCPY, cap_mask);
+			if (of_property_read_bool(np, "dmacap,xor"))
+				dma_cap_set(DMA_XOR, cap_mask);
+			if (of_property_read_bool(np, "dmacap,memset"))
+				dma_cap_set(DMA_MEMSET, cap_mask);
+			if (of_property_read_bool(np, "dmacap,interrupt"))
+				dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+			irq = irq_of_parse_and_map(np, 0);
+			if (irq < 0) {
+				ret = irq;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				irq_dispose_mapping(irq);
+				goto err_channel_add;
+			}
+
+			i++;
+		}
+	} else if (pdata && pdata->channels) {
+		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+			struct mv_xor_channel_data *cd;
+			int irq;
+
+			cd = &pdata->channels[i];
+			if (!cd) {
+				ret = -ENODEV;
+				goto err_channel_add;
+			}
+
+			irq = platform_get_irq(pdev, i);
+			if (irq < 0) {
+				ret = irq;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cd->cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				goto err_channel_add;
+			}
+		}
+	}
 
 	return 0;
+
+err_channel_add:
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+		if (xordev->channels[i]) {
+			if (pdev->dev.of_node)
+				irq_dispose_mapping(xordev->channels[i]->irq);
+			mv_xor_channel_remove(xordev->channels[i]);
+		}
+
+	clk_disable_unprepare(xordev->clk);
+	clk_put(xordev->clk);
+	return ret;
 }
 
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int __devexit mv_xor_remove(struct platform_device *pdev)
 {
-	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		if (xordev->channels[i])
+			mv_xor_channel_remove(xordev->channels[i]);
+	}
 
-	if (!IS_ERR(msp->clk)) {
-		clk_disable_unprepare(msp->clk);
-		clk_put(msp->clk);
+	if (!IS_ERR(xordev->clk)) {
+		clk_disable_unprepare(xordev->clk);
+		clk_put(xordev->clk);
 	}
 
 	return 0;
 }
 
-static struct platform_driver mv_xor_shared_driver = {
-	.probe		= mv_xor_shared_probe,
-	.remove		= mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] __devinitdata = {
+	{ .compatible = "marvell,orion-xor", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+	.probe		= mv_xor_probe,
+	.remove		= __devexit_p(mv_xor_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
-		.name	= MV_XOR_SHARED_NAME,
+		.name	= MV_XOR_NAME,
+		.of_match_table = of_match_ptr(mv_xor_dt_ids),
 	},
 };
 
 
 static int __init mv_xor_init(void)
 {
-	int rc;
-
-	rc = platform_driver_register(&mv_xor_shared_driver);
-	if (!rc) {
-		rc = platform_driver_register(&mv_xor_driver);
-		if (rc)
-			platform_driver_unregister(&mv_xor_shared_driver);
-	}
-	return rc;
+	return platform_driver_register(&mv_xor_driver);
 }
 module_init(mv_xor_init);
 
@@ -1359,7 +1415,6 @@ module_init(mv_xor_init);
 static void __exit mv_xor_exit(void)
 {
 	platform_driver_unregister(&mv_xor_driver);
-	platform_driver_unregister(&mv_xor_shared_driver);
 	return;
 }
 
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index a5b422f5a8ab..17043287b71a 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,8 +24,10 @@
 #include <linux/interrupt.h>
 
 #define USE_TIMER
+#define MV_XOR_POOL_SIZE	PAGE_SIZE
 #define MV_XOR_SLOT_SIZE	64
 #define MV_XOR_THRESHOLD	1
+#define MV_XOR_MAX_CHANNELS	2
 
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
@@ -52,28 +54,11 @@
 #define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
 #define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))
 
-struct mv_xor_shared_private {
-	void __iomem	*xor_base;
-	void __iomem	*xor_high_base;
-	struct clk	*clk;
-};
-
-
-/**
- * struct mv_xor_device - internal representation of a XOR device
- * @pdev: Platform device
- * @id: HW XOR Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
 struct mv_xor_device {
-	struct platform_device	*pdev;
-	int			id;
-	dma_addr_t		dma_desc_pool;
-	void			*dma_desc_pool_virt;
-	struct dma_device	common;
-	struct mv_xor_shared_private *shared;
+	void __iomem	     *xor_base;
+	void __iomem	     *xor_high_base;
+	struct clk	     *clk;
+	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
 };
 
 /**
@@ -96,11 +81,15 @@ struct mv_xor_chan {
 	spinlock_t		lock; /* protects the descriptor slot pool */
 	void __iomem		*mmr_base;
 	unsigned int		idx;
+	int			irq;
 	enum dma_transaction_type	current_type;
 	struct list_head	chain;
 	struct list_head	completed_slots;
-	struct mv_xor_device	*device;
-	struct dma_chan		common;
+	dma_addr_t		dma_desc_pool;
+	void			*dma_desc_pool_virt;
+	size_t			pool_size;
+	struct dma_device	dmadev;
+	struct dma_chan		dmachan;
 	struct mv_xor_desc_slot	*last_used;
 	struct list_head	all_slots;
 	int			slots_allocated;
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 2ba1f7d76eef..8ec18f64e396 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -10,15 +10,14 @@
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
 
-#define MV_XOR_SHARED_NAME	"mv_xor_shared"
-#define MV_XOR_NAME		"mv_xor"
+#define MV_XOR_NAME	"mv_xor"
 
-struct mv_xor_platform_data {
-	struct platform_device	*shared;
-	int			hw_id;
+struct mv_xor_channel_data {
 	dma_cap_mask_t		cap_mask;
-	size_t			pool_size;
 };
 
+struct mv_xor_platform_data {
+	struct mv_xor_channel_data	*channels;
+};
 
 #endif