author    Olof Johansson <olof@lixom.net>  2012-11-30 12:01:49 -0500
committer Olof Johansson <olof@lixom.net>  2012-11-30 12:02:53 -0500
commit    0c0029cb1806601430692d48c130a17302a18225
tree      2481cc582a77b3ce7651c3ab2478886941bea621
parent    9489e9dcae718d5fde988e4a684a0f55b5f94d17
parent    56580bb422e5f542da19c057f348dd39634138e7
Merge tag 'mvebu_everything_for_3.8' of git://git.infradead.org/users/jcooper/linux into late/mvebu
From Jason Cooper. Unfortunately this is a combined branch with all mvebu
code as one drop, something we normally try to avoid and instead slice
vendor topics across our branches. Hopefully we can avoid doing this
again for 3.9!

mvebu everything for v3.8:
 - due to the complex interdependencies of the received pull requests
   I decided to keep this in one branch the way they recommended merging it
 - this was their first attempt at doing pull requests, we'll work on it
   with them
 - added SMP support for mvebu SoCs
 - added coherency fabric
 - added mdio and mvneta drivers
 - added mirabox board
 - added openblocks ax3-4 board
 - clock fixes and improvements
 - converted mv_xor driver to devicetree (extensive series in itself)

* tag 'mvebu_everything_for_3.8' of git://git.infradead.org/users/jcooper/linux: (85 commits)
  dma: mv_xor: fix error handling path
  dma: mv_xor: fix error checking of irq_of_parse_and_map()
  dma: mv_xor: use request_irq() instead of devm_request_irq()
  dma: mv_xor: clear the window override control registers
  arm: mvebu: fix address decoding armada_cfg_base() function
  ARM: mvebu: update defconfig with I2C and RTC support
  ARM: mvebu: Add SATA support for OpenBlocks AX3-4
  ARM: mvebu: Add support for the RTC in OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C on OpenBlocks AX3-4
  ARM: mvebu: Add support for I2C controllers in Armada 370/XP
  arm: mvebu: Add hardware I/O Coherency support
  arm: plat-orion: Add coherency attribute when setup mbus target
  arm: dma mapping: Export a dma ops function arm_dma_set_mask
  arm: mvebu: Add SMP support for Armada XP
  arm: mm: Add support for PJ4B cpu and init routines
  arm: mvebu: Add IPI support via doorbells
  arm: mvebu: Add initial support for power managmement service unit
  arm: mvebu: Add support for coherency fabric in mach-mvebu
  arm: mvebu: update defconfig to include XOR driver
  arm: mvebu: update defconfig to include network driver
  ...

Signed-off-by: Olof Johansson <olof@lixom.net>
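For reference, a pull like this is consumed by fetching and merging the submaintainer's signed tag; a rough sketch of the workflow (the commands below illustrate the general mechanism and are not taken from this log):

    $ git fetch git://git.infradead.org/users/jcooper/linux tag mvebu_everything_for_3.8
    $ git merge --no-ff mvebu_everything_for_3.8   # annotated tag message becomes the merge description
    $ git log --oneline ORIG_HEAD..HEAD            # review the 85 commits that were pulled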
 Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt       |   12
 Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt       |   20
 Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt      |    1
 Documentation/devicetree/bindings/arm/coherency-fabric.txt         |   21
 Documentation/devicetree/bindings/clock/mvebu-core-clock.txt       |   47
 Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt        |   21
 Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt      |  119
 Documentation/devicetree/bindings/dma/mv-xor.txt                   |   40
 Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt  |   23
 Documentation/devicetree/bindings/net/marvell-orion-mdio.txt       |   35
 MAINTAINERS                                                        |    6
 arch/arm/Kconfig                                                   |    1
 arch/arm/boot/dts/Makefile                                         |    4
 arch/arm/boot/dts/armada-370-db.dts                                |   25
 arch/arm/boot/dts/armada-370-mirabox.dts                           |   56
 arch/arm/boot/dts/armada-370-xp.dtsi                               |   63
 arch/arm/boot/dts/armada-370.dtsi                                  |   51
 arch/arm/boot/dts/armada-xp-db.dts                                 |   44
 arch/arm/boot/dts/armada-xp-mv78230.dtsi                           |   12
 arch/arm/boot/dts/armada-xp-mv78260.dtsi                           |   19
 arch/arm/boot/dts/armada-xp-mv78460.dtsi                           |   34
 arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts                   |  125
 arch/arm/boot/dts/armada-xp.dtsi                                   |   84
 arch/arm/boot/dts/dove.dtsi                                        |   62
 arch/arm/boot/dts/kirkwood.dtsi                                    |   62
 arch/arm/configs/multi_v7_defconfig                                |    2
 arch/arm/configs/mvebu_defconfig                                   |   17
 arch/arm/include/asm/dma-mapping.h                                 |    2
 arch/arm/mach-dove/Kconfig                                         |    2
 arch/arm/mach-dove/common.c                                        |   62
 arch/arm/mach-kirkwood/Kconfig                                     |    2
 arch/arm/mach-kirkwood/board-dt.c                                  |   67
 arch/arm/mach-kirkwood/common.c                                    |    4
 arch/arm/mach-mvebu/Kconfig                                        |    7
 arch/arm/mach-mvebu/Makefile                                       |    4
 arch/arm/mach-mvebu/addr-map.c                                     |    5
 arch/arm/mach-mvebu/armada-370-xp.c                                |   33
 arch/arm/mach-mvebu/armada-370-xp.h                                |    7
 arch/arm/mach-mvebu/coherency.c                                    |  155
 arch/arm/mach-mvebu/coherency.h                                    |   24
 arch/arm/mach-mvebu/coherency_ll.S                                 |   49
 arch/arm/mach-mvebu/common.h                                       |    5
 arch/arm/mach-mvebu/headsmp.S                                      |   49
 arch/arm/mach-mvebu/hotplug.c                                      |   30
 arch/arm/mach-mvebu/irq-armada-370-xp.c                            |   92
 arch/arm/mach-mvebu/platsmp.c                                      |  122
 arch/arm/mach-mvebu/pmsu.c                                         |   75
 arch/arm/mach-mvebu/pmsu.h                                         |   16
 arch/arm/mm/Kconfig                                                |    4
 arch/arm/mm/dma-mapping.c                                          |    4
 arch/arm/mm/proc-v7.S                                              |   67
 arch/arm/plat-orion/addr-map.c                                     |    4
 arch/arm/plat-orion/common.c                                       |  192
 arch/arm/plat-orion/include/plat/addr-map.h                        |    1
 arch/arm/plat-orion/include/plat/common.h                          |    1
 drivers/clk/Kconfig                                                |    2
 drivers/clk/Makefile                                               |    1
 drivers/clk/mvebu/Kconfig                                          |    8
 drivers/clk/mvebu/Makefile                                         |    3
 drivers/clk/mvebu/clk-core.c                                       |  675
 drivers/clk/mvebu/clk-core.h                                       |   18
 drivers/clk/mvebu/clk-cpu.c                                        |  186
 drivers/clk/mvebu/clk-cpu.h                                        |   22
 drivers/clk/mvebu/clk-gating-ctrl.c                                |  249
 drivers/clk/mvebu/clk-gating-ctrl.h                                |   22
 drivers/clk/mvebu/clk.c                                            |   27
 drivers/clocksource/time-armada-370-xp.c                           |   11
 drivers/dma/mv_xor.c                                               |  429
 drivers/dma/mv_xor.h                                               |   36
 drivers/net/ethernet/marvell/Kconfig                               |   24
 drivers/net/ethernet/marvell/Makefile                              |    2
 drivers/net/ethernet/marvell/mvmdio.c                              |  228
 drivers/net/ethernet/marvell/mvneta.c                              | 2848
 include/linux/clk/mvebu.h                                          |   22
 include/linux/platform_data/dma-mv_xor.h                           |   11
 75 files changed, 6503 insertions(+), 412 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
index 70c0dc5f00ed..61df564c0d23 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -6,9 +6,15 @@ Required properties:
 - interrupt-controller: Identifies the node as an interrupt controller.
 - #interrupt-cells: The number of cells to define the interrupts. Should be 1.
   The cell is the IRQ number
+
 - reg: Should contain PMIC registers location and length. First pair
   for the main interrupt registers, second pair for the per-CPU
-  interrupt registers
+  interrupt registers. For this last pair, to be compliant with SMP
+  support, the "virtual" must be use (For the record, these registers
+  automatically map to the interrupt controller registers of the
+  current CPU)
+
+
 
 Example:
 
@@ -18,6 +24,6 @@ Example:
         #address-cells = <1>;
         #size-cells = <1>;
         interrupt-controller;
-        reg = <0xd0020000 0x1000>,
-              <0xd0021000 0x1000>;
+        reg = <0xd0020a00 0x1d0>,
+              <0xd0021070 0x58>;
         };
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
new file mode 100644
index 000000000000..926b4d6aae7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
@@ -0,0 +1,20 @@
+Power Management Service Unit(PMSU)
+-----------------------------------
+Available on Marvell SOCs: Armada 370 and Armada XP
+
+Required properties:
+
+- compatible: "marvell,armada-370-xp-pmsu"
+
+- reg: Should contain PMSU registers location and length. First pair
+  for the per-CPU SW Reset Control registers, second pair for the
+  Power Management Service Unit.
+
+Example:
+
+armada-370-xp-pmsu@d0022000 {
+    compatible = "marvell,armada-370-xp-pmsu";
+    reg = <0xd0022100 0x430>,
+          <0xd0020800 0x20>;
+};
+
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
index 8b6ea2267c94..64830118b013 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
@@ -5,6 +5,7 @@ Required properties:
 - compatible: Should be "marvell,armada-370-xp-timer"
 - interrupts: Should contain the list of Global Timer interrupts
 - reg: Should contain the base address of the Global Timer registers
+- clocks: clock driving the timer hardware
 
 Optional properties:
 - marvell,timer-25Mhz: Tells whether the Global timer supports the 25
diff --git a/Documentation/devicetree/bindings/arm/coherency-fabric.txt b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
new file mode 100644
index 000000000000..17d8cd107559
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
@@ -0,0 +1,21 @@
+Coherency fabric
+----------------
+Available on Marvell SOCs: Armada 370 and Armada XP
+
+Required properties:
+
+- compatible: "marvell,coherency-fabric"
+
+- reg: Should contain coherency fabric registers location and
+  length. First pair for the coherency fabric registers, second pair
+  for the per-CPU fabric registers registers.
+
+Example:
+
+coherency-fabric@d0020200 {
+    compatible = "marvell,coherency-fabric";
+    reg = <0xd0020200 0xb0>,
+          <0xd0021810 0x1c>;
+
+};
+
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
new file mode 100644
index 000000000000..1e662948661e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -0,0 +1,47 @@
+* Core Clock bindings for Marvell MVEBU SoCs
+
+Marvell MVEBU SoCs usually allow to determine core clock frequencies by
+reading the Sample-At-Reset (SAR) register. The core clock consumer should
+specify the desired clock by having the clock ID in its "clocks" phandle cell.
+
+The following is a list of provided IDs and clock names on Armada 370/XP:
+ 0 = tclk    (Internal Bus clock)
+ 1 = cpuclk  (CPU clock)
+ 2 = nbclk   (L2 Cache clock)
+ 3 = hclk    (DRAM control clock)
+ 4 = dramclk (DDR clock)
+
+The following is a list of provided IDs and clock names on Kirkwood and Dove:
+ 0 = tclk   (Internal Bus clock)
+ 1 = cpuclk (CPU0 clock)
+ 2 = l2clk  (L2 Cache clock derived from CPU0 clock)
+ 3 = ddrclk (DDR controller clock derived from CPU0 clock)
+
+Required properties:
+- compatible : shall be one of the following:
+    "marvell,armada-370-core-clock" - For Armada 370 SoC core clocks
+    "marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
+    "marvell,dove-core-clock" - for Dove SoC core clocks
+    "marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
+    "marvell,mv88f6180-core-clock" - for Kirkwood MV88f6180 SoC
+- reg : shall be the register address of the Sample-At-Reset (SAR) register
+- #clock-cells : from common clock binding; shall be set to 1
+
+Optional properties:
+- clock-output-names : from common clock binding; allows overwrite default clock
+    output names ("tclk", "cpuclk", "l2clk", "ddrclk")
+
+Example:
+
+core_clk: core-clocks@d0214 {
+    compatible = "marvell,dove-core-clock";
+    reg = <0xd0214 0x4>;
+    #clock-cells = <1>;
+};
+
+spi0: spi@10600 {
+    compatible = "marvell,orion-spi";
+    /* ... */
+    /* get tclk from core clock provider */
+    clocks = <&core_clk 0>;
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
new file mode 100644
index 000000000000..feb830130714
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
@@ -0,0 +1,21 @@
+Device Tree Clock bindings for cpu clock of Marvell EBU platforms
+
+Required properties:
+- compatible : shall be one of the following:
+    "marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
+- reg : Address and length of the clock complex register set
+- #clock-cells : should be set to 1.
+- clocks : shall be the input parent clock phandle for the clock.
+
+cpuclk: clock-complex@d0018700 {
+    #clock-cells = <1>;
+    compatible = "marvell,armada-xp-cpu-clock";
+    reg = <0xd0018700 0xA0>;
+    clocks = <&coreclk 1>;
+}
+
+cpu@0 {
+    compatible = "marvell,sheeva-v7";
+    reg = <0>;
+    clocks = <&cpuclk 0>;
+};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
new file mode 100644
index 000000000000..7337005ef5e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -0,0 +1,119 @@
+* Gated Clock bindings for Marvell Orion SoCs
+
+Marvell Dove and Kirkwood allow some peripheral clocks to be gated to save
+some power. The clock consumer should specify the desired clock by having
+the clock ID in its "clocks" phandle cell. The clock ID is directly mapped to
+the corresponding clock gating control bit in HW to ease manual clock lookup
+in datasheet.
+
+The following is a list of provided IDs for Armada 370:
+ID      Clock     Peripheral
+-----------------------------------
+0       Audio     AC97 Cntrl
+1       pex0_en   PCIe 0 Clock out
+2       pex1_en   PCIe 1 Clock out
+3       ge1       Gigabit Ethernet 1
+4       ge0       Gigabit Ethernet 0
+5       pex0      PCIe Cntrl 0
+9       pex1      PCIe Cntrl 1
+15      sata0     SATA Host 0
+17      sdio      SDHCI Host
+25      tdm       Time Division Mplx
+28      ddr       DDR Cntrl
+30      sata1     SATA Host 0
+
+The following is a list of provided IDs for Armada XP:
+ID      Clock     Peripheral
+-----------------------------------
+0       audio     Audio Cntrl
+1       ge3       Gigabit Ethernet 3
+2       ge2       Gigabit Ethernet 2
+3       ge1       Gigabit Ethernet 1
+4       ge0       Gigabit Ethernet 0
+5       pex0      PCIe Cntrl 0
+6       pex1      PCIe Cntrl 1
+7       pex2      PCIe Cntrl 2
+8       pex3      PCIe Cntrl 3
+13      bp
+14      sata0lnk
+15      sata0     SATA Host 0
+16      lcd       LCD Cntrl
+17      sdio      SDHCI Host
+18      usb0      USB Host 0
+19      usb1      USB Host 1
+20      usb2      USB Host 2
+22      xor0      XOR DMA 0
+23      crypto    CESA engine
+25      tdm       Time Division Mplx
+28      xor1      XOR DMA 1
+29      sata1lnk
+30      sata1     SATA Host 0
+
+The following is a list of provided IDs for Dove:
+ID      Clock     Peripheral
+-----------------------------------
+0       usb0      USB Host 0
+1       usb1      USB Host 1
+2       ge        Gigabit Ethernet
+3       sata      SATA Host
+4       pex0      PCIe Cntrl 0
+5       pex1      PCIe Cntrl 1
+8       sdio0     SDHCI Host 0
+9       sdio1     SDHCI Host 1
+10      nand      NAND Cntrl
+11      camera    Camera Cntrl
+12      i2s0      I2S Cntrl 0
+13      i2s1      I2S Cntrl 1
+15      crypto    CESA engine
+21      ac97      AC97 Cntrl
+22      pdma      Peripheral DMA
+23      xor0      XOR DMA 0
+24      xor1      XOR DMA 1
+30      gephy     Gigabit Ethernel PHY
+Note: gephy(30) is implemented as a parent clock of ge(2)
+
+The following is a list of provided IDs for Kirkwood:
+ID      Clock     Peripheral
+-----------------------------------
+0       ge0       Gigabit Ethernet 0
+2       pex0      PCIe Cntrl 0
+3       usb0      USB Host 0
+4       sdio      SDIO Cntrl
+5       tsu       Transp. Stream Unit
+6       dunit     SDRAM Cntrl
+7       runit     Runit
+8       xor0      XOR DMA 0
+9       audio     I2S Cntrl 0
+14      sata0     SATA Host 0
+15      sata1     SATA Host 1
+16      xor1      XOR DMA 1
+17      crypto    CESA engine
+18      pex1      PCIe Cntrl 1
+19      ge1       Gigabit Ethernet 0
+20      tdm       Time Division Mplx
+
+Required properties:
+- compatible : shall be one of the following:
+    "marvell,dove-gating-clock" - for Dove SoC clock gating
+    "marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
+- reg : shall be the register address of the Clock Gating Control register
+- #clock-cells : from common clock binding; shall be set to 1
+
+Optional properties:
+- clocks : default parent clock phandle (e.g. tclk)
+
+Example:
+
+gate_clk: clock-gating-control@d0038 {
+    compatible = "marvell,dove-gating-clock";
+    reg = <0xd0038 0x4>;
+    /* default parent clock is tclk */
+    clocks = <&core_clk 0>;
+    #clock-cells = <1>;
+};
+
+sdio0: sdio@92000 {
+    compatible = "marvell,dove-sdhci";
+    /* get clk gate bit 8 (sdio0) */
+    clocks = <&gate_clk 8>;
+};
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
new file mode 100644
index 000000000000..7c6cb7fcecd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -0,0 +1,40 @@
+* Marvell XOR engines
+
+Required properties:
+- compatible: Should be "marvell,orion-xor"
+- reg: Should contain registers location and length (two sets)
+    the first set is the low registers, the second set the high
+    registers for the XOR engine.
+- clocks: pointer to the reference clock
+
+The DT node must also contains sub-nodes for each XOR channel that the
+XOR engine has. Those sub-nodes have the following required
+properties:
+- interrupts: interrupt of the XOR channel
+
+And the following optional properties:
+- dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
+- dmacap,memset to indicate that the XOR channel is capable of memset operations
+- dmacap,xor to indicate that the XOR channel is capable of xor operations
+
+Example:
+
+xor@d0060900 {
+    compatible = "marvell,orion-xor";
+    reg = <0xd0060900 0x100
+           0xd0060b00 0x100>;
+    clocks = <&coreclk 0>;
+    status = "okay";
+
+    xor00 {
+        interrupts = <51>;
+        dmacap,memcpy;
+        dmacap,xor;
+    };
+    xor01 {
+        interrupts = <52>;
+        dmacap,memcpy;
+        dmacap,xor;
+        dmacap,memset;
+    };
+};
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
new file mode 100644
index 000000000000..859a6fa7569c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -0,0 +1,23 @@
+* Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+
+Required properties:
+- compatible: should be "marvell,armada-370-neta".
+- reg: address and length of the register set for the device.
+- interrupts: interrupt for the device
+- phy: A phandle to a phy node defining the PHY address (as the reg
+  property, a single integer).
+- phy-mode: The interface between the SoC and the PHY (a string that
+  of_get_phy_mode() can understand)
+- clocks: a pointer to the reference clock for this device.
+
+Example:
+
+ethernet@d0070000 {
+    compatible = "marvell,armada-370-neta";
+    reg = <0xd0070000 0x2500>;
+    interrupts = <8>;
+    clocks = <&gate_clk 4>;
+    status = "okay";
+    phy = <&phy0>;
+    phy-mode = "rgmii-id";
+};
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
new file mode 100644
index 000000000000..34e7aafa321c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -0,0 +1,35 @@
+* Marvell MDIO Ethernet Controller interface
+
+The Ethernet controllers of the Marvel Kirkwood, Dove, Orion5x,
+MV78xx0, Armada 370 and Armada XP have an identical unit that provides
+an interface with the MDIO bus. This driver handles this MDIO
+interface.
+
+Required properties:
+- compatible: "marvell,orion-mdio"
+- reg: address and length of the SMI register
+
+The child nodes of the MDIO driver are the individual PHY devices
+connected to this MDIO bus. They must have a "reg" property given the
+PHY address on the MDIO bus.
+
+Example at the SoC level:
+
+mdio {
+    #address-cells = <1>;
+    #size-cells = <0>;
+    compatible = "marvell,orion-mdio";
+    reg = <0xd0072004 0x4>;
+};
+
+And at the board level:
+
+mdio {
+    phy0: ethernet-phy@0 {
+        reg = <0>;
+    };
+
+    phy1: ethernet-phy@1 {
+        reg = <1>;
+    };
+}
diff --git a/MAINTAINERS b/MAINTAINERS
index 9386a63ea8f6..65e26d844a7e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4757,6 +4757,12 @@ S: Maintained
 F:  drivers/net/ethernet/marvell/mv643xx_eth.*
 F:  include/linux/mv643xx.h
 
+MARVELL MVNETA ETHERNET DRIVER
+M:  Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:  netdev@vger.kernel.org
+S:  Maintained
+F:  drivers/net/ethernet/marvell/mvneta.*
+
 MARVELL MWIFIEX WIRELESS DRIVER
 M:  Bing Zhao <bzhao@marvell.com>
 L:  linux-wireless@vger.kernel.org
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ade7e924bef5..05900997af90 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -533,6 +533,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
     bool "Marvell Dove"
     select ARCH_REQUIRE_GPIOLIB
+    select COMMON_CLK_DOVE
     select CPU_V7
     select GENERIC_CLOCKEVENTS
     select MIGHT_HAVE_PCI
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f37cf9fa5fa0..32d1d404924a 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -44,7 +44,9 @@ dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-dns320.dtb \
 dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
     msm8960-cdp.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
-    armada-xp-db.dtb
+    armada-370-mirabox.dtb \
+    armada-xp-db.dtb \
+    armada-xp-openblocks-ax3-4.dtb
 dtb-$(CONFIG_ARCH_MXC) += imx51-babbage.dtb \
     imx53-ard.dtb \
     imx53-evk.dtb \
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index fffd5c2a3041..00044026ef1f 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -34,9 +34,30 @@
             clock-frequency = <200000000>;
             status = "okay";
         };
-        timer@d0020300 {
-            clock-frequency = <600000000>;
+        sata@d00a0000 {
+            nr-ports = <2>;
             status = "okay";
         };
+
+        mdio {
+            phy0: ethernet-phy@0 {
+                reg = <0>;
+            };
+
+            phy1: ethernet-phy@1 {
+                reg = <1>;
+            };
+        };
+
+        ethernet@d0070000 {
+            status = "okay";
+            phy = <&phy0>;
+            phy-mode = "rgmii-id";
+        };
+        ethernet@d0074000 {
+            status = "okay";
+            phy = <&phy1>;
+            phy-mode = "rgmii-id";
+        };
     };
 };
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
new file mode 100644
index 000000000000..3b4071336599
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -0,0 +1,56 @@
+/*
+ * Device Tree file for Globalscale Mirabox
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-370.dtsi"
+
+/ {
+    model = "Globalscale Mirabox";
+    compatible = "globalscale,mirabox", "marvell,armada370", "marvell,armada-370-xp";
+
+    chosen {
+        bootargs = "console=ttyS0,115200 earlyprintk";
+    };
+
+    memory {
+        device_type = "memory";
+        reg = <0x00000000 0x20000000>; /* 512 MB */
+    };
+
+    soc {
+        serial@d0012000 {
+            clock-frequency = <200000000>;
+            status = "okay";
+        };
+        timer@d0020300 {
+            clock-frequency = <600000000>;
+            status = "okay";
+        };
+        mdio {
+            phy0: ethernet-phy@0 {
+                reg = <0>;
+            };
+
+            phy1: ethernet-phy@1 {
+                reg = <1>;
+            };
+        };
+        ethernet@d0070000 {
+            status = "okay";
+            phy = <&phy0>;
+            phy-mode = "rgmii-id";
+        };
+        ethernet@d0074000 {
+            status = "okay";
+            phy = <&phy1>;
+            phy-mode = "rgmii-id";
+        };
+    };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 16cc82cdaa81..cf6c48a09eac 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -20,7 +20,7 @@
 
 / {
     model = "Marvell Armada 370 and XP SoC";
-    compatible = "marvell,armada_370_xp";
+    compatible = "marvell,armada-370-xp";
 
     cpus {
         cpu@0 {
@@ -36,6 +36,12 @@
         interrupt-controller;
     };
 
+    coherency-fabric@d0020200 {
+        compatible = "marvell,coherency-fabric";
+        reg = <0xd0020200 0xb0>,
+              <0xd0021810 0x1c>;
+    };
+
     soc {
         #address-cells = <1>;
         #size-cells = <1>;
@@ -62,12 +68,67 @@
             compatible = "marvell,armada-370-xp-timer";
             reg = <0xd0020300 0x30>;
             interrupts = <37>, <38>, <39>, <40>;
+            clocks = <&coreclk 2>;
         };
 
         addr-decoding@d0020000 {
             compatible = "marvell,armada-addr-decoding-controller";
             reg = <0xd0020000 0x258>;
         };
+
+        sata@d00a0000 {
+            compatible = "marvell,orion-sata";
+            reg = <0xd00a0000 0x2400>;
+            interrupts = <55>;
+            clocks = <&gateclk 15>, <&gateclk 30>;
+            clock-names = "0", "1";
+            status = "disabled";
+        };
+
+        mdio {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            compatible = "marvell,orion-mdio";
+            reg = <0xd0072004 0x4>;
+        };
+
+        ethernet@d0070000 {
+            compatible = "marvell,armada-370-neta";
+            reg = <0xd0070000 0x2500>;
+            interrupts = <8>;
+            clocks = <&gateclk 4>;
+            status = "disabled";
+        };
+
+        ethernet@d0074000 {
+            compatible = "marvell,armada-370-neta";
+            reg = <0xd0074000 0x2500>;
+            interrupts = <10>;
+            clocks = <&gateclk 3>;
+            status = "disabled";
+        };
+
+        i2c0: i2c@d0011000 {
+            compatible = "marvell,mv64xxx-i2c";
+            reg = <0xd0011000 0x20>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+            interrupts = <31>;
+            timeout-ms = <1000>;
+            clocks = <&coreclk 0>;
+            status = "disabled";
+        };
+
+        i2c1: i2c@d0011100 {
+            compatible = "marvell,mv64xxx-i2c";
+            reg = <0xd0011100 0x20>;
+            #address-cells = <1>;
+            #size-cells = <0>;
+            interrupts = <32>;
+            timeout-ms = <1000>;
+            clocks = <&coreclk 0>;
+            status = "disabled";
+        };
     };
 };
 
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 2069151afe01..7fbac28b01f3 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -75,5 +75,56 @@
         #interrupts-cells = <2>;
         interrupts = <91>;
     };
+
+    coreclk: mvebu-sar@d0018230 {
+        compatible = "marvell,armada-370-core-clock";
+        reg = <0xd0018230 0x08>;
+        #clock-cells = <1>;
+    };
+
+    gateclk: clock-gating-control@d0018220 {
+        compatible = "marvell,armada-370-gating-clock";
+        reg = <0xd0018220 0x4>;
+        clocks = <&coreclk 0>;
+        #clock-cells = <1>;
+    };
+
+    xor@d0060800 {
+        compatible = "marvell,orion-xor";
+        reg = <0xd0060800 0x100
+               0xd0060A00 0x100>;
+        status = "okay";
+
+        xor00 {
+            interrupts = <51>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+        xor01 {
+            interrupts = <52>;
+            dmacap,memcpy;
+            dmacap,xor;
+            dmacap,memset;
+        };
+    };
+
+    xor@d0060900 {
+        compatible = "marvell,orion-xor";
+        reg = <0xd0060900 0x100
+               0xd0060b00 0x100>;
+        status = "okay";
+
+        xor10 {
+            interrupts = <94>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+        xor11 {
+            interrupts = <95>;
+            dmacap,memcpy;
+            dmacap,xor;
+            dmacap,memset;
+        };
+    };
     };
 };
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index b1fc728515e9..8e53b25b5508 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -46,5 +46,49 @@
             clock-frequency = <250000000>;
             status = "okay";
         };
+
+        sata@d00a0000 {
+            nr-ports = <2>;
+            status = "okay";
+        };
+
+        mdio {
+            phy0: ethernet-phy@0 {
+                reg = <0>;
+            };
+
+            phy1: ethernet-phy@1 {
+                reg = <1>;
+            };
+
+            phy2: ethernet-phy@2 {
+                reg = <25>;
+            };
+
+            phy3: ethernet-phy@3 {
+                reg = <27>;
+            };
+        };
+
+        ethernet@d0070000 {
+            status = "okay";
+            phy = <&phy0>;
+            phy-mode = "rgmii-id";
+        };
+        ethernet@d0074000 {
+            status = "okay";
+            phy = <&phy1>;
+            phy-mode = "rgmii-id";
+        };
+        ethernet@d0030000 {
+            status = "okay";
+            phy = <&phy2>;
+            phy-mode = "sgmii";
+        };
+        ethernet@d0034000 {
+            status = "okay";
+            phy = <&phy3>;
+            phy-mode = "sgmii";
+        };
     };
 };
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index ea355192be6f..c45c7b4dc352 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -24,6 +24,18 @@
         gpio1 = &gpio1;
     };
 
+    cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <0>;
+            clocks = <&cpuclk 0>;
+        };
+    }
+
     soc {
         pinctrl {
             compatible = "marvell,mv78230-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 2057863f3dfa..a2aee5707377 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -25,6 +25,25 @@
         gpio2 = &gpio2;
     };
 
+    cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <0>;
+            clocks = <&cpuclk 0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <1>;
+            clocks = <&cpuclk 1>;
+        };
+    };
+
     soc {
         pinctrl {
             compatible = "marvell,mv78260-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index ffac98373792..da03a129243a 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -25,6 +25,40 @@
         gpio2 = &gpio2;
     };
 
+
+    cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        cpu@0 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <0>;
+            clocks = <&cpuclk 0>;
+        };
+
+        cpu@1 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <1>;
+            clocks = <&cpuclk 1>;
+        };
+
+        cpu@2 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <2>;
+            clocks = <&cpuclk 2>;
+        };
+
+        cpu@3 {
+            device_type = "cpu";
+            compatible = "marvell,sheeva-v7";
+            reg = <3>;
+            clocks = <&cpuclk 3>;
+        };
+    };
+
     soc {
         pinctrl {
             compatible = "marvell,mv78460-pinctrl";
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
new file mode 100644
index 000000000000..b42652fd3d8c
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -0,0 +1,125 @@
+/*
+ * Device Tree file for OpenBlocks AX3-4 board
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-xp-mv78260.dtsi"
+
+/ {
+    model = "PlatHome OpenBlocks AX3-4 board";
+    compatible = "plathome,openblocks-ax3-4", "marvell,armadaxp-mv78260", "marvell,armadaxp", "marvell,armada-370-xp";
+
+    chosen {
+        bootargs = "console=ttyS0,115200 earlyprintk";
+    };
+
+    memory {
+        device_type = "memory";
+        reg = <0x00000000 0xC0000000>; /* 3 GB */
+    };
+
+    soc {
+        serial@d0012000 {
+            clock-frequency = <250000000>;
+            status = "okay";
+        };
+        serial@d0012100 {
+            clock-frequency = <250000000>;
+            status = "okay";
+        };
+        pinctrl {
+            led_pins: led-pins-0 {
+                marvell,pins = "mpp49", "mpp51", "mpp53";
+                marvell,function = "gpio";
+            };
+        };
+        leds {
+            compatible = "gpio-leds";
+            pinctrl-names = "default";
+            pinctrl-0 = <&led_pins>;
+
+            red_led {
+                label = "red_led";
+                gpios = <&gpio1 17 1>;
+                default-state = "off";
+            };
+
+            yellow_led {
+                label = "yellow_led";
+                gpios = <&gpio1 19 1>;
+                default-state = "off";
+            };
+
+            green_led {
+                label = "green_led";
+                gpios = <&gpio1 21 1>;
+                default-state = "off";
+                linux,default-trigger = "heartbeat";
+            };
+        };
+
+        mdio {
+            phy0: ethernet-phy@0 {
+                reg = <0>;
+            };
+
+            phy1: ethernet-phy@1 {
+                reg = <1>;
+            };
+
+            phy2: ethernet-phy@2 {
+                reg = <2>;
+            };
+
+            phy3: ethernet-phy@3 {
+                reg = <3>;
+            };
+        };
+
+        ethernet@d0070000 {
+            status = "okay";
+            phy = <&phy0>;
+            phy-mode = "sgmii";
+        };
+        ethernet@d0074000 {
+            status = "okay";
+            phy = <&phy1>;
+            phy-mode = "sgmii";
+        };
+        ethernet@d0030000 {
+            status = "okay";
+            phy = <&phy2>;
+            phy-mode = "sgmii";
+        };
+        ethernet@d0034000 {
+            status = "okay";
+            phy = <&phy3>;
+            phy-mode = "sgmii";
+        };
+        i2c@d0011000 {
+            status = "okay";
+            clock-frequency = <400000>;
+        };
+        i2c@d0011100 {
+            status = "okay";
+            clock-frequency = <400000>;
+
+            s35390a: s35390a@30 {
+                compatible = "s35390a";
+                reg = <0x30>;
+            };
+        };
+        sata@d00a0000 {
+            nr-ports = <2>;
+            status = "okay";
+        };
+    };
+};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 71d6b5d0daf1..45a567c2e9ba 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -24,7 +24,13 @@
 
     mpic: interrupt-controller@d0020000 {
           reg = <0xd0020a00 0x1d0>,
-                <0xd0021870 0x58>;
+                <0xd0021070 0x58>;
+    };
+
+    armada-370-xp-pmsu@d0022000 {
+        compatible = "marvell,armada-370-xp-pmsu";
+        reg = <0xd0022100 0x430>,
+              <0xd0020800 0x20>;
     };
 
     soc {
@@ -47,9 +53,85 @@
             marvell,timer-25Mhz;
         };
 
+        coreclk: mvebu-sar@d0018230 {
+            compatible = "marvell,armada-xp-core-clock";
+            reg = <0xd0018230 0x08>;
+            #clock-cells = <1>;
+        };
+
+        cpuclk: clock-complex@d0018700 {
+            #clock-cells = <1>;
+            compatible = "marvell,armada-xp-cpu-clock";
+            reg = <0xd0018700 0xA0>;
+            clocks = <&coreclk 1>;
+        };
+
+        gateclk: clock-gating-control@d0018220 {
+            compatible = "marvell,armada-xp-gating-clock";
+            reg = <0xd0018220 0x4>;
+            clocks = <&coreclk 0>;
+            #clock-cells = <1>;
+        };
+
         system-controller@d0018200 {
             compatible = "marvell,armada-370-xp-system-controller";
             reg = <0xd0018200 0x500>;
         };
+
+        ethernet@d0030000 {
+            compatible = "marvell,armada-370-neta";
+            reg = <0xd0030000 0x2500>;
+            interrupts = <12>;
+            clocks = <&gateclk 2>;
+            status = "disabled";
+        };
+
+        ethernet@d0034000 {
+            compatible = "marvell,armada-370-neta";
+            reg = <0xd0034000 0x2500>;
+            interrupts = <14>;
+            clocks = <&gateclk 1>;
+            status = "disabled";
+        };
+
+        xor@d0060900 {
+            compatible = "marvell,orion-xor";
+            reg = <0xd0060900 0x100
+                   0xd0060b00 0x100>;
+            clocks = <&gateclk 22>;
+            status = "okay";
+
+            xor10 {
+                interrupts = <51>;
+                dmacap,memcpy;
+                dmacap,xor;
+            };
+            xor11 {
+                interrupts = <52>;
+                dmacap,memcpy;
+                dmacap,xor;
+                dmacap,memset;
+            };
+        };
+
+        xor@d00f0900 {
+            compatible = "marvell,orion-xor";
+            reg = <0xd00F0900 0x100
+                   0xd00F0B00 0x100>;
+            clocks = <&gateclk 28>;
+            status = "okay";
+
+            xor00 {
+                interrupts = <94>;
+                dmacap,memcpy;
+                dmacap,xor;
+            };
+            xor01 {
+                interrupts = <95>;
+                dmacap,memcpy;
+                dmacap,xor;
+                dmacap,memset;
+            };
+        };
     };
 };
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 5a00022383e7..b1cfa52ae223 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -31,6 +31,19 @@
         reg = <0x20204 0x04>, <0x20214 0x04>;
     };
 
+    core_clk: core-clocks@d0214 {
+        compatible = "marvell,dove-core-clock";
+        reg = <0xd0214 0x4>;
+        #clock-cells = <1>;
+    };
+
+    gate_clk: clock-gating-control@d0038 {
+        compatible = "marvell,dove-gating-clock";
+        reg = <0xd0038 0x4>;
+        clocks = <&core_clk 0>;
+        #clock-cells = <1>;
+    };
+
     uart0: serial@12000 {
         compatible = "ns16550a";
         reg = <0x12000 0x100>;
@@ -100,6 +113,7 @@
         cell-index = <0>;
         interrupts = <6>;
         reg = <0x10600 0x28>;
+        clocks = <&core_clk 0>;
         status = "disabled";
     };
 
@@ -110,6 +124,7 @@
         cell-index = <1>;
         interrupts = <5>;
         reg = <0x14600 0x28>;
+        clocks = <&core_clk 0>;
         status = "disabled";
     };
 
@@ -121,6 +136,7 @@
         interrupts = <11>;
         clock-frequency = <400000>;
         timeout-ms = <1000>;
+        clocks = <&core_clk 0>;
         status = "disabled";
     };
 
@@ -128,6 +144,7 @@
         compatible = "marvell,dove-sdhci";
         reg = <0x92000 0x100>;
         interrupts = <35>, <37>;
+        clocks = <&gate_clk 8>;
         status = "disabled";
     };
 
@@ -135,6 +152,7 @@
         compatible = "marvell,dove-sdhci";
         reg = <0x90000 0x100>;
         interrupts = <36>, <38>;
+        clocks = <&gate_clk 9>;
         status = "disabled";
     };
 
@@ -142,6 +160,7 @@
         compatible = "marvell,orion-sata";
         reg = <0xa0000 0x2400>;
         interrupts = <62>;
+        clocks = <&gate_clk 3>;
         nr-ports = <1>;
         status = "disabled";
     };
@@ -152,7 +171,50 @@
               <0xc8000000 0x800>;
         reg-names = "regs", "sram";
         interrupts = <31>;
+        clocks = <&gate_clk 15>;
+        status = "okay";
+    };
+
+    xor0: dma-engine@60800 {
+        compatible = "marvell,orion-xor";
+        reg = <0x60800 0x100
+               0x60a00 0x100>;
+        clocks = <&gate_clk 23>;
         status = "okay";
+
+        channel0 {
+            interrupts = <39>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+
+        channel1 {
+            interrupts = <40>;
+            dmacap,memset;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+    };
+
+    xor1: dma-engine@60900 {
+        compatible = "marvell,orion-xor";
+        reg = <0x60900 0x100
+               0x60b00 0x100>;
+        clocks = <&gate_clk 24>;
+        status = "okay";
+
+        channel0 {
+            interrupts = <42>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+
+        channel1 {
+            interrupts = <43>;
+            dmacap,memset;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
     };
     };
 };
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 4e5b8154a5be..2388f9936ebf 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -19,6 +19,12 @@
     #address-cells = <1>;
     #size-cells = <1>;
 
+    core_clk: core-clocks@10030 {
+        compatible = "marvell,kirkwood-core-clock";
+        reg = <0x10030 0x4>;
+        #clock-cells = <1>;
+    };
+
     gpio0: gpio@10100 {
         compatible = "marvell,orion-gpio";
         #gpio-cells = <2>;
@@ -42,6 +48,7 @@
         reg = <0x12000 0x100>;
         reg-shift = <2>;
         interrupts = <33>;
+        clocks = <&gate_clk 7>;
         /* set clock-frequency in board dts */
         status = "disabled";
     };
@@ -51,6 +58,7 @@
         reg = <0x12100 0x100>;
         reg-shift = <2>;
         interrupts = <34>;
+        clocks = <&gate_clk 7>;
         /* set clock-frequency in board dts */
         status = "disabled";
     };
@@ -68,19 +76,70 @@
         cell-index = <0>;
         interrupts = <23>;
         reg = <0x10600 0x28>;
+        clocks = <&gate_clk 7>;
         status = "disabled";
     };
 
+    gate_clk: clock-gating-control@2011c {
+        compatible = "marvell,kirkwood-gating-clock";
+        reg = <0x2011c 0x4>;
+        clocks = <&core_clk 0>;
+        #clock-cells = <1>;
+    };
+
     wdt@20300 {
         compatible = "marvell,orion-wdt";
         reg = <0x20300 0x28>;
+        clocks = <&gate_clk 7>;
+        status = "okay";
+    };
+
+    xor@60800 {
+        compatible = "marvell,orion-xor";
+        reg = <0x60800 0x100
+               0x60A00 0x100>;
+        status = "okay";
+        clocks = <&gate_clk 8>;
+
+        xor00 {
+            interrupts = <5>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+        xor01 {
+            interrupts = <6>;
+            dmacap,memcpy;
+            dmacap,xor;
+            dmacap,memset;
+        };
+    };
+
+    xor@60900 {
+        compatible = "marvell,orion-xor";
+        reg = <0x60900 0x100
+               0xd0B00 0x100>;
         status = "okay";
+        clocks = <&gate_clk 16>;
+
+        xor00 {
+            interrupts = <7>;
+            dmacap,memcpy;
+            dmacap,xor;
+        };
+        xor01 {
+            interrupts = <8>;
+            dmacap,memcpy;
+            dmacap,xor;
+            dmacap,memset;
+        };
     };
 
     sata@80000 {
         compatible = "marvell,orion-sata";
         reg = <0x80000 0x5000>;
         interrupts = <21>;
+        clocks = <&gate_clk 14>, <&gate_clk 15>;
+        clock-names = "0", "1";
         status = "disabled";
     };
 
@@ -94,6 +153,7 @@
         reg = <0x3000000 0x400>;
         chip-delay = <25>;
         /* set partition map and/or chip-delay in board dts */
+        clocks = <&gate_clk 7>;
         status = "disabled";
     };
 
@@ -104,6 +164,7 @@
         #size-cells = <0>;
         interrupts = <29>;
         clock-frequency = <100000>;
+        clocks = <&gate_clk 7>;
         status = "disabled";
     };
 
@@ -113,6 +174,7 @@
               <0xf5000000 0x800>;
         reg-names = "regs", "sram";
         interrupts = <22>;
+        clocks = <&gate_clk 17>;
         status = "okay";
     };
     };
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 159f75fc4377..dbea6f4efe9f 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -17,8 +17,10 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_NET=y
+CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_SATA_HIGHBANK=y
+CONFIG_SATA_MV=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_SMSC911X=y
diff --git a/arch/arm/configs/mvebu_defconfig b/arch/arm/configs/mvebu_defconfig
index 3458752c4bb2..a702fb345c01 100644
--- a/arch/arm/configs/mvebu_defconfig
+++ b/arch/arm/configs/mvebu_defconfig
@@ -12,6 +12,9 @@ CONFIG_ARCH_MVEBU=y
 CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_XP=y
 # CONFIG_CACHE_L2X0 is not set
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_LOCAL_TIMERS is not set
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
 # CONFIG_COMPACTION is not set
@@ -19,13 +22,27 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
+CONFIG_NET=y
+CONFIG_INET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_MV=y
+CONFIG_NETDEVICES=y
+CONFIG_MVNETA=y
+CONFIG_MARVELL_PHY=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_I2C=y
+CONFIG_I2C_MV64XXX=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_S35390A=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 23004847bb05..98d4dabb2c10 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -111,6 +111,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 
 extern int dma_supported(struct device *dev, u64 mask);
 
+extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
 /**
  * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/mach-dove/Kconfig b/arch/arm/mach-dove/Kconfig
index 00154e74ce6b..603c5fd99e8a 100644
--- a/arch/arm/mach-dove/Kconfig
+++ b/arch/arm/mach-dove/Kconfig
@@ -17,6 +17,8 @@ config MACH_CM_A510
 
 config MACH_DOVE_DT
     bool "Marvell Dove Flattened Device Tree"
+    select MVEBU_CLK_CORE
+    select MVEBU_CLK_GATING
     select USE_OF
     help
       Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f723fe13d0f0..89f4f993cd03 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/pci.h>
 #include <linux/clk-provider.h>
+#include <linux/clk/mvebu.h>
 #include <linux/ata_platform.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
@@ -32,6 +33,7 @@
 #include <linux/irq.h>
 #include <plat/time.h>
 #include <linux/platform_data/usb-ehci-orion.h>
+#include <linux/platform_data/dma-mv_xor.h>
 #include <plat/irq.h>
 #include <plat/common.h>
 #include <plat/addr-map.h>
@@ -123,8 +125,8 @@ static void __init dove_clk_init(void)
 	orion_clkdev_add(NULL, "mv_crypto", crypto);
 	orion_clkdev_add(NULL, "dove-ac97", ac97);
 	orion_clkdev_add(NULL, "dove-pdma", pdma);
-	orion_clkdev_add(NULL, "mv_xor_shared.0", xor0);
-	orion_clkdev_add(NULL, "mv_xor_shared.1", xor1);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
+	orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
 }
 
 /*****************************************************************************
@@ -376,19 +378,44 @@ void dove_restart(char mode, const char *cmd)
 
 #if defined(CONFIG_MACH_DOVE_DT)
 /*
- * Auxdata required until real OF clock provider
+ * There are still devices that doesn't even know about DT,
+ * get clock gates here and add a clock lookup.
  */
-struct of_dev_auxdata dove_auxdata_lookup[] __initdata = {
-	OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL),
-	OF_DEV_AUXDATA("marvell,orion-spi", 0xf1014600, "orion_spi.1", NULL),
-	OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL),
-	OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0",
-		       NULL),
-	OF_DEV_AUXDATA("marvell,orion-sata", 0xf10a0000, "sata_mv.0", NULL),
-	OF_DEV_AUXDATA("marvell,dove-sdhci", 0xf1092000, "sdhci-dove.0", NULL),
-	OF_DEV_AUXDATA("marvell,dove-sdhci", 0xf1090000, "sdhci-dove.1", NULL),
-	{},
-};
+static void __init dove_legacy_clk_init(void)
+{
+	struct device_node *np = of_find_compatible_node(NULL, NULL,
+					"marvell,dove-gating-clock");
+	struct of_phandle_args clkspec;
+
+	clkspec.np = np;
+	clkspec.args_count = 1;
+
+	clkspec.args[0] = CLOCK_GATING_BIT_USB0;
+	orion_clkdev_add(NULL, "orion-ehci.0",
+			 of_clk_get_from_provider(&clkspec));
+
+	clkspec.args[0] = CLOCK_GATING_BIT_USB1;
+	orion_clkdev_add(NULL, "orion-ehci.1",
+			 of_clk_get_from_provider(&clkspec));
+
+	clkspec.args[0] = CLOCK_GATING_BIT_GBE;
+	orion_clkdev_add(NULL, "mv643xx_eth_port.0",
+			 of_clk_get_from_provider(&clkspec));
+
+	clkspec.args[0] = CLOCK_GATING_BIT_PCIE0;
+	orion_clkdev_add("0", "pcie",
+			 of_clk_get_from_provider(&clkspec));
+
+	clkspec.args[0] = CLOCK_GATING_BIT_PCIE1;
+	orion_clkdev_add("1", "pcie",
+			 of_clk_get_from_provider(&clkspec));
+}
+
+static void __init dove_of_clk_init(void)
+{
+	mvebu_clocks_init();
+	dove_legacy_clk_init();
+}
 
 static struct mv643xx_eth_platform_data dove_dt_ge00_data = {
 	.phy_addr = MV643XX_ETH_PHY_ADDR_DEFAULT,
@@ -405,20 +432,17 @@ static void __init dove_dt_init(void)
 	dove_setup_cpu_mbus();
 
 	/* Setup root of clk tree */
-	dove_clk_init();
+	dove_of_clk_init();
 
 	/* Internal devices not ported to DT yet */
 	dove_rtc_init();
-	dove_xor0_init();
-	dove_xor1_init();
 
 	dove_ge00_init(&dove_dt_ge00_data);
 	dove_ehci0_init();
 	dove_ehci1_init();
 	dove_pcie_init(1, 1);
 
-	of_platform_populate(NULL, of_default_bus_match_table,
-			     dove_auxdata_lookup, NULL);
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
 static const char * const dove_dt_board_compat[] = {
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index 50bca5032b7e..2833492eb273 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -46,6 +46,8 @@ config MACH_GURUPLUG
 
 config ARCH_KIRKWOOD_DT
     bool "Marvell Kirkwood Flattened Device Tree"
+    select MVEBU_CLK_CORE
+    select MVEBU_CLK_GATING
     select USE_OF
     help
       Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index d94872fed8c0..9826904277b8 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -14,11 +14,15 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/clk-provider.h>
18#include <linux/clk/mvebu.h>
17#include <linux/kexec.h> 19#include <linux/kexec.h>
18#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
19#include <asm/mach/map.h> 21#include <asm/mach/map.h>
20#include <mach/bridge-regs.h> 22#include <mach/bridge-regs.h>
23#include <linux/platform_data/usb-ehci-orion.h>
21#include <plat/irq.h> 24#include <plat/irq.h>
25#include <plat/common.h>
22#include "common.h" 26#include "common.h"
23 27
24static struct of_device_id kirkwood_dt_match_table[] __initdata = { 28static struct of_device_id kirkwood_dt_match_table[] __initdata = {
@@ -26,16 +30,50 @@ static struct of_device_id kirkwood_dt_match_table[] __initdata = {
26 { } 30 { }
27}; 31};
28 32
29struct of_dev_auxdata kirkwood_auxdata_lookup[] __initdata = { 33/*
30 OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL), 34 * There are still devices that doesn't know about DT yet. Get clock
31 OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0", 35 * gates here and add a clock lookup alias, so that old platform
32 NULL), 36 * devices still work.
33 OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL), 37*/
34 OF_DEV_AUXDATA("marvell,orion-sata", 0xf1080000, "sata_mv.0", NULL), 38
35 OF_DEV_AUXDATA("marvell,orion-nand", 0xf4000000, "orion_nand", NULL), 39static void __init kirkwood_legacy_clk_init(void)
36 OF_DEV_AUXDATA("marvell,orion-crypto", 0xf1030000, "mv_crypto", NULL), 40{
37 {}, 41
38}; 42 struct device_node *np = of_find_compatible_node(
43 NULL, NULL, "marvell,kirkwood-gating-clock");
44
45 struct of_phandle_args clkspec;
46
47 clkspec.np = np;
48 clkspec.args_count = 1;
49
50 clkspec.args[0] = CGC_BIT_GE0;
51 orion_clkdev_add(NULL, "mv643xx_eth_port.0",
52 of_clk_get_from_provider(&clkspec));
53
54 clkspec.args[0] = CGC_BIT_PEX0;
55 orion_clkdev_add("0", "pcie",
56 of_clk_get_from_provider(&clkspec));
57
58 clkspec.args[0] = CGC_BIT_USB0;
59 orion_clkdev_add(NULL, "orion-ehci.0",
60 of_clk_get_from_provider(&clkspec));
61
62 clkspec.args[0] = CGC_BIT_PEX1;
63 orion_clkdev_add("1", "pcie",
64 of_clk_get_from_provider(&clkspec));
65
66 clkspec.args[0] = CGC_BIT_GE1;
67 orion_clkdev_add(NULL, "mv643xx_eth_port.1",
68 of_clk_get_from_provider(&clkspec));
69
70}
71
72static void __init kirkwood_of_clk_init(void)
73{
74 mvebu_clocks_init();
75 kirkwood_legacy_clk_init();
76}
39 77
40static void __init kirkwood_dt_init(void) 78static void __init kirkwood_dt_init(void)
41{ 79{
@@ -54,11 +92,7 @@ static void __init kirkwood_dt_init(void)
54 kirkwood_l2_init(); 92 kirkwood_l2_init();
55 93
56 /* Setup root of clk tree */ 94 /* Setup root of clk tree */
57 kirkwood_clk_init(); 95 kirkwood_of_clk_init();
58
59 /* internal devices that every board has */
60 kirkwood_xor0_init();
61 kirkwood_xor1_init();
62 96
63#ifdef CONFIG_KEXEC 97#ifdef CONFIG_KEXEC
64 kexec_reinit = kirkwood_enable_pcie; 98 kexec_reinit = kirkwood_enable_pcie;
@@ -94,8 +128,7 @@ static void __init kirkwood_dt_init(void)
94 if (of_machine_is_compatible("keymile,km_kirkwood")) 128 if (of_machine_is_compatible("keymile,km_kirkwood"))
95 km_kirkwood_init(); 129 km_kirkwood_init();
96 130
97 of_platform_populate(NULL, kirkwood_dt_match_table, 131 of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
98 kirkwood_auxdata_lookup, NULL);
99} 132}
100 133
101static const char *kirkwood_dt_board_compat[] = { 134static const char *kirkwood_dt_board_compat[] = {
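
The aliases registered above exist purely for legacy consumers: a driver that knows nothing about DT still resolves its gate through clkdev, matched on dev_name(). A minimal sketch of that consumer side, with 'pdev' standing in for one of the legacy platform devices:

        struct clk *clk = clk_get(&pdev->dev, NULL);    /* matched via the clkdev dev_id */
        if (!IS_ERR(clk))
                clk_prepare_enable(clk);
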
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 2c6c218fb79e..401dac1a8d80 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -260,8 +260,8 @@ void __init kirkwood_clk_init(void)
260 orion_clkdev_add(NULL, "orion_nand", runit); 260 orion_clkdev_add(NULL, "orion_nand", runit);
261 orion_clkdev_add(NULL, "mvsdio", sdio); 261 orion_clkdev_add(NULL, "mvsdio", sdio);
262 orion_clkdev_add(NULL, "mv_crypto", crypto); 262 orion_clkdev_add(NULL, "mv_crypto", crypto);
263 orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0); 263 orion_clkdev_add(NULL, MV_XOR_NAME ".0", xor0);
264 orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1); 264 orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
265 orion_clkdev_add("0", "pcie", pex0); 265 orion_clkdev_add("0", "pcie", pex0);
266 orion_clkdev_add("1", "pcie", pex1); 266 orion_clkdev_add("1", "pcie", pex1);
267 orion_clkdev_add(NULL, "kirkwood-i2s", audio); 267 orion_clkdev_add(NULL, "kirkwood-i2s", audio);
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 416d46ef7ebd..c934e1d4933d 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -9,6 +9,10 @@ config ARCH_MVEBU
9 select PINCTRL 9 select PINCTRL
10 select PLAT_ORION 10 select PLAT_ORION
11 select SPARSE_IRQ 11 select SPARSE_IRQ
12 select CLKDEV_LOOKUP
13 select MVEBU_CLK_CORE
14 select MVEBU_CLK_CPU
15 select MVEBU_CLK_GATING
12 16
13if ARCH_MVEBU 17if ARCH_MVEBU
14 18
@@ -17,7 +21,8 @@ menu "Marvell SOC with device tree"
17config MACH_ARMADA_370_XP 21config MACH_ARMADA_370_XP
18 bool 22 bool
19 select ARMADA_370_XP_TIMER 23 select ARMADA_370_XP_TIMER
20 select CPU_V7 24 select HAVE_SMP
25 select CPU_PJ4B
21 26
22config MACH_ARMADA_370 27config MACH_ARMADA_370
23 bool "Marvell Armada 370 boards" 28 bool "Marvell Armada 370 boards"
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 57f996b6aa0e..5dcb369b58aa 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -2,4 +2,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
2 -I$(srctree)/arch/arm/plat-orion/include 2 -I$(srctree)/arch/arm/plat-orion/include
3 3
4obj-y += system-controller.o 4obj-y += system-controller.o
5obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o 5obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o
6obj-$(CONFIG_SMP) += platsmp.o headsmp.o
7obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-mvebu/addr-map.c b/arch/arm/mach-mvebu/addr-map.c
index fe454a4430be..ab9b3bd4fef5 100644
--- a/arch/arm/mach-mvebu/addr-map.c
+++ b/arch/arm/mach-mvebu/addr-map.c
@@ -78,7 +78,7 @@ armada_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
78 if (win < 8) 78 if (win < 8)
79 offset = (win << 4); 79 offset = (win << 4);
80 else 80 else
81 offset = ARMADA_WINDOW_8_PLUS_OFFSET + (win << 3); 81 offset = ARMADA_WINDOW_8_PLUS_OFFSET + ((win - 8) << 3);
82 82
83 return cfg->bridge_virt_base + offset; 83 return cfg->bridge_virt_base + offset;
84} 84}
@@ -108,6 +108,9 @@ static int __init armada_setup_cpu_mbus(void)
108 108
109 addr_map_cfg.bridge_virt_base = mbus_unit_addr_decoding_base; 109 addr_map_cfg.bridge_virt_base = mbus_unit_addr_decoding_base;
110 110
111 if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
112 addr_map_cfg.hw_io_coherency = 1;
113
111 /* 114 /*
112 * Disable, clear and configure windows. 115 * Disable, clear and configure windows.
113 */ 116 */
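
Worked example of the armada_cfg_base() fix above, as offsets relative to ARMADA_WINDOW_8_PLUS_OFFSET:

        /* win   before: (win << 3)   after: ((win - 8) << 3)
         *  8        +0x40                 +0x00
         *  9        +0x48                 +0x08
         */

so the first extended window no longer lands eight register slots past the start of the extended-window region.
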
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 49d791548ad6..7434b5e36197 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -17,11 +17,14 @@
17#include <linux/of_platform.h> 17#include <linux/of_platform.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/time-armada-370-xp.h> 19#include <linux/time-armada-370-xp.h>
20#include <linux/clk/mvebu.h>
21#include <linux/dma-mapping.h>
20#include <asm/mach/arch.h> 22#include <asm/mach/arch.h>
21#include <asm/mach/map.h> 23#include <asm/mach/map.h>
22#include <asm/mach/time.h> 24#include <asm/mach/time.h>
23#include "armada-370-xp.h" 25#include "armada-370-xp.h"
24#include "common.h" 26#include "common.h"
27#include "coherency.h"
25 28
26static struct map_desc armada_370_xp_io_desc[] __initdata = { 29static struct map_desc armada_370_xp_io_desc[] __initdata = {
27 { 30 {
@@ -37,27 +40,45 @@ void __init armada_370_xp_map_io(void)
37 iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc)); 40 iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc));
38} 41}
39 42
43void __init armada_370_xp_timer_and_clk_init(void)
44{
45 mvebu_clocks_init();
46 armada_370_xp_timer_init();
47}
48
49void __init armada_370_xp_init_early(void)
50{
51 /*
52 * Some Armada 370/XP devices allocate their coherent buffers
 53 * from atomic context. Increase the size of the atomic coherent pool
 54 * to make sure such allocations won't fail.
55 */
56 init_dma_coherent_pool_size(SZ_1M);
57}
58
40struct sys_timer armada_370_xp_timer = { 59struct sys_timer armada_370_xp_timer = {
41 .init = armada_370_xp_timer_init, 60 .init = armada_370_xp_timer_and_clk_init,
42}; 61};
43 62
44static void __init armada_370_xp_dt_init(void) 63static void __init armada_370_xp_dt_init(void)
45{ 64{
46 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 65 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
66 coherency_init();
47} 67}
48 68
49static const char * const armada_370_xp_dt_board_dt_compat[] = { 69static const char * const armada_370_xp_dt_compat[] = {
50 "marvell,a370-db", 70 "marvell,armada-370-xp",
51 "marvell,axp-db",
52 NULL, 71 NULL,
53}; 72};
54 73
55DT_MACHINE_START(ARMADA_XP_DT, "Marvell Aramada 370/XP (Device Tree)") 74DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 370/XP (Device Tree)")
75 .smp = smp_ops(armada_xp_smp_ops),
56 .init_machine = armada_370_xp_dt_init, 76 .init_machine = armada_370_xp_dt_init,
57 .map_io = armada_370_xp_map_io, 77 .map_io = armada_370_xp_map_io,
78 .init_early = armada_370_xp_init_early,
58 .init_irq = armada_370_xp_init_irq, 79 .init_irq = armada_370_xp_init_irq,
59 .handle_irq = armada_370_xp_handle_irq, 80 .handle_irq = armada_370_xp_handle_irq,
60 .timer = &armada_370_xp_timer, 81 .timer = &armada_370_xp_timer,
61 .restart = mvebu_restart, 82 .restart = mvebu_restart,
62 .dt_compat = armada_370_xp_dt_board_dt_compat, 83 .dt_compat = armada_370_xp_dt_compat,
63MACHINE_END 84MACHINE_END
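
A sketch of the kind of allocation the init_early comment refers to; with GFP_ATOMIC the buffer must come out of the atomic coherent pool that armada_370_xp_init_early() enlarges ('dev' and 'size' are illustrative):

        dma_addr_t dma;
        void *buf = dma_alloc_coherent(dev, size, &dma, GFP_ATOMIC);
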
diff --git a/arch/arm/mach-mvebu/armada-370-xp.h b/arch/arm/mach-mvebu/armada-370-xp.h
index aac9bebc6b03..c6a7d74fddfe 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.h
+++ b/arch/arm/mach-mvebu/armada-370-xp.h
@@ -19,4 +19,11 @@
19#define ARMADA_370_XP_REGS_VIRT_BASE IOMEM(0xfeb00000) 19#define ARMADA_370_XP_REGS_VIRT_BASE IOMEM(0xfeb00000)
20#define ARMADA_370_XP_REGS_SIZE SZ_1M 20#define ARMADA_370_XP_REGS_SIZE SZ_1M
21 21
22#ifdef CONFIG_SMP
23#include <linux/cpumask.h>
24
25void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq);
26void armada_xp_mpic_smp_cpu_init(void);
27#endif
28
22#endif /* __MACH_ARMADA_370_XP_H */ 29#endif /* __MACH_ARMADA_370_XP_H */
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
new file mode 100644
index 000000000000..8278960066c3
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -0,0 +1,155 @@
1/*
2 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Yehuda Yitschak <yehuday@marvell.com>
7 * Gregory Clement <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
15 * responsible for ensuring hardware coherency between all CPUs and between
16 * CPUs and I/O masters. This file initializes the coherency fabric and
17 * supplies basic routines for configuring and controlling hardware coherency
18 */
19
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/of_address.h>
23#include <linux/io.h>
24#include <linux/smp.h>
25#include <linux/dma-mapping.h>
26#include <linux/platform_device.h>
27#include <asm/smp_plat.h>
28#include "armada-370-xp.h"
29
30/*
31 * Some functions in this file are called very early during SMP
32 * initialization. At that time the device tree framework is not yet
33 * ready, and it is not possible to get the register address to
34 * ioremap it. That's why the pointer below is given with an initial
35 * value matching its virtual mapping
36 */
37static void __iomem *coherency_base = ARMADA_370_XP_REGS_VIRT_BASE + 0x20200;
38static void __iomem *coherency_cpu_base;
39
40/* Coherency fabric registers */
41#define COHERENCY_FABRIC_CFG_OFFSET 0x4
42
43#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
44
45static struct of_device_id of_coherency_table[] = {
46 {.compatible = "marvell,coherency-fabric"},
47 { /* end of list */ },
48};
49
50#ifdef CONFIG_SMP
51int coherency_get_cpu_count(void)
52{
53 int reg, cnt;
54
55 reg = readl(coherency_base + COHERENCY_FABRIC_CFG_OFFSET);
56 cnt = (reg & 0xF) + 1;
57
58 return cnt;
59}
60#endif
61
62/* Function defined in coherency_ll.S */
63int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
64
65int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
66{
67 if (!coherency_base) {
68 pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
69 pr_warn("Coherency fabric is not initialized\n");
70 return 1;
71 }
72
73 return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
74}
75
76static inline void mvebu_hwcc_sync_io_barrier(void)
77{
78 writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
79 while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
80}
81
82static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
83 unsigned long offset, size_t size,
84 enum dma_data_direction dir,
85 struct dma_attrs *attrs)
86{
87 if (dir != DMA_TO_DEVICE)
88 mvebu_hwcc_sync_io_barrier();
89 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
90}
91
92
93static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
94 size_t size, enum dma_data_direction dir,
95 struct dma_attrs *attrs)
96{
97 if (dir != DMA_TO_DEVICE)
98 mvebu_hwcc_sync_io_barrier();
99}
100
101static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
102 size_t size, enum dma_data_direction dir)
103{
104 if (dir != DMA_TO_DEVICE)
105 mvebu_hwcc_sync_io_barrier();
106}
107
108static struct dma_map_ops mvebu_hwcc_dma_ops = {
109 .alloc = arm_dma_alloc,
110 .free = arm_dma_free,
111 .mmap = arm_dma_mmap,
112 .map_page = mvebu_hwcc_dma_map_page,
113 .unmap_page = mvebu_hwcc_dma_unmap_page,
114 .get_sgtable = arm_dma_get_sgtable,
115 .map_sg = arm_dma_map_sg,
116 .unmap_sg = arm_dma_unmap_sg,
117 .sync_single_for_cpu = mvebu_hwcc_dma_sync,
118 .sync_single_for_device = mvebu_hwcc_dma_sync,
119 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
120 .sync_sg_for_device = arm_dma_sync_sg_for_device,
121 .set_dma_mask = arm_dma_set_mask,
122};
123
124static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
125 unsigned long event, void *__dev)
126{
127 struct device *dev = __dev;
128
129 if (event != BUS_NOTIFY_ADD_DEVICE)
130 return NOTIFY_DONE;
131 set_dma_ops(dev, &mvebu_hwcc_dma_ops);
132
133 return NOTIFY_OK;
134}
135
136static struct notifier_block mvebu_hwcc_platform_nb = {
137 .notifier_call = mvebu_hwcc_platform_notifier,
138};
139
140int __init coherency_init(void)
141{
142 struct device_node *np;
143
144 np = of_find_matching_node(NULL, of_coherency_table);
145 if (np) {
146 pr_info("Initializing Coherency fabric\n");
147 coherency_base = of_iomap(np, 0);
148 coherency_cpu_base = of_iomap(np, 1);
149 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
150 bus_register_notifier(&platform_bus_type,
151 &mvebu_hwcc_platform_nb);
152 }
153
154 return 0;
155}
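
Once the bus notifier above has installed mvebu_hwcc_dma_ops on a platform device, streaming DMA degenerates to an I/O sync barrier instead of cache maintenance. A sketch of what a driver then pays for a device-to-memory transfer ('dev', 'buf' and 'len' are illustrative):

        dma_addr_t h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE); /* barrier only */
        /* ... device DMAs into buf ... */
        dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);                /* barrier only */
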
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
new file mode 100644
index 000000000000..2f428137f6fe
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency.h
@@ -0,0 +1,24 @@
1/*
 2 * arch/arm/mach-mvebu/coherency.h
3 *
4 *
5 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
6 *
7 * Copyright (C) 2012 Marvell
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_370_XP_COHERENCY_H
15#define __MACH_370_XP_COHERENCY_H
16
17#ifdef CONFIG_SMP
18int coherency_get_cpu_count(void);
19#endif
20
 21int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id);
22int coherency_init(void);
23
24#endif /* __MACH_370_XP_COHERENCY_H */
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
new file mode 100644
index 000000000000..53e8391192cd
--- /dev/null
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -0,0 +1,49 @@
1/*
2 * Coherency fabric: low level functions
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 *
12 * This file implements the assembly function to add a CPU to the
13 * coherency fabric. This function is called by each of the secondary
 14 * CPUs during their early boot in an SMP kernel, which is why this
 15 * function has to be callable from assembly. It can also be called by a
16 * primary CPU from C code during its boot.
17 */
18
19#include <linux/linkage.h>
20#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
21#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
22
23 .text
24/*
25 * r0: Coherency fabric base register address
26 * r1: HW CPU id
27 */
28ENTRY(ll_set_cpu_coherent)
29 /* Create bit by cpu index */
30 mov r3, #(1 << 24)
31 lsl r1, r3, r1
32
33 /* Add CPU to SMP group - Atomic */
34 add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
35 ldr r2, [r3]
36 orr r2, r2, r1
37 str r2, [r3]
38
39 /* Enable coherency on CPU - Atomic */
40 add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET
41 ldr r2, [r3]
42 orr r2, r2, r1
43 str r2, [r3]
44
45 dsb
46
47 mov r0, #0
48 mov pc, lr
49ENDPROC(ll_set_cpu_coherent)
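
A C rendering of what ll_set_cpu_coherent() does, for readability only; the real routine must stay in assembly so secondary CPUs can call it before the MMU and a stack are set up:

        static int ll_set_cpu_coherent_sketch(void __iomem *base, unsigned int hw_cpu)
        {
                u32 bit = 1 << (24 + hw_cpu);   /* per-CPU bit, starting at bit 24 */

                writel(readl(base + ARMADA_XP_CFB_CTL_REG_OFFSET) | bit,
                       base + ARMADA_XP_CFB_CTL_REG_OFFSET);    /* join SMP group */
                writel(readl(base + ARMADA_XP_CFB_CFG_REG_OFFSET) | bit,
                       base + ARMADA_XP_CFB_CFG_REG_OFFSET);    /* enable coherency */
                dsb();
                return 0;
        }
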
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
index 02f89eaa25fe..aa27bc2ffb60 100644
--- a/arch/arm/mach-mvebu/common.h
+++ b/arch/arm/mach-mvebu/common.h
@@ -20,4 +20,9 @@ void mvebu_restart(char mode, const char *cmd);
20void armada_370_xp_init_irq(void); 20void armada_370_xp_init_irq(void);
21void armada_370_xp_handle_irq(struct pt_regs *regs); 21void armada_370_xp_handle_irq(struct pt_regs *regs);
22 22
23void armada_xp_cpu_die(unsigned int cpu);
24int armada_370_xp_coherency_init(void);
25int armada_370_xp_pmsu_init(void);
26void armada_xp_secondary_startup(void);
27extern struct smp_operations armada_xp_smp_ops;
23#endif 28#endif
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
new file mode 100644
index 000000000000..a06e0ede8c08
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -0,0 +1,49 @@
1/*
2 * SMP support: Entry point for secondary CPUs
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Yehuda Yitschak <yehuday@marvell.com>
7 * Gregory CLEMENT <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * This file implements the assembly entry point for secondary CPUs in
15 * an SMP kernel. The only thing we need to do is to add the CPU to
16 * the coherency fabric by writing to 2 registers. Currently the base
 17 * register addresses are hard coded due to early initialisation
18 * problems.
19 */
20
21#include <linux/linkage.h>
22#include <linux/init.h>
23
24/*
 25 * At this stage the secondary CPUs don't have access yet to the MMU, so
26 * we have to provide physical addresses
27 */
28#define ARMADA_XP_CFB_BASE 0xD0020200
29
30 __CPUINIT
31
32/*
33 * Armada XP specific entry point for secondary CPUs.
34 * We add the CPU to the coherency fabric and then jump to secondary
35 * startup
36 */
37ENTRY(armada_xp_secondary_startup)
38
39 /* Read CPU id */
40 mrc p15, 0, r1, c0, c0, 5
41 and r1, r1, #0xF
42
43 /* Add CPU to coherency fabric */
44 ldr r0, =ARMADA_XP_CFB_BASE
45
46 bl ll_set_cpu_coherent
47 b secondary_startup
48
49ENDPROC(armada_xp_secondary_startup)
diff --git a/arch/arm/mach-mvebu/hotplug.c b/arch/arm/mach-mvebu/hotplug.c
new file mode 100644
index 000000000000..b228b6a80c85
--- /dev/null
+++ b/arch/arm/mach-mvebu/hotplug.c
@@ -0,0 +1,30 @@
1/*
2 * Symmetric Multi Processing (SMP) support for Armada XP
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Lior Amsalem <alior@marvell.com>
7 * Gregory CLEMENT <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 */
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/smp.h>
17#include <asm/proc-fns.h>
18
19/*
20 * platform-specific code to shutdown a CPU
21 *
22 * Called with IRQs disabled
23 */
24void __ref armada_xp_cpu_die(unsigned int cpu)
25{
26 cpu_do_idle();
27
28 /* We should never return from idle */
 29 panic("mvebu: cpu %d unexpectedly exited from shutdown\n", cpu);
30}
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 5f5f9394b6b2..549b6846f940 100644
--- a/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -24,6 +24,7 @@
24#include <linux/irqdomain.h> 24#include <linux/irqdomain.h>
25#include <asm/mach/arch.h> 25#include <asm/mach/arch.h>
26#include <asm/exception.h> 26#include <asm/exception.h>
27#include <asm/smp_plat.h>
27 28
28/* Interrupt Controller Registers Map */ 29/* Interrupt Controller Registers Map */
29#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) 30#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
@@ -35,6 +36,12 @@
35 36
36#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 37#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
37 38
39#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
40#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
41#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
42
43#define ACTIVE_DOORBELLS (8)
44
38static void __iomem *per_cpu_int_base; 45static void __iomem *per_cpu_int_base;
39static void __iomem *main_int_base; 46static void __iomem *main_int_base;
40static struct irq_domain *armada_370_xp_mpic_domain; 47static struct irq_domain *armada_370_xp_mpic_domain;
@@ -51,11 +58,22 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
51 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 58 per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
52} 59}
53 60
61#ifdef CONFIG_SMP
62static int armada_xp_set_affinity(struct irq_data *d,
63 const struct cpumask *mask_val, bool force)
64{
65 return 0;
66}
67#endif
68
54static struct irq_chip armada_370_xp_irq_chip = { 69static struct irq_chip armada_370_xp_irq_chip = {
55 .name = "armada_370_xp_irq", 70 .name = "armada_370_xp_irq",
56 .irq_mask = armada_370_xp_irq_mask, 71 .irq_mask = armada_370_xp_irq_mask,
57 .irq_mask_ack = armada_370_xp_irq_mask, 72 .irq_mask_ack = armada_370_xp_irq_mask,
58 .irq_unmask = armada_370_xp_irq_unmask, 73 .irq_unmask = armada_370_xp_irq_unmask,
74#ifdef CONFIG_SMP
75 .irq_set_affinity = armada_xp_set_affinity,
76#endif
59}; 77};
60 78
61static int armada_370_xp_mpic_irq_map(struct irq_domain *h, 79static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
@@ -72,6 +90,41 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
72 return 0; 90 return 0;
73} 91}
74 92
93#ifdef CONFIG_SMP
94void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
95{
96 int cpu;
97 unsigned long map = 0;
98
99 /* Convert our logical CPU mask into a physical one. */
100 for_each_cpu(cpu, mask)
101 map |= 1 << cpu_logical_map(cpu);
102
103 /*
104 * Ensure that stores to Normal memory are visible to the
105 * other CPUs before issuing the IPI.
106 */
107 dsb();
108
 109 /* trigger the doorbell IPI */
110 writel((map << 8) | irq, main_int_base +
111 ARMADA_370_XP_SW_TRIG_INT_OFFS);
112}
113
114void armada_xp_mpic_smp_cpu_init(void)
115{
116 /* Clear pending IPIs */
117 writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
118
119 /* Enable first 8 IPIs */
120 writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
121 ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
122
123 /* Unmask IPI interrupt */
124 writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
125}
126#endif /* CONFIG_SMP */
127
75static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { 128static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
76 .map = armada_370_xp_mpic_irq_map, 129 .map = armada_370_xp_mpic_irq_map,
77 .xlate = irq_domain_xlate_onecell, 130 .xlate = irq_domain_xlate_onecell,
@@ -91,13 +144,18 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
91 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); 144 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
92 145
93 armada_370_xp_mpic_domain = 146 armada_370_xp_mpic_domain =
94 irq_domain_add_linear(node, (control >> 2) & 0x3ff, 147 irq_domain_add_linear(node, (control >> 2) & 0x3ff,
95 &armada_370_xp_mpic_irq_ops, NULL); 148 &armada_370_xp_mpic_irq_ops, NULL);
96 149
97 if (!armada_370_xp_mpic_domain) 150 if (!armada_370_xp_mpic_domain)
98 panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n"); 151 panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
99 152
100 irq_set_default_host(armada_370_xp_mpic_domain); 153 irq_set_default_host(armada_370_xp_mpic_domain);
154
155#ifdef CONFIG_SMP
156 armada_xp_mpic_smp_cpu_init();
157#endif
158
101 return 0; 159 return 0;
102} 160}
103 161
@@ -111,14 +169,36 @@ asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
111 ARMADA_370_XP_CPU_INTACK_OFFS); 169 ARMADA_370_XP_CPU_INTACK_OFFS);
112 irqnr = irqstat & 0x3FF; 170 irqnr = irqstat & 0x3FF;
113 171
114 if (irqnr < 1023) { 172 if (irqnr > 1022)
115 irqnr = 173 break;
116 irq_find_mapping(armada_370_xp_mpic_domain, irqnr); 174
175 if (irqnr >= 8) {
176 irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
177 irqnr);
117 handle_IRQ(irqnr, regs); 178 handle_IRQ(irqnr, regs);
118 continue; 179 continue;
119 } 180 }
181#ifdef CONFIG_SMP
182 /* IPI Handling */
183 if (irqnr == 0) {
184 u32 ipimask, ipinr;
185
186 ipimask = readl_relaxed(per_cpu_int_base +
187 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
188 & 0xFF;
189
190 writel(0x0, per_cpu_int_base +
191 ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
192
193 /* Handle all pending doorbells */
194 for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
195 if (ipimask & (0x1 << ipinr))
196 handle_IPI(ipinr, regs);
197 }
198 continue;
199 }
200#endif
120 201
121 break;
122 } while (1); 202 } while (1);
123} 203}
124 204
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
new file mode 100644
index 000000000000..fe16aaf7c19c
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -0,0 +1,122 @@
1/*
2 * Symmetric Multi Processing (SMP) support for Armada XP
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Lior Amsalem <alior@marvell.com>
7 * Yehuda Yitschak <yehuday@marvell.com>
8 * Gregory CLEMENT <gregory.clement@free-electrons.com>
9 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 *
 15 * The Armada XP SoC has 4 ARMv7 PJ4B CPUs running in full HW coherency.
16 * This file implements the routines for preparing the SMP infrastructure
17 * and waking up the secondary CPUs
18 */
19
20#include <linux/init.h>
21#include <linux/smp.h>
22#include <linux/clk.h>
23#include <linux/of.h>
24#include <asm/cacheflush.h>
25#include <asm/smp_plat.h>
26#include "common.h"
27#include "armada-370-xp.h"
28#include "pmsu.h"
29#include "coherency.h"
30
31void __init set_secondary_cpus_clock(void)
32{
33 int thiscpu;
34 unsigned long rate;
35 struct clk *cpu_clk = NULL;
36 struct device_node *np = NULL;
37
38 thiscpu = smp_processor_id();
39 for_each_node_by_type(np, "cpu") {
40 int err;
41 int cpu;
42
43 err = of_property_read_u32(np, "reg", &cpu);
44 if (WARN_ON(err))
45 return;
46
47 if (cpu == thiscpu) {
48 cpu_clk = of_clk_get(np, 0);
49 break;
50 }
51 }
52 if (WARN_ON(IS_ERR(cpu_clk)))
53 return;
54 clk_prepare_enable(cpu_clk);
55 rate = clk_get_rate(cpu_clk);
56
 57 /* set all the other CPU clks to the same rate as the boot CPU */
58 for_each_node_by_type(np, "cpu") {
59 int err;
60 int cpu;
61
62 err = of_property_read_u32(np, "reg", &cpu);
63 if (WARN_ON(err))
64 return;
65
66 if (cpu != thiscpu) {
67 cpu_clk = of_clk_get(np, 0);
68 clk_set_rate(cpu_clk, rate);
69 }
70 }
71}
72
73static void __cpuinit armada_xp_secondary_init(unsigned int cpu)
74{
75 armada_xp_mpic_smp_cpu_init();
76}
77
78static int __cpuinit armada_xp_boot_secondary(unsigned int cpu,
79 struct task_struct *idle)
80{
81 pr_info("Booting CPU %d\n", cpu);
82
83 armada_xp_boot_cpu(cpu, armada_xp_secondary_startup);
84
85 return 0;
86}
87
88static void __init armada_xp_smp_init_cpus(void)
89{
90 unsigned int i, ncores;
91 ncores = coherency_get_cpu_count();
92
93 /* Limit possible CPUs to defconfig */
94 if (ncores > nr_cpu_ids) {
95 pr_warn("SMP: %d CPUs physically present. Only %d configured.",
96 ncores, nr_cpu_ids);
97 pr_warn("Clipping CPU count to %d\n", nr_cpu_ids);
98 ncores = nr_cpu_ids;
99 }
100
101 for (i = 0; i < ncores; i++)
102 set_cpu_possible(i, true);
103
104 set_smp_cross_call(armada_mpic_send_doorbell);
105}
106
107void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
108{
109 set_secondary_cpus_clock();
110 flush_cache_all();
111 set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
112}
113
114struct smp_operations armada_xp_smp_ops __initdata = {
115 .smp_init_cpus = armada_xp_smp_init_cpus,
116 .smp_prepare_cpus = armada_xp_smp_prepare_cpus,
117 .smp_secondary_init = armada_xp_secondary_init,
118 .smp_boot_secondary = armada_xp_boot_secondary,
119#ifdef CONFIG_HOTPLUG_CPU
120 .cpu_die = armada_xp_cpu_die,
121#endif
122};
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
new file mode 100644
index 000000000000..3cc4bef6401c
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -0,0 +1,75 @@
1/*
2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Yehuda Yitschak <yehuday@marvell.com>
7 * Gregory Clement <gregory.clement@free-electrons.com>
8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 *
10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any
12 * warranty of any kind, whether express or implied.
13 *
14 * The Armada 370 and Armada XP SOCs have a power management service
15 * unit which is responsible for powering down and waking up CPUs and
16 * other SOC units
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/of_address.h>
22#include <linux/io.h>
23#include <linux/smp.h>
24#include <asm/smp_plat.h>
25
26static void __iomem *pmsu_mp_base;
27static void __iomem *pmsu_reset_base;
28
29#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x24)
30#define PMSU_RESET_CTL_OFFSET(cpu) (cpu * 0x8)
31
32static struct of_device_id of_pmsu_table[] = {
33 {.compatible = "marvell,armada-370-xp-pmsu"},
34 { /* end of list */ },
35};
36
37#ifdef CONFIG_SMP
38int armada_xp_boot_cpu(unsigned int cpu_id, void *boot_addr)
39{
40 int reg, hw_cpu;
41
42 if (!pmsu_mp_base || !pmsu_reset_base) {
43 pr_warn("Can't boot CPU. PMSU is uninitialized\n");
44 return 1;
45 }
46
47 hw_cpu = cpu_logical_map(cpu_id);
48
49 writel(virt_to_phys(boot_addr), pmsu_mp_base +
50 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
51
 52 /* Release CPU from reset by clearing the reset bit */
53 reg = readl(pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
54 reg &= (~0x1);
55 writel(reg, pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
56
57 return 0;
58}
59#endif
60
61int __init armada_370_xp_pmsu_init(void)
62{
63 struct device_node *np;
64
65 np = of_find_matching_node(NULL, of_pmsu_table);
66 if (np) {
67 pr_info("Initializing Power Management Service Unit\n");
68 pmsu_mp_base = of_iomap(np, 0);
69 pmsu_reset_base = of_iomap(np, 1);
70 }
71
72 return 0;
73}
74
75early_initcall(armada_370_xp_pmsu_init);
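
Worked example of the PMSU register math above for hardware CPU 1: the boot-address redirect lives at pmsu_mp_base + (1 * 0x100) + 0x24 and the reset control at pmsu_reset_base + (1 * 0x8). The SMP code exercises it with a logical CPU id that the function maps to a hardware id:

        armada_xp_boot_cpu(1, armada_xp_secondary_startup);
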
diff --git a/arch/arm/mach-mvebu/pmsu.h b/arch/arm/mach-mvebu/pmsu.h
new file mode 100644
index 000000000000..07a737c6b95d
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu.h
@@ -0,0 +1,16 @@
1/*
2 * Power Management Service Unit (PMSU) support for Armada 370/XP platforms.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#ifndef __MACH_MVEBU_PMSU_H
12#define __MACH_MVEBU_PMSU_H
13
14int armada_xp_boot_cpu(unsigned int cpu_id, void *phys_addr);
15
16#endif /* __MACH_MVEBU_PMSU_H */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 94186b6c685f..3fd629d5a513 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -352,6 +352,10 @@ config CPU_PJ4
352 select ARM_THUMBEE 352 select ARM_THUMBEE
353 select CPU_V7 353 select CPU_V7
354 354
355config CPU_PJ4B
356 bool
357 select CPU_V7
358
355# ARMv6 359# ARMv6
356config CPU_V6 360config CPU_V6
357 bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX 361 bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 58bc3e4d3bd0..5383bc018571 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -124,8 +124,6 @@ static void arm_dma_sync_single_for_device(struct device *dev,
124 __dma_page_cpu_to_dev(page, offset, size, dir); 124 __dma_page_cpu_to_dev(page, offset, size, dir);
125} 125}
126 126
127static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
128
129struct dma_map_ops arm_dma_ops = { 127struct dma_map_ops arm_dma_ops = {
130 .alloc = arm_dma_alloc, 128 .alloc = arm_dma_alloc,
131 .free = arm_dma_free, 129 .free = arm_dma_free,
@@ -971,7 +969,7 @@ int dma_supported(struct device *dev, u64 mask)
971} 969}
972EXPORT_SYMBOL(dma_supported); 970EXPORT_SYMBOL(dma_supported);
973 971
974static int arm_dma_set_mask(struct device *dev, u64 dma_mask) 972int arm_dma_set_mask(struct device *dev, u64 dma_mask)
975{ 973{
976 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 974 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
977 return -EIO; 975 return -EIO;
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 846d279f3176..7cd0028cab8e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -169,6 +169,63 @@ __v7_ca15mp_setup:
169 orreq r0, r0, r10 @ Enable CPU-specific SMP bits 169 orreq r0, r0, r10 @ Enable CPU-specific SMP bits
170 mcreq p15, 0, r0, c1, c0, 1 170 mcreq p15, 0, r0, c1, c0, 1
171#endif 171#endif
172
173__v7_pj4b_setup:
174#ifdef CONFIG_CPU_PJ4B
175
176/* Auxiliary Debug Modes Control 1 Register */
177#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
178#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
179#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
180#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
181
182/* Auxiliary Debug Modes Control 2 Register */
183#define PJ4B_FAST_LDR (1 << 23) /* Disable fast LDR */
184#define PJ4B_SNOOP_DATA (1 << 25) /* Do not interleave write and snoop data */
185#define PJ4B_CWF (1 << 27) /* Disable Critical Word First feature */
186#define PJ4B_OUTSDNG_NC (1 << 29) /* Disable outstanding non cacheable rqst */
187#define PJ4B_L1_REP_RR (1 << 30) /* L1 replacement - Strict round robin */
188#define PJ4B_AUX_DBG_CTRL2 (PJ4B_SNOOP_DATA | PJ4B_CWF |\
189 PJ4B_OUTSDNG_NC | PJ4B_L1_REP_RR)
190
191/* Auxiliary Functional Modes Control Register 0 */
192#define PJ4B_SMP_CFB (1 << 1) /* Set SMP mode. Join the coherency fabric */
193#define PJ4B_L1_PAR_CHK (1 << 2) /* Support L1 parity checking */
194#define PJ4B_BROADCAST_CACHE (1 << 8) /* Broadcast Cache and TLB maintenance */
195
196/* Auxiliary Debug Modes Control 0 Register */
197#define PJ4B_WFI_WFE (1 << 22) /* WFI/WFE - serve the DVM and back to idle */
198
199 /* Auxiliary Debug Modes Control 1 Register */
200 mrc p15, 1, r0, c15, c1, 1
201 orr r0, r0, #PJ4B_CLEAN_LINE
202 orr r0, r0, #PJ4B_BCK_OFF_STREX
203 orr r0, r0, #PJ4B_INTER_PARITY
204 bic r0, r0, #PJ4B_STATIC_BP
205 mcr p15, 1, r0, c15, c1, 1
206
207 /* Auxiliary Debug Modes Control 2 Register */
208 mrc p15, 1, r0, c15, c1, 2
209 bic r0, r0, #PJ4B_FAST_LDR
210 orr r0, r0, #PJ4B_AUX_DBG_CTRL2
211 mcr p15, 1, r0, c15, c1, 2
212
213 /* Auxiliary Functional Modes Control Register 0 */
214 mrc p15, 1, r0, c15, c2, 0
215#ifdef CONFIG_SMP
216 orr r0, r0, #PJ4B_SMP_CFB
217#endif
218 orr r0, r0, #PJ4B_L1_PAR_CHK
219 orr r0, r0, #PJ4B_BROADCAST_CACHE
220 mcr p15, 1, r0, c15, c2, 0
221
222 /* Auxiliary Debug Modes Control 0 Register */
223 mrc p15, 1, r0, c15, c1, 0
224 orr r0, r0, #PJ4B_WFI_WFE
225 mcr p15, 1, r0, c15, c1, 0
226
227#endif /* CONFIG_CPU_PJ4B */
228
172__v7_setup: 229__v7_setup:
173 adr r12, __v7_setup_stack @ the local stack 230 adr r12, __v7_setup_stack @ the local stack
174 stmia r12, {r0-r5, r7, r9, r11, lr} 231 stmia r12, {r0-r5, r7, r9, r11, lr}
@@ -342,6 +399,16 @@ __v7_ca9mp_proc_info:
342 .long 0xff0ffff0 399 .long 0xff0ffff0
343 __v7_proc __v7_ca9mp_setup 400 __v7_proc __v7_ca9mp_setup
344 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 401 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
402
403 /*
404 * Marvell PJ4B processor.
405 */
406 .type __v7_pj4b_proc_info, #object
407__v7_pj4b_proc_info:
408 .long 0x562f5840
409 .long 0xfffffff0
410 __v7_proc __v7_pj4b_setup
411 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
345#endif /* CONFIG_ARM_LPAE */ 412#endif /* CONFIG_ARM_LPAE */
346 413
347 /* 414 /*
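
A sketch of the match __lookup_processor_type performs against the __v7_pj4b_proc_info entry above: the entry hits when the MIDR equals 0x562f5840 in every bit except the revision nibble:

        u32 midr;

        asm("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr)); /* read MIDR */
        if ((midr & 0xfffffff0) == 0x562f5840)
                /* __v7_pj4b_setup is selected */;
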
diff --git a/arch/arm/plat-orion/addr-map.c b/arch/arm/plat-orion/addr-map.c
index a7b8060c293a..febe3862873c 100644
--- a/arch/arm/plat-orion/addr-map.c
+++ b/arch/arm/plat-orion/addr-map.c
@@ -42,6 +42,8 @@ EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
42#define WIN_REMAP_LO_OFF 0x0008 42#define WIN_REMAP_LO_OFF 0x0008
43#define WIN_REMAP_HI_OFF 0x000c 43#define WIN_REMAP_HI_OFF 0x000c
44 44
45#define ATTR_HW_COHERENCY (0x1 << 4)
46
45/* 47/*
46 * Default implementation 48 * Default implementation
47 */ 49 */
@@ -163,6 +165,8 @@ void __init orion_setup_cpu_mbus_target(const struct orion_addr_map_cfg *cfg,
163 w = &orion_mbus_dram_info.cs[cs++]; 165 w = &orion_mbus_dram_info.cs[cs++];
164 w->cs_index = i; 166 w->cs_index = i;
165 w->mbus_attr = 0xf & ~(1 << i); 167 w->mbus_attr = 0xf & ~(1 << i);
168 if (cfg->hw_io_coherency)
169 w->mbus_attr |= ATTR_HW_COHERENCY;
166 w->base = base & 0xffff0000; 170 w->base = base & 0xffff0000;
167 w->size = (size | 0x0000ffff) + 1; 171 w->size = (size | 0x0000ffff) + 1;
168 } 172 }
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index b8a688cad4c2..2d4b6414609f 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -606,26 +606,6 @@ void __init orion_wdt_init(void)
606 ****************************************************************************/ 606 ****************************************************************************/
607static u64 orion_xor_dmamask = DMA_BIT_MASK(32); 607static u64 orion_xor_dmamask = DMA_BIT_MASK(32);
608 608
609void __init orion_xor_init_channels(
610 struct mv_xor_platform_data *orion_xor0_data,
611 struct platform_device *orion_xor0_channel,
612 struct mv_xor_platform_data *orion_xor1_data,
613 struct platform_device *orion_xor1_channel)
614{
615 /*
616 * two engines can't do memset simultaneously, this limitation
617 * satisfied by removing memset support from one of the engines.
618 */
619 dma_cap_set(DMA_MEMCPY, orion_xor0_data->cap_mask);
620 dma_cap_set(DMA_XOR, orion_xor0_data->cap_mask);
621 platform_device_register(orion_xor0_channel);
622
623 dma_cap_set(DMA_MEMCPY, orion_xor1_data->cap_mask);
624 dma_cap_set(DMA_MEMSET, orion_xor1_data->cap_mask);
625 dma_cap_set(DMA_XOR, orion_xor1_data->cap_mask);
626 platform_device_register(orion_xor1_channel);
627}
628
629/***************************************************************************** 609/*****************************************************************************
630 * XOR0 610 * XOR0
631 ****************************************************************************/ 611 ****************************************************************************/
@@ -636,61 +616,30 @@ static struct resource orion_xor0_shared_resources[] = {
636 }, { 616 }, {
637 .name = "xor 0 high", 617 .name = "xor 0 high",
638 .flags = IORESOURCE_MEM, 618 .flags = IORESOURCE_MEM,
619 }, {
620 .name = "irq channel 0",
621 .flags = IORESOURCE_IRQ,
622 }, {
623 .name = "irq channel 1",
624 .flags = IORESOURCE_IRQ,
639 }, 625 },
640}; 626};
641 627
642static struct platform_device orion_xor0_shared = { 628static struct mv_xor_channel_data orion_xor0_channels_data[2];
643 .name = MV_XOR_SHARED_NAME,
644 .id = 0,
645 .num_resources = ARRAY_SIZE(orion_xor0_shared_resources),
646 .resource = orion_xor0_shared_resources,
647};
648 629
649static struct resource orion_xor00_resources[] = { 630static struct mv_xor_platform_data orion_xor0_pdata = {
650 [0] = { 631 .channels = orion_xor0_channels_data,
651 .flags = IORESOURCE_IRQ,
652 },
653};
654
655static struct mv_xor_platform_data orion_xor00_data = {
656 .shared = &orion_xor0_shared,
657 .hw_id = 0,
658 .pool_size = PAGE_SIZE,
659}; 632};
660 633
661static struct platform_device orion_xor00_channel = { 634static struct platform_device orion_xor0_shared = {
662 .name = MV_XOR_NAME, 635 .name = MV_XOR_NAME,
663 .id = 0, 636 .id = 0,
664 .num_resources = ARRAY_SIZE(orion_xor00_resources), 637 .num_resources = ARRAY_SIZE(orion_xor0_shared_resources),
665 .resource = orion_xor00_resources, 638 .resource = orion_xor0_shared_resources,
666 .dev = { 639 .dev = {
667 .dma_mask = &orion_xor_dmamask, 640 .dma_mask = &orion_xor_dmamask,
668 .coherent_dma_mask = DMA_BIT_MASK(64), 641 .coherent_dma_mask = DMA_BIT_MASK(64),
669 .platform_data = &orion_xor00_data, 642 .platform_data = &orion_xor0_pdata,
670 },
671};
672
673static struct resource orion_xor01_resources[] = {
674 [0] = {
675 .flags = IORESOURCE_IRQ,
676 },
677};
678
679static struct mv_xor_platform_data orion_xor01_data = {
680 .shared = &orion_xor0_shared,
681 .hw_id = 1,
682 .pool_size = PAGE_SIZE,
683};
684
685static struct platform_device orion_xor01_channel = {
686 .name = MV_XOR_NAME,
687 .id = 1,
688 .num_resources = ARRAY_SIZE(orion_xor01_resources),
689 .resource = orion_xor01_resources,
690 .dev = {
691 .dma_mask = &orion_xor_dmamask,
692 .coherent_dma_mask = DMA_BIT_MASK(64),
693 .platform_data = &orion_xor01_data,
694 }, 643 },
695}; 644};
696 645
@@ -704,15 +653,23 @@ void __init orion_xor0_init(unsigned long mapbase_low,
704 orion_xor0_shared_resources[1].start = mapbase_high; 653 orion_xor0_shared_resources[1].start = mapbase_high;
705 orion_xor0_shared_resources[1].end = mapbase_high + 0xff; 654 orion_xor0_shared_resources[1].end = mapbase_high + 0xff;
706 655
707 orion_xor00_resources[0].start = irq_0; 656 orion_xor0_shared_resources[2].start = irq_0;
708 orion_xor00_resources[0].end = irq_0; 657 orion_xor0_shared_resources[2].end = irq_0;
709 orion_xor01_resources[0].start = irq_1; 658 orion_xor0_shared_resources[3].start = irq_1;
710 orion_xor01_resources[0].end = irq_1; 659 orion_xor0_shared_resources[3].end = irq_1;
711 660
712 platform_device_register(&orion_xor0_shared); 661 /*
662 * two engines can't do memset simultaneously, this limitation
663 * satisfied by removing memset support from one of the engines.
664 */
665 dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask);
666 dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask);
667
668 dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask);
669 dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask);
670 dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask);
713 671
714 orion_xor_init_channels(&orion_xor00_data, &orion_xor00_channel, 672 platform_device_register(&orion_xor0_shared);
715 &orion_xor01_data, &orion_xor01_channel);
716} 673}
717 674
718/***************************************************************************** 675/*****************************************************************************
@@ -725,61 +682,30 @@ static struct resource orion_xor1_shared_resources[] = {
725 }, { 682 }, {
726 .name = "xor 1 high", 683 .name = "xor 1 high",
727 .flags = IORESOURCE_MEM, 684 .flags = IORESOURCE_MEM,
685 }, {
686 .name = "irq channel 0",
687 .flags = IORESOURCE_IRQ,
688 }, {
689 .name = "irq channel 1",
690 .flags = IORESOURCE_IRQ,
728 }, 691 },
729}; 692};
730 693
731static struct platform_device orion_xor1_shared = { 694static struct mv_xor_channel_data orion_xor1_channels_data[2];
732 .name = MV_XOR_SHARED_NAME,
733 .id = 1,
734 .num_resources = ARRAY_SIZE(orion_xor1_shared_resources),
735 .resource = orion_xor1_shared_resources,
736};
737
738static struct resource orion_xor10_resources[] = {
739 [0] = {
740 .flags = IORESOURCE_IRQ,
741 },
742};
743
744static struct mv_xor_platform_data orion_xor10_data = {
745 .shared = &orion_xor1_shared,
746 .hw_id = 0,
747 .pool_size = PAGE_SIZE,
748};
749
750static struct platform_device orion_xor10_channel = {
751 .name = MV_XOR_NAME,
752 .id = 2,
753 .num_resources = ARRAY_SIZE(orion_xor10_resources),
754 .resource = orion_xor10_resources,
755 .dev = {
756 .dma_mask = &orion_xor_dmamask,
757 .coherent_dma_mask = DMA_BIT_MASK(64),
758 .platform_data = &orion_xor10_data,
759 },
760};
761
762static struct resource orion_xor11_resources[] = {
763 [0] = {
764 .flags = IORESOURCE_IRQ,
765 },
766};
767 695
768static struct mv_xor_platform_data orion_xor11_data = { 696static struct mv_xor_platform_data orion_xor1_pdata = {
769 .shared = &orion_xor1_shared, 697 .channels = orion_xor1_channels_data,
770 .hw_id = 1,
771 .pool_size = PAGE_SIZE,
772}; 698};
773 699
774static struct platform_device orion_xor11_channel = { 700static struct platform_device orion_xor1_shared = {
775 .name = MV_XOR_NAME, 701 .name = MV_XOR_NAME,
776 .id = 3, 702 .id = 1,
777 .num_resources = ARRAY_SIZE(orion_xor11_resources), 703 .num_resources = ARRAY_SIZE(orion_xor1_shared_resources),
778 .resource = orion_xor11_resources, 704 .resource = orion_xor1_shared_resources,
779 .dev = { 705 .dev = {
780 .dma_mask = &orion_xor_dmamask, 706 .dma_mask = &orion_xor_dmamask,
781 .coherent_dma_mask = DMA_BIT_MASK(64), 707 .coherent_dma_mask = DMA_BIT_MASK(64),
782 .platform_data = &orion_xor11_data, 708 .platform_data = &orion_xor1_pdata,
783 }, 709 },
784}; 710};
785 711
@@ -793,15 +719,23 @@ void __init orion_xor1_init(unsigned long mapbase_low,
793 orion_xor1_shared_resources[1].start = mapbase_high; 719 orion_xor1_shared_resources[1].start = mapbase_high;
794 orion_xor1_shared_resources[1].end = mapbase_high + 0xff; 720 orion_xor1_shared_resources[1].end = mapbase_high + 0xff;
795 721
796 orion_xor10_resources[0].start = irq_0; 722 orion_xor1_shared_resources[2].start = irq_0;
797 orion_xor10_resources[0].end = irq_0; 723 orion_xor1_shared_resources[2].end = irq_0;
798 orion_xor11_resources[0].start = irq_1; 724 orion_xor1_shared_resources[3].start = irq_1;
799 orion_xor11_resources[0].end = irq_1; 725 orion_xor1_shared_resources[3].end = irq_1;
800 726
801 platform_device_register(&orion_xor1_shared); 727 /*
728 * two engines can't do memset simultaneously, this limitation
729 * satisfied by removing memset support from one of the engines.
730 */
731 dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask);
732 dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask);
802 733
803 orion_xor_init_channels(&orion_xor10_data, &orion_xor10_channel, 734 dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask);
804 &orion_xor11_data, &orion_xor11_channel); 735 dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask);
736 dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask);
737
738 platform_device_register(&orion_xor1_shared);
805} 739}
806 740
807/***************************************************************************** 741/*****************************************************************************
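
With the two channels folded into one shared device above, a driver is expected to pull the per-channel interrupts out by resource name; a sketch, assuming the usual platform helpers ('pdev' is illustrative):

        struct resource *irq0 = platform_get_resource_byname(pdev,
                                        IORESOURCE_IRQ, "irq channel 0");
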
diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
index ec63e4a627d0..b76c06569fe5 100644
--- a/arch/arm/plat-orion/include/plat/addr-map.h
+++ b/arch/arm/plat-orion/include/plat/addr-map.h
@@ -17,6 +17,7 @@ struct orion_addr_map_cfg {
17 const int num_wins; /* Total number of windows */ 17 const int num_wins; /* Total number of windows */
18 const int remappable_wins; 18 const int remappable_wins;
19 void __iomem *bridge_virt_base; 19 void __iomem *bridge_virt_base;
20 int hw_io_coherency;
20 21
21 /* If NULL, the default cpu_win_can_remap will be used, using 22 /* If NULL, the default cpu_win_can_remap will be used, using
22 the value in remappable_wins */ 23 the value in remappable_wins */
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index 6bbc3fe5f58e..e06fc5fefa14 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -12,6 +12,7 @@
12#include <linux/mv643xx_eth.h> 12#include <linux/mv643xx_eth.h>
13 13
14struct dsa_platform_data; 14struct dsa_platform_data;
15struct mv_sata_platform_data;
15 16
16void __init orion_uart0_init(void __iomem *membase, 17void __init orion_uart0_init(void __iomem *membase,
17 resource_size_t mapbase, 18 resource_size_t mapbase,
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index bace9e98f75d..60427c0d23e6 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -54,3 +54,5 @@ config COMMON_CLK_MAX77686
54 This driver supports Maxim 77686 crystal oscillator clock. 54 This driver supports Maxim 77686 crystal oscillator clock.
55 55
56endmenu 56endmenu
57
58source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 71a25b91de00..d0a14ae8d49c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PLAT_SPEAR) += spear/
13obj-$(CONFIG_ARCH_U300) += clk-u300.o 13obj-$(CONFIG_ARCH_U300) += clk-u300.o
14obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/ 14obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
15obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o 15obj-$(CONFIG_ARCH_PRIMA2) += clk-prima2.o
16obj-$(CONFIG_PLAT_ORION) += mvebu/
16ifeq ($(CONFIG_COMMON_CLK), y) 17ifeq ($(CONFIG_COMMON_CLK), y)
17obj-$(CONFIG_ARCH_MMP) += mmp/ 18obj-$(CONFIG_ARCH_MMP) += mmp/
18endif 19endif
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
new file mode 100644
index 000000000000..57323fd15ec9
--- /dev/null
+++ b/drivers/clk/mvebu/Kconfig
@@ -0,0 +1,8 @@
1config MVEBU_CLK_CORE
2 bool
3
4config MVEBU_CLK_CPU
5 bool
6
7config MVEBU_CLK_GATING
8 bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
new file mode 100644
index 000000000000..58df3dc49363
--- /dev/null
+++ b/drivers/clk/mvebu/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MVEBU_CLK_CORE) += clk.o clk-core.o
2obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
3obj-$(CONFIG_MVEBU_CLK_GATING) += clk-gating-ctrl.o
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
new file mode 100644
index 000000000000..69056a7479e8
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.c
@@ -0,0 +1,675 @@
1/*
2 * Marvell EBU clock core handling defined at reset
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13#include <linux/kernel.h>
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/clk-provider.h>
17#include <linux/of_address.h>
18#include <linux/io.h>
19#include <linux/of.h>
20#include "clk-core.h"
21
22struct core_ratio {
23 int id;
24 const char *name;
25};
26
27struct core_clocks {
28 u32 (*get_tclk_freq)(void __iomem *sar);
29 u32 (*get_cpu_freq)(void __iomem *sar);
30 void (*get_clk_ratio)(void __iomem *sar, int id, int *mult, int *div);
31 const struct core_ratio *ratios;
32 int num_ratios;
33};
34
35static struct clk_onecell_data clk_data;
36
37static void __init mvebu_clk_core_setup(struct device_node *np,
38 struct core_clocks *coreclk)
39{
40 const char *tclk_name = "tclk";
41 const char *cpuclk_name = "cpuclk";
42 void __iomem *base;
43 unsigned long rate;
44 int n;
45
46 base = of_iomap(np, 0);
47 if (WARN_ON(!base))
48 return;
49
50 /*
51 * Allocate struct for TCLK, cpu clk, and core ratio clocks
52 */
53 clk_data.clk_num = 2 + coreclk->num_ratios;
54 clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
55 GFP_KERNEL);
56 if (WARN_ON(!clk_data.clks))
57 return;
58
59 /*
60 * Register TCLK
61 */
62 of_property_read_string_index(np, "clock-output-names", 0,
63 &tclk_name);
64 rate = coreclk->get_tclk_freq(base);
65 clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
66 CLK_IS_ROOT, rate);
67 WARN_ON(IS_ERR(clk_data.clks[0]));
68
69 /*
70 * Register CPU clock
71 */
72 of_property_read_string_index(np, "clock-output-names", 1,
73 &cpuclk_name);
74 rate = coreclk->get_cpu_freq(base);
75 clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
76 CLK_IS_ROOT, rate);
77 WARN_ON(IS_ERR(clk_data.clks[1]));
78
79 /*
80 * Register fixed-factor clocks derived from CPU clock
81 */
82 for (n = 0; n < coreclk->num_ratios; n++) {
83 const char *rclk_name = coreclk->ratios[n].name;
84 int mult, div;
85
86 of_property_read_string_index(np, "clock-output-names",
87 2+n, &rclk_name);
88 coreclk->get_clk_ratio(base, coreclk->ratios[n].id,
89 &mult, &div);
90 clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
91 cpuclk_name, 0, mult, div);
92 WARN_ON(IS_ERR(clk_data.clks[2+n]));
 93 }
94
95 /*
96 * SAR register isn't needed anymore
97 */
98 iounmap(base);
99
100 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
101}
102
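
Consumers reach these clocks through the onecell provider registered above, indexing them in registration order (0 = tclk, 1 = cpuclk, 2+ = the ratio clocks). A sketch of the driver side, given a DT property like clocks = <&coreclk 1> ('np' is illustrative):

        struct clk *cpuclk = of_clk_get(np, 0);  /* first "clocks" entry */
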
103#ifdef CONFIG_MACH_ARMADA_370_XP
104/*
 105 * The Armada 370/XP Sample At Reset is a 64-bit bitfield split across two
 106 * 32-bit registers.
107 */
108
109#define SARL 0 /* Low part [0:31] */
110#define SARL_AXP_PCLK_FREQ_OPT 21
111#define SARL_AXP_PCLK_FREQ_OPT_MASK 0x7
112#define SARL_A370_PCLK_FREQ_OPT 11
113#define SARL_A370_PCLK_FREQ_OPT_MASK 0xF
114#define SARL_AXP_FAB_FREQ_OPT 24
115#define SARL_AXP_FAB_FREQ_OPT_MASK 0xF
116#define SARL_A370_FAB_FREQ_OPT 15
117#define SARL_A370_FAB_FREQ_OPT_MASK 0x1F
118#define SARL_A370_TCLK_FREQ_OPT 20
119#define SARL_A370_TCLK_FREQ_OPT_MASK 0x1
120#define SARH 4 /* High part [32:63] */
121#define SARH_AXP_PCLK_FREQ_OPT (52-32)
122#define SARH_AXP_PCLK_FREQ_OPT_MASK 0x1
123#define SARH_AXP_PCLK_FREQ_OPT_SHIFT 3
124#define SARH_AXP_FAB_FREQ_OPT (51-32)
125#define SARH_AXP_FAB_FREQ_OPT_MASK 0x1
126#define SARH_AXP_FAB_FREQ_OPT_SHIFT 4
127
128static const u32 __initconst armada_370_tclk_frequencies[] = {
129 16600000,
130 20000000,
131};
132
133static u32 __init armada_370_get_tclk_freq(void __iomem *sar)
134{
135 u8 tclk_freq_select = 0;
136
137 tclk_freq_select = ((readl(sar) >> SARL_A370_TCLK_FREQ_OPT) &
138 SARL_A370_TCLK_FREQ_OPT_MASK);
139 return armada_370_tclk_frequencies[tclk_freq_select];
140}
141
142static const u32 __initconst armada_370_cpu_frequencies[] = {
143 400000000,
144 533000000,
145 667000000,
146 800000000,
147 1000000000,
148 1067000000,
149 1200000000,
150};
151
152static u32 __init armada_370_get_cpu_freq(void __iomem *sar)
153{
154 u32 cpu_freq;
155 u8 cpu_freq_select = 0;
156
157 cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) &
158 SARL_A370_PCLK_FREQ_OPT_MASK);
 159 if (cpu_freq_select >= ARRAY_SIZE(armada_370_cpu_frequencies)) {
 160 pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
161 cpu_freq = 0;
162 } else
163 cpu_freq = armada_370_cpu_frequencies[cpu_freq_select];
164
165 return cpu_freq;
166}
167
168enum { A370_XP_NBCLK, A370_XP_HCLK, A370_XP_DRAMCLK };
169
170static const struct core_ratio __initconst armada_370_xp_core_ratios[] = {
171 { .id = A370_XP_NBCLK, .name = "nbclk" },
172 { .id = A370_XP_HCLK, .name = "hclk" },
173 { .id = A370_XP_DRAMCLK, .name = "dramclk" },
174};
175
176static const int __initconst armada_370_xp_nbclk_ratios[32][2] = {
177 {0, 1}, {1, 2}, {2, 2}, {2, 2},
178 {1, 2}, {1, 2}, {1, 1}, {2, 3},
179 {0, 1}, {1, 2}, {2, 4}, {0, 1},
180 {1, 2}, {0, 1}, {0, 1}, {2, 2},
181 {0, 1}, {0, 1}, {0, 1}, {1, 1},
182 {2, 3}, {0, 1}, {0, 1}, {0, 1},
183 {0, 1}, {0, 1}, {0, 1}, {1, 1},
184 {0, 1}, {0, 1}, {0, 1}, {0, 1},
185};
186
187static const int __initconst armada_370_xp_hclk_ratios[32][2] = {
188 {0, 1}, {1, 2}, {2, 6}, {2, 3},
189 {1, 3}, {1, 4}, {1, 2}, {2, 6},
190 {0, 1}, {1, 6}, {2, 10}, {0, 1},
191 {1, 4}, {0, 1}, {0, 1}, {2, 5},
192 {0, 1}, {0, 1}, {0, 1}, {1, 2},
193 {2, 6}, {0, 1}, {0, 1}, {0, 1},
194 {0, 1}, {0, 1}, {0, 1}, {1, 1},
195 {0, 1}, {0, 1}, {0, 1}, {0, 1},
196};
197
198static const int __initconst armada_370_xp_dramclk_ratios[32][2] = {
199 {0, 1}, {1, 2}, {2, 3}, {2, 3},
200 {1, 3}, {1, 2}, {1, 2}, {2, 6},
201 {0, 1}, {1, 3}, {2, 5}, {0, 1},
202 {1, 4}, {0, 1}, {0, 1}, {2, 5},
203 {0, 1}, {0, 1}, {0, 1}, {1, 1},
204 {2, 3}, {0, 1}, {0, 1}, {0, 1},
205 {0, 1}, {0, 1}, {0, 1}, {1, 1},
206 {0, 1}, {0, 1}, {0, 1}, {0, 1},
207};
208
209static void __init armada_370_xp_get_clk_ratio(u32 opt,
210 void __iomem *sar, int id, int *mult, int *div)
211{
212 switch (id) {
213 case A370_XP_NBCLK:
214 *mult = armada_370_xp_nbclk_ratios[opt][0];
215 *div = armada_370_xp_nbclk_ratios[opt][1];
216 break;
217 case A370_XP_HCLK:
218 *mult = armada_370_xp_hclk_ratios[opt][0];
219 *div = armada_370_xp_hclk_ratios[opt][1];
220 break;
221 case A370_XP_DRAMCLK:
222 *mult = armada_370_xp_dramclk_ratios[opt][0];
223 *div = armada_370_xp_dramclk_ratios[opt][1];
224 break;
225 }
226}
227
228static void __init armada_370_get_clk_ratio(
229 void __iomem *sar, int id, int *mult, int *div)
230{
231 u32 opt = ((readl(sar) >> SARL_A370_FAB_FREQ_OPT) &
232 SARL_A370_FAB_FREQ_OPT_MASK);
233
234 armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
235}
236
237
238static const struct core_clocks armada_370_core_clocks = {
239 .get_tclk_freq = armada_370_get_tclk_freq,
240 .get_cpu_freq = armada_370_get_cpu_freq,
241 .get_clk_ratio = armada_370_get_clk_ratio,
242 .ratios = armada_370_xp_core_ratios,
243 .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
244};
245
246static const u32 __initconst armada_xp_cpu_frequencies[] = {
247 1000000000,
248 1066000000,
249 1200000000,
250 1333000000,
251 1500000000,
252 1666000000,
253 1800000000,
254 2000000000,
255 667000000,
256 0,
257 800000000,
258 1600000000,
259};
260
 261/* For Armada XP the TCLK frequency is fixed: 250 MHz */
262static u32 __init armada_xp_get_tclk_freq(void __iomem *sar)
263{
264 return 250 * 1000 * 1000;
265}
266
267static u32 __init armada_xp_get_cpu_freq(void __iomem *sar)
268{
269 u32 cpu_freq;
270 u8 cpu_freq_select = 0;
271
272 cpu_freq_select = ((readl(sar) >> SARL_AXP_PCLK_FREQ_OPT) &
273 SARL_AXP_PCLK_FREQ_OPT_MASK);
274 /*
 275 * The upper bit is not contiguous with the other ones and is
 276 * located in the high part of the SAR registers.
277 */
278 cpu_freq_select |= (((readl(sar+4) >> SARH_AXP_PCLK_FREQ_OPT) &
279 SARH_AXP_PCLK_FREQ_OPT_MASK)
280 << SARH_AXP_PCLK_FREQ_OPT_SHIFT);
 281 if (cpu_freq_select >= ARRAY_SIZE(armada_xp_cpu_frequencies)) {
 282 pr_err("CPU freq select unsupported: %d\n", cpu_freq_select);
283 cpu_freq = 0;
284 } else
285 cpu_freq = armada_xp_cpu_frequencies[cpu_freq_select];
286
287 return cpu_freq;
288}
289
290static void __init armada_xp_get_clk_ratio(
291 void __iomem *sar, int id, int *mult, int *div)
292{
293
294 u32 opt = ((readl(sar) >> SARL_AXP_FAB_FREQ_OPT) &
295 SARL_AXP_FAB_FREQ_OPT_MASK);
296 /*
297 * The upper bit is not contiguous with the other ones and is
298 * located in the high part of the SAR registers
299 */
300 opt |= (((readl(sar+4) >> SARH_AXP_FAB_FREQ_OPT) &
301 SARH_AXP_FAB_FREQ_OPT_MASK)
302 << SARH_AXP_FAB_FREQ_OPT_SHIFT);
303
304 armada_370_xp_get_clk_ratio(opt, sar, id, mult, div);
305}
306
307static const struct core_clocks armada_xp_core_clocks = {
308 .get_tclk_freq = armada_xp_get_tclk_freq,
309 .get_cpu_freq = armada_xp_get_cpu_freq,
310 .get_clk_ratio = armada_xp_get_clk_ratio,
311 .ratios = armada_370_xp_core_ratios,
312 .num_ratios = ARRAY_SIZE(armada_370_xp_core_ratios),
313};
314
315#endif /* CONFIG_MACH_ARMADA_370_XP */
316
317/*
318 * Dove PLL sample-at-reset configuration
319 *
320 * SAR0[8:5] : CPU frequency
321 * 5 = 1000 MHz
322 * 6 = 933 MHz
323 * 7 = 933 MHz
324 * 8 = 800 MHz
325 * 9 = 800 MHz
326 * 10 = 800 MHz
327 * 11 = 1067 MHz
328 * 12 = 667 MHz
329 * 13 = 533 MHz
330 * 14 = 400 MHz
331 * 15 = 333 MHz
332 * others reserved.
333 *
334 * SAR0[11:9] : CPU to L2 Clock divider ratio
335 * 0 = (1/1) * CPU
336 * 2 = (1/2) * CPU
337 * 4 = (1/3) * CPU
338 * 6 = (1/4) * CPU
339 * others reserved.
340 *
341 * SAR0[15:12] : CPU to DDR DRAM Clock divider ratio
342 * 0 = (1/1) * CPU
343 * 2 = (1/2) * CPU
344 * 3 = (2/5) * CPU
345 * 4 = (1/3) * CPU
346 * 6 = (1/4) * CPU
347 * 8 = (1/5) * CPU
348 * 10 = (1/6) * CPU
349 * 12 = (1/7) * CPU
350 * 14 = (1/8) * CPU
351 * 15 = (1/10) * CPU
352 * others reserved.
353 *
354 * SAR0[24:23] : TCLK frequency
355 * 0 = 166 MHz
356 * 1 = 125 MHz
357 * others reserved.
358 */
359#ifdef CONFIG_ARCH_DOVE
360#define SAR_DOVE_CPU_FREQ 5
361#define SAR_DOVE_CPU_FREQ_MASK 0xf
362#define SAR_DOVE_L2_RATIO 9
363#define SAR_DOVE_L2_RATIO_MASK 0x7
364#define SAR_DOVE_DDR_RATIO 12
365#define SAR_DOVE_DDR_RATIO_MASK 0xf
366#define SAR_DOVE_TCLK_FREQ 23
367#define SAR_DOVE_TCLK_FREQ_MASK 0x3
368
369static const u32 __initconst dove_tclk_frequencies[] = {
370 166666667,
371 125000000,
372 0, 0
373};
374
375static u32 __init dove_get_tclk_freq(void __iomem *sar)
376{
377 u32 opt = (readl(sar) >> SAR_DOVE_TCLK_FREQ) &
378 SAR_DOVE_TCLK_FREQ_MASK;
379 return dove_tclk_frequencies[opt];
380}
381
382static const u32 __initconst dove_cpu_frequencies[] = {
383 0, 0, 0, 0, 0,
384 1000000000,
385 933333333, 933333333,
386 800000000, 800000000, 800000000,
387 1066666667,
388 666666667,
389 533333333,
390 400000000,
391 333333333
392};
393
394static u32 __init dove_get_cpu_freq(void __iomem *sar)
395{
396 u32 opt = (readl(sar) >> SAR_DOVE_CPU_FREQ) &
397 SAR_DOVE_CPU_FREQ_MASK;
398 return dove_cpu_frequencies[opt];
399}
400
401enum { DOVE_CPU_TO_L2, DOVE_CPU_TO_DDR };
402
403static const struct core_ratio __initconst dove_core_ratios[] = {
404 { .id = DOVE_CPU_TO_L2, .name = "l2clk", },
405 { .id = DOVE_CPU_TO_DDR, .name = "ddrclk", }
406};
407
408static const int __initconst dove_cpu_l2_ratios[8][2] = {
409 { 1, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
410 { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 }
411};
412
413static const int __initconst dove_cpu_ddr_ratios[16][2] = {
414 { 1, 1 }, { 0, 1 }, { 1, 2 }, { 2, 5 },
415 { 1, 3 }, { 0, 1 }, { 1, 4 }, { 0, 1 },
416 { 1, 5 }, { 0, 1 }, { 1, 6 }, { 0, 1 },
417 { 1, 7 }, { 0, 1 }, { 1, 8 }, { 1, 10 }
418};
419
420static void __init dove_get_clk_ratio(
421 void __iomem *sar, int id, int *mult, int *div)
422{
423 switch (id) {
424 case DOVE_CPU_TO_L2:
425 {
426 u32 opt = (readl(sar) >> SAR_DOVE_L2_RATIO) &
427 SAR_DOVE_L2_RATIO_MASK;
428 *mult = dove_cpu_l2_ratios[opt][0];
429 *div = dove_cpu_l2_ratios[opt][1];
430 break;
431 }
432 case DOVE_CPU_TO_DDR:
433 {
434 u32 opt = (readl(sar) >> SAR_DOVE_DDR_RATIO) &
435 SAR_DOVE_DDR_RATIO_MASK;
436 *mult = dove_cpu_ddr_ratios[opt][0];
437 *div = dove_cpu_ddr_ratios[opt][1];
438 break;
439 }
440 }
441}
442
443static const struct core_clocks dove_core_clocks = {
444 .get_tclk_freq = dove_get_tclk_freq,
445 .get_cpu_freq = dove_get_cpu_freq,
446 .get_clk_ratio = dove_get_clk_ratio,
447 .ratios = dove_core_ratios,
448 .num_ratios = ARRAY_SIZE(dove_core_ratios),
449};
450#endif /* CONFIG_ARCH_DOVE */
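
To see how the comment block above lines up with these tables, decode one hypothetical SAR0 value (editorial sketch, not in the patch): CPU field 5, L2 field 2, DDR field 3 and TCLK field 1 give a 1000 MHz CPU, l2clk = 500 MHz (1/2), ddrclk = 400 MHz (2/5) and a 125 MHz tclk:

	u32 sar0 = (5 << SAR_DOVE_CPU_FREQ) |	/* dove_cpu_frequencies[5] = 1 GHz */
		   (2 << SAR_DOVE_L2_RATIO) |	/* dove_cpu_l2_ratios[2]  = {1, 2} */
		   (3 << SAR_DOVE_DDR_RATIO) |	/* dove_cpu_ddr_ratios[3] = {2, 5} */
		   (1 << SAR_DOVE_TCLK_FREQ);	/* dove_tclk_frequencies[1] = 125 MHz */
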
451
452/*
453 * Kirkwood PLL sample-at-reset configuration
454 * (6180 has a different SAR layout than other Kirkwood SoCs)
455 *
456 * SAR0[4:3,22,1] : CPU frequency (6281,6292,6282)
457 * 4 = 600 MHz
458 * 6 = 800 MHz
459 * 7 = 1000 MHz
460 * 9 = 1200 MHz
461 * 12 = 1500 MHz
462 * 13 = 1600 MHz
463 * 14 = 1800 MHz
464 * 15 = 2000 MHz
465 * others reserved.
466 *
467 * SAR0[19,10:9] : CPU to L2 Clock divider ratio (6281,6292,6282)
468 * 1 = (1/2) * CPU
469 * 3 = (1/3) * CPU
470 * 5 = (1/4) * CPU
471 * others reserved.
472 *
473 * SAR0[8:5] : CPU to DDR DRAM Clock divider ratio (6281,6292,6282)
474 * 2 = (1/2) * CPU
475 * 4 = (1/3) * CPU
476 * 6 = (1/4) * CPU
477 * 7 = (2/9) * CPU
478 * 8 = (1/5) * CPU
479 * 9 = (1/6) * CPU
480 * others reserved.
481 *
482 * SAR0[4:2] : Kirkwood 6180 cpu/l2/ddr clock configuration (6180 only)
483 * 5 = [CPU = 600 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/3) * CPU]
484 * 6 = [CPU = 800 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/4) * CPU]
485 * 7 = [CPU = 1000 MHz, L2 = (1/2) * CPU, DDR = 200 MHz = (1/5) * CPU]
486 * others reserved.
487 *
488 * SAR0[21] : TCLK frequency
489 * 0 = 200 MHz
490 * 1 = 166 MHz
491 * others reserved.
492 */
493#ifdef CONFIG_ARCH_KIRKWOOD
494#define SAR_KIRKWOOD_CPU_FREQ(x) \
495 (((x & (1 << 1)) >> 1) | \
496 ((x & (1 << 22)) >> 21) | \
497 ((x & (3 << 3)) >> 1))
498#define SAR_KIRKWOOD_L2_RATIO(x) \
499 (((x & (3 << 9)) >> 9) | \
500 (((x & (1 << 19)) >> 17)))
501#define SAR_KIRKWOOD_DDR_RATIO 5
502#define SAR_KIRKWOOD_DDR_RATIO_MASK 0xf
503#define SAR_MV88F6180_CLK 2
504#define SAR_MV88F6180_CLK_MASK 0x7
505#define SAR_KIRKWOOD_TCLK_FREQ 21
506#define SAR_KIRKWOOD_TCLK_FREQ_MASK 0x1
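
Because the 6281/6282 CPU-frequency selector is scattered across SAR0 bits [4:3], [22] and [1], the macro above reassembles it bit by bit. A worked example with a hypothetical register value (editorial, not in the patch):

	/* bits 4:3 = 01, bit 22 = 1, bit 1 = 1:
	 *   ((x & (1 << 1))  >> 1)  = 1
	 *   ((x & (1 << 22)) >> 21) = 2
	 *   ((x & (3 << 3))  >> 1)  = 4
	 * so the index is 7, i.e. 1000 MHz in the table further down. */
	u32 sar0 = (1 << 1) | (1 << 22) | (1 << 3);
	u32 idx  = SAR_KIRKWOOD_CPU_FREQ(sar0);		/* == 7 */
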
507
508enum { KIRKWOOD_CPU_TO_L2, KIRKWOOD_CPU_TO_DDR };
509
510static const struct core_ratio __initconst kirkwood_core_ratios[] = {
511 { .id = KIRKWOOD_CPU_TO_L2, .name = "l2clk", },
512 { .id = KIRKWOOD_CPU_TO_DDR, .name = "ddrclk", }
513};
514
515static u32 __init kirkwood_get_tclk_freq(void __iomem *sar)
516{
517 u32 opt = (readl(sar) >> SAR_KIRKWOOD_TCLK_FREQ) &
518 SAR_KIRKWOOD_TCLK_FREQ_MASK;
519 return (opt) ? 166666667 : 200000000;
520}
521
522static const u32 __initconst kirkwood_cpu_frequencies[] = {
523 0, 0, 0, 0,
524 600000000,
525 0,
526 800000000,
527 1000000000,
528 0,
529 1200000000,
530 0, 0,
531 1500000000,
532 1600000000,
533 1800000000,
534 2000000000
535};
536
537static u32 __init kirkwood_get_cpu_freq(void __iomem *sar)
538{
539 u32 opt = SAR_KIRKWOOD_CPU_FREQ(readl(sar));
540 return kirkwood_cpu_frequencies[opt];
541}
542
543static const int __initconst kirkwood_cpu_l2_ratios[8][2] = {
544 { 0, 1 }, { 1, 2 }, { 0, 1 }, { 1, 3 },
545 { 0, 1 }, { 1, 4 }, { 0, 1 }, { 0, 1 }
546};
547
548static const int __initconst kirkwood_cpu_ddr_ratios[16][2] = {
549 { 0, 1 }, { 0, 1 }, { 1, 2 }, { 0, 1 },
550 { 1, 3 }, { 0, 1 }, { 1, 4 }, { 2, 9 },
551 { 1, 5 }, { 1, 6 }, { 0, 1 }, { 0, 1 },
552 { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
553};
554
555static void __init kirkwood_get_clk_ratio(
556 void __iomem *sar, int id, int *mult, int *div)
557{
558 switch (id) {
559 case KIRKWOOD_CPU_TO_L2:
560 {
561 u32 opt = SAR_KIRKWOOD_L2_RATIO(readl(sar));
562 *mult = kirkwood_cpu_l2_ratios[opt][0];
563 *div = kirkwood_cpu_l2_ratios[opt][1];
564 break;
565 }
566 case KIRKWOOD_CPU_TO_DDR:
567 {
568 u32 opt = (readl(sar) >> SAR_KIRKWOOD_DDR_RATIO) &
569 SAR_KIRKWOOD_DDR_RATIO_MASK;
570 *mult = kirkwood_cpu_ddr_ratios[opt][0];
571 *div = kirkwood_cpu_ddr_ratios[opt][1];
572 break;
573 }
574 }
575}
576
577static const struct core_clocks kirkwood_core_clocks = {
578 .get_tclk_freq = kirkwood_get_tclk_freq,
579 .get_cpu_freq = kirkwood_get_cpu_freq,
580 .get_clk_ratio = kirkwood_get_clk_ratio,
581 .ratios = kirkwood_core_ratios,
582 .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
583};
584
585static const u32 __initconst mv88f6180_cpu_frequencies[] = {
586 0, 0, 0, 0, 0,
587 600000000,
588 800000000,
589 1000000000
590};
591
592static u32 __init mv88f6180_get_cpu_freq(void __iomem *sar)
593{
594 u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) & SAR_MV88F6180_CLK_MASK;
595 return mv88f6180_cpu_frequencies[opt];
596}
597
598static const int __initconst mv88f6180_cpu_ddr_ratios[8][2] = {
599 { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
600 { 0, 1 }, { 1, 3 }, { 1, 4 }, { 1, 5 }
601};
602
603static void __init mv88f6180_get_clk_ratio(
604 void __iomem *sar, int id, int *mult, int *div)
605{
606 switch (id) {
607 case KIRKWOOD_CPU_TO_L2:
608 {
609 /* mv88f6180 has a fixed 1:2 CPU-to-L2 ratio */
610 *mult = 1;
611 *div = 2;
612 break;
613 }
614 case KIRKWOOD_CPU_TO_DDR:
615 {
616 u32 opt = (readl(sar) >> SAR_MV88F6180_CLK) &
617 SAR_MV88F6180_CLK_MASK;
618 *mult = mv88f6180_cpu_ddr_ratios[opt][0];
619 *div = mv88f6180_cpu_ddr_ratios[opt][1];
620 break;
621 }
622 }
623}
624
625static const struct core_clocks mv88f6180_core_clocks = {
626 .get_tclk_freq = kirkwood_get_tclk_freq,
627 .get_cpu_freq = mv88f6180_get_cpu_freq,
628 .get_clk_ratio = mv88f6180_get_clk_ratio,
629 .ratios = kirkwood_core_ratios,
630 .num_ratios = ARRAY_SIZE(kirkwood_core_ratios),
631};
632#endif /* CONFIG_ARCH_KIRKWOOD */
633
634static const __initconst struct of_device_id clk_core_match[] = {
635#ifdef CONFIG_MACH_ARMADA_370_XP
636 {
637 .compatible = "marvell,armada-370-core-clock",
638 .data = &armada_370_core_clocks,
639 },
640 {
641 .compatible = "marvell,armada-xp-core-clock",
642 .data = &armada_xp_core_clocks,
643 },
644#endif
645#ifdef CONFIG_ARCH_DOVE
646 {
647 .compatible = "marvell,dove-core-clock",
648 .data = &dove_core_clocks,
649 },
650#endif
651
652#ifdef CONFIG_ARCH_KIRKWOOD
653 {
654 .compatible = "marvell,kirkwood-core-clock",
655 .data = &kirkwood_core_clocks,
656 },
657 {
658 .compatible = "marvell,mv88f6180-core-clock",
659 .data = &mv88f6180_core_clocks,
660 },
661#endif
662
663 { }
664};
665
666void __init mvebu_core_clk_init(void)
667{
668 struct device_node *np;
669
670 for_each_matching_node(np, clk_core_match) {
671 const struct of_device_id *match =
672 of_match_node(clk_core_match, np);
673 mvebu_clk_core_setup(np, (struct core_clocks *)match->data);
674 }
675}
diff --git a/drivers/clk/mvebu/clk-core.h b/drivers/clk/mvebu/clk-core.h
new file mode 100644
index 000000000000..28b5e02e9885
--- /dev/null
+++ b/drivers/clk/mvebu/clk-core.h
@@ -0,0 +1,18 @@
1/*
2 * Marvell EBU core clock handling, defined at reset
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef __MVEBU_CLK_CORE_H
14#define __MVEBU_CLK_CORE_H
15
16void __init mvebu_core_clk_init(void);
17
18#endif
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
new file mode 100644
index 000000000000..ff004578a119
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -0,0 +1,186 @@
1/*
2 * Marvell MVEBU CPU clock handling.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12#include <linux/kernel.h>
13#include <linux/clkdev.h>
14#include <linux/clk-provider.h>
15#include <linux/of_address.h>
16#include <linux/io.h>
17#include <linux/of.h>
18#include <linux/delay.h>
19#include "clk-cpu.h"
20
21#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
22#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
23#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
24
25#define MAX_CPU 4
26struct cpu_clk {
27 struct clk_hw hw;
28 int cpu;
29 const char *clk_name;
30 const char *parent_name;
31 void __iomem *reg_base;
32};
33
34static struct clk **clks;
35
36static struct clk_onecell_data clk_data;
37
38#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
39
40static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
41 unsigned long parent_rate)
42{
43 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
44 u32 reg, div;
45
46 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
47 div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
48 return parent_rate / div;
49}
50
51static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
52 unsigned long *parent_rate)
53{
54 /* Valid ratios are 1:1, 1:2 and 1:3 */
55 u32 div;
56
57 div = *parent_rate / rate;
58 if (div == 0)
59 div = 1;
60 else if (div > 3)
61 div = 3;
62
63 return *parent_rate / div;
64}
65
66static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
67 unsigned long parent_rate)
68{
69 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
70 u32 reg, div;
71 u32 reload_mask;
72
73 div = parent_rate / rate;
74 reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
75 & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
76 | (div << (cpuclk->cpu * 8));
77 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
78 /* Set clock divider reload smooth bit mask */
79 reload_mask = 1 << (20 + cpuclk->cpu);
80
81 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
82 | reload_mask;
83 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
84
85 /* Now trigger the clock update */
86 reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
87 | 1 << 24;
88 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
89
90 /* Wait for clocks to settle down then clear reload request */
91 udelay(1000);
92 reg &= ~(reload_mask | 1 << 24);
93 writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
94 udelay(1000);
95
96 return 0;
97}
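
For reference, the register layout this set_rate path programs, as read from the code above: each CPU owns a 6-bit divider at bit offset cpu * 8 of the VALUE register, bit (20 + cpu) of the CTRL register arms a smooth reload for that CPU, and bit 24 latches the new dividers. A worked instance (editorial sketch, hypothetical values):

	/* Program divider 3 for CPU 1, mirroring clk_cpu_set_rate() */
	u32 val = (readl(base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		   & ~(SYS_CTRL_CLK_DIVIDER_MASK << 8)) | (3 << 8);
	writel(val, base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* then set bit 21 (smooth reload, CPU 1) and bit 24 (trigger) */
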
98
99static const struct clk_ops cpu_ops = {
100 .recalc_rate = clk_cpu_recalc_rate,
101 .round_rate = clk_cpu_round_rate,
102 .set_rate = clk_cpu_set_rate,
103};
104
105void __init of_cpu_clk_setup(struct device_node *node)
106{
107 struct cpu_clk *cpuclk;
108 void __iomem *clock_complex_base = of_iomap(node, 0);
109 int ncpus = 0;
110 struct device_node *dn;
111
112 if (clock_complex_base == NULL) {
113 pr_err("%s: clock-complex base register not set\n",
114 __func__);
115 return;
116 }
117
118 for_each_node_by_type(dn, "cpu")
119 ncpus++;
120
121 cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
122 if (WARN_ON(!cpuclk))
123 return;
124
125 clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
126 if (WARN_ON(!clks))
127 return;
128
129 for_each_node_by_type(dn, "cpu") {
130 struct clk_init_data init;
131 struct clk *clk;
132 struct clk *parent_clk;
133 char *clk_name = kzalloc(5, GFP_KERNEL);
134 u32 cpu; int err;
135
136 if (WARN_ON(!clk_name))
137 return;
138
139 err = of_property_read_u32(dn, "reg", &cpu);
140 if (WARN_ON(err))
141 return;
142
143 sprintf(clk_name, "cpu%u", cpu);
144 parent_clk = of_clk_get(node, 0);
145
146 cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
147 cpuclk[cpu].clk_name = clk_name;
148 cpuclk[cpu].cpu = cpu;
149 cpuclk[cpu].reg_base = clock_complex_base;
150 cpuclk[cpu].hw.init = &init;
151
152 init.name = cpuclk[cpu].clk_name;
153 init.ops = &cpu_ops;
154 init.flags = 0;
155 init.parent_names = &cpuclk[cpu].parent_name;
156 init.num_parents = 1;
157
158 clk = clk_register(NULL, &cpuclk[cpu].hw);
159 if (WARN_ON(IS_ERR(clk)))
160 goto bail_out;
161 clks[cpu] = clk;
162 }
163 clk_data.clk_num = ncpus;
164 clk_data.clks = clks;
165 of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
166
167 return;
168bail_out:
169 kfree(clks);
170 kfree(cpuclk);
171}
172
173static const __initconst struct of_device_id clk_cpu_match[] = {
174 {
175 .compatible = "marvell,armada-xp-cpu-clock",
176 .data = of_cpu_clk_setup,
177 },
178 {
179 /* sentinel */
180 },
181};
182
183void __init mvebu_cpu_clk_init(void)
184{
185 of_clk_init(clk_cpu_match);
186}
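
With the provider above registered, the per-CPU clocks are driven through the generic clk API. A hedged consumer sketch (editorial; the function name is hypothetical, and a real caller would first resolve the clock, e.g. with of_clk_get() on the cpu node):

	#include <linux/clk.h>

	/* Drop one CPU to a third of the parent rate; clk_cpu_round_rate()
	 * clamps the divider to 1..3, so this lands on the 1:3 ratio. */
	static int cpu_clk_slow_down(struct clk *cpu_clk)
	{
		unsigned long parent_rate =
			clk_get_rate(clk_get_parent(cpu_clk));

		return clk_set_rate(cpu_clk, parent_rate / 3);
	}
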
diff --git a/drivers/clk/mvebu/clk-cpu.h b/drivers/clk/mvebu/clk-cpu.h
new file mode 100644
index 000000000000..08e2affba4e6
--- /dev/null
+++ b/drivers/clk/mvebu/clk-cpu.h
@@ -0,0 +1,22 @@
1/*
2 * Marvell MVEBU CPU clock handling.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef __MVEBU_CLK_CPU_H
14#define __MVEBU_CLK_CPU_H
15
16#ifdef CONFIG_MVEBU_CLK_CPU
17void __init mvebu_cpu_clk_init(void);
18#else
19static inline void mvebu_cpu_clk_init(void) {}
20#endif
21
22#endif
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
new file mode 100644
index 000000000000..c6d3c263b070
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -0,0 +1,249 @@
1/*
2 * Marvell MVEBU clock gating control.
3 *
4 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
5 * Andrew Lunn <andrew@lunn.ch>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11#include <linux/kernel.h>
12#include <linux/bitops.h>
13#include <linux/io.h>
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/clk-provider.h>
17#include <linux/clk/mvebu.h>
18#include <linux/of.h>
19#include <linux/of_address.h>
20
21struct mvebu_gating_ctrl {
22 spinlock_t lock;
23 struct clk **gates;
24 int num_gates;
25};
26
27struct mvebu_soc_descr {
28 const char *name;
29 const char *parent;
30 int bit_idx;
31};
32
33#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
34
35static struct clk *mvebu_clk_gating_get_src(
36 struct of_phandle_args *clkspec, void *data)
37{
38 struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
39 int n;
40
41 if (clkspec->args_count < 1)
42 return ERR_PTR(-EINVAL);
43
44 for (n = 0; n < ctrl->num_gates; n++) {
45 struct clk_gate *gate =
46 to_clk_gate(__clk_get_hw(ctrl->gates[n]));
47 if (clkspec->args[0] == gate->bit_idx)
48 return ctrl->gates[n];
49 }
50 return ERR_PTR(-ENODEV);
51}
52
53static void __init mvebu_clk_gating_setup(
54 struct device_node *np, const struct mvebu_soc_descr *descr)
55{
56 struct mvebu_gating_ctrl *ctrl;
57 struct clk *clk;
58 void __iomem *base;
59 const char *default_parent = NULL;
60 int n;
61
62 base = of_iomap(np, 0);
63
64 clk = of_clk_get(np, 0);
65 if (!IS_ERR(clk)) {
66 default_parent = __clk_get_name(clk);
67 clk_put(clk);
68 }
69
70 ctrl = kzalloc(sizeof(struct mvebu_gating_ctrl), GFP_KERNEL);
71 if (WARN_ON(!ctrl))
72 return;
73
74 spin_lock_init(&ctrl->lock);
75
76 /*
77 * Count, allocate, and register clock gates
78 */
79 for (n = 0; descr[n].name;)
80 n++;
81
82 ctrl->num_gates = n;
83 ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
84 GFP_KERNEL);
85 if (WARN_ON(!ctrl->gates)) {
86 kfree(ctrl);
87 return;
88 }
89
90 for (n = 0; n < ctrl->num_gates; n++) {
91 u8 flags = 0;
92 const char *parent =
93 (descr[n].parent) ? descr[n].parent : default_parent;
94
95 /*
96 * On Armada 370, the DDR clock is a special case: it
97 * isn't taken by any driver, but should anyway be
98 * kept enabled, so we mark it as IGNORE_UNUSED for
99 * now.
100 */
101 if (!strcmp(descr[n].name, "ddr"))
102 flags |= CLK_IGNORE_UNUSED;
103
104 ctrl->gates[n] = clk_register_gate(NULL, descr[n].name, parent,
105 flags, base, descr[n].bit_idx, 0, &ctrl->lock);
106 WARN_ON(IS_ERR(ctrl->gates[n]));
107 }
108 of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
109}
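
The provider's one-cell specifier is simply the gate's bit index: a device-tree consumer says e.g. <&gateclk 17> to pick the Armada 370 "sdio" gate from the table below, and mvebu_clk_gating_get_src() linearly scans ctrl->gates[] for a matching bit_idx. A hedged consumer sketch (editorial; sdio_node is a hypothetical, already-resolved device node):

	struct clk *sdio_clk;

	/* Resolves through mvebu_clk_gating_get_src(); the DT cell
	 * becomes clkspec->args[0], i.e. the gate bit index. */
	sdio_clk = of_clk_get(sdio_node, 0);
	if (!IS_ERR(sdio_clk))
		clk_prepare_enable(sdio_clk);
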
110
111/*
112 * SoC specific clock gating control
113 */
114
115#ifdef CONFIG_MACH_ARMADA_370
116static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
117 { "audio", NULL, 0 },
118 { "pex0_en", NULL, 1 },
119 { "pex1_en", NULL, 2 },
120 { "ge1", NULL, 3 },
121 { "ge0", NULL, 4 },
122 { "pex0", NULL, 5 },
123 { "pex1", NULL, 9 },
124 { "sata0", NULL, 15 },
125 { "sdio", NULL, 17 },
126 { "tdm", NULL, 25 },
127 { "ddr", NULL, 28 },
128 { "sata1", NULL, 30 },
129 { }
130};
131#endif
132
133#ifdef CONFIG_MACH_ARMADA_XP
134static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
135 { "audio", NULL, 0 },
136 { "ge3", NULL, 1 },
137 { "ge2", NULL, 2 },
138 { "ge1", NULL, 3 },
139 { "ge0", NULL, 4 },
140 { "pex0", NULL, 5 },
141 { "pex1", NULL, 6 },
142 { "pex2", NULL, 7 },
143 { "pex3", NULL, 8 },
144 { "bp", NULL, 13 },
145 { "sata0lnk", NULL, 14 },
146 { "sata0", "sata0lnk", 15 },
147 { "lcd", NULL, 16 },
148 { "sdio", NULL, 17 },
149 { "usb0", NULL, 18 },
150 { "usb1", NULL, 19 },
151 { "usb2", NULL, 20 },
152 { "xor0", NULL, 22 },
153 { "crypto", NULL, 23 },
154 { "tdm", NULL, 25 },
155 { "xor1", NULL, 28 },
156 { "sata1lnk", NULL, 29 },
157 { "sata1", "sata1lnk", 30 },
158 { }
159};
160#endif
161
162#ifdef CONFIG_ARCH_DOVE
163static const struct mvebu_soc_descr __initconst dove_gating_descr[] = {
164 { "usb0", NULL, 0 },
165 { "usb1", NULL, 1 },
166 { "ge", "gephy", 2 },
167 { "sata", NULL, 3 },
168 { "pex0", NULL, 4 },
169 { "pex1", NULL, 5 },
170 { "sdio0", NULL, 8 },
171 { "sdio1", NULL, 9 },
172 { "nand", NULL, 10 },
173 { "camera", NULL, 11 },
174 { "i2s0", NULL, 12 },
175 { "i2s1", NULL, 13 },
176 { "crypto", NULL, 15 },
177 { "ac97", NULL, 21 },
178 { "pdma", NULL, 22 },
179 { "xor0", NULL, 23 },
180 { "xor1", NULL, 24 },
181 { "gephy", NULL, 30 },
182 { }
183};
184#endif
185
186#ifdef CONFIG_ARCH_KIRKWOOD
187static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
188 { "ge0", NULL, 0 },
189 { "pex0", NULL, 2 },
190 { "usb0", NULL, 3 },
191 { "sdio", NULL, 4 },
192 { "tsu", NULL, 5 },
193 { "runit", NULL, 7 },
194 { "xor0", NULL, 8 },
195 { "audio", NULL, 9 },
196 { "sata0", NULL, 14 },
197 { "sata1", NULL, 15 },
198 { "xor1", NULL, 16 },
199 { "crypto", NULL, 17 },
200 { "pex1", NULL, 18 },
201 { "ge1", NULL, 19 },
202 { "tdm", NULL, 20 },
203 { }
204};
205#endif
206
207static const __initconst struct of_device_id clk_gating_match[] = {
208#ifdef CONFIG_MACH_ARMADA_370
209 {
210 .compatible = "marvell,armada-370-gating-clock",
211 .data = armada_370_gating_descr,
212 },
213#endif
214
215#ifdef CONFIG_MACH_ARMADA_XP
216 {
217 .compatible = "marvell,armada-xp-gating-clock",
218 .data = armada_xp_gating_descr,
219 },
220#endif
221
222#ifdef CONFIG_ARCH_DOVE
223 {
224 .compatible = "marvell,dove-gating-clock",
225 .data = dove_gating_descr,
226 },
227#endif
228
229#ifdef CONFIG_ARCH_KIRKWOOD
230 {
231 .compatible = "marvell,kirkwood-gating-clock",
232 .data = kirkwood_gating_descr,
233 },
234#endif
235
236 { }
237};
238
239void __init mvebu_gating_clk_init(void)
240{
241 struct device_node *np;
242
243 for_each_matching_node(np, clk_gating_match) {
244 const struct of_device_id *match =
245 of_match_node(clk_gating_match, np);
246 mvebu_clk_gating_setup(np,
247 (const struct mvebu_soc_descr *)match->data);
248 }
249}
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.h b/drivers/clk/mvebu/clk-gating-ctrl.h
new file mode 100644
index 000000000000..9275d1e51f1b
--- /dev/null
+++ b/drivers/clk/mvebu/clk-gating-ctrl.h
@@ -0,0 +1,22 @@
1/*
2 * Marvell EBU gating clock handling
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#ifndef __MVEBU_CLK_GATING_H
14#define __MVEBU_CLK_GATING_H
15
16#ifdef CONFIG_MVEBU_CLK_GATING
17void __init mvebu_gating_clk_init(void);
18#else
19static inline void mvebu_gating_clk_init(void) {}
20#endif
21
22#endif
diff --git a/drivers/clk/mvebu/clk.c b/drivers/clk/mvebu/clk.c
new file mode 100644
index 000000000000..855681b8a9dc
--- /dev/null
+++ b/drivers/clk/mvebu/clk.c
@@ -0,0 +1,27 @@
1/*
2 * Marvell EBU SoC clock handling.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12#include <linux/kernel.h>
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/of_address.h>
16#include <linux/clk/mvebu.h>
17#include <linux/of.h>
18#include "clk-core.h"
19#include "clk-cpu.h"
20#include "clk-gating-ctrl.h"
21
22void __init mvebu_clocks_init(void)
23{
24 mvebu_core_clk_init();
25 mvebu_gating_clk_init();
26 mvebu_cpu_clk_init();
27}
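
mvebu_clocks_init() must run before early consumers look up their clocks. A plausible call site (editorial sketch; the exact hook name is an assumption based on the timer change below) is the machine's clocksource init:

	static void __init armada_370_xp_timer_and_clk_init(void)
	{
		mvebu_clocks_init();		/* register all providers... */
		armada_370_xp_timer_init();	/* ...before the timer of_clk_get()s its source */
	}
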
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4674f94957cd..a4605fd7e303 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/clk.h>
21#include <linux/timer.h> 22#include <linux/timer.h>
22#include <linux/clockchips.h> 23#include <linux/clockchips.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
@@ -167,7 +168,6 @@ void __init armada_370_xp_timer_init(void)
167 u32 u; 168 u32 u;
168 struct device_node *np; 169 struct device_node *np;
169 unsigned int timer_clk; 170 unsigned int timer_clk;
170 int ret;
171 np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer"); 171 np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
172 timer_base = of_iomap(np, 0); 172 timer_base = of_iomap(np, 0);
173 WARN_ON(!timer_base); 173 WARN_ON(!timer_base);
@@ -179,13 +179,14 @@ void __init armada_370_xp_timer_init(void)
179 timer_base + TIMER_CTRL_OFF); 179 timer_base + TIMER_CTRL_OFF);
180 timer_clk = 25000000; 180 timer_clk = 25000000;
181 } else { 181 } else {
182 u32 clk = 0; 182 unsigned long rate = 0;
183 ret = of_property_read_u32(np, "clock-frequency", &clk); 183 struct clk *clk = of_clk_get(np, 0);
184 WARN_ON(!clk || ret < 0); 184 WARN_ON(IS_ERR(clk));
185 rate = clk_get_rate(clk);
185 u = readl(timer_base + TIMER_CTRL_OFF); 186 u = readl(timer_base + TIMER_CTRL_OFF);
186 writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ), 187 writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
187 timer_base + TIMER_CTRL_OFF); 188 timer_base + TIMER_CTRL_OFF);
188 timer_clk = clk / TIMER_DIVIDER; 189 timer_clk = rate / TIMER_DIVIDER;
189 } 190 }
190 191
191 /* We use timer 0 as clocksource, and timer 1 for 192 /* We use timer 0 as clocksource, and timer 1 for
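
One caveat in the hunk above: WARN_ON(IS_ERR(clk)) only warns, and clk_get_rate() is then still called on what may be an error pointer. A slightly more defensive variant (editorial sketch, not what the patch does):

	struct clk *clk = of_clk_get(np, 0);

	if (!WARN_ON(IS_ERR(clk)))
		rate = clk_get_rate(clk);
	/* else rate stays 0, so timer_clk ends up 0: still broken,
	 * but without dereferencing an ERR_PTR value */
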
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e362e2b80efb..9659e58fc8b2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/memory.h> 27#include <linux/memory.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/of.h>
30#include <linux/of_irq.h>
31#include <linux/irqdomain.h>
29#include <linux/platform_data/dma-mv_xor.h> 32#include <linux/platform_data/dma-mv_xor.h>
30 33
31#include "dmaengine.h" 34#include "dmaengine.h"
@@ -34,14 +37,14 @@
34static void mv_xor_issue_pending(struct dma_chan *chan); 37static void mv_xor_issue_pending(struct dma_chan *chan);
35 38
36#define to_mv_xor_chan(chan) \ 39#define to_mv_xor_chan(chan) \
37 container_of(chan, struct mv_xor_chan, common) 40 container_of(chan, struct mv_xor_chan, dmachan)
38
39#define to_mv_xor_device(dev) \
40 container_of(dev, struct mv_xor_device, common)
41 41
42#define to_mv_xor_slot(tx) \ 42#define to_mv_xor_slot(tx) \
43 container_of(tx, struct mv_xor_desc_slot, async_tx) 43 container_of(tx, struct mv_xor_desc_slot, async_tx)
44 44
45#define mv_chan_to_devp(chan) \
46 ((chan)->dmadev.dev)
47
45static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) 48static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
46{ 49{
47 struct mv_xor_desc *hw_desc = desc->hw_desc; 50 struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -166,7 +169,7 @@ static int mv_is_err_intr(u32 intr_cause)
166static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 169static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
167{ 170{
168 u32 val = ~(1 << (chan->idx * 16)); 171 u32 val = ~(1 << (chan->idx * 16));
169 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 172 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
170 __raw_writel(val, XOR_INTR_CAUSE(chan)); 173 __raw_writel(val, XOR_INTR_CAUSE(chan));
171} 174}
172 175
@@ -206,9 +209,9 @@ static void mv_set_mode(struct mv_xor_chan *chan,
206 op_mode = XOR_OPERATION_MODE_MEMSET; 209 op_mode = XOR_OPERATION_MODE_MEMSET;
207 break; 210 break;
208 default: 211 default:
209 dev_printk(KERN_ERR, chan->device->common.dev, 212 dev_err(mv_chan_to_devp(chan),
210 "error: unsupported operation %d.\n", 213 "error: unsupported operation %d.\n",
211 type); 214 type);
212 BUG(); 215 BUG();
213 return; 216 return;
214 } 217 }
@@ -223,7 +226,7 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
223{ 226{
224 u32 activation; 227 u32 activation;
225 228
226 dev_dbg(chan->device->common.dev, " activate chan.\n"); 229 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
227 activation = __raw_readl(XOR_ACTIVATION(chan)); 230 activation = __raw_readl(XOR_ACTIVATION(chan));
228 activation |= 0x1; 231 activation |= 0x1;
229 __raw_writel(activation, XOR_ACTIVATION(chan)); 232 __raw_writel(activation, XOR_ACTIVATION(chan));
@@ -251,7 +254,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
251static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, 254static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
252 struct mv_xor_desc_slot *slot) 255 struct mv_xor_desc_slot *slot)
253{ 256{
254 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n", 257 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
255 __func__, __LINE__, slot); 258 __func__, __LINE__, slot);
256 259
257 slot->slots_per_op = 0; 260 slot->slots_per_op = 0;
@@ -266,7 +269,7 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
266static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, 269static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
267 struct mv_xor_desc_slot *sw_desc) 270 struct mv_xor_desc_slot *sw_desc)
268{ 271{
269 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n", 272 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
270 __func__, __LINE__, sw_desc); 273 __func__, __LINE__, sw_desc);
271 if (sw_desc->type != mv_chan->current_type) 274 if (sw_desc->type != mv_chan->current_type)
272 mv_set_mode(mv_chan, sw_desc->type); 275 mv_set_mode(mv_chan, sw_desc->type);
@@ -284,7 +287,7 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
284 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); 287 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
285 } 288 }
286 mv_chan->pending += sw_desc->slot_cnt; 289 mv_chan->pending += sw_desc->slot_cnt;
287 mv_xor_issue_pending(&mv_chan->common); 290 mv_xor_issue_pending(&mv_chan->dmachan);
288} 291}
289 292
290static dma_cookie_t 293static dma_cookie_t
@@ -308,8 +311,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
308 */ 311 */
309 if (desc->group_head && desc->unmap_len) { 312 if (desc->group_head && desc->unmap_len) {
310 struct mv_xor_desc_slot *unmap = desc->group_head; 313 struct mv_xor_desc_slot *unmap = desc->group_head;
311 struct device *dev = 314 struct device *dev = mv_chan_to_devp(mv_chan);
312 &mv_chan->device->pdev->dev;
313 u32 len = unmap->unmap_len; 315 u32 len = unmap->unmap_len;
314 enum dma_ctrl_flags flags = desc->async_tx.flags; 316 enum dma_ctrl_flags flags = desc->async_tx.flags;
315 u32 src_cnt; 317 u32 src_cnt;
@@ -353,7 +355,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
353{ 355{
354 struct mv_xor_desc_slot *iter, *_iter; 356 struct mv_xor_desc_slot *iter, *_iter;
355 357
356 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); 358 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
357 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 359 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
358 completed_node) { 360 completed_node) {
359 361
@@ -369,7 +371,7 @@ static int
369mv_xor_clean_slot(struct mv_xor_desc_slot *desc, 371mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
370 struct mv_xor_chan *mv_chan) 372 struct mv_xor_chan *mv_chan)
371{ 373{
372 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n", 374 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
373 __func__, __LINE__, desc, desc->async_tx.flags); 375 __func__, __LINE__, desc, desc->async_tx.flags);
374 list_del(&desc->chain_node); 376 list_del(&desc->chain_node);
375 /* the client is allowed to attach dependent operations 377 /* the client is allowed to attach dependent operations
@@ -393,8 +395,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
393 u32 current_desc = mv_chan_get_current_desc(mv_chan); 395 u32 current_desc = mv_chan_get_current_desc(mv_chan);
394 int seen_current = 0; 396 int seen_current = 0;
395 397
396 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__); 398 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
397 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc); 399 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
398 mv_xor_clean_completed_slots(mv_chan); 400 mv_xor_clean_completed_slots(mv_chan);
399 401
400 /* free completed slots from the chain starting with 402 /* free completed slots from the chain starting with
@@ -438,7 +440,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
438 } 440 }
439 441
440 if (cookie > 0) 442 if (cookie > 0)
441 mv_chan->common.completed_cookie = cookie; 443 mv_chan->dmachan.completed_cookie = cookie;
442} 444}
443 445
444static void 446static void
@@ -547,7 +549,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
547 dma_cookie_t cookie; 549 dma_cookie_t cookie;
548 int new_hw_chain = 1; 550 int new_hw_chain = 1;
549 551
550 dev_dbg(mv_chan->device->common.dev, 552 dev_dbg(mv_chan_to_devp(mv_chan),
551 "%s sw_desc %p: async_tx %p\n", 553 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx); 554 __func__, sw_desc, &sw_desc->async_tx);
553 555
@@ -570,7 +572,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
570 if (!mv_can_chain(grp_start)) 572 if (!mv_can_chain(grp_start))
571 goto submit_done; 573 goto submit_done;
572 574
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n", 575 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys); 576 old_chain_tail->async_tx.phys);
575 577
576 /* fix up the hardware chain */ 578 /* fix up the hardware chain */
@@ -604,9 +606,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
604 int idx; 606 int idx;
605 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 607 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
606 struct mv_xor_desc_slot *slot = NULL; 608 struct mv_xor_desc_slot *slot = NULL;
607 struct mv_xor_platform_data *plat_data = 609 int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
608 mv_chan->device->pdev->dev.platform_data;
609 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
610 610
611 /* Allocate descriptor slots */ 611 /* Allocate descriptor slots */
612 idx = mv_chan->slots_allocated; 612 idx = mv_chan->slots_allocated;
@@ -617,7 +617,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
617 " %d descriptor slots", idx); 617 " %d descriptor slots", idx);
618 break; 618 break;
619 } 619 }
620 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; 620 hw_desc = (char *) mv_chan->dma_desc_pool_virt;
621 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 621 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
622 622
623 dma_async_tx_descriptor_init(&slot->async_tx, chan); 623 dma_async_tx_descriptor_init(&slot->async_tx, chan);
@@ -625,7 +625,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
625 INIT_LIST_HEAD(&slot->chain_node); 625 INIT_LIST_HEAD(&slot->chain_node);
626 INIT_LIST_HEAD(&slot->slot_node); 626 INIT_LIST_HEAD(&slot->slot_node);
627 INIT_LIST_HEAD(&slot->tx_list); 627 INIT_LIST_HEAD(&slot->tx_list);
628 hw_desc = (char *) mv_chan->device->dma_desc_pool; 628 hw_desc = (char *) mv_chan->dma_desc_pool;
629 slot->async_tx.phys = 629 slot->async_tx.phys =
630 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 630 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
631 slot->idx = idx++; 631 slot->idx = idx++;
@@ -641,7 +641,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
641 struct mv_xor_desc_slot, 641 struct mv_xor_desc_slot,
642 slot_node); 642 slot_node);
643 643
644 dev_dbg(mv_chan->device->common.dev, 644 dev_dbg(mv_chan_to_devp(mv_chan),
645 "allocated %d descriptor slots last_used: %p\n", 645 "allocated %d descriptor slots last_used: %p\n",
646 mv_chan->slots_allocated, mv_chan->last_used); 646 mv_chan->slots_allocated, mv_chan->last_used);
647 647
@@ -656,7 +656,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
656 struct mv_xor_desc_slot *sw_desc, *grp_start; 656 struct mv_xor_desc_slot *sw_desc, *grp_start;
657 int slot_cnt; 657 int slot_cnt;
658 658
659 dev_dbg(mv_chan->device->common.dev, 659 dev_dbg(mv_chan_to_devp(mv_chan),
660 "%s dest: %x src %x len: %u flags: %ld\n", 660 "%s dest: %x src %x len: %u flags: %ld\n",
661 __func__, dest, src, len, flags); 661 __func__, dest, src, len, flags);
662 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 662 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -680,7 +680,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
680 } 680 }
681 spin_unlock_bh(&mv_chan->lock); 681 spin_unlock_bh(&mv_chan->lock);
682 682
683 dev_dbg(mv_chan->device->common.dev, 683 dev_dbg(mv_chan_to_devp(mv_chan),
684 "%s sw_desc %p async_tx %p\n", 684 "%s sw_desc %p async_tx %p\n",
685 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); 685 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
686 686
@@ -695,7 +695,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
695 struct mv_xor_desc_slot *sw_desc, *grp_start; 695 struct mv_xor_desc_slot *sw_desc, *grp_start;
696 int slot_cnt; 696 int slot_cnt;
697 697
698 dev_dbg(mv_chan->device->common.dev, 698 dev_dbg(mv_chan_to_devp(mv_chan),
699 "%s dest: %x len: %u flags: %ld\n", 699 "%s dest: %x len: %u flags: %ld\n",
700 __func__, dest, len, flags); 700 __func__, dest, len, flags);
701 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 701 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
@@ -718,7 +718,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
718 sw_desc->unmap_len = len; 718 sw_desc->unmap_len = len;
719 } 719 }
720 spin_unlock_bh(&mv_chan->lock); 720 spin_unlock_bh(&mv_chan->lock);
721 dev_dbg(mv_chan->device->common.dev, 721 dev_dbg(mv_chan_to_devp(mv_chan),
722 "%s sw_desc %p async_tx %p \n", 722 "%s sw_desc %p async_tx %p \n",
723 __func__, sw_desc, &sw_desc->async_tx); 723 __func__, sw_desc, &sw_desc->async_tx);
724 return sw_desc ? &sw_desc->async_tx : NULL; 724 return sw_desc ? &sw_desc->async_tx : NULL;
@@ -737,7 +737,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
737 737
738 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 738 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
739 739
740 dev_dbg(mv_chan->device->common.dev, 740 dev_dbg(mv_chan_to_devp(mv_chan),
741 "%s src_cnt: %d len: dest %x %u flags: %ld\n", 741 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
742 __func__, src_cnt, len, dest, flags); 742 __func__, src_cnt, len, dest, flags);
743 743
@@ -758,7 +758,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
758 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); 758 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
759 } 759 }
760 spin_unlock_bh(&mv_chan->lock); 760 spin_unlock_bh(&mv_chan->lock);
761 dev_dbg(mv_chan->device->common.dev, 761 dev_dbg(mv_chan_to_devp(mv_chan),
762 "%s sw_desc %p async_tx %p \n", 762 "%s sw_desc %p async_tx %p \n",
763 __func__, sw_desc, &sw_desc->async_tx); 763 __func__, sw_desc, &sw_desc->async_tx);
764 return sw_desc ? &sw_desc->async_tx : NULL; 764 return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +791,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
791 } 791 }
792 mv_chan->last_used = NULL; 792 mv_chan->last_used = NULL;
793 793
794 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n", 794 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
795 __func__, mv_chan->slots_allocated); 795 __func__, mv_chan->slots_allocated);
796 spin_unlock_bh(&mv_chan->lock); 796 spin_unlock_bh(&mv_chan->lock);
797 797
798 if (in_use_descs) 798 if (in_use_descs)
799 dev_err(mv_chan->device->common.dev, 799 dev_err(mv_chan_to_devp(mv_chan),
800 "freeing %d in use descriptors!\n", in_use_descs); 800 "freeing %d in use descriptors!\n", in_use_descs);
801} 801}
802 802
@@ -828,42 +828,42 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
828 u32 val; 828 u32 val;
829 829
830 val = __raw_readl(XOR_CONFIG(chan)); 830 val = __raw_readl(XOR_CONFIG(chan));
831 dev_printk(KERN_ERR, chan->device->common.dev, 831 dev_err(mv_chan_to_devp(chan),
832 "config 0x%08x.\n", val); 832 "config 0x%08x.\n", val);
833 833
834 val = __raw_readl(XOR_ACTIVATION(chan)); 834 val = __raw_readl(XOR_ACTIVATION(chan));
835 dev_printk(KERN_ERR, chan->device->common.dev, 835 dev_err(mv_chan_to_devp(chan),
836 "activation 0x%08x.\n", val); 836 "activation 0x%08x.\n", val);
837 837
838 val = __raw_readl(XOR_INTR_CAUSE(chan)); 838 val = __raw_readl(XOR_INTR_CAUSE(chan));
839 dev_printk(KERN_ERR, chan->device->common.dev, 839 dev_err(mv_chan_to_devp(chan),
840 "intr cause 0x%08x.\n", val); 840 "intr cause 0x%08x.\n", val);
841 841
842 val = __raw_readl(XOR_INTR_MASK(chan)); 842 val = __raw_readl(XOR_INTR_MASK(chan));
843 dev_printk(KERN_ERR, chan->device->common.dev, 843 dev_err(mv_chan_to_devp(chan),
844 "intr mask 0x%08x.\n", val); 844 "intr mask 0x%08x.\n", val);
845 845
846 val = __raw_readl(XOR_ERROR_CAUSE(chan)); 846 val = __raw_readl(XOR_ERROR_CAUSE(chan));
847 dev_printk(KERN_ERR, chan->device->common.dev, 847 dev_err(mv_chan_to_devp(chan),
848 "error cause 0x%08x.\n", val); 848 "error cause 0x%08x.\n", val);
849 849
850 val = __raw_readl(XOR_ERROR_ADDR(chan)); 850 val = __raw_readl(XOR_ERROR_ADDR(chan));
851 dev_printk(KERN_ERR, chan->device->common.dev, 851 dev_err(mv_chan_to_devp(chan),
852 "error addr 0x%08x.\n", val); 852 "error addr 0x%08x.\n", val);
853} 853}
854 854
855static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan, 855static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
856 u32 intr_cause) 856 u32 intr_cause)
857{ 857{
858 if (intr_cause & (1 << 4)) { 858 if (intr_cause & (1 << 4)) {
859 dev_dbg(chan->device->common.dev, 859 dev_dbg(mv_chan_to_devp(chan),
860 "ignore this error\n"); 860 "ignore this error\n");
861 return; 861 return;
862 } 862 }
863 863
864 dev_printk(KERN_ERR, chan->device->common.dev, 864 dev_err(mv_chan_to_devp(chan),
865 "error on chan %d. intr cause 0x%08x.\n", 865 "error on chan %d. intr cause 0x%08x.\n",
866 chan->idx, intr_cause); 866 chan->idx, intr_cause);
867 867
868 mv_dump_xor_regs(chan); 868 mv_dump_xor_regs(chan);
869 BUG(); 869 BUG();
@@ -874,7 +874,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
874 struct mv_xor_chan *chan = data; 874 struct mv_xor_chan *chan = data;
875 u32 intr_cause = mv_chan_get_intr_cause(chan); 875 u32 intr_cause = mv_chan_get_intr_cause(chan);
876 876
877 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause); 877 dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
878 878
879 if (mv_is_err_intr(intr_cause)) 879 if (mv_is_err_intr(intr_cause))
880 mv_xor_err_interrupt_handler(chan, intr_cause); 880 mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -901,7 +901,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
901 */ 901 */
902#define MV_XOR_TEST_SIZE 2000 902#define MV_XOR_TEST_SIZE 2000
903 903
904static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) 904static int __devinit mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
905{ 905{
906 int i; 906 int i;
907 void *src, *dest; 907 void *src, *dest;
@@ -910,7 +910,6 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
910 dma_cookie_t cookie; 910 dma_cookie_t cookie;
911 struct dma_async_tx_descriptor *tx; 911 struct dma_async_tx_descriptor *tx;
912 int err = 0; 912 int err = 0;
913 struct mv_xor_chan *mv_chan;
914 913
915 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 914 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
916 if (!src) 915 if (!src)
@@ -926,10 +925,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
926 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 925 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
927 ((u8 *) src)[i] = (u8)i; 926 ((u8 *) src)[i] = (u8)i;
928 927
929 /* Start copy, using first DMA channel */ 928 dma_chan = &mv_chan->dmachan;
930 dma_chan = container_of(device->common.channels.next,
931 struct dma_chan,
932 device_node);
933 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 929 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
934 err = -ENODEV; 930 err = -ENODEV;
935 goto out; 931 goto out;
@@ -950,18 +946,17 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
950 946
951 if (mv_xor_status(dma_chan, cookie, NULL) != 947 if (mv_xor_status(dma_chan, cookie, NULL) !=
952 DMA_SUCCESS) { 948 DMA_SUCCESS) {
953 dev_printk(KERN_ERR, dma_chan->device->dev, 949 dev_err(dma_chan->device->dev,
954 "Self-test copy timed out, disabling\n"); 950 "Self-test copy timed out, disabling\n");
955 err = -ENODEV; 951 err = -ENODEV;
956 goto free_resources; 952 goto free_resources;
957 } 953 }
958 954
959 mv_chan = to_mv_xor_chan(dma_chan); 955 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
960 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
961 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 956 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
962 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 957 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
963 dev_printk(KERN_ERR, dma_chan->device->dev, 958 dev_err(dma_chan->device->dev,
964 "Self-test copy failed compare, disabling\n"); 959 "Self-test copy failed compare, disabling\n");
965 err = -ENODEV; 960 err = -ENODEV;
966 goto free_resources; 961 goto free_resources;
967 } 962 }
@@ -976,7 +971,7 @@ out:
976 971
977#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ 972#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
978static int __devinit 973static int __devinit
979mv_xor_xor_self_test(struct mv_xor_device *device) 974mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
980{ 975{
981 int i, src_idx; 976 int i, src_idx;
982 struct page *dest; 977 struct page *dest;
@@ -989,7 +984,6 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
989 u8 cmp_byte = 0; 984 u8 cmp_byte = 0;
990 u32 cmp_word; 985 u32 cmp_word;
991 int err = 0; 986 int err = 0;
992 struct mv_xor_chan *mv_chan;
993 987
994 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 988 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
995 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 989 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
@@ -1022,9 +1016,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1022 1016
1023 memset(page_address(dest), 0, PAGE_SIZE); 1017 memset(page_address(dest), 0, PAGE_SIZE);
1024 1018
1025 dma_chan = container_of(device->common.channels.next, 1019 dma_chan = &mv_chan->dmachan;
1026 struct dma_chan,
1027 device_node);
1028 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 1020 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1029 err = -ENODEV; 1021 err = -ENODEV;
1030 goto out; 1022 goto out;
@@ -1048,22 +1040,21 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
1048 1040
1049 if (mv_xor_status(dma_chan, cookie, NULL) != 1041 if (mv_xor_status(dma_chan, cookie, NULL) !=
1050 DMA_SUCCESS) { 1042 DMA_SUCCESS) {
1051 dev_printk(KERN_ERR, dma_chan->device->dev, 1043 dev_err(dma_chan->device->dev,
1052 "Self-test xor timed out, disabling\n"); 1044 "Self-test xor timed out, disabling\n");
1053 err = -ENODEV; 1045 err = -ENODEV;
1054 goto free_resources; 1046 goto free_resources;
1055 } 1047 }
1056 1048
1057 mv_chan = to_mv_xor_chan(dma_chan); 1049 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
1058 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1059 PAGE_SIZE, DMA_FROM_DEVICE); 1050 PAGE_SIZE, DMA_FROM_DEVICE);
1060 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 1051 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1061 u32 *ptr = page_address(dest); 1052 u32 *ptr = page_address(dest);
1062 if (ptr[i] != cmp_word) { 1053 if (ptr[i] != cmp_word) {
1063 dev_printk(KERN_ERR, dma_chan->device->dev, 1054 dev_err(dma_chan->device->dev,
1064 "Self-test xor failed compare, disabling." 1055 "Self-test xor failed compare, disabling."
1065 " index %d, data %x, expected %x\n", i, 1056 " index %d, data %x, expected %x\n", i,
1066 ptr[i], cmp_word); 1057 ptr[i], cmp_word);
1067 err = -ENODEV; 1058 err = -ENODEV;
1068 goto free_resources; 1059 goto free_resources;
1069 } 1060 }
@@ -1079,62 +1070,66 @@ out:
1079 return err; 1070 return err;
1080} 1071}
1081 1072
1082static int __devexit mv_xor_remove(struct platform_device *dev) 1073/* This driver does not implement any of the optional DMA operations. */
1074static int
1075mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1076 unsigned long arg)
1077{
1078 return -ENOSYS;
1079}
1080
1081static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1083{ 1082{
1084 struct mv_xor_device *device = platform_get_drvdata(dev);
1085 struct dma_chan *chan, *_chan; 1083 struct dma_chan *chan, *_chan;
1086 struct mv_xor_chan *mv_chan; 1084 struct device *dev = mv_chan->dmadev.dev;
1087 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1088 1085
1089 dma_async_device_unregister(&device->common); 1086 dma_async_device_unregister(&mv_chan->dmadev);
1090 1087
1091 dma_free_coherent(&dev->dev, plat_data->pool_size, 1088 dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1092 device->dma_desc_pool_virt, device->dma_desc_pool); 1089 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1093 1090
1094 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1091 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1095 device_node) { 1092 device_node) {
1096 mv_chan = to_mv_xor_chan(chan);
1097 list_del(&chan->device_node); 1093 list_del(&chan->device_node);
1098 } 1094 }
1099 1095
1096 free_irq(mv_chan->irq, mv_chan);
1097
1100 return 0; 1098 return 0;
1101} 1099}
1102 1100
1103static int __devinit mv_xor_probe(struct platform_device *pdev) 1101static struct mv_xor_chan *
1102mv_xor_channel_add(struct mv_xor_device *xordev,
1103 struct platform_device *pdev,
1104 int idx, dma_cap_mask_t cap_mask, int irq)
1104{ 1105{
1105 int ret = 0; 1106 int ret = 0;
1106 int irq;
1107 struct mv_xor_device *adev;
1108 struct mv_xor_chan *mv_chan; 1107 struct mv_xor_chan *mv_chan;
1109 struct dma_device *dma_dev; 1108 struct dma_device *dma_dev;
1110 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1111 1109
1110 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1111 if (!mv_chan) {
1112 ret = -ENOMEM;
1113 goto err_free_dma;
1114 }
1112 1115
1113 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); 1116 mv_chan->idx = idx;
1114 if (!adev) 1117 mv_chan->irq = irq;
1115 return -ENOMEM;
1116 1118
1117 dma_dev = &adev->common; 1119 dma_dev = &mv_chan->dmadev;
1118 1120
1119 /* allocate coherent memory for hardware descriptors 1121 /* allocate coherent memory for hardware descriptors
1120 * note: writecombine gives slightly better performance, but 1122 * note: writecombine gives slightly better performance, but
1121 * requires that we explicitly flush the writes 1123 * requires that we explicitly flush the writes
1122 */ 1124 */
1123 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, 1125 mv_chan->dma_desc_pool_virt =
1124 plat_data->pool_size, 1126 dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1125 &adev->dma_desc_pool, 1127 &mv_chan->dma_desc_pool, GFP_KERNEL);
1126 GFP_KERNEL); 1128 if (!mv_chan->dma_desc_pool_virt)
1127 if (!adev->dma_desc_pool_virt) 1129 return ERR_PTR(-ENOMEM);
1128 return -ENOMEM;
1129
1130 adev->id = plat_data->hw_id;
1131 1130
 	/* discover transaction capabilities from the platform data */
-	dma_dev->cap_mask = plat_data->cap_mask;
-	adev->pdev = pdev;
-	platform_set_drvdata(pdev, adev);
-
-	adev->shared = platform_get_drvdata(plat_data->shared);
+	dma_dev->cap_mask = cap_mask;
 
 	INIT_LIST_HEAD(&dma_dev->channels);
 
@@ -1143,6 +1138,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
 	dma_dev->device_tx_status = mv_xor_status;
 	dma_dev->device_issue_pending = mv_xor_issue_pending;
+	dma_dev->device_control = mv_xor_control;
 	dma_dev->dev = &pdev->dev;
 
 	/* set prep routines based on capability */
@@ -1155,15 +1151,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
 	}
 
-	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
-	if (!mv_chan) {
-		ret = -ENOMEM;
-		goto err_free_dma;
-	}
-	mv_chan->device = adev;
-	mv_chan->idx = plat_data->hw_id;
-	mv_chan->mmr_base = adev->shared->xor_base;
-
+	mv_chan->mmr_base = xordev->xor_base;
 	if (!mv_chan->mmr_base) {
 		ret = -ENOMEM;
 		goto err_free_dma;
@@ -1174,14 +1162,8 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	/* clear errors before enabling interrupts */
 	mv_xor_device_clear_err_status(mv_chan);
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		ret = irq;
-		goto err_free_dma;
-	}
-	ret = devm_request_irq(&pdev->dev, irq,
-			       mv_xor_interrupt_handler,
-			       0, dev_name(&pdev->dev), mv_chan);
+	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+			  0, dev_name(&pdev->dev), mv_chan);
 	if (ret)
 		goto err_free_dma;
 
@@ -1193,26 +1175,26 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
 	INIT_LIST_HEAD(&mv_chan->all_slots);
-	mv_chan->common.device = dma_dev;
-	dma_cookie_init(&mv_chan->common);
+	mv_chan->dmachan.device = dma_dev;
+	dma_cookie_init(&mv_chan->dmachan);
 
-	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
 
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
-		ret = mv_xor_memcpy_self_test(adev);
+		ret = mv_xor_memcpy_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
-		ret = mv_xor_xor_self_test(adev);
+		ret = mv_xor_xor_self_test(mv_chan);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
-			goto err_free_dma;
+			goto err_free_irq;
 	}
 
-	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+	dev_info(&pdev->dev, "Marvell XOR: "
 		 "( %s%s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
@@ -1220,20 +1202,21 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
-	goto out;
+	return mv_chan;
 
+err_free_irq:
+	free_irq(mv_chan->irq, mv_chan);
 err_free_dma:
-	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
-			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
-out:
-	return ret;
+	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+	return ERR_PTR(ret);
 }
 
 static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
 			 const struct mbus_dram_target_info *dram)
 {
-	void __iomem *base = msp->xor_base;
+	void __iomem *base = xordev->xor_base;
 	u32 win_enable = 0;
 	int i;
 
@@ -1258,99 +1241,176 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
 }
 
-static struct platform_driver mv_xor_driver = {
-	.probe		= mv_xor_probe,
-	.remove		= __devexit_p(mv_xor_remove),
-	.driver		= {
-		.owner	= THIS_MODULE,
-		.name	= MV_XOR_NAME,
-	},
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int __devinit mv_xor_probe(struct platform_device *pdev)
 {
 	const struct mbus_dram_target_info *dram;
-	struct mv_xor_shared_private *msp;
+	struct mv_xor_device *xordev;
+	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
 	struct resource *res;
+	int i, ret;
 
-	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+	dev_notice(&pdev->dev, "Marvell XOR driver\n");
 
-	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
-	if (!msp)
+	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+	if (!xordev)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
-				     resource_size(res));
-	if (!msp->xor_base)
+	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
+	if (!xordev->xor_base)
 		return -EBUSY;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
-					  resource_size(res));
-	if (!msp->xor_high_base)
+	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
+	if (!xordev->xor_high_base)
 		return -EBUSY;
 
-	platform_set_drvdata(pdev, msp);
+	platform_set_drvdata(pdev, xordev);
 
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
 	dram = mv_mbus_dram_info();
 	if (dram)
-		mv_xor_conf_mbus_windows(msp, dram);
+		mv_xor_conf_mbus_windows(xordev, dram);
 
 	/* Not all platforms can gate the clock, so it is not
 	 * an error if the clock does not exist.
 	 */
-	msp->clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(msp->clk))
-		clk_prepare_enable(msp->clk);
+	xordev->clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(xordev->clk))
+		clk_prepare_enable(xordev->clk);
+
+	if (pdev->dev.of_node) {
+		struct device_node *np;
+		int i = 0;
+
+		for_each_child_of_node(pdev->dev.of_node, np) {
+			dma_cap_mask_t cap_mask;
+			int irq;
+
+			dma_cap_zero(cap_mask);
+			if (of_property_read_bool(np, "dmacap,memcpy"))
+				dma_cap_set(DMA_MEMCPY, cap_mask);
+			if (of_property_read_bool(np, "dmacap,xor"))
+				dma_cap_set(DMA_XOR, cap_mask);
+			if (of_property_read_bool(np, "dmacap,memset"))
+				dma_cap_set(DMA_MEMSET, cap_mask);
+			if (of_property_read_bool(np, "dmacap,interrupt"))
+				dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+			irq = irq_of_parse_and_map(np, 0);
+			if (!irq) {
+				ret = -ENODEV;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				xordev->channels[i] = NULL;
+				irq_dispose_mapping(irq);
+				goto err_channel_add;
+			}
+
+			i++;
+		}
+	} else if (pdata && pdata->channels) {
+		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+			struct mv_xor_channel_data *cd;
+			int irq;
+
+			cd = &pdata->channels[i];
+			if (!cd) {
+				ret = -ENODEV;
+				goto err_channel_add;
+			}
+
+			irq = platform_get_irq(pdev, i);
+			if (irq < 0) {
+				ret = irq;
+				goto err_channel_add;
+			}
+
+			xordev->channels[i] =
+				mv_xor_channel_add(xordev, pdev, i,
+						   cd->cap_mask, irq);
+			if (IS_ERR(xordev->channels[i])) {
+				ret = PTR_ERR(xordev->channels[i]);
+				goto err_channel_add;
+			}
+		}
+	}
 
 	return 0;
+
+err_channel_add:
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+		if (xordev->channels[i]) {
+			if (pdev->dev.of_node)
+				irq_dispose_mapping(xordev->channels[i]->irq);
+			mv_xor_channel_remove(xordev->channels[i]);
+		}
+
+	clk_disable_unprepare(xordev->clk);
+	clk_put(xordev->clk);
+	return ret;
 }
 
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int __devexit mv_xor_remove(struct platform_device *pdev)
{
-	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		if (xordev->channels[i])
+			mv_xor_channel_remove(xordev->channels[i]);
+	}
 
-	if (!IS_ERR(msp->clk)) {
-		clk_disable_unprepare(msp->clk);
-		clk_put(msp->clk);
+	if (!IS_ERR(xordev->clk)) {
+		clk_disable_unprepare(xordev->clk);
+		clk_put(xordev->clk);
 	}
 
 	return 0;
 }
 
-static struct platform_driver mv_xor_shared_driver = {
-	.probe		= mv_xor_shared_probe,
-	.remove		= mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] __devinitdata = {
+	{ .compatible = "marvell,orion-xor", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+	.probe		= mv_xor_probe,
+	.remove		= __devexit_p(mv_xor_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
-		.name	= MV_XOR_SHARED_NAME,
+		.name	= MV_XOR_NAME,
+		.of_match_table = of_match_ptr(mv_xor_dt_ids),
 	},
 };
 
 
 static int __init mv_xor_init(void)
 {
-	int rc;
-
-	rc = platform_driver_register(&mv_xor_shared_driver);
-	if (!rc) {
-		rc = platform_driver_register(&mv_xor_driver);
-		if (rc)
-			platform_driver_unregister(&mv_xor_shared_driver);
-	}
-	return rc;
+	return platform_driver_register(&mv_xor_driver);
 }
 module_init(mv_xor_init);
 
@@ -1359,7 +1419,6 @@ module_init(mv_xor_init);
 static void __exit mv_xor_exit(void)
 {
 	platform_driver_unregister(&mv_xor_driver);
-	platform_driver_unregister(&mv_xor_shared_driver);
 	return;
 }
 
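With the shared/per-channel split removed, every XOR channel is now registered through mv_xor_channel_add() and exposed via the standard dmaengine API. A minimal usage sketch (generic dmaengine calls, assumed for illustration rather than taken from this patch) of how a client would obtain one of these channels by capability mask:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_XOR, mask);
	chan = dma_request_channel(mask, NULL, NULL);	/* any XOR-capable channel */
	if (chan)
		dma_release_channel(chan);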
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index a5b422f5a8ab..c632a4761fcf 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,8 +24,10 @@
 #include <linux/interrupt.h>
 
 #define USE_TIMER
+#define MV_XOR_POOL_SIZE	PAGE_SIZE
 #define MV_XOR_SLOT_SIZE	64
 #define MV_XOR_THRESHOLD	1
+#define MV_XOR_MAX_CHANNELS	2
 
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
@@ -51,29 +53,13 @@
 #define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
 #define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
 #define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))
+#define WINDOW_OVERRIDE_CTRL(chan)	(0x2A0 + ((chan) << 2))
 
-struct mv_xor_shared_private {
-	void __iomem	*xor_base;
-	void __iomem	*xor_high_base;
-	struct clk	*clk;
-};
-
-
-/**
- * struct mv_xor_device - internal representation of a XOR device
- * @pdev: Platform device
- * @id: HW XOR Device selector
- * @dma_desc_pool: base of DMA descriptor region (DMA address)
- * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
- * @common: embedded struct dma_device
- */
 struct mv_xor_device {
-	struct platform_device	*pdev;
-	int			id;
-	dma_addr_t		dma_desc_pool;
-	void			*dma_desc_pool_virt;
-	struct dma_device	common;
-	struct mv_xor_shared_private *shared;
+	void __iomem	*xor_base;
+	void __iomem	*xor_high_base;
+	struct clk	*clk;
+	struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
 };
 
 /**
@@ -96,11 +82,15 @@ struct mv_xor_chan {
 	spinlock_t		lock; /* protects the descriptor slot pool */
 	void __iomem		*mmr_base;
 	unsigned int		idx;
+	int			irq;
 	enum dma_transaction_type	current_type;
 	struct list_head	chain;
 	struct list_head	completed_slots;
-	struct mv_xor_device	*device;
-	struct dma_chan		common;
+	dma_addr_t		dma_desc_pool;
+	void			*dma_desc_pool_virt;
+	size_t			pool_size;
+	struct dma_device	dmadev;
+	struct dma_chan		dmachan;
 	struct mv_xor_desc_slot	*last_used;
 	struct list_head	all_slots;
 	int			slots_allocated;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 0029934748bc..edfba9370922 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -31,6 +31,30 @@ config MV643XX_ETH
 	  Some boards that use the Discovery chipset are the Momenco
 	  Ocelot C and Jaguar ATX and Pegasos II.
 
+config MVMDIO
+	tristate "Marvell MDIO interface support"
+	---help---
+	  This driver supports the MDIO interface found in the network
+	  interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
+	  Dove, Armada 370 and Armada XP).
+
+	  For now, this driver is only needed for the MVNETA driver
+	  (used on Armada 370 and XP), but it could be used in the
+	  future by the MV643XX_ETH driver.
+
+config MVNETA
+	tristate "Marvell Armada 370/XP network interface support"
+	depends on MACH_ARMADA_370_XP
+	select PHYLIB
+	select MVMDIO
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA XP and ARMADA 370 SoC family.
+
+	  Note that this driver is distinct from the mv643xx_eth
+	  driver, which should be used for the older Marvell SoCs
+	  (Dove, Orion, Discovery, Kirkwood).
+
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
 	depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 57e3234a37ba..7f63b4aac434 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -3,6 +3,8 @@
 #
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MVNETA) += mvneta.o
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
new file mode 100644
index 000000000000..6d6002bab060
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -0,0 +1,228 @@
1/*
2 * Driver for the MDIO interface of Marvell network interfaces.
3 *
4 * Since the MDIO interface of Marvell network interfaces is shared
5 * between all network interfaces, having a single driver allows
6 * concurrent accesses to be handled properly (you may have four
7 * Ethernet ports, but they in fact share the same SMI interface to
8 * access the MDIO bus). Moreover, this MDIO interface code is similar between
9 * the mv643xx_eth driver and the mvneta driver. For now, it is only
10 * used by the mvneta driver, but it could later be used by the
11 * mv643xx_eth driver as well.
12 *
13 * Copyright (C) 2012 Marvell
14 *
15 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/phy.h>
27#include <linux/of_address.h>
28#include <linux/of_mdio.h>
29#include <linux/platform_device.h>
30#include <linux/delay.h>
31
32#define MVMDIO_SMI_DATA_SHIFT 0
33#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
34#define MVMDIO_SMI_PHY_REG_SHIFT 21
35#define MVMDIO_SMI_READ_OPERATION BIT(26)
36#define MVMDIO_SMI_WRITE_OPERATION 0
37#define MVMDIO_SMI_READ_VALID BIT(27)
38#define MVMDIO_SMI_BUSY BIT(28)
39
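To make the register layout above concrete, here is a hedged sketch (not part of the patch; phy_addr and regnum are placeholders) of how a read command word is composed, mirroring what orion_mdio_read() does below:

	u32 cmd = (phy_addr << MVMDIO_SMI_PHY_ADDR_SHIFT) |
		  (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
		  MVMDIO_SMI_READ_OPERATION;
	writel(cmd, dev->smireg);
	/* poll readl(dev->smireg) until MVMDIO_SMI_READ_VALID is set;
	 * the PHY register value then sits in the low 16 data bits */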
40struct orion_mdio_dev {
41 struct mutex lock;
42 void __iomem *smireg;
43};
44
45/* Wait for the SMI unit to be ready for another operation
46 */
47static int orion_mdio_wait_ready(struct mii_bus *bus)
48{
49 struct orion_mdio_dev *dev = bus->priv;
50 int count;
51 u32 val;
52
53 count = 0;
54 while (1) {
55 val = readl(dev->smireg);
56 if (!(val & MVMDIO_SMI_BUSY))
57 break;
58
59 if (count > 100) {
60 dev_err(bus->parent, "Timeout: SMI busy for too long\n");
61 return -ETIMEDOUT;
62 }
63
64 udelay(10);
65 count++;
66 }
67
68 return 0;
69}
70
71static int orion_mdio_read(struct mii_bus *bus, int mii_id,
72 int regnum)
73{
74 struct orion_mdio_dev *dev = bus->priv;
75 int count;
76 u32 val;
77 int ret;
78
79 mutex_lock(&dev->lock);
80
81 ret = orion_mdio_wait_ready(bus);
82 if (ret < 0) {
83 mutex_unlock(&dev->lock);
84 return ret;
85 }
86
87 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
88 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
89 MVMDIO_SMI_READ_OPERATION),
90 dev->smireg);
91
92 /* Wait for the value to become available */
93 count = 0;
94 while (1) {
95 val = readl(dev->smireg);
96 if (val & MVMDIO_SMI_READ_VALID)
97 break;
98
99 if (count > 100) {
100 dev_err(bus->parent, "Timeout when reading PHY\n");
101 mutex_unlock(&dev->lock);
102 return -ETIMEDOUT;
103 }
104
105 udelay(10);
106 count++;
107 }
108
109 mutex_unlock(&dev->lock);
110
111 return val & 0xFFFF;
112}
113
114static int orion_mdio_write(struct mii_bus *bus, int mii_id,
115 int regnum, u16 value)
116{
117 struct orion_mdio_dev *dev = bus->priv;
118 int ret;
119
120 mutex_lock(&dev->lock);
121
122 ret = orion_mdio_wait_ready(bus);
123 if (ret < 0) {
124 mutex_unlock(&dev->lock);
125 return ret;
126 }
127
128 writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
129 (regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
130 MVMDIO_SMI_WRITE_OPERATION |
131 (value << MVMDIO_SMI_DATA_SHIFT)),
132 dev->smireg);
133
134 mutex_unlock(&dev->lock);
135
136 return 0;
137}
138
139static int orion_mdio_reset(struct mii_bus *bus)
140{
141 return 0;
142}
143
144static int __devinit orion_mdio_probe(struct platform_device *pdev)
145{
146 struct device_node *np = pdev->dev.of_node;
147 struct mii_bus *bus;
148 struct orion_mdio_dev *dev;
149 int i, ret;
150
151 bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
152 if (!bus) {
153 dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
154 return -ENOMEM;
155 }
156
157 bus->name = "orion_mdio_bus";
158 bus->read = orion_mdio_read;
159 bus->write = orion_mdio_write;
160 bus->reset = orion_mdio_reset;
161 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
162 dev_name(&pdev->dev));
163 bus->parent = &pdev->dev;
164
165 bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
166 if (!bus->irq) {
167 dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
168 mdiobus_free(bus);
169 return -ENOMEM;
170 }
171
172 for (i = 0; i < PHY_MAX_ADDR; i++)
173 bus->irq[i] = PHY_POLL;
174
175 dev = bus->priv;
176 dev->smireg = of_iomap(pdev->dev.of_node, 0);
177 if (!dev->smireg) {
178 dev_err(&pdev->dev, "No SMI register address given in DT\n");
179 kfree(bus->irq);
180 mdiobus_free(bus);
181 return -ENODEV;
182 }
183
184 mutex_init(&dev->lock);
185
186 ret = of_mdiobus_register(bus, np);
187 if (ret < 0) {
188 dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
189 iounmap(dev->smireg);
190 kfree(bus->irq);
191 mdiobus_free(bus);
192 return ret;
193 }
194
195 platform_set_drvdata(pdev, bus);
196
197 return 0;
198}
199
200static int __devexit orion_mdio_remove(struct platform_device *pdev)
201{
202 struct mii_bus *bus = platform_get_drvdata(pdev);
203 mdiobus_unregister(bus);
204 kfree(bus->irq);
205 mdiobus_free(bus);
206 return 0;
207}
208
209static const struct of_device_id orion_mdio_match[] = {
210 { .compatible = "marvell,orion-mdio" },
211 { }
212};
213MODULE_DEVICE_TABLE(of, orion_mdio_match);
214
215static struct platform_driver orion_mdio_driver = {
216 .probe = orion_mdio_probe,
217 .remove = __devexit_p(orion_mdio_remove),
218 .driver = {
219 .name = "orion-mdio",
220 .of_match_table = orion_mdio_match,
221 },
222};
223
224module_platform_driver(orion_mdio_driver);
225
226MODULE_DESCRIPTION("Marvell MDIO interface driver");
227MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
228MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
new file mode 100644
index 000000000000..3f8086b9f5e5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -0,0 +1,2848 @@
1/*
2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/version.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/platform_device.h>
19#include <linux/skbuff.h>
20#include <linux/inetdevice.h>
21#include <linux/mbus.h>
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <net/ip.h>
25#include <net/ipv6.h>
26#include <linux/of.h>
27#include <linux/of_irq.h>
28#include <linux/of_mdio.h>
29#include <linux/of_net.h>
30#include <linux/of_address.h>
31#include <linux/phy.h>
32#include <linux/clk.h>
33
34/* Registers */
35#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
36#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
37#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
38#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
39#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
40#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
41#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
42#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
43#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
44#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
45#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
46#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
47#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
48#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
49#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
50#define MVNETA_PORT_RX_RESET 0x1cc0
51#define MVNETA_PORT_RX_DMA_RESET BIT(0)
52#define MVNETA_PHY_ADDR 0x2000
53#define MVNETA_PHY_ADDR_MASK 0x1f
54#define MVNETA_MBUS_RETRY 0x2010
55#define MVNETA_UNIT_INTR_CAUSE 0x2080
56#define MVNETA_UNIT_CONTROL 0x20B0
57#define MVNETA_PHY_POLLING_ENABLE BIT(1)
58#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
59#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
60#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
61#define MVNETA_BASE_ADDR_ENABLE 0x2290
62#define MVNETA_PORT_CONFIG 0x2400
63#define MVNETA_UNI_PROMISC_MODE BIT(0)
64#define MVNETA_DEF_RXQ(q) ((q) << 1)
65#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
66#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
67#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
68#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
69#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
70#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
71#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
72 MVNETA_DEF_RXQ_ARP(q) | \
73 MVNETA_DEF_RXQ_TCP(q) | \
74 MVNETA_DEF_RXQ_UDP(q) | \
75 MVNETA_DEF_RXQ_BPDU(q) | \
76 MVNETA_TX_UNSET_ERR_SUM | \
77 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
78#define MVNETA_PORT_CONFIG_EXTEND 0x2404
79#define MVNETA_MAC_ADDR_LOW 0x2414
80#define MVNETA_MAC_ADDR_HIGH 0x2418
81#define MVNETA_SDMA_CONFIG 0x241c
82#define MVNETA_SDMA_BRST_SIZE_16 4
83#define MVNETA_NO_DESC_SWAP 0x0
84#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
85#define MVNETA_RX_NO_DATA_SWAP BIT(4)
86#define MVNETA_TX_NO_DATA_SWAP BIT(5)
87#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
88#define MVNETA_PORT_STATUS 0x2444
89#define MVNETA_TX_IN_PRGRS BIT(1)
90#define MVNETA_TX_FIFO_EMPTY BIT(8)
91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
92#define MVNETA_TYPE_PRIO 0x24bc
93#define MVNETA_FORCE_UNI BIT(21)
94#define MVNETA_TXQ_CMD_1 0x24e4
95#define MVNETA_TXQ_CMD 0x2448
96#define MVNETA_TXQ_DISABLE_SHIFT 8
97#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
98#define MVNETA_ACC_MODE 0x2500
99#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
100#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
101#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
102#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
103#define MVNETA_INTR_NEW_CAUSE 0x25a0
104#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
105#define MVNETA_INTR_NEW_MASK 0x25a4
106#define MVNETA_INTR_OLD_CAUSE 0x25a8
107#define MVNETA_INTR_OLD_MASK 0x25ac
108#define MVNETA_INTR_MISC_CAUSE 0x25b0
109#define MVNETA_INTR_MISC_MASK 0x25b4
110#define MVNETA_INTR_ENABLE 0x25b8
111#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
112#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
113#define MVNETA_RXQ_CMD 0x2680
114#define MVNETA_RXQ_DISABLE_SHIFT 8
115#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
116#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
117#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
118#define MVNETA_GMAC_CTRL_0 0x2c00
119#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
120#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
121#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
122#define MVNETA_GMAC_CTRL_2 0x2c08
123#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
124#define MVNETA_GMAC2_PORT_RGMII BIT(4)
125#define MVNETA_GMAC2_PORT_RESET BIT(6)
126#define MVNETA_GMAC_STATUS 0x2c10
127#define MVNETA_GMAC_LINK_UP BIT(0)
128#define MVNETA_GMAC_SPEED_1000 BIT(1)
129#define MVNETA_GMAC_SPEED_100 BIT(2)
130#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
131#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
132#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
133#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
134#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
135#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
136#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
137#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
138#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
139#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
140#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
141#define MVNETA_MIB_COUNTERS_BASE 0x3080
142#define MVNETA_MIB_LATE_COLLISION 0x7c
143#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
144#define MVNETA_DA_FILT_OTH_MCAST 0x3500
145#define MVNETA_DA_FILT_UCAST_BASE 0x3600
146#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
147#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
148#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
149#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
150#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
151#define MVNETA_TXQ_DEC_SENT_SHIFT 16
152#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
153#define MVNETA_TXQ_SENT_DESC_SHIFT 16
154#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
155#define MVNETA_PORT_TX_RESET 0x3cf0
156#define MVNETA_PORT_TX_DMA_RESET BIT(0)
157#define MVNETA_TX_MTU 0x3e0c
158#define MVNETA_TX_TOKEN_SIZE 0x3e14
159#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
160#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
161#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
162
163#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
164
165/* Descriptor ring Macros */
166#define MVNETA_QUEUE_NEXT_DESC(q, index) \
167 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
168
169/* Various constants */
170
171/* Coalescing */
172#define MVNETA_TXDONE_COAL_PKTS 16
173#define MVNETA_RX_COAL_PKTS 32
174#define MVNETA_RX_COAL_USEC 100
175
176/* Timer */
177#define MVNETA_TX_DONE_TIMER_PERIOD 10
178
179/* Napi polling weight */
180#define MVNETA_RX_POLL_WEIGHT 64
181
182/* The two-byte Marvell header. It either contains a special value used
183 * by Marvell switches when a specific hardware mode is enabled (not
184 * supported by this driver) or is automatically filled with zeroes on
185 * the RX side. Since those two bytes sit at the front of the Ethernet
186 * header, they keep the IP header aligned on a 4-byte boundary
187 * automatically: the hardware skips those two bytes on its
188 * own.
189 */
190#define MVNETA_MH_SIZE 2
191
192#define MVNETA_VLAN_TAG_LEN 4
193
194#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
195#define MVNETA_TX_CSUM_MAX_SIZE 9800
196#define MVNETA_ACC_MODE_EXT 1
197
198/* Timeout constants */
199#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
200#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
201#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
202
203#define MVNETA_TX_MTU_MAX 0x3ffff
204
205/* Max number of Rx descriptors */
206#define MVNETA_MAX_RXD 128
207
208/* Max number of Tx descriptors */
209#define MVNETA_MAX_TXD 532
210
211/* descriptor aligned size */
212#define MVNETA_DESC_ALIGNED_SIZE 32
213
214#define MVNETA_RX_PKT_SIZE(mtu) \
215 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
216 ETH_HLEN + ETH_FCS_LEN, \
217 MVNETA_CPU_D_CACHE_LINE_SIZE)
218
219#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
220
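As a worked example of the sizing math above (illustrative only, assuming the standard 1500-byte MTU): MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536 bytes, i.e. the MTU plus Marvell header, VLAN tag, Ethernet header and FCS, rounded up to the 32-byte cache line; MVNETA_RX_BUF_SIZE then adds NET_SKB_PAD on top of that.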
221struct mvneta_stats {
222 struct u64_stats_sync syncp;
223 u64 packets;
224 u64 bytes;
225};
226
227struct mvneta_port {
228 int pkt_size;
229 void __iomem *base;
230 struct mvneta_rx_queue *rxqs;
231 struct mvneta_tx_queue *txqs;
232 struct timer_list tx_done_timer;
233 struct net_device *dev;
234
235 u32 cause_rx_tx;
236 struct napi_struct napi;
237
238 /* Flags */
239 unsigned long flags;
240#define MVNETA_F_TX_DONE_TIMER_BIT 0
241
242 /* Napi weight */
243 int weight;
244
245 /* Core clock */
246 struct clk *clk;
247 u8 mcast_count[256];
248 u16 tx_ring_size;
249 u16 rx_ring_size;
250 struct mvneta_stats tx_stats;
251 struct mvneta_stats rx_stats;
252
253 struct mii_bus *mii_bus;
254 struct phy_device *phy_dev;
255 phy_interface_t phy_interface;
256 struct device_node *phy_node;
257 unsigned int link;
258 unsigned int duplex;
259 unsigned int speed;
260};
261
262/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
263 * layout of the transmit and reception DMA descriptors, and their
264 * layout is therefore defined by the hardware design
265 */
266struct mvneta_tx_desc {
267 u32 command; /* Options used by HW for packet transmitting.*/
268#define MVNETA_TX_L3_OFF_SHIFT 0
269#define MVNETA_TX_IP_HLEN_SHIFT 8
270#define MVNETA_TX_L4_UDP BIT(16)
271#define MVNETA_TX_L3_IP6 BIT(17)
272#define MVNETA_TXD_IP_CSUM BIT(18)
273#define MVNETA_TXD_Z_PAD BIT(19)
274#define MVNETA_TXD_L_DESC BIT(20)
275#define MVNETA_TXD_F_DESC BIT(21)
276#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
277 MVNETA_TXD_L_DESC | \
278 MVNETA_TXD_F_DESC)
279#define MVNETA_TX_L4_CSUM_FULL BIT(30)
280#define MVNETA_TX_L4_CSUM_NOT BIT(31)
281
282 u16 reserverd1; /* csum_l4 (for future use) */
283 u16 data_size; /* Data size of transmitted packet in bytes */
284 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
285 u32 reserved2; /* hw_cmd - (for future use, PMT) */
286 u32 reserved3[4]; /* Reserved - (for future use) */
287};
288
289struct mvneta_rx_desc {
290 u32 status; /* Info about received packet */
291#define MVNETA_RXD_ERR_CRC 0x0
292#define MVNETA_RXD_ERR_SUMMARY BIT(16)
293#define MVNETA_RXD_ERR_OVERRUN BIT(17)
294#define MVNETA_RXD_ERR_LEN BIT(18)
295#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
296#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
297#define MVNETA_RXD_L3_IP4 BIT(25)
298#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
299#define MVNETA_RXD_L4_CSUM_OK BIT(30)
300
301 u16 reserved1; /* pnc_info - (for future use, PnC) */
302 u16 data_size; /* Size of received packet in bytes */
303 u32 buf_phys_addr; /* Physical address of the buffer */
304 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
305 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
306 u16 reserved3; /* prefetch_cmd, for future use */
307 u16 reserved4; /* csum_l4 - (for future use, PnC) */
308 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
309 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
310};
311
312struct mvneta_tx_queue {
313 /* Number of this TX queue, in the range 0-7 */
314 u8 id;
315
316 /* Number of TX DMA descriptors in the descriptor ring */
317 int size;
318
319 /* Number of currently used TX DMA descriptor in the
320 * descriptor ring
321 */
322 int count;
323
324 /* Array of transmitted skb */
325 struct sk_buff **tx_skb;
326
327 /* Index of last TX DMA descriptor that was inserted */
328 int txq_put_index;
329
330 /* Index of the TX DMA descriptor to be cleaned up */
331 int txq_get_index;
332
333 u32 done_pkts_coal;
334
335 /* Virtual address of the TX DMA descriptors array */
336 struct mvneta_tx_desc *descs;
337
338 /* DMA address of the TX DMA descriptors array */
339 dma_addr_t descs_phys;
340
341 /* Index of the last TX DMA descriptor */
342 int last_desc;
343
344 /* Index of the next TX DMA descriptor to process */
345 int next_desc_to_proc;
346};
347
348struct mvneta_rx_queue {
349 /* rx queue number, in the range 0-7 */
350 u8 id;
351
352 /* num of rx descriptors in the rx descriptor ring */
353 int size;
354
355 /* counter of times when mvneta_refill() failed */
356 int missed;
357
358 u32 pkts_coal;
359 u32 time_coal;
360
361 /* Virtual address of the RX DMA descriptors array */
362 struct mvneta_rx_desc *descs;
363
364 /* DMA address of the RX DMA descriptors array */
365 dma_addr_t descs_phys;
366
367 /* Index of the last RX DMA descriptor */
368 int last_desc;
369
370 /* Index of the next RX DMA descriptor to process */
371 int next_desc_to_proc;
372};
373
374static int rxq_number = 8;
375static int txq_number = 8;
376
377static int rxq_def;
378static int txq_def;
379
380#define MVNETA_DRIVER_NAME "mvneta"
381#define MVNETA_DRIVER_VERSION "1.0"
382
383/* Utility/helper methods */
384
385/* Write helper method */
386static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
387{
388 writel(data, pp->base + offset);
389}
390
391/* Read helper method */
392static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
393{
394 return readl(pp->base + offset);
395}
396
397/* Increment txq get counter */
398static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
399{
400 txq->txq_get_index++;
401 if (txq->txq_get_index == txq->size)
402 txq->txq_get_index = 0;
403}
404
405/* Increment txq put counter */
406static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
407{
408 txq->txq_put_index++;
409 if (txq->txq_put_index == txq->size)
410 txq->txq_put_index = 0;
411}
412
413
414/* Clear all MIB counters */
415static void mvneta_mib_counters_clear(struct mvneta_port *pp)
416{
417 int i;
418 u32 dummy;
419
420 /* Perform dummy reads from MIB counters */
421 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
422 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
423}
424
425/* Get System Network Statistics */
426struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
427 struct rtnl_link_stats64 *stats)
428{
429 struct mvneta_port *pp = netdev_priv(dev);
430 unsigned int start;
431
432 memset(stats, 0, sizeof(struct rtnl_link_stats64));
433
434 do {
435 start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
436 stats->rx_packets = pp->rx_stats.packets;
437 stats->rx_bytes = pp->rx_stats.bytes;
438 } while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));
439
440
441 do {
442 start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
443 stats->tx_packets = pp->tx_stats.packets;
444 stats->tx_bytes = pp->tx_stats.bytes;
445 } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));
446
447 stats->rx_errors = dev->stats.rx_errors;
448 stats->rx_dropped = dev->stats.rx_dropped;
449
450 stats->tx_dropped = dev->stats.tx_dropped;
451
452 return stats;
453}
454
455/* Rx descriptors helper methods */
456
457/* Checks whether the given RX descriptor is both the first and the
458 * last descriptor for the RX packet. Each RX packet is currently
459 * received through a single RX descriptor, so not having each RX
460 * descriptor with its first and last bits set is an error
461 */
462static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
463{
464 return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
465 MVNETA_RXD_FIRST_LAST_DESC;
466}
467
468/* Add number of descriptors ready to receive new packets */
469static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
470 struct mvneta_rx_queue *rxq,
471 int ndescs)
472{
473 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
474 * be added at once
475 */
476 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
477 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
478 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
479 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
480 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
481 }
482
483 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
484 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
485}
486
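As an illustrative trace of the quantum handling (the count of 300 is assumed, not from the source), adding 300 descriptors results in two register writes:

	mvneta_rxq_non_occup_desc_add(pp, rxq, 300);
	/* first write:  255 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT,
	 * second write:  45 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT */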
487/* Get number of RX descriptors occupied by received packets */
488static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
489 struct mvneta_rx_queue *rxq)
490{
491 u32 val;
492
493 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
494 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
495}
496
497/* Update num of rx desc called upon return from rx path or
498 * from mvneta_rxq_drop_pkts().
499 */
500static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
501 struct mvneta_rx_queue *rxq,
502 int rx_done, int rx_filled)
503{
504 u32 val;
505
506 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
507 val = rx_done |
508 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
509 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
510 return;
511 }
512
513 /* Only 255 descriptors can be added at once */
514 while ((rx_done > 0) || (rx_filled > 0)) {
515 if (rx_done <= 0xff) {
516 val = rx_done;
517 rx_done = 0;
518 } else {
519 val = 0xff;
520 rx_done -= 0xff;
521 }
522 if (rx_filled <= 0xff) {
523 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
524 rx_filled = 0;
525 } else {
526 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
527 rx_filled -= 0xff;
528 }
529 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
530 }
531}
532
533/* Get pointer to next RX descriptor to be processed by SW */
534static struct mvneta_rx_desc *
535mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
536{
537 int rx_desc = rxq->next_desc_to_proc;
538
539 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
540 return rxq->descs + rx_desc;
541}
542
543/* Change maximum receive size of the port. */
544static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
545{
546 u32 val;
547
548 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
549 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
550 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
551 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
552 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
553}
554
555
556/* Set rx queue offset */
557static void mvneta_rxq_offset_set(struct mvneta_port *pp,
558 struct mvneta_rx_queue *rxq,
559 int offset)
560{
561 u32 val;
562
563 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
564 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
565
566 /* Offset is in units of 8 bytes, hence the offset >> 3 below */
567 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
568 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
569}
570
571
572/* Tx descriptors helper methods */
573
574/* Update HW with number of TX descriptors to be sent */
575static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
576 struct mvneta_tx_queue *txq,
577 int pend_desc)
578{
579 u32 val;
580
581 /* Only 255 descriptors can be added at once; assume the caller
582 * processes TX descriptors in quanta of less than 256
583 */
584 val = pend_desc;
585 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
586}
587
588/* Get pointer to next TX descriptor to be processed (send) by HW */
589static struct mvneta_tx_desc *
590mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
591{
592 int tx_desc = txq->next_desc_to_proc;
593
594 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
595 return txq->descs + tx_desc;
596}
597
598/* Release the last allocated TX descriptor. Useful to handle DMA
599 * mapping failures in the TX path.
600 */
601static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
602{
603 if (txq->next_desc_to_proc == 0)
604 txq->next_desc_to_proc = txq->last_desc - 1;
605 else
606 txq->next_desc_to_proc--;
607}
608
609/* Set rxq buf size */
610static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
611 struct mvneta_rx_queue *rxq,
612 int buf_size)
613{
614 u32 val;
615
616 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
617
618 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
619 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
620
621 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
622}
623
624/* Disable buffer management (BM) */
625static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
626 struct mvneta_rx_queue *rxq)
627{
628 u32 val;
629
630 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
631 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
632 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
633}
634
635
636
637/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
638static void __devinit mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
639{
640 u32 val;
641
642 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
643
644 if (enable)
645 val |= MVNETA_GMAC2_PORT_RGMII;
646 else
647 val &= ~MVNETA_GMAC2_PORT_RGMII;
648
649 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
650}
651
652/* Config SGMII port */
653static void __devinit mvneta_port_sgmii_config(struct mvneta_port *pp)
654{
655 u32 val;
656
657 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
658 val |= MVNETA_GMAC2_PSC_ENABLE;
659 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
660}
661
662/* Start the Ethernet port RX and TX activity */
663static void mvneta_port_up(struct mvneta_port *pp)
664{
665 int queue;
666 u32 q_map;
667
668 /* Enable all initialized TXs. */
669 mvneta_mib_counters_clear(pp);
670 q_map = 0;
671 for (queue = 0; queue < txq_number; queue++) {
672 struct mvneta_tx_queue *txq = &pp->txqs[queue];
673 if (txq->descs != NULL)
674 q_map |= (1 << queue);
675 }
676 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
677
678 /* Enable all initialized RXQs. */
679 q_map = 0;
680 for (queue = 0; queue < rxq_number; queue++) {
681 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
682 if (rxq->descs != NULL)
683 q_map |= (1 << queue);
684 }
685
686 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
687}
688
689/* Stop the Ethernet port activity */
690static void mvneta_port_down(struct mvneta_port *pp)
691{
692 u32 val;
693 int count;
694
695 /* Stop Rx port activity. Check port Rx activity. */
696 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
697
698 /* Issue stop command for active channels only */
699 if (val != 0)
700 mvreg_write(pp, MVNETA_RXQ_CMD,
701 val << MVNETA_RXQ_DISABLE_SHIFT);
702
703 /* Wait for all Rx activity to terminate. */
704 count = 0;
705 do {
706 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
707 netdev_warn(pp->dev,
708 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
709 val);
710 break;
711 }
712 mdelay(1);
713
714 val = mvreg_read(pp, MVNETA_RXQ_CMD);
715 } while (val & 0xff);
716
717 /* Stop Tx port activity. Check port Tx activity. Issue stop
718 * command for active channels only
719 */
720 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
721
722 if (val != 0)
723 mvreg_write(pp, MVNETA_TXQ_CMD,
724 (val << MVNETA_TXQ_DISABLE_SHIFT));
725
726 /* Wait for all Tx activity to terminate. */
727 count = 0;
728 do {
729 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
730 netdev_warn(pp->dev,
731 "TIMEOUT for TX stopped status=0x%08x\n",
732 val);
733 break;
734 }
735 mdelay(1);
736
737 /* Check TX Command reg that all Txqs are stopped */
738 val = mvreg_read(pp, MVNETA_TXQ_CMD);
739
740 } while (val & 0xff);
741
742 /* Double check to verify that TX FIFO is empty */
743 count = 0;
744 do {
745 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
746 netdev_warn(pp->dev,
747 "TX FIFO empty timeout status=0x08%x\n",
748 val);
749 break;
750 }
751 mdelay(1);
752
753 val = mvreg_read(pp, MVNETA_PORT_STATUS);
754 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
755 (val & MVNETA_TX_IN_PRGRS));
756
757 udelay(200);
758}
759
760/* Enable the port by setting the port enable bit of the MAC control register */
761static void mvneta_port_enable(struct mvneta_port *pp)
762{
763 u32 val;
764
765 /* Enable port */
766 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
767 val |= MVNETA_GMAC0_PORT_ENABLE;
768 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
769}
770
771/* Disable the port and wait for about 200 usec before returning */
772static void mvneta_port_disable(struct mvneta_port *pp)
773{
774 u32 val;
775
776 /* Reset the Enable bit in the Serial Control Register */
777 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
778 val &= ~MVNETA_GMAC0_PORT_ENABLE;
779 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
780
781 udelay(200);
782}
783
784/* Multicast tables methods */
785
786/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
787static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
788{
789 int offset;
790 u32 val;
791
792 if (queue == -1) {
793 val = 0;
794 } else {
795 val = 0x1 | (queue << 1);
796 val |= (val << 24) | (val << 16) | (val << 8);
797 }
798
799 for (offset = 0; offset <= 0xc; offset += 4)
800 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
801}
802
803/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
804static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
805{
806 int offset;
807 u32 val;
808
809 if (queue == -1) {
810 val = 0;
811 } else {
812 val = 0x1 | (queue << 1);
813 val |= (val << 24) | (val << 16) | (val << 8);
814 }
815
816 for (offset = 0; offset <= 0xfc; offset += 4)
817 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
818
819}
820
821/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
822static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
823{
824 int offset;
825 u32 val;
826
827 if (queue == -1) {
828 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
829 val = 0;
830 } else {
831 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
832 val = 0x1 | (queue << 1);
833 val |= (val << 24) | (val << 16) | (val << 8);
834 }
835
836 for (offset = 0; offset <= 0xfc; offset += 4)
837 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
838}
839
840/* This method sets defaults to the NETA port:
841 * Clears interrupt Cause and Mask registers.
842 * Clears all MAC tables.
843 * Sets defaults to all registers.
844 * Resets RX and TX descriptor rings.
845 * Resets PHY.
846 * This method can be called after mvneta_port_down() to return the port
847 * settings to defaults.
848 */
849static void mvneta_defaults_set(struct mvneta_port *pp)
850{
851 int cpu;
852 int queue;
853 u32 val;
854
855 /* Clear all Cause registers */
856 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
857 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
858 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
859
860 /* Mask all interrupts */
861 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
862 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
863 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
864 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
865
866 /* Enable MBUS Retry bit16 */
867 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
868
869 /* Set CPU queue access map - all CPUs have access to all RX
870 * queues and to all TX queues
871 */
872 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
873 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
874 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
875 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
876
877 /* Reset RX and TX DMAs */
878 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
879 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
880
881 /* Disable Legacy WRR, Disable EJP, Release from reset */
882 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
883 for (queue = 0; queue < txq_number; queue++) {
884 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
885 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
886 }
887
888 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
889 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
890
891 /* Set Port Acceleration Mode */
892 val = MVNETA_ACC_MODE_EXT;
893 mvreg_write(pp, MVNETA_ACC_MODE, val);
894
895 /* Update val of portCfg register in accordance with all RxQueue types */
896 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
897 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
898
899 val = 0;
900 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
901 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
902
903 /* Build PORT_SDMA_CONFIG_REG */
904 val = 0;
905
906 /* Default burst size */
907 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
908 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
909
910 val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
911 MVNETA_NO_DESC_SWAP);
912
913 /* Assign port SDMA configuration */
914 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
915
916 mvneta_set_ucast_table(pp, -1);
917 mvneta_set_special_mcast_table(pp, -1);
918 mvneta_set_other_mcast_table(pp, -1);
919
920 /* Set port interrupt enable register - default enable all */
921 mvreg_write(pp, MVNETA_INTR_ENABLE,
922 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
923 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
924}
925
926/* Set max sizes for tx queues */
927static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
928
929{
930 u32 val, size, mtu;
931 int queue;
932
933 mtu = max_tx_size * 8;
934 if (mtu > MVNETA_TX_MTU_MAX)
935 mtu = MVNETA_TX_MTU_MAX;
936
937 /* Set MTU */
938 val = mvreg_read(pp, MVNETA_TX_MTU);
939 val &= ~MVNETA_TX_MTU_MAX;
940 val |= mtu;
941 mvreg_write(pp, MVNETA_TX_MTU, val);
942
943 /* TX token size and all TXQs token size must be larger than MTU */
944 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
945
946 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
947 if (size < mtu) {
948 size = mtu;
949 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
950 val |= size;
951 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
952 }
953 for (queue = 0; queue < txq_number; queue++) {
954 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
955
956 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
957 if (size < mtu) {
958 size = mtu;
959 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
960 val |= size;
961 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
962 }
963 }
964}
965
966/* Set unicast address */
967static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
968 int queue)
969{
970 unsigned int unicast_reg;
971 unsigned int tbl_offset;
972 unsigned int reg_offset;
973
974 /* Locate the Unicast table entry */
975 last_nibble = (0xf & last_nibble);
976
977 /* offset from unicast tbl base */
978 tbl_offset = (last_nibble / 4) * 4;
979
980 /* offset within the above reg */
981 reg_offset = last_nibble % 4;
982
983 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
984
985 if (queue == -1) {
986 /* Clear accepts frame bit at specified unicast DA tbl entry */
987 unicast_reg &= ~(0xff << (8 * reg_offset));
988 } else {
989 unicast_reg &= ~(0xff << (8 * reg_offset));
990 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
991 }
992
993 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
994}
995
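To make the table indexing concrete (an assumed example, not from the source): for a MAC address ending in 0x4b, last_nibble = 0xb = 11, so tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 11 % 4 = 3; the entry therefore occupies byte 3 of the 32-bit register at MVNETA_DA_FILT_UCAST_BASE + 8, and for queue 0 the byte written is 0x01.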
996/* Set mac address */
997static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
998 int queue)
999{
1000 unsigned int mac_h;
1001 unsigned int mac_l;
1002
1003 if (queue != -1) {
1004 mac_l = (addr[4] << 8) | (addr[5]);
1005 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1006 (addr[2] << 8) | (addr[3] << 0);
1007
1008 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1009 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1010 }
1011
1012 /* Accept frames of this address */
1013 mvneta_set_ucast_addr(pp, addr[5], queue);
1014}
1015
1016/* Set the number of packets that will be received before a RX
1017 * interrupt is generated by HW.
1018 */
1019static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1020 struct mvneta_rx_queue *rxq, u32 value)
1021{
1022 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1023 value | MVNETA_RXQ_NON_OCCUPIED(0));
1024 rxq->pkts_coal = value;
1025}
1026
1027/* Set the time delay in usec before a RX interrupt is generated by
1028 * HW.
1029 */
1030static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1031 struct mvneta_rx_queue *rxq, u32 value)
1032{
1033 u32 val;
1034 unsigned long clk_rate;
1035
1036 clk_rate = clk_get_rate(pp->clk);
1037 val = (clk_rate / 1000000) * value;
1038
1039 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1040 rxq->time_coal = value;
1041}
1042
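The conversion above is clock-rate dependent. As a worked example (the 250 MHz core clock is an assumption for illustration): mvneta_rx_time_coal_set(pp, rxq, 100) writes (250000000 / 1000000) * 100 = 25000 clock cycles to MVNETA_RXQ_TIME_COAL_REG, i.e. a 100 usec delay.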
1043/* Set threshold for TX_DONE pkts coalescing */
1044static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1045 struct mvneta_tx_queue *txq, u32 value)
1046{
1047 u32 val;
1048
1049 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1050
1051 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1052 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1053
1054 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1055
1056 txq->done_pkts_coal = value;
1057}
1058
1059/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
1060static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
1061{
1062 if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
1063 pp->tx_done_timer.expires = jiffies +
1064 msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
1065 add_timer(&pp->tx_done_timer);
1066 }
1067}
1068
1069
1070/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1071static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1072 u32 phys_addr, u32 cookie)
1073{
1074 rx_desc->buf_cookie = cookie;
1075 rx_desc->buf_phys_addr = phys_addr;
1076}
1077
1078/* Decrement sent descriptors counter */
1079static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1080 struct mvneta_tx_queue *txq,
1081 int sent_desc)
1082{
1083 u32 val;
1084
1085 /* Only 255 TX descriptors can be updated at once */
1086 while (sent_desc > 0xff) {
1087 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1088 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1089 sent_desc = sent_desc - 0xff;
1090 }
1091
1092 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1093 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1094}
1095
1096/* Get number of TX descriptors already sent by HW */
1097static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1098 struct mvneta_tx_queue *txq)
1099{
1100 u32 val;
1101 int sent_desc;
1102
1103 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1104 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1105 MVNETA_TXQ_SENT_DESC_SHIFT;
1106
1107 return sent_desc;
1108}
1109
1110/* Get number of sent descriptors and decrement counter.
1111 * The number of sent descriptors is returned.
1112 */
1113static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1114 struct mvneta_tx_queue *txq)
1115{
1116 int sent_desc;
1117
1118 /* Get number of sent descriptors */
1119 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1120
1121 /* Decrement sent descriptors counter */
1122 if (sent_desc)
1123 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1124
1125 return sent_desc;
1126}
1127
1128/* Set TXQ descriptor fields relevant for CSUM calculation */
1129static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1130 int ip_hdr_len, int l4_proto)
1131{
1132 u32 command;
1133
1134 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1135 * G_L4_chk, L4_type; required only for checksum
1136 * calculation
1137 */
1138 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1139 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1140
1141 if (l3_proto == swab16(ETH_P_IP))
1142 command |= MVNETA_TXD_IP_CSUM;
1143 else
1144 command |= MVNETA_TX_L3_IP6;
1145
1146 if (l4_proto == IPPROTO_TCP)
1147 command |= MVNETA_TX_L4_CSUM_FULL;
1148 else if (l4_proto == IPPROTO_UDP)
1149 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1150 else
1151 command |= MVNETA_TX_L4_CSUM_NOT;
1152
1153 return command;
1154}
1155
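As a hedged worked example (typical values assumed, not from the source): for TCP over IPv4 in an untagged Ethernet frame, the caller in mvneta_skb_tx_csum() passes l3_offs = 14 and ip_hdr_len = 5 (the ihl field, in 32-bit words), so the resulting command word is:

	command = (14 << MVNETA_TX_L3_OFF_SHIFT) |
		  (5 << MVNETA_TX_IP_HLEN_SHIFT) |
		  MVNETA_TXD_IP_CSUM |
		  MVNETA_TX_L4_CSUM_FULL;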
1156
1157/* Display more error info */
1158static void mvneta_rx_error(struct mvneta_port *pp,
1159 struct mvneta_rx_desc *rx_desc)
1160{
1161 u32 status = rx_desc->status;
1162
1163 if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
1164 netdev_err(pp->dev,
1165 "bad rx status %08x (buffer oversize), size=%d\n",
1166 rx_desc->status, rx_desc->data_size);
1167 return;
1168 }
1169
1170 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1171 case MVNETA_RXD_ERR_CRC:
1172 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1173 status, rx_desc->data_size);
1174 break;
1175 case MVNETA_RXD_ERR_OVERRUN:
1176 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1177 status, rx_desc->data_size);
1178 break;
1179 case MVNETA_RXD_ERR_LEN:
1180 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1181 status, rx_desc->data_size);
1182 break;
1183 case MVNETA_RXD_ERR_RESOURCE:
1184 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1185 status, rx_desc->data_size);
1186 break;
1187 }
1188}
1189
1190/* Handle RX checksum offload */
1191static void mvneta_rx_csum(struct mvneta_port *pp,
1192 struct mvneta_rx_desc *rx_desc,
1193 struct sk_buff *skb)
1194{
1195 if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
1196 (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
1197 skb->csum = 0;
1198 skb->ip_summed = CHECKSUM_UNNECESSARY;
1199 return;
1200 }
1201
1202 skb->ip_summed = CHECKSUM_NONE;
1203}
1204
1205/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
1206static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1207 u32 cause)
1208{
1209 int queue = fls(cause) - 1;
1210
1211 return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
1212}
1213
1214/* Free tx queue skbuffs */
1215static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1216 struct mvneta_tx_queue *txq, int num)
1217{
1218 int i;
1219
1220 for (i = 0; i < num; i++) {
1221 struct mvneta_tx_desc *tx_desc = txq->descs +
1222 txq->txq_get_index;
1223 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1224
1225 mvneta_txq_inc_get(txq);
1226
1227 if (!skb)
1228 continue;
1229
1230 dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
1231 tx_desc->data_size, DMA_TO_DEVICE);
1232 dev_kfree_skb_any(skb);
1233 }
1234}
1235
1236/* Handle end of transmission */
1237static int mvneta_txq_done(struct mvneta_port *pp,
1238 struct mvneta_tx_queue *txq)
1239{
1240 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1241 int tx_done;
1242
1243 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1244 if (tx_done == 0)
1245 return tx_done;
1246 mvneta_txq_bufs_free(pp, txq, tx_done);
1247
1248 txq->count -= tx_done;
1249
1250 if (netif_tx_queue_stopped(nq)) {
1251 if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
1252 netif_tx_wake_queue(nq);
1253 }
1254
1255 return tx_done;
1256}
1257
1258/* Refill processing */
1259static int mvneta_rx_refill(struct mvneta_port *pp,
1260 struct mvneta_rx_desc *rx_desc)
1261
1262{
1263 dma_addr_t phys_addr;
1264 struct sk_buff *skb;
1265
1266 skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
1267 if (!skb)
1268 return -ENOMEM;
1269
1270 phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
1271 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1272 DMA_FROM_DEVICE);
1273 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1274 dev_kfree_skb(skb);
1275 return -ENOMEM;
1276 }
1277
1278 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1279
1280 return 0;
1281}
1282
1283/* Handle tx checksum */
1284static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1285{
1286 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1287 int ip_hdr_len = 0;
1288 u8 l4_proto;
1289
1290 if (skb->protocol == htons(ETH_P_IP)) {
1291 struct iphdr *ip4h = ip_hdr(skb);
1292
1293 /* Calculate IPv4 checksum and L4 checksum */
1294 ip_hdr_len = ip4h->ihl;
1295 l4_proto = ip4h->protocol;
1296 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1297 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1298
1299 /* Read l4_proto from one of the IPv6 extension headers */
1300 if (skb_network_header_len(skb) > 0)
1301 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1302 l4_proto = ip6h->nexthdr;
1303 } else
1304 return MVNETA_TX_L4_CSUM_NOT;
1305
1306 return mvneta_txq_desc_csum(skb_network_offset(skb),
1307 skb->protocol, ip_hdr_len, l4_proto);
1308 }
1309
1310 return MVNETA_TX_L4_CSUM_NOT;
1311}
1312
1313/* Returns rx queue pointer (find last set bit) according to causeRxTx
1314 * value
1315 */
1316static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1317 u32 cause)
1318{
1319 int queue = fls(cause >> 8) - 1;
1320
1321 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1322}
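
Because fls() returns the position of the highest set bit, this policy always serves the highest-numbered pending RX queue first; the RX cause bits sit in bits 8-15, hence the shift by 8. A standalone check of the arithmetic, using a portable fls() stand-in:

	#include <stdio.h>

	/* Portable stand-in for the kernel's fls(): 1-based index of the
	 * highest set bit, 0 when no bit is set. */
	static int fls32(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int cause = (1u << 8) | (1u << 11); /* RX queues 0 and 3 */

		printf("queue = %d\n", fls32(cause >> 8) - 1); /* prints 3 */
		return 0;
	}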
1323
1324/* Drop packets received by the RXQ and free buffers */
1325static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1326 struct mvneta_rx_queue *rxq)
1327{
1328 int rx_done, i;
1329
1330 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1331 for (i = 0; i < rxq->size; i++) {
1332 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1333 struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;
1334
1335 dev_kfree_skb_any(skb);
1336 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1337 rx_desc->data_size, DMA_FROM_DEVICE);
1338 }
1339
1340 if (rx_done)
1341 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1342}
1343
1344/* Main rx processing */
1345static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1346 struct mvneta_rx_queue *rxq)
1347{
1348 struct net_device *dev = pp->dev;
1349 int rx_done, rx_filled;
1350
1351 /* Get number of received packets */
1352 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1353
1354 if (rx_todo > rx_done)
1355 rx_todo = rx_done;
1356
1357 rx_done = 0;
1358 rx_filled = 0;
1359
1360 /* Fairness NAPI loop */
1361 while (rx_done < rx_todo) {
1362 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1363 struct sk_buff *skb;
1364 u32 rx_status;
1365 int rx_bytes, err;
1366
1367 prefetch(rx_desc);
1368 rx_done++;
1369 rx_filled++;
1370 rx_status = rx_desc->status;
1371 skb = (struct sk_buff *)rx_desc->buf_cookie;
1372
1373 if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
1374 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1375 dev->stats.rx_errors++;
1376 mvneta_rx_error(pp, rx_desc);
1377 mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
1378 (u32)skb);
1379 continue;
1380 }
1381
1382 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1383 rx_desc->data_size, DMA_FROM_DEVICE);
1384
1385 rx_bytes = rx_desc->data_size -
1386 (ETH_FCS_LEN + MVNETA_MH_SIZE);
1387 u64_stats_update_begin(&pp->rx_stats.syncp);
1388 pp->rx_stats.packets++;
1389 pp->rx_stats.bytes += rx_bytes;
1390 u64_stats_update_end(&pp->rx_stats.syncp);
1391
1392 /* Linux processing */
1393 skb_reserve(skb, MVNETA_MH_SIZE);
1394 skb_put(skb, rx_bytes);
1395
1396 skb->protocol = eth_type_trans(skb, dev);
1397
1398 mvneta_rx_csum(pp, rx_desc, skb);
1399
1400 napi_gro_receive(&pp->napi, skb);
1401
1402 /* Refill processing */
1403 err = mvneta_rx_refill(pp, rx_desc);
1404 if (err) {
1405 netdev_err(pp->dev, "Linux processing - Can't refill\n");
1406 rxq->missed++;
1407 rx_filled--;
1408 }
1409 }
1410
1411 /* Update rxq management counters */
1412 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1413
1414 return rx_done;
1415}
1416
1417/* Handle tx fragmentation processing */
1418static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1419 struct mvneta_tx_queue *txq)
1420{
1421 struct mvneta_tx_desc *tx_desc;
1422 int i;
1423
1424 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1425 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1426 void *addr = page_address(frag->page.p) + frag->page_offset;
1427
1428 tx_desc = mvneta_txq_next_desc_get(txq);
1429 tx_desc->data_size = frag->size;
1430
1431 tx_desc->buf_phys_addr =
1432 dma_map_single(pp->dev->dev.parent, addr,
1433 tx_desc->data_size, DMA_TO_DEVICE);
1434
1435 if (dma_mapping_error(pp->dev->dev.parent,
1436 tx_desc->buf_phys_addr)) {
1437 mvneta_txq_desc_put(txq);
1438 goto error;
1439 }
1440
1441 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
1442 /* Last descriptor */
1443 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1444
1445 txq->tx_skb[txq->txq_put_index] = skb;
1446
1447 mvneta_txq_inc_put(txq);
1448 } else {
1449 /* Descriptor in the middle: Not First, Not Last */
1450 tx_desc->command = 0;
1451
1452 txq->tx_skb[txq->txq_put_index] = NULL;
1453 mvneta_txq_inc_put(txq);
1454 }
1455 }
1456
1457 return 0;
1458
1459error:
1460 /* Release all descriptors that were used to map fragments of
1461 * this packet, as well as the corresponding DMA mappings
1462 */
1463 for (i = i - 1; i >= 0; i--) {
1464 tx_desc = txq->descs + i;
1465 dma_unmap_single(pp->dev->dev.parent,
1466 tx_desc->buf_phys_addr,
1467 tx_desc->data_size,
1468 DMA_TO_DEVICE);
1469 mvneta_txq_desc_put(txq);
1470 }
1471
1472 return -ENOMEM;
1473}
1474
1475/* Main tx processing */
1476static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1477{
1478 struct mvneta_port *pp = netdev_priv(dev);
1479 struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
1480 struct mvneta_tx_desc *tx_desc;
1481 struct netdev_queue *nq;
1482 int frags = 0;
1483 u32 tx_cmd;
1484
1485 if (!netif_running(dev))
1486 goto out;
1487
1488 frags = skb_shinfo(skb)->nr_frags + 1;
1489 nq = netdev_get_tx_queue(dev, txq_def);
1490
1491 /* Get a descriptor for the first part of the packet */
1492 tx_desc = mvneta_txq_next_desc_get(txq);
1493
1494 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1495
1496 tx_desc->data_size = skb_headlen(skb);
1497
1498 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1499 tx_desc->data_size,
1500 DMA_TO_DEVICE);
1501 if (unlikely(dma_mapping_error(dev->dev.parent,
1502 tx_desc->buf_phys_addr))) {
1503 mvneta_txq_desc_put(txq);
1504 frags = 0;
1505 goto out;
1506 }
1507
1508 if (frags == 1) {
1509 /* First and Last descriptor */
1510 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1511 tx_desc->command = tx_cmd;
1512 txq->tx_skb[txq->txq_put_index] = skb;
1513 mvneta_txq_inc_put(txq);
1514 } else {
1515 /* First but not Last */
1516 tx_cmd |= MVNETA_TXD_F_DESC;
1517 txq->tx_skb[txq->txq_put_index] = NULL;
1518 mvneta_txq_inc_put(txq);
1519 tx_desc->command = tx_cmd;
1520 /* Continue with other skb fragments */
1521 if (mvneta_tx_frag_process(pp, skb, txq)) {
1522 dma_unmap_single(dev->dev.parent,
1523 tx_desc->buf_phys_addr,
1524 tx_desc->data_size,
1525 DMA_TO_DEVICE);
1526 mvneta_txq_desc_put(txq);
1527 frags = 0;
1528 goto out;
1529 }
1530 }
1531
1532 txq->count += frags;
1533 mvneta_txq_pend_desc_add(pp, txq, frags);
1534
1535 if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
1536 netif_tx_stop_queue(nq);
1537
1538out:
1539 if (frags > 0) {
1540 u64_stats_update_begin(&pp->tx_stats.syncp);
1541 pp->tx_stats.packets++;
1542 pp->tx_stats.bytes += skb->len;
1543 u64_stats_update_end(&pp->tx_stats.syncp);
1544
1545 } else {
1546 dev->stats.tx_dropped++;
1547 dev_kfree_skb_any(skb);
1548 }
1549
1550 if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
1551 mvneta_txq_done(pp, txq);
1552
1553 /* If after calling mvneta_txq_done, count equals
1554 * frags, we need to set the timer
1555 */
1556 if (txq->count == frags && frags > 0)
1557 mvneta_add_tx_done_timer(pp);
1558
1559 return NETDEV_TX_OK;
1560}
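
To summarize how the command flags mark a buffer's position in the frame (per the two branches above and mvneta_tx_frag_process()):

	/* Descriptor command flags by position in the frame:
	 *   single buffer: MVNETA_TXD_FLZ_DESC (First + Last + Zero padding)
	 *   multi buffer : head   -> MVNETA_TXD_F_DESC
	 *                  middle -> 0
	 *                  tail   -> MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD
	 * Only the descriptor carrying the Last flag keeps the skb pointer,
	 * so mvneta_txq_bufs_free() frees each skb exactly once.
	 */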
1561
1562
1563/* Free tx resources when resetting a port */
1564static void mvneta_txq_done_force(struct mvneta_port *pp,
1565 struct mvneta_tx_queue *txq)
1566
1567{
1568 int tx_done = txq->count;
1569
1570 mvneta_txq_bufs_free(pp, txq, tx_done);
1571
1572 /* reset txq */
1573 txq->count = 0;
1574 txq->txq_put_index = 0;
1575 txq->txq_get_index = 0;
1576}
1577
1578/* handle tx done - called from tx done timer callback */
1579static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
1580 int *tx_todo)
1581{
1582 struct mvneta_tx_queue *txq;
1583 u32 tx_done = 0;
1584 struct netdev_queue *nq;
1585
1586 *tx_todo = 0;
1587 while (cause_tx_done != 0) {
1588 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1589 if (!txq)
1590 break;
1591
1592 nq = netdev_get_tx_queue(pp->dev, txq->id);
1593 __netif_tx_lock(nq, smp_processor_id());
1594
1595 if (txq->count) {
1596 tx_done += mvneta_txq_done(pp, txq);
1597 *tx_todo += txq->count;
1598 }
1599
1600 __netif_tx_unlock(nq);
1601 cause_tx_done &= ~((1 << txq->id));
1602 }
1603
1604 return tx_done;
1605}
1606
1607/* Compute the CRC-8 of the specified address, using an algorithm that
1608 * follows the hw spec and differs from the generic CRC-8 algorithm
1609 */
1610static int mvneta_addr_crc(unsigned char *addr)
1611{
1612 int crc = 0;
1613 int i;
1614
1615 for (i = 0; i < ETH_ALEN; i++) {
1616 int j;
1617
1618 crc = (crc ^ addr[i]) << 8;
1619 for (j = 7; j >= 0; j--) {
1620 if (crc & (0x100 << j))
1621 crc ^= 0x107 << j;
1622 }
1623 }
1624
1625 return crc;
1626}
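
The reduction step XORs with 0x107 (the polynomial x^8 + x^2 + x + 1) whenever a bit above bit 7 is set, so the result always fits in 8 bits and can index the 256-entry Other Multicast Table. A self-contained userspace copy of the same computation, useful for predicting which table entry an address hashes to (the sample address is arbitrary):

	#include <stdio.h>

	/* Userspace copy of mvneta_addr_crc(); polynomial 0x107. */
	static int addr_crc(const unsigned char *addr)
	{
		int crc = 0;

		for (int i = 0; i < 6; i++) {
			crc = (crc ^ addr[i]) << 8;
			for (int j = 7; j >= 0; j--)
				if (crc & (0x100 << j))
					crc ^= 0x107 << j;
		}
		return crc;	/* always in 0x00..0xff */
	}

	int main(void)
	{
		const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

		printf("crc8 = 0x%02x\n", addr_crc(mac));
		return 0;
	}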
1627
1628/* This method controls the net device's special MAC multicast support.
1629 * The Special Multicast Table supports MAC addresses of the form
1630 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1631 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1632 * Table entries in the DA-Filter table. This method sets the appropriate
1633 * entry in the Special Multicast Table.
1634 */
1635static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1636 unsigned char last_byte,
1637 int queue)
1638{
1639 unsigned int smc_table_reg;
1640 unsigned int tbl_offset;
1641 unsigned int reg_offset;
1642
1643 /* Register offset from SMC table base */
1644 tbl_offset = (last_byte / 4);
1645 /* Entry offset within the above reg */
1646 reg_offset = last_byte % 4;
1647
1648 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1649 + tbl_offset * 4));
1650
1651 if (queue == -1)
1652 smc_table_reg &= ~(0xff << (8 * reg_offset));
1653 else {
1654 smc_table_reg &= ~(0xff << (8 * reg_offset));
1655 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1656 }
1657
1658 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1659 smc_table_reg);
1660}
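
Each 32-bit table register packs four one-byte entries: last_byte / 4 selects the register and last_byte % 4 the byte lane, and an entry of 0x01 | (queue << 1) means "accept and deliver to that queue". A worked example with hypothetical values:

	/* last_byte == 0x2a, queue == 0:
	 *   tbl_offset = 0x2a / 4 = 10   -> 11th register of the table
	 *   reg_offset = 0x2a % 4 = 2    -> byte lane 2 (bits 23:16)
	 *   entry      = 0x01 | (0 << 1) -> 0x01 written into bits 23:16
	 * Passing queue == -1 instead clears those 8 bits, dropping the entry.
	 */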
1661
1662/* This method controls the network device's Other MAC multicast support.
1663 * The Other Multicast Table is used for multicast addresses of any other
1664 * type. A CRC-8 is used as an index to the Other Multicast Table entries
1665 * in the DA-Filter table.
1666 * The method gets the CRC-8 value from the calling routine and
1667 * sets the appropriate Other Multicast Table entry according to the
1668 * specified CRC-8.
1669 */
1670static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1671 unsigned char crc8,
1672 int queue)
1673{
1674 unsigned int omc_table_reg;
1675 unsigned int tbl_offset;
1676 unsigned int reg_offset;
1677
1678 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1679 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1680
1681 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1682
1683 if (queue == -1) {
1684 /* Clear the accept-frame bit of the specified Other DA table entry */
1685 omc_table_reg &= ~(0xff << (8 * reg_offset));
1686 } else {
1687 omc_table_reg &= ~(0xff << (8 * reg_offset));
1688 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1689 }
1690
1691 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1692}
1693
1694/* The network device supports multicast using two tables:
1695 * 1) Special Multicast Table for MAC addresses of the form
1696 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1697 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1698 * Table entries in the DA-Filter table.
1699 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1700 * is used as an index to the Other Multicast Table entries in the
1701 * DA-Filter table.
1702 */
1703static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1704 int queue)
1705{
1706 unsigned char crc_result = 0;
1707
1708 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1709 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1710 return 0;
1711 }
1712
1713 crc_result = mvneta_addr_crc(p_addr);
1714 if (queue == -1) {
1715 if (pp->mcast_count[crc_result] == 0) {
1716 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1717 crc_result);
1718 return -EINVAL;
1719 }
1720
1721 pp->mcast_count[crc_result]--;
1722 if (pp->mcast_count[crc_result] != 0) {
1723 netdev_info(pp->dev,
1724 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1725 pp->mcast_count[crc_result], crc_result);
1726 return -EINVAL;
1727 }
1728 } else
1729 pp->mcast_count[crc_result]++;
1730
1731 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1732
1733 return 0;
1734}
1735
1736/* Configure the filtering mode of the Ethernet port */
1737static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1738 int is_promisc)
1739{
1740 u32 port_cfg_reg, val;
1741
1742 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1743
1744 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1745
1746 /* Set / Clear UPM bit in port configuration register */
1747 if (is_promisc) {
1748 /* Accept all Unicast addresses */
1749 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1750 val |= MVNETA_FORCE_UNI;
1751 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1752 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1753 } else {
1754 /* Reject all Unicast addresses */
1755 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1756 val &= ~MVNETA_FORCE_UNI;
1757 }
1758
1759 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1760 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1761}
1762
1763/* register unicast and multicast addresses */
1764static void mvneta_set_rx_mode(struct net_device *dev)
1765{
1766 struct mvneta_port *pp = netdev_priv(dev);
1767 struct netdev_hw_addr *ha;
1768
1769 if (dev->flags & IFF_PROMISC) {
1770 /* Accept all: Multicast + Unicast */
1771 mvneta_rx_unicast_promisc_set(pp, 1);
1772 mvneta_set_ucast_table(pp, rxq_def);
1773 mvneta_set_special_mcast_table(pp, rxq_def);
1774 mvneta_set_other_mcast_table(pp, rxq_def);
1775 } else {
1776 /* Accept single Unicast */
1777 mvneta_rx_unicast_promisc_set(pp, 0);
1778 mvneta_set_ucast_table(pp, -1);
1779 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1780
1781 if (dev->flags & IFF_ALLMULTI) {
1782 /* Accept all multicast */
1783 mvneta_set_special_mcast_table(pp, rxq_def);
1784 mvneta_set_other_mcast_table(pp, rxq_def);
1785 } else {
1786 /* Accept only initialized multicast */
1787 mvneta_set_special_mcast_table(pp, -1);
1788 mvneta_set_other_mcast_table(pp, -1);
1789
1790 if (!netdev_mc_empty(dev)) {
1791 netdev_for_each_mc_addr(ha, dev) {
1792 mvneta_mcast_addr_set(pp, ha->addr,
1793 rxq_def);
1794 }
1795 }
1796 }
1797 }
1798}
1799
1800/* Interrupt handling - the callback for request_irq() */
1801static irqreturn_t mvneta_isr(int irq, void *dev_id)
1802{
1803 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1804
1805 /* Mask all interrupts */
1806 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1807
1808 napi_schedule(&pp->napi);
1809
1810 return IRQ_HANDLED;
1811}
1812
1813/* NAPI handler
1814 * Bits 0-7 of the causeRxTx register indicate that packets were
1815 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1816 * Bits 8-15 of the causeRxTx register indicate that packets were
1817 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1818 * Each CPU has its own causeRxTx register.
1819 */
1820static int mvneta_poll(struct napi_struct *napi, int budget)
1821{
1822 int rx_done = 0;
1823 u32 cause_rx_tx;
1824 unsigned long flags;
1825 struct mvneta_port *pp = netdev_priv(napi->dev);
1826
1827 if (!netif_running(pp->dev)) {
1828 napi_complete(napi);
1829 return rx_done;
1830 }
1831
1832 /* Read cause register */
1833 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1834 MVNETA_RX_INTR_MASK(rxq_number);
1835
1836 /* For the case where the last mvneta_poll did not process all
1837 * RX packets
1838 */
1839 cause_rx_tx |= pp->cause_rx_tx;
1840 if (rxq_number > 1) {
1841 while ((cause_rx_tx != 0) && (budget > 0)) {
1842 int count;
1843 struct mvneta_rx_queue *rxq;
1844 /* get rx queue number from cause_rx_tx */
1845 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1846 if (!rxq)
1847 break;
1848
1849 /* process the packet in that rx queue */
1850 count = mvneta_rx(pp, budget, rxq);
1851 rx_done += count;
1852 budget -= count;
1853 if (budget > 0) {
1854 /* clear the rx bit of this queue
1855 * in the cause register, so that
1856 * the next iteration will find
1857 * the next rx queue with
1858 * packets pending
1859 */
1860 cause_rx_tx &= ~((1 << rxq->id) << 8);
1861 }
1862 }
1863 } else {
1864 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1865 budget -= rx_done;
1866 }
1867
1868 if (budget > 0) {
1869 cause_rx_tx = 0;
1870 napi_complete(napi);
1871 local_irq_save(flags);
1872 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1873 MVNETA_RX_INTR_MASK(rxq_number));
1874 local_irq_restore(flags);
1875 }
1876
1877 pp->cause_rx_tx = cause_rx_tx;
1878 return rx_done;
1879}
1880
1881/* tx done timer callback */
1882static void mvneta_tx_done_timer_callback(unsigned long data)
1883{
1884 struct net_device *dev = (struct net_device *)data;
1885 struct mvneta_port *pp = netdev_priv(dev);
1886 int tx_done = 0, tx_todo = 0;
1887
1888 if (!netif_running(dev))
1889 return;
1890
1891 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1892
1893 tx_done = mvneta_tx_done_gbe(pp,
1894 (((1 << txq_number) - 1) &
1895 MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1896 &tx_todo);
1897 if (tx_todo > 0)
1898 mvneta_add_tx_done_timer(pp);
1899}
1900
1901/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1902static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1903 int num)
1904{
1905 struct net_device *dev = pp->dev;
1906 int i;
1907
1908 for (i = 0; i < num; i++) {
1909 struct sk_buff *skb;
1910 struct mvneta_rx_desc *rx_desc;
1911 unsigned long phys_addr;
1912
1913 skb = dev_alloc_skb(pp->pkt_size);
1914 if (!skb) {
1915 netdev_err(dev, "%s:rxq %d, %d of %d buffs filled\n",
1916 __func__, rxq->id, i, num);
1917 break;
1918 }
1919
1920 rx_desc = rxq->descs + i;
1921 memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
1922 phys_addr = dma_map_single(dev->dev.parent, skb->head,
1923 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1924 DMA_FROM_DEVICE);
1925 if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
1926 dev_kfree_skb(skb);
1927 break;
1928 }
1929
1930 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1931 }
1932
1933 /* Add this number of RX descriptors as non-occupied (ready to
1934 * get packets)
1935 */
1936 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1937
1938 return i;
1939}
1940
1941/* Free all packets pending transmit from all TXQs and reset TX port */
1942static void mvneta_tx_reset(struct mvneta_port *pp)
1943{
1944 int queue;
1945
1946 /* free the skbs in the HAL tx ring */
1947 for (queue = 0; queue < txq_number; queue++)
1948 mvneta_txq_done_force(pp, &pp->txqs[queue]);
1949
1950 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1951 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1952}
1953
1954static void mvneta_rx_reset(struct mvneta_port *pp)
1955{
1956 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1957 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1958}
1959
1960/* Rx/Tx queue initialization/cleanup methods */
1961
1962/* Create a specified RX queue */
1963static int mvneta_rxq_init(struct mvneta_port *pp,
1964 struct mvneta_rx_queue *rxq)
1965
1966{
1967 rxq->size = pp->rx_ring_size;
1968
1969 /* Allocate memory for RX descriptors */
1970 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
1971 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1972 &rxq->descs_phys, GFP_KERNEL);
1973 if (rxq->descs == NULL) {
1974 netdev_err(pp->dev,
1975 "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
1976 rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
1977 rxq->size);
1978 return -ENOMEM;
1979 }
1980
1981 BUG_ON(rxq->descs !=
1982 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
1983
1984 rxq->last_desc = rxq->size - 1;
1985
1986 /* Set Rx descriptors queue starting address */
1987 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1988 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1989
1990 /* Set Offset */
1991 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
1992
1993 /* Set coalescing pkts and time */
1994 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
1995 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
1996
1997 /* Fill RXQ with buffers from RX pool */
1998 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
1999 mvneta_rxq_bm_disable(pp, rxq);
2000 mvneta_rxq_fill(pp, rxq, rxq->size);
2001
2002 return 0;
2003}
2004
2005/* Cleanup Rx queue */
2006static void mvneta_rxq_deinit(struct mvneta_port *pp,
2007 struct mvneta_rx_queue *rxq)
2008{
2009 mvneta_rxq_drop_pkts(pp, rxq);
2010
2011 if (rxq->descs)
2012 dma_free_coherent(pp->dev->dev.parent,
2013 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2014 rxq->descs,
2015 rxq->descs_phys);
2016
2017 rxq->descs = NULL;
2018 rxq->last_desc = 0;
2019 rxq->next_desc_to_proc = 0;
2020 rxq->descs_phys = 0;
2021}
2022
2023/* Create and initialize a tx queue */
2024static int mvneta_txq_init(struct mvneta_port *pp,
2025 struct mvneta_tx_queue *txq)
2026{
2027 txq->size = pp->tx_ring_size;
2028
2029 /* Allocate memory for TX descriptors */
2030 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2031 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2032 &txq->descs_phys, GFP_KERNEL);
2033 if (txq->descs == NULL) {
2034 netdev_err(pp->dev,
2035 "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
2036 txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
2037 txq->size);
2038 return -ENOMEM;
2039 }
2040
2041 /* Make sure descriptor address is cache line size aligned */
2042 BUG_ON(txq->descs !=
2043 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2044
2045 txq->last_desc = txq->size - 1;
2046
2047 /* Set maximum bandwidth for enabled TXQs */
2048 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2049 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2050
2051 /* Set Tx descriptors queue starting address */
2052 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2053 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2054
2055 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2056 if (txq->tx_skb == NULL) {
2057 dma_free_coherent(pp->dev->dev.parent,
2058 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2059 txq->descs, txq->descs_phys);
2060 return -ENOMEM;
2061 }
2062 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2063
2064 return 0;
2065}
2066
2067/* Free all the resources of a tx queue; also used when mvneta_txq_init() fails */
2068static void mvneta_txq_deinit(struct mvneta_port *pp,
2069 struct mvneta_tx_queue *txq)
2070{
2071 kfree(txq->tx_skb);
2072
2073 if (txq->descs)
2074 dma_free_coherent(pp->dev->dev.parent,
2075 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2076 txq->descs, txq->descs_phys);
2077
2078 txq->descs = NULL;
2079 txq->last_desc = 0;
2080 txq->next_desc_to_proc = 0;
2081 txq->descs_phys = 0;
2082
2083 /* Set minimum bandwidth for disabled TXQs */
2084 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2085 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2086
2087 /* Set Tx descriptors queue starting address and size */
2088 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2089 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2090}
2091
2092/* Cleanup all Tx queues */
2093static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2094{
2095 int queue;
2096
2097 for (queue = 0; queue < txq_number; queue++)
2098 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2099}
2100
2101/* Cleanup all Rx queues */
2102static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2103{
2104 int queue;
2105
2106 for (queue = 0; queue < rxq_number; queue++)
2107 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2108}
2109
2110
2111/* Init all Rx queues */
2112static int mvneta_setup_rxqs(struct mvneta_port *pp)
2113{
2114 int queue;
2115
2116 for (queue = 0; queue < rxq_number; queue++) {
2117 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2118 if (err) {
2119 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2120 __func__, queue);
2121 mvneta_cleanup_rxqs(pp);
2122 return err;
2123 }
2124 }
2125
2126 return 0;
2127}
2128
2129/* Init all tx queues */
2130static int mvneta_setup_txqs(struct mvneta_port *pp)
2131{
2132 int queue;
2133
2134 for (queue = 0; queue < txq_number; queue++) {
2135 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2136 if (err) {
2137 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2138 __func__, queue);
2139 mvneta_cleanup_txqs(pp);
2140 return err;
2141 }
2142 }
2143
2144 return 0;
2145}
2146
2147static void mvneta_start_dev(struct mvneta_port *pp)
2148{
2149 mvneta_max_rx_size_set(pp, pp->pkt_size);
2150 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2151
2152 /* start the Rx/Tx activity */
2153 mvneta_port_enable(pp);
2154
2155 /* Enable polling on the port */
2156 napi_enable(&pp->napi);
2157
2158 /* Unmask interrupts */
2159 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2160 MVNETA_RX_INTR_MASK(rxq_number));
2161
2162 phy_start(pp->phy_dev);
2163 netif_tx_start_all_queues(pp->dev);
2164}
2165
2166static void mvneta_stop_dev(struct mvneta_port *pp)
2167{
2168 phy_stop(pp->phy_dev);
2169
2170 napi_disable(&pp->napi);
2171
2172 netif_carrier_off(pp->dev);
2173
2174 mvneta_port_down(pp);
2175 netif_tx_stop_all_queues(pp->dev);
2176
2177 /* Stop the port activity */
2178 mvneta_port_disable(pp);
2179
2180 /* Clear all ethernet port interrupts */
2181 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2182 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2183
2184 /* Mask all ethernet port interrupts */
2185 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2186 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2187 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2188
2189 mvneta_tx_reset(pp);
2190 mvneta_rx_reset(pp);
2191}
2192
2193/* tx timeout callback - display a message and stop/start the network device */
2194static void mvneta_tx_timeout(struct net_device *dev)
2195{
2196 struct mvneta_port *pp = netdev_priv(dev);
2197
2198 netdev_info(dev, "tx timeout\n");
2199 mvneta_stop_dev(pp);
2200 mvneta_start_dev(pp);
2201}
2202
2203/* Return positive if MTU is valid */
2204static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2205{
2206 if (mtu < 68) {
2207 netdev_err(dev, "cannot change mtu to less than 68\n");
2208 return -EINVAL;
2209 }
2210
2211 /* 9676 == 9700 - 20 and rounding to 8 */
2212 if (mtu > 9676) {
2213 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2214 mtu = 9676;
2215 }
2216
2217 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2218 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2219 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2220 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2221 }
2222
2223 return mtu;
2224}
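
Illustrative outcomes of the checks above (exact rounding depends on MVNETA_RX_PKT_SIZE(), defined earlier in this file):

	/* Hypothetical calls:
	 *   mvneta_check_mtu_valid(dev, 40)    -> -EINVAL (below the minimum of 68)
	 *   mvneta_check_mtu_valid(dev, 1500)  -> 1500, provided the computed RX
	 *                                         packet size is 8-byte aligned
	 *   mvneta_check_mtu_valid(dev, 10000) -> clamped to 9676, then subject
	 *                                         to the same alignment check
	 */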
2225
2226/* Change the device mtu */
2227static int mvneta_change_mtu(struct net_device *dev, int mtu)
2228{
2229 struct mvneta_port *pp = netdev_priv(dev);
2230 int ret;
2231
2232 mtu = mvneta_check_mtu_valid(dev, mtu);
2233 if (mtu < 0)
2234 return -EINVAL;
2235
2236 dev->mtu = mtu;
2237
2238 if (!netif_running(dev))
2239 return 0;
2240
2241 /* The interface is running, so we have to force a
2242 * reallocation of the RXQs
2243 */
2244 mvneta_stop_dev(pp);
2245
2246 mvneta_cleanup_txqs(pp);
2247 mvneta_cleanup_rxqs(pp);
2248
2249 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2250
2251 ret = mvneta_setup_rxqs(pp);
2252 if (ret) {
2253 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2254 return ret;
2255 }
2256
2257 mvneta_setup_txqs(pp);
2258
2259 mvneta_start_dev(pp);
2260 mvneta_port_up(pp);
2261
2262 return 0;
2263}
2264
2265/* Handle setting mac address */
2266static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2267{
2268 struct mvneta_port *pp = netdev_priv(dev);
2269 u8 *mac = addr + 2;
2270 int i;
2271
2272 if (netif_running(dev))
2273 return -EBUSY;
2274
2275 /* Remove previous address table entry */
2276 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2277
2278 /* Set new addr in hw */
2279 mvneta_mac_addr_set(pp, mac, rxq_def);
2280
2281 /* Set addr in the device */
2282 for (i = 0; i < ETH_ALEN; i++)
2283 dev->dev_addr[i] = mac[i];
2284
2285 return 0;
2286}
2287
2288static void mvneta_adjust_link(struct net_device *ndev)
2289{
2290 struct mvneta_port *pp = netdev_priv(ndev);
2291 struct phy_device *phydev = pp->phy_dev;
2292 int status_change = 0;
2293
2294 if (phydev->link) {
2295 if ((pp->speed != phydev->speed) ||
2296 (pp->duplex != phydev->duplex)) {
2297 u32 val;
2298
2299 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2300 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2301 MVNETA_GMAC_CONFIG_GMII_SPEED |
2302 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2303
2304 if (phydev->duplex)
2305 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2306
2307 if (phydev->speed == SPEED_1000)
2308 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2309 else
2310 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2311
2312 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2313
2314 pp->duplex = phydev->duplex;
2315 pp->speed = phydev->speed;
2316 }
2317 }
2318
2319 if (phydev->link != pp->link) {
2320 if (!phydev->link) {
2321 pp->duplex = -1;
2322 pp->speed = 0;
2323 }
2324
2325 pp->link = phydev->link;
2326 status_change = 1;
2327 }
2328
2329 if (status_change) {
2330 if (phydev->link) {
2331 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2332 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2333 MVNETA_GMAC_FORCE_LINK_DOWN);
2334 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2335 mvneta_port_up(pp);
2336 netdev_info(pp->dev, "link up\n");
2337 } else {
2338 mvneta_port_down(pp);
2339 netdev_info(pp->dev, "link down\n");
2340 }
2341 }
2342}
2343
2344static int mvneta_mdio_probe(struct mvneta_port *pp)
2345{
2346 struct phy_device *phy_dev;
2347
2348 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2349 pp->phy_interface);
2350 if (!phy_dev) {
2351 netdev_err(pp->dev, "could not find the PHY\n");
2352 return -ENODEV;
2353 }
2354
2355 phy_dev->supported &= PHY_GBIT_FEATURES;
2356 phy_dev->advertising = phy_dev->supported;
2357
2358 pp->phy_dev = phy_dev;
2359 pp->link = 0;
2360 pp->duplex = 0;
2361 pp->speed = 0;
2362
2363 return 0;
2364}
2365
2366static void mvneta_mdio_remove(struct mvneta_port *pp)
2367{
2368 phy_disconnect(pp->phy_dev);
2369 pp->phy_dev = NULL;
2370}
2371
2372static int mvneta_open(struct net_device *dev)
2373{
2374 struct mvneta_port *pp = netdev_priv(dev);
2375 int ret;
2376
2377 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2378
2379 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2380
2381 ret = mvneta_setup_rxqs(pp);
2382 if (ret)
2383 return ret;
2384
2385 ret = mvneta_setup_txqs(pp);
2386 if (ret)
2387 goto err_cleanup_rxqs;
2388
2389 /* Connect to port interrupt line */
2390 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2391 MVNETA_DRIVER_NAME, pp);
2392 if (ret) {
2393 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2394 goto err_cleanup_txqs;
2395 }
2396
2397 /* By default, the link is down */
2398 netif_carrier_off(pp->dev);
2399
2400 ret = mvneta_mdio_probe(pp);
2401 if (ret < 0) {
2402 netdev_err(dev, "cannot probe MDIO bus\n");
2403 goto err_free_irq;
2404 }
2405
2406 mvneta_start_dev(pp);
2407
2408 return 0;
2409
2410err_free_irq:
2411 free_irq(pp->dev->irq, pp);
2412err_cleanup_txqs:
2413 mvneta_cleanup_txqs(pp);
2414err_cleanup_rxqs:
2415 mvneta_cleanup_rxqs(pp);
2416 return ret;
2417}
2418
2419/* Stop the port, free port interrupt line */
2420static int mvneta_stop(struct net_device *dev)
2421{
2422 struct mvneta_port *pp = netdev_priv(dev);
2423
2424 mvneta_stop_dev(pp);
2425 mvneta_mdio_remove(pp);
2426 free_irq(dev->irq, pp);
2427 mvneta_cleanup_rxqs(pp);
2428 mvneta_cleanup_txqs(pp);
2429 del_timer(&pp->tx_done_timer);
2430 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2431
2432 return 0;
2433}
2434
2435/* Ethtool methods */
2436
2437/* Get settings (phy address, speed) for ethtool */
2438int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2439{
2440 struct mvneta_port *pp = netdev_priv(dev);
2441
2442 if (!pp->phy_dev)
2443 return -ENODEV;
2444
2445 return phy_ethtool_gset(pp->phy_dev, cmd);
2446}
2447
2448/* Set settings (phy address, speed) for ethtool */
2449int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2450{
2451 struct mvneta_port *pp = netdev_priv(dev);
2452
2453 if (!pp->phy_dev)
2454 return -ENODEV;
2455
2456 return phy_ethtool_sset(pp->phy_dev, cmd);
2457}
2458
2459/* Set interrupt coalescing for ethtool */
2460static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2461 struct ethtool_coalesce *c)
2462{
2463 struct mvneta_port *pp = netdev_priv(dev);
2464 int queue;
2465
2466 for (queue = 0; queue < rxq_number; queue++) {
2467 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2468 rxq->time_coal = c->rx_coalesce_usecs;
2469 rxq->pkts_coal = c->rx_max_coalesced_frames;
2470 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2471 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2472 }
2473
2474 for (queue = 0; queue < txq_number; queue++) {
2475 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2476 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2477 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2478 }
2479
2480 return 0;
2481}
2482
2483/* Get interrupt coalescing settings for ethtool */
2484static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2485 struct ethtool_coalesce *c)
2486{
2487 struct mvneta_port *pp = netdev_priv(dev);
2488
2489 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2490 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2491
2492 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2493 return 0;
2494}
2495
2496
2497static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2498 struct ethtool_drvinfo *drvinfo)
2499{
2500 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2501 sizeof(drvinfo->driver));
2502 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2503 sizeof(drvinfo->version));
2504 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2505 sizeof(drvinfo->bus_info));
2506}
2507
2508
2509static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2510 struct ethtool_ringparam *ring)
2511{
2512 struct mvneta_port *pp = netdev_priv(netdev);
2513
2514 ring->rx_max_pending = MVNETA_MAX_RXD;
2515 ring->tx_max_pending = MVNETA_MAX_TXD;
2516 ring->rx_pending = pp->rx_ring_size;
2517 ring->tx_pending = pp->tx_ring_size;
2518}
2519
2520static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2521 struct ethtool_ringparam *ring)
2522{
2523 struct mvneta_port *pp = netdev_priv(dev);
2524
2525 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2526 return -EINVAL;
2527 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2528 ring->rx_pending : MVNETA_MAX_RXD;
2529 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2530 ring->tx_pending : MVNETA_MAX_TXD;
2531
2532 if (netif_running(dev)) {
2533 mvneta_stop(dev);
2534 if (mvneta_open(dev)) {
2535 netdev_err(dev,
2536 "error on opening device after ring param change\n");
2537 return -ENOMEM;
2538 }
2539 }
2540
2541 return 0;
2542}
2543
2544static const struct net_device_ops mvneta_netdev_ops = {
2545 .ndo_open = mvneta_open,
2546 .ndo_stop = mvneta_stop,
2547 .ndo_start_xmit = mvneta_tx,
2548 .ndo_set_rx_mode = mvneta_set_rx_mode,
2549 .ndo_set_mac_address = mvneta_set_mac_addr,
2550 .ndo_change_mtu = mvneta_change_mtu,
2551 .ndo_tx_timeout = mvneta_tx_timeout,
2552 .ndo_get_stats64 = mvneta_get_stats64,
2553};
2554
2555const struct ethtool_ops mvneta_eth_tool_ops = {
2556 .get_link = ethtool_op_get_link,
2557 .get_settings = mvneta_ethtool_get_settings,
2558 .set_settings = mvneta_ethtool_set_settings,
2559 .set_coalesce = mvneta_ethtool_set_coalesce,
2560 .get_coalesce = mvneta_ethtool_get_coalesce,
2561 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2562 .get_ringparam = mvneta_ethtool_get_ringparam,
2563 .set_ringparam = mvneta_ethtool_set_ringparam,
2564};
2565
2566/* Initialize hw */
2567static int __devinit mvneta_init(struct mvneta_port *pp, int phy_addr)
2568{
2569 int queue;
2570
2571 /* Disable port */
2572 mvneta_port_disable(pp);
2573
2574 /* Set port default values */
2575 mvneta_defaults_set(pp);
2576
2577 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2578 GFP_KERNEL);
2579 if (!pp->txqs)
2580 return -ENOMEM;
2581
2582 /* Initialize TX descriptor rings */
2583 for (queue = 0; queue < txq_number; queue++) {
2584 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2585 txq->id = queue;
2586 txq->size = pp->tx_ring_size;
2587 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2588 }
2589
2590 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2591 GFP_KERNEL);
2592 if (!pp->rxqs) {
2593 kfree(pp->txqs);
2594 return -ENOMEM;
2595 }
2596
2597 /* Create Rx descriptor rings */
2598 for (queue = 0; queue < rxq_number; queue++) {
2599 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2600 rxq->id = queue;
2601 rxq->size = pp->rx_ring_size;
2602 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2603 rxq->time_coal = MVNETA_RX_COAL_USEC;
2604 }
2605
2606 return 0;
2607}
2608
2609static void mvneta_deinit(struct mvneta_port *pp)
2610{
2611 kfree(pp->txqs);
2612 kfree(pp->rxqs);
2613}
2614
2615/* platform glue: initialize decoding windows */
2616static void __devinit
2617mvneta_conf_mbus_windows(struct mvneta_port *pp,
2618 const struct mbus_dram_target_info *dram)
2619{
2620 u32 win_enable;
2621 u32 win_protect;
2622 int i;
2623
2624 for (i = 0; i < 6; i++) {
2625 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2626 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2627
2628 if (i < 4)
2629 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2630 }
2631
2632 win_enable = 0x3f;
2633 win_protect = 0;
2634
2635 for (i = 0; i < dram->num_cs; i++) {
2636 const struct mbus_dram_window *cs = dram->cs + i;
2637 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2638 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2639
2640 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2641 (cs->size - 1) & 0xffff0000);
2642
2643 win_enable &= ~(1 << i);
2644 win_protect |= 3 << (2 * i);
2645 }
2646
2647 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2648}
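
win_enable starts at 0x3f, i.e. all six windows disabled; programming a window clears its bit. A worked example for a single hypothetical DRAM chip-select (base 0x00000000, size 512 MB, target id t, attribute a):

	/*   MVNETA_WIN_BASE(0) = (0x00000000 & 0xffff0000) | (a << 8) | t
	 *   MVNETA_WIN_SIZE(0) = (0x20000000 - 1) & 0xffff0000 = 0x1fff0000
	 *   win_enable         = 0x3f & ~(1 << 0)              = 0x3e
	 */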
2649
2650/* Power up the port */
2651static void __devinit mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2652{
2653 u32 val;
2654
2655 /* MAC Cause register should be cleared */
2656 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2657
2658 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2659 mvneta_port_sgmii_config(pp);
2660
2661 mvneta_gmac_rgmii_set(pp, 1);
2662
2663 /* Cancel Port Reset */
2664 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2665 val &= ~MVNETA_GMAC2_PORT_RESET;
2666 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2667
2668 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2669 MVNETA_GMAC2_PORT_RESET) != 0)
2670 continue;
2671}
2672
2673/* Device initialization routine */
2674static int __devinit mvneta_probe(struct platform_device *pdev)
2675{
2676 const struct mbus_dram_target_info *dram_target_info;
2677 struct device_node *dn = pdev->dev.of_node;
2678 struct device_node *phy_node;
2679 u32 phy_addr;
2680 struct mvneta_port *pp;
2681 struct net_device *dev;
2682 const char *mac_addr;
2683 int phy_mode;
2684 int err;
2685
2686 /* Our multiqueue support is not complete, so for now, only
2687 * allow the usage of the first RX queue
2688 */
2689 if (rxq_def != 0) {
2690 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2691 return -EINVAL;
2692 }
2693
2694 dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
2695 if (!dev)
2696 return -ENOMEM;
2697
2698 dev->irq = irq_of_parse_and_map(dn, 0);
2699 if (dev->irq == 0) {
2700 err = -EINVAL;
2701 goto err_free_netdev;
2702 }
2703
2704 phy_node = of_parse_phandle(dn, "phy", 0);
2705 if (!phy_node) {
2706 dev_err(&pdev->dev, "no associated PHY\n");
2707 err = -ENODEV;
2708 goto err_free_irq;
2709 }
2710
2711 phy_mode = of_get_phy_mode(dn);
2712 if (phy_mode < 0) {
2713 dev_err(&pdev->dev, "incorrect phy-mode\n");
2714 err = -EINVAL;
2715 goto err_free_irq;
2716 }
2717
2718 mac_addr = of_get_mac_address(dn);
2719
2720 if (!mac_addr || !is_valid_ether_addr(mac_addr))
2721 eth_hw_addr_random(dev);
2722 else
2723 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
2724
2725 dev->tx_queue_len = MVNETA_MAX_TXD;
2726 dev->watchdog_timeo = 5 * HZ;
2727 dev->netdev_ops = &mvneta_netdev_ops;
2728
2729 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2730
2731 pp = netdev_priv(dev);
2732
2733 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2734 init_timer(&pp->tx_done_timer);
2735 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2736
2737 pp->weight = MVNETA_RX_POLL_WEIGHT;
2738 pp->phy_node = phy_node;
2739 pp->phy_interface = phy_mode;
2740
2741 pp->base = of_iomap(dn, 0);
2742 if (pp->base == NULL) {
2743 err = -ENOMEM;
2744 goto err_free_irq;
2745 }
2746
2747 pp->clk = devm_clk_get(&pdev->dev, NULL);
2748 if (IS_ERR(pp->clk)) {
2749 err = PTR_ERR(pp->clk);
2750 goto err_unmap;
2751 }
2752
2753 clk_prepare_enable(pp->clk);
2754
2755 pp->tx_done_timer.data = (unsigned long)dev;
2756
2757 pp->tx_ring_size = MVNETA_MAX_TXD;
2758 pp->rx_ring_size = MVNETA_MAX_RXD;
2759
2760 pp->dev = dev;
2761 SET_NETDEV_DEV(dev, &pdev->dev);
2762
2763 err = mvneta_init(pp, phy_addr);
2764 if (err < 0) {
2765 dev_err(&pdev->dev, "can't init eth hal\n");
2766 goto err_clk;
2767 }
2768 mvneta_port_power_up(pp, phy_mode);
2769
2770 dram_target_info = mv_mbus_dram_info();
2771 if (dram_target_info)
2772 mvneta_conf_mbus_windows(pp, dram_target_info);
2773
2774 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2775
2776 err = register_netdev(dev);
2777 if (err < 0) {
2778 dev_err(&pdev->dev, "failed to register\n");
2779 goto err_deinit;
2780 }
2781
2782 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2783 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2784 dev->priv_flags |= IFF_UNICAST_FLT;
2785
2786 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
2787
2788 platform_set_drvdata(pdev, pp->dev);
2789
2790 return 0;
2791
2792err_deinit:
2793 mvneta_deinit(pp);
2794err_clk:
2795 clk_disable_unprepare(pp->clk);
2796err_unmap:
2797 iounmap(pp->base);
2798err_free_irq:
2799 irq_dispose_mapping(dev->irq);
2800err_free_netdev:
2801 free_netdev(dev);
2802 return err;
2803}
2804
2805/* Device removal routine */
2806static int __devexit mvneta_remove(struct platform_device *pdev)
2807{
2808 struct net_device *dev = platform_get_drvdata(pdev);
2809 struct mvneta_port *pp = netdev_priv(dev);
2810
2811 unregister_netdev(dev);
2812 mvneta_deinit(pp);
2813 clk_disable_unprepare(pp->clk);
2814 iounmap(pp->base);
2815 irq_dispose_mapping(dev->irq);
2816 free_netdev(dev);
2817
2818 platform_set_drvdata(pdev, NULL);
2819
2820 return 0;
2821}
2822
2823static const struct of_device_id mvneta_match[] = {
2824 { .compatible = "marvell,armada-370-neta" },
2825 { }
2826};
2827MODULE_DEVICE_TABLE(of, mvneta_match);
2828
2829static struct platform_driver mvneta_driver = {
2830 .probe = mvneta_probe,
2831 .remove = __devexit_p(mvneta_remove),
2832 .driver = {
2833 .name = MVNETA_DRIVER_NAME,
2834 .of_match_table = mvneta_match,
2835 },
2836};
2837
2838module_platform_driver(mvneta_driver);
2839
2840MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2841MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2842MODULE_LICENSE("GPL");
2843
2844module_param(rxq_number, int, S_IRUGO);
2845module_param(txq_number, int, S_IRUGO);
2846
2847module_param(rxq_def, int, S_IRUGO);
2848module_param(txq_def, int, S_IRUGO);
diff --git a/include/linux/clk/mvebu.h b/include/linux/clk/mvebu.h
new file mode 100644
index 000000000000..8c4ae713b063
--- /dev/null
+++ b/include/linux/clk/mvebu.h
@@ -0,0 +1,22 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 */
16
17#ifndef __CLK_MVEBU_H_
18#define __CLK_MVEBU_H_
19
20void __init mvebu_clocks_init(void);
21
22#endif
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 2ba1f7d76eef..8ec18f64e396 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -10,15 +10,14 @@
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
 
-#define MV_XOR_SHARED_NAME "mv_xor_shared"
 #define MV_XOR_NAME "mv_xor"
 
-struct mv_xor_platform_data {
-	struct platform_device *shared;
-	int hw_id;
+struct mv_xor_channel_data {
 	dma_cap_mask_t cap_mask;
-	size_t pool_size;
 };
 
+struct mv_xor_platform_data {
+	struct mv_xor_channel_data *channels;
+};
 
 #endif