50 files changed, 3082 insertions, 434 deletions
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
index 61df564c0d23..d74091a8a3bf 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -4,6 +4,8 @@ Marvell Armada 370 and Armada XP Interrupt Controller | |||
4 | Required properties: | 4 | Required properties: |
5 | - compatible: Should be "marvell,mpic" | 5 | - compatible: Should be "marvell,mpic" |
6 | - interrupt-controller: Identifies the node as an interrupt controller. | 6 | - interrupt-controller: Identifies the node as an interrupt controller. |
7 | - msi-controller: Identifies the node as an PCI Message Signaled | ||
8 | Interrupt controller. | ||
7 | - #interrupt-cells: The number of cells to define the interrupts. Should be 1. | 9 | - #interrupt-cells: The number of cells to define the interrupts. Should be 1. |
8 | The cell is the IRQ number | 10 | The cell is the IRQ number |
9 | 11 | ||
@@ -24,6 +26,7 @@ Example: | |||
24 | #address-cells = <1>; | 26 | #address-cells = <1>; |
25 | #size-cells = <1>; | 27 | #size-cells = <1>; |
26 | interrupt-controller; | 28 | interrupt-controller; |
29 | msi-controller; | ||
27 | reg = <0xd0020a00 0x1d0>, | 30 | reg = <0xd0020a00 0x1d0>, |
28 | <0xd0021070 0x58>; | 31 | <0xd0021070 0x58>; |
29 | }; | 32 | }; |
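With the node now advertising msi-controller, a PCI host controller can name it as its MSI parent; the mvebu PCIe binding update later in this patch does exactly that. A purely illustrative fragment (node name and the remaining properties elided):

	pcie-controller {
		compatible = "marvell,armada-370-pcie";
		msi-parent = <&mpic>;
		/* ... remaining PCIe controller properties ... */
	};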
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index 92d36e2aa877..f28d82bbbc56 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -36,14 +36,18 @@ specific to ARM. | |||
36 | 36 | ||
37 | - reg | 37 | - reg |
38 | Usage: required | 38 | Usage: required |
39 | Value type: <prop-encoded-array> | 39 | Value type: Integer cells. A register entry, expressed as a pair |
40 | of cells, containing base and size. | ||
40 | Definition: A standard property. Specifies base physical | 41 | Definition: A standard property. Specifies base physical |
41 | address of CCI control registers common to all | 42 | address of CCI control registers common to all |
42 | interfaces. | 43 | interfaces. |
43 | 44 | ||
44 | - ranges: | 45 | - ranges: |
45 | Usage: required | 46 | Usage: required |
46 | Value type: <prop-encoded-array> | 47 | Value type: Integer cells. An array of range entries, expressed |
48 | as a tuple of cells, containing child address, | ||
49 | parent address and the size of the region in the | ||
50 | child address space. | ||
47 | Definition: A standard property. Follow rules in the ePAPR for | 51 | Definition: A standard property. Follow rules in the ePAPR for |
48 | hierarchical bus addressing. CCI interfaces | 52 | hierarchical bus addressing. CCI interfaces |
49 | addresses refer to the parent node addressing | 53 | addresses refer to the parent node addressing |
@@ -74,11 +78,49 @@ specific to ARM. | |||
74 | 78 | ||
75 | - reg: | 79 | - reg: |
76 | Usage: required | 80 | Usage: required |
77 | Value type: <prop-encoded-array> | 81 | Value type: Integer cells. A register entry, expressed |
82 | as a pair of cells, containing base and | ||
83 | size. | ||
78 | Definition: the base address and size of the | 84 | Definition: the base address and size of the |
79 | corresponding interface programming | 85 | corresponding interface programming |
80 | registers. | 86 | registers. |
81 | 87 | ||
88 | - CCI PMU node | ||
89 | |||
90 | Parent node must be CCI interconnect node. | ||
91 | |||
92 | A CCI pmu node must contain the following properties: | ||
93 | |||
94 | - compatible | ||
95 | Usage: required | ||
96 | Value type: <string> | ||
97 | Definition: must be "arm,cci-400-pmu" | ||
98 | |||
99 | - reg: | ||
100 | Usage: required | ||
101 | Value type: Integer cells. A register entry, expressed | ||
102 | as a pair of cells, containing base and | ||
103 | size. | ||
104 | Definition: the base address and size of the | ||
105 | corresponding interface programming | ||
106 | registers. | ||
107 | |||
108 | - interrupts: | ||
109 | Usage: required | ||
110 | Value type: Integer cells. Array of interrupt specifier | ||
111 | entries, as defined in | ||
112 | ../interrupt-controller/interrupts.txt. | ||
113 | Definition: list of counter overflow interrupts, one per | ||
114 | counter. The interrupts must be specified | ||
115 | starting with the cycle counter overflow | ||
116 | interrupt, followed by counter0 overflow | ||
117 | interrupt, counter1 overflow interrupt,... | ||
118 | ,counterN overflow interrupt. | ||
119 | |||
120 | The CCI PMU has an interrupt signal for each | ||
121 | counter. The number of interrupts must be | ||
122 | equal to the number of counters. | ||
123 | |||
82 | * CCI interconnect bus masters | 124 | * CCI interconnect bus masters |
83 | 125 | ||
84 | Description: masters in the device tree connected to a CCI port | 126 | Description: masters in the device tree connected to a CCI port |
@@ -144,7 +186,7 @@ Example: | |||
144 | #address-cells = <1>; | 186 | #address-cells = <1>; |
145 | #size-cells = <1>; | 187 | #size-cells = <1>; |
146 | reg = <0x0 0x2c090000 0 0x1000>; | 188 | reg = <0x0 0x2c090000 0 0x1000>; |
147 | ranges = <0x0 0x0 0x2c090000 0x6000>; | 189 | ranges = <0x0 0x0 0x2c090000 0x10000>; |
148 | 190 | ||
149 | cci_control0: slave-if@1000 { | 191 | cci_control0: slave-if@1000 { |
150 | compatible = "arm,cci-400-ctrl-if"; | 192 | compatible = "arm,cci-400-ctrl-if"; |
@@ -163,6 +205,16 @@ Example: | |||
163 | interface-type = "ace"; | 205 | interface-type = "ace"; |
164 | reg = <0x5000 0x1000>; | 206 | reg = <0x5000 0x1000>; |
165 | }; | 207 | }; |
208 | |||
209 | pmu@9000 { | ||
210 | compatible = "arm,cci-400-pmu"; | ||
211 | reg = <0x9000 0x5000>; | ||
212 | interrupts = <0 101 4>, | ||
213 | <0 102 4>, | ||
214 | <0 103 4>, | ||
215 | <0 104 4>, | ||
216 | <0 105 4>; | ||
217 | }; | ||
166 | }; | 218 | }; |
167 | 219 | ||
168 | This CCI node corresponds to a CCI component whose control registers sits | 220 | This CCI node corresponds to a CCI component whose control registers sits |
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index cffc93d97f54..fc2910fa7e45 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -1,10 +1,10 @@ | |||
1 | * Gated Clock bindings for Marvell Orion SoCs | 1 | * Gated Clock bindings for Marvell EBU SoCs |
2 | 2 | ||
3 | Marvell Dove and Kirkwood allow some peripheral clocks to be gated to save | 3 | Marvell Armada 370/XP, Dove and Kirkwood allow some peripheral clocks to be |
4 | some power. The clock consumer should specify the desired clock by having | 4 | gated to save some power. The clock consumer should specify the desired clock |
5 | the clock ID in its "clocks" phandle cell. The clock ID is directly mapped to | 5 | by having the clock ID in its "clocks" phandle cell. The clock ID is directly |
6 | the corresponding clock gating control bit in HW to ease manual clock lookup | 6 | mapped to the corresponding clock gating control bit in HW to ease manual clock |
7 | in datasheet. | 7 | lookup in datasheet. |
8 | 8 | ||
9 | The following is a list of provided IDs for Armada 370: | 9 | The following is a list of provided IDs for Armada 370: |
10 | ID Clock Peripheral | 10 | ID Clock Peripheral |
@@ -94,6 +94,8 @@ ID Clock Peripheral | |||
94 | 94 | ||
95 | Required properties: | 95 | Required properties: |
96 | - compatible : shall be one of the following: | 96 | - compatible : shall be one of the following: |
97 | "marvell,armada-370-gating-clock" - for Armada 370 SoC clock gating | ||
98 | "marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating | ||
97 | "marvell,dove-gating-clock" - for Dove SoC clock gating | 99 | "marvell,dove-gating-clock" - for Dove SoC clock gating |
98 | "marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating | 100 | "marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating |
99 | - reg : shall be the register address of the Clock Gating Control register | 101 | - reg : shall be the register address of the Clock Gating Control register |
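The hunk above only adds the two new compatible strings; the provider node keeps the shape already documented for Dove/Kirkwood. A sketch for Armada 370, where the phandle name, parent clock and register offset are assumptions for illustration only:

	gateclk: clock-gating-control@18220 {
		compatible = "marvell,armada-370-gating-clock";
		reg = <0x18220 0x4>;
		clocks = <&coreclk 0>;
		#clock-cells = <1>;
	};

A consumer then selects a gate by one of the IDs listed above, e.g. clocks = <&gateclk 5>;, as in the PCIe example further down in this patch.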
diff --git a/Documentation/devicetree/bindings/pci/mvebu-pci.txt b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
index 9556e2fedf6d..08c716b2c6b6 100644
--- a/Documentation/devicetree/bindings/pci/mvebu-pci.txt
+++ b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
@@ -5,6 +5,7 @@ Mandatory properties: | |||
5 | - compatible: one of the following values: | 5 | - compatible: one of the following values: |
6 | marvell,armada-370-pcie | 6 | marvell,armada-370-pcie |
7 | marvell,armada-xp-pcie | 7 | marvell,armada-xp-pcie |
8 | marvell,dove-pcie | ||
8 | marvell,kirkwood-pcie | 9 | marvell,kirkwood-pcie |
9 | - #address-cells, set to <3> | 10 | - #address-cells, set to <3> |
10 | - #size-cells, set to <2> | 11 | - #size-cells, set to <2> |
@@ -14,6 +15,8 @@ Mandatory properties: | |||
14 | - ranges: ranges describing the MMIO registers to control the PCIe | 15 | - ranges: ranges describing the MMIO registers to control the PCIe |
15 | interfaces, and ranges describing the MBus windows needed to access | 16 | interfaces, and ranges describing the MBus windows needed to access |
16 | the memory and I/O regions of each PCIe interface. | 17 | the memory and I/O regions of each PCIe interface. |
18 | - msi-parent: Link to the hardware entity that serves as the Message | ||
19 | Signaled Interrupt controller for this PCI controller. | ||
17 | 20 | ||
18 | The ranges describing the MMIO registers have the following layout: | 21 | The ranges describing the MMIO registers have the following layout: |
19 | 22 | ||
@@ -74,6 +77,8 @@ and the following optional properties: | |||
74 | - marvell,pcie-lane: the physical PCIe lane number, for ports having | 77 | - marvell,pcie-lane: the physical PCIe lane number, for ports having |
75 | multiple lanes. If this property is not found, we assume that the | 78 | multiple lanes. If this property is not found, we assume that the |
76 | value is 0. | 79 | value is 0. |
80 | - reset-gpios: optional gpio to PERST# | ||
81 | - reset-delay-us: delay in us to wait after reset de-assertion | ||
77 | 82 | ||
78 | Example: | 83 | Example: |
79 | 84 | ||
@@ -86,6 +91,7 @@ pcie-controller { | |||
86 | #size-cells = <2>; | 91 | #size-cells = <2>; |
87 | 92 | ||
88 | bus-range = <0x00 0xff>; | 93 | bus-range = <0x00 0xff>; |
94 | msi-parent = <&mpic>; | ||
89 | 95 | ||
90 | ranges = | 96 | ranges = |
91 | <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */ | 97 | <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */ |
@@ -135,6 +141,10 @@ pcie-controller { | |||
135 | interrupt-map = <0 0 0 0 &mpic 58>; | 141 | interrupt-map = <0 0 0 0 &mpic 58>; |
136 | marvell,pcie-port = <0>; | 142 | marvell,pcie-port = <0>; |
137 | marvell,pcie-lane = <0>; | 143 | marvell,pcie-lane = <0>; |
144 | /* low-active PERST# reset on GPIO 25 */ | ||
145 | reset-gpios = <&gpio0 25 1>; | ||
146 | /* wait 20ms for device settle after reset deassertion */ | ||
147 | reset-delay-us = <20000>; | ||
138 | clocks = <&gateclk 5>; | 148 | clocks = <&gateclk 5>; |
139 | status = "disabled"; | 149 | status = "disabled"; |
140 | }; | 150 | }; |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9bd3a85d880f..04163fece49f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -826,7 +826,6 @@ config ARCH_DAVINCI | |||
826 | select GENERIC_CLOCKEVENTS | 826 | select GENERIC_CLOCKEVENTS |
827 | select GENERIC_IRQ_CHIP | 827 | select GENERIC_IRQ_CHIP |
828 | select HAVE_IDE | 828 | select HAVE_IDE |
829 | select NEED_MACH_GPIO_H | ||
830 | select TI_PRIV_EDMA | 829 | select TI_PRIV_EDMA |
831 | select USE_OF | 830 | select USE_OF |
832 | select ZONE_DMA | 831 | select ZONE_DMA |
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 30a44e2df47e..40f15f133c55 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -22,17 +22,19 @@ | |||
22 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
24 | #include <linux/spi/flash.h> | 24 | #include <linux/spi/flash.h> |
25 | #include <linux/platform_data/gpio-davinci.h> | ||
26 | #include <linux/platform_data/mtd-davinci.h> | ||
27 | #include <linux/platform_data/mtd-davinci-aemif.h> | ||
28 | #include <linux/platform_data/spi-davinci.h> | ||
29 | #include <linux/platform_data/usb-davinci.h> | ||
25 | 30 | ||
26 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
27 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
28 | 33 | ||
34 | #include <mach/common.h> | ||
29 | #include <mach/cp_intc.h> | 35 | #include <mach/cp_intc.h> |
30 | #include <mach/mux.h> | 36 | #include <mach/mux.h> |
31 | #include <linux/platform_data/mtd-davinci.h> | ||
32 | #include <mach/da8xx.h> | 37 | #include <mach/da8xx.h> |
33 | #include <linux/platform_data/usb-davinci.h> | ||
34 | #include <linux/platform_data/mtd-davinci-aemif.h> | ||
35 | #include <linux/platform_data/spi-davinci.h> | ||
36 | 38 | ||
37 | #define DA830_EVM_PHY_ID "" | 39 | #define DA830_EVM_PHY_ID "" |
38 | /* | 40 | /* |
@@ -591,6 +593,10 @@ static __init void da830_evm_init(void) | |||
591 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 593 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
592 | int ret; | 594 | int ret; |
593 | 595 | ||
596 | ret = da830_register_gpio(); | ||
597 | if (ret) | ||
598 | pr_warn("da830_evm_init: GPIO init failed: %d\n", ret); | ||
599 | |||
594 | ret = da830_register_edma(da830_edma_rsv); | 600 | ret = da830_register_edma(da830_edma_rsv); |
595 | if (ret) | 601 | if (ret) |
596 | pr_warning("da830_evm_init: edma registration failed: %d\n", | 602 | pr_warning("da830_evm_init: edma registration failed: %d\n", |
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index dd1fb24521aa..df16cb88a26b 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mtd/partitions.h> | 28 | #include <linux/mtd/partitions.h> |
29 | #include <linux/mtd/physmap.h> | 29 | #include <linux/mtd/physmap.h> |
30 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
31 | #include <linux/platform_data/gpio-davinci.h> | ||
31 | #include <linux/platform_data/mtd-davinci.h> | 32 | #include <linux/platform_data/mtd-davinci.h> |
32 | #include <linux/platform_data/mtd-davinci-aemif.h> | 33 | #include <linux/platform_data/mtd-davinci-aemif.h> |
33 | #include <linux/platform_data/spi-davinci.h> | 34 | #include <linux/platform_data/spi-davinci.h> |
@@ -38,6 +39,7 @@ | |||
38 | #include <linux/spi/flash.h> | 39 | #include <linux/spi/flash.h> |
39 | #include <linux/wl12xx.h> | 40 | #include <linux/wl12xx.h> |
40 | 41 | ||
42 | #include <mach/common.h> | ||
41 | #include <mach/cp_intc.h> | 43 | #include <mach/cp_intc.h> |
42 | #include <mach/da8xx.h> | 44 | #include <mach/da8xx.h> |
43 | #include <mach/mux.h> | 45 | #include <mach/mux.h> |
@@ -1437,6 +1439,10 @@ static __init void da850_evm_init(void) | |||
1437 | { | 1439 | { |
1438 | int ret; | 1440 | int ret; |
1439 | 1441 | ||
1442 | ret = da850_register_gpio(); | ||
1443 | if (ret) | ||
1444 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
1445 | |||
1440 | ret = pmic_tps65070_init(); | 1446 | ret = pmic_tps65070_init(); |
1441 | if (ret) | 1447 | if (ret) |
1442 | pr_warn("%s: TPS65070 PMIC init failed: %d\n", __func__, ret); | 1448 | pr_warn("%s: TPS65070 PMIC init failed: %d\n", __func__, ret); |
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 42b23a3194a0..ecdc7d44fa70 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -22,15 +22,17 @@ | |||
22 | #include <media/tvp514x.h> | 22 | #include <media/tvp514x.h> |
23 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
24 | #include <linux/spi/eeprom.h> | 24 | #include <linux/spi/eeprom.h> |
25 | #include <linux/platform_data/gpio-davinci.h> | ||
26 | #include <linux/platform_data/i2c-davinci.h> | ||
27 | #include <linux/platform_data/mtd-davinci.h> | ||
28 | #include <linux/platform_data/mmc-davinci.h> | ||
29 | #include <linux/platform_data/usb-davinci.h> | ||
25 | 30 | ||
26 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
27 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
28 | 33 | ||
29 | #include <linux/platform_data/i2c-davinci.h> | ||
30 | #include <mach/serial.h> | 34 | #include <mach/serial.h> |
31 | #include <linux/platform_data/mtd-davinci.h> | 35 | #include <mach/common.h> |
32 | #include <linux/platform_data/mmc-davinci.h> | ||
33 | #include <linux/platform_data/usb-davinci.h> | ||
34 | 36 | ||
35 | #include "davinci.h" | 37 | #include "davinci.h" |
36 | 38 | ||
@@ -375,6 +377,11 @@ static struct spi_board_info dm355_evm_spi_info[] __initconst = { | |||
375 | static __init void dm355_evm_init(void) | 377 | static __init void dm355_evm_init(void) |
376 | { | 378 | { |
377 | struct clk *aemif; | 379 | struct clk *aemif; |
380 | int ret; | ||
381 | |||
382 | ret = dm355_gpio_register(); | ||
383 | if (ret) | ||
384 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
378 | 385 | ||
379 | gpio_request(1, "dm9000"); | 386 | gpio_request(1, "dm9000"); |
380 | gpio_direction_input(1); | 387 | gpio_direction_input(1); |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 65a984c52df6..43bacbf15314 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -19,15 +19,16 @@ | |||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/eeprom.h> | 21 | #include <linux/spi/eeprom.h> |
22 | #include <linux/platform_data/i2c-davinci.h> | ||
23 | #include <linux/platform_data/mmc-davinci.h> | ||
24 | #include <linux/platform_data/mtd-davinci.h> | ||
25 | #include <linux/platform_data/usb-davinci.h> | ||
22 | 26 | ||
23 | #include <asm/mach-types.h> | 27 | #include <asm/mach-types.h> |
24 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
25 | 29 | ||
26 | #include <linux/platform_data/i2c-davinci.h> | 30 | #include <mach/common.h> |
27 | #include <mach/serial.h> | 31 | #include <mach/serial.h> |
28 | #include <linux/platform_data/mtd-davinci.h> | ||
29 | #include <linux/platform_data/mmc-davinci.h> | ||
30 | #include <linux/platform_data/usb-davinci.h> | ||
31 | 32 | ||
32 | #include "davinci.h" | 33 | #include "davinci.h" |
33 | 34 | ||
@@ -234,6 +235,11 @@ static struct spi_board_info dm355_leopard_spi_info[] __initconst = { | |||
234 | static __init void dm355_leopard_init(void) | 235 | static __init void dm355_leopard_init(void) |
235 | { | 236 | { |
236 | struct clk *aemif; | 237 | struct clk *aemif; |
238 | int ret; | ||
239 | |||
240 | ret = dm355_gpio_register(); | ||
241 | if (ret) | ||
242 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
237 | 243 | ||
238 | gpio_request(9, "dm9000"); | 244 | gpio_request(9, "dm9000"); |
239 | gpio_direction_input(9); | 245 | gpio_direction_input(9); |
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 4078ba93776b..f4a6c18912ea 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -743,6 +743,12 @@ static struct spi_board_info dm365_evm_spi_info[] __initconst = { | |||
743 | 743 | ||
744 | static __init void dm365_evm_init(void) | 744 | static __init void dm365_evm_init(void) |
745 | { | 745 | { |
746 | int ret; | ||
747 | |||
748 | ret = dm365_gpio_register(); | ||
749 | if (ret) | ||
750 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
751 | |||
746 | evm_init_i2c(); | 752 | evm_init_i2c(); |
747 | davinci_serial_init(dm365_serial_device); | 753 | davinci_serial_init(dm365_serial_device); |
748 | 754 | ||
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 40bb9b5b87e8..9cc32c283b8b 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -754,9 +754,14 @@ static int davinci_phy_fixup(struct phy_device *phydev) | |||
754 | 754 | ||
755 | static __init void davinci_evm_init(void) | 755 | static __init void davinci_evm_init(void) |
756 | { | 756 | { |
757 | int ret; | ||
757 | struct clk *aemif_clk; | 758 | struct clk *aemif_clk; |
758 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 759 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
759 | 760 | ||
761 | ret = dm644x_gpio_register(); | ||
762 | if (ret) | ||
763 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
764 | |||
760 | aemif_clk = clk_get(NULL, "aemif"); | 765 | aemif_clk = clk_get(NULL, "aemif"); |
761 | clk_prepare_enable(aemif_clk); | 766 | clk_prepare_enable(aemif_clk); |
762 | 767 | ||
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 2bc3651d56cc..44b20191a9fe 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -33,17 +33,19 @@ | |||
33 | #include <linux/mtd/partitions.h> | 33 | #include <linux/mtd/partitions.h> |
34 | #include <linux/clk.h> | 34 | #include <linux/clk.h> |
35 | #include <linux/export.h> | 35 | #include <linux/export.h> |
36 | #include <linux/platform_data/gpio-davinci.h> | ||
37 | #include <linux/platform_data/i2c-davinci.h> | ||
38 | #include <linux/platform_data/mtd-davinci.h> | ||
39 | #include <linux/platform_data/mtd-davinci-aemif.h> | ||
36 | 40 | ||
37 | #include <asm/mach-types.h> | 41 | #include <asm/mach-types.h> |
38 | #include <asm/mach/arch.h> | 42 | #include <asm/mach/arch.h> |
39 | 43 | ||
40 | #include <mach/common.h> | 44 | #include <mach/common.h> |
45 | #include <mach/irqs.h> | ||
41 | #include <mach/serial.h> | 46 | #include <mach/serial.h> |
42 | #include <linux/platform_data/i2c-davinci.h> | ||
43 | #include <linux/platform_data/mtd-davinci.h> | ||
44 | #include <mach/clock.h> | 47 | #include <mach/clock.h> |
45 | #include <mach/cdce949.h> | 48 | #include <mach/cdce949.h> |
46 | #include <linux/platform_data/mtd-davinci-aemif.h> | ||
47 | 49 | ||
48 | #include "davinci.h" | 50 | #include "davinci.h" |
49 | #include "clock.h" | 51 | #include "clock.h" |
@@ -786,8 +788,13 @@ static struct edma_rsv_info dm646x_edma_rsv[] = { | |||
786 | 788 | ||
787 | static __init void evm_init(void) | 789 | static __init void evm_init(void) |
788 | { | 790 | { |
791 | int ret; | ||
789 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 792 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
790 | 793 | ||
794 | ret = dm646x_gpio_register(); | ||
795 | if (ret) | ||
796 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
797 | |||
791 | evm_init_i2c(); | 798 | evm_init_i2c(); |
792 | davinci_serial_init(dm646x_serial_device); | 799 | davinci_serial_init(dm646x_serial_device); |
793 | dm646x_init_mcasp0(&dm646x_evm_snd_data[0]); | 800 | dm646x_init_mcasp0(&dm646x_evm_snd_data[0]); |
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 46f336fca803..bb680af98374 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -26,17 +26,18 @@ | |||
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/gpio.h> | 27 | #include <linux/gpio.h> |
28 | #include <linux/mtd/partitions.h> | 28 | #include <linux/mtd/partitions.h> |
29 | #include <linux/platform_data/gpio-davinci.h> | ||
30 | #include <linux/platform_data/i2c-davinci.h> | ||
31 | #include <linux/platform_data/mmc-davinci.h> | ||
32 | #include <linux/platform_data/mtd-davinci.h> | ||
33 | #include <linux/platform_data/usb-davinci.h> | ||
29 | 34 | ||
30 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
31 | #include <asm/mach/arch.h> | 36 | #include <asm/mach/arch.h> |
32 | 37 | ||
33 | #include <mach/common.h> | 38 | #include <mach/common.h> |
34 | #include <linux/platform_data/i2c-davinci.h> | ||
35 | #include <mach/serial.h> | 39 | #include <mach/serial.h> |
36 | #include <mach/mux.h> | 40 | #include <mach/mux.h> |
37 | #include <linux/platform_data/mtd-davinci.h> | ||
38 | #include <linux/platform_data/mmc-davinci.h> | ||
39 | #include <linux/platform_data/usb-davinci.h> | ||
40 | 41 | ||
41 | #include "davinci.h" | 42 | #include "davinci.h" |
42 | 43 | ||
@@ -169,9 +170,14 @@ static struct davinci_mmc_config davinci_ntosd2_mmc_config = { | |||
169 | 170 | ||
170 | static __init void davinci_ntosd2_init(void) | 171 | static __init void davinci_ntosd2_init(void) |
171 | { | 172 | { |
173 | int ret; | ||
172 | struct clk *aemif_clk; | 174 | struct clk *aemif_clk; |
173 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 175 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
174 | 176 | ||
177 | ret = dm644x_gpio_register(); | ||
178 | if (ret) | ||
179 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
180 | |||
175 | aemif_clk = clk_get(NULL, "aemif"); | 181 | aemif_clk = clk_get(NULL, "aemif"); |
176 | clk_prepare_enable(aemif_clk); | 182 | clk_prepare_enable(aemif_clk); |
177 | 183 | ||
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index e0de2da41b5c..2aac51d0e853 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -13,10 +13,12 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/console.h> | 14 | #include <linux/console.h> |
15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
16 | #include <linux/platform_data/gpio-davinci.h> | ||
16 | 17 | ||
17 | #include <asm/mach-types.h> | 18 | #include <asm/mach-types.h> |
18 | #include <asm/mach/arch.h> | 19 | #include <asm/mach/arch.h> |
19 | 20 | ||
21 | #include <mach/common.h> | ||
20 | #include <mach/cp_intc.h> | 22 | #include <mach/cp_intc.h> |
21 | #include <mach/da8xx.h> | 23 | #include <mach/da8xx.h> |
22 | #include <mach/mux.h> | 24 | #include <mach/mux.h> |
@@ -290,6 +292,10 @@ static __init void omapl138_hawk_init(void) | |||
290 | { | 292 | { |
291 | int ret; | 293 | int ret; |
292 | 294 | ||
295 | ret = da850_register_gpio(); | ||
296 | if (ret) | ||
297 | pr_warn("%s: GPIO init failed: %d\n", __func__, ret); | ||
298 | |||
293 | davinci_serial_init(da8xx_serial_device); | 299 | davinci_serial_init(da8xx_serial_device); |
294 | 300 | ||
295 | omapl138_hawk_config_emac(); | 301 | omapl138_hawk_config_emac(); |
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index d6c746e35ad9..0813b5167e05 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/gpio.h> | 11 | #include <linux/gpio.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/platform_data/gpio-davinci.h> | ||
14 | 15 | ||
15 | #include <asm/mach/map.h> | 16 | #include <asm/mach/map.h> |
16 | 17 | ||
@@ -20,7 +21,6 @@ | |||
20 | #include <mach/common.h> | 21 | #include <mach/common.h> |
21 | #include <mach/time.h> | 22 | #include <mach/time.h> |
22 | #include <mach/da8xx.h> | 23 | #include <mach/da8xx.h> |
23 | #include <mach/gpio-davinci.h> | ||
24 | 24 | ||
25 | #include "clock.h" | 25 | #include "clock.h" |
26 | #include "mux.h" | 26 | #include "mux.h" |
@@ -1151,6 +1151,16 @@ static struct davinci_id da830_ids[] = { | |||
1151 | }, | 1151 | }, |
1152 | }; | 1152 | }; |
1153 | 1153 | ||
1154 | static struct davinci_gpio_platform_data da830_gpio_platform_data = { | ||
1155 | .ngpio = 128, | ||
1156 | .intc_irq_num = DA830_N_CP_INTC_IRQ, | ||
1157 | }; | ||
1158 | |||
1159 | int __init da830_register_gpio(void) | ||
1160 | { | ||
1161 | return da8xx_register_gpio(&da830_gpio_platform_data); | ||
1162 | } | ||
1163 | |||
1154 | static struct davinci_timer_instance da830_timer_instance[2] = { | 1164 | static struct davinci_timer_instance da830_timer_instance[2] = { |
1155 | { | 1165 | { |
1156 | .base = DA8XX_TIMER64P0_BASE, | 1166 | .base = DA8XX_TIMER64P0_BASE, |
@@ -1196,10 +1206,6 @@ static struct davinci_soc_info davinci_soc_info_da830 = { | |||
1196 | .intc_irq_prios = da830_default_priorities, | 1206 | .intc_irq_prios = da830_default_priorities, |
1197 | .intc_irq_num = DA830_N_CP_INTC_IRQ, | 1207 | .intc_irq_num = DA830_N_CP_INTC_IRQ, |
1198 | .timer_info = &da830_timer_info, | 1208 | .timer_info = &da830_timer_info, |
1199 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
1200 | .gpio_base = DA8XX_GPIO_BASE, | ||
1201 | .gpio_num = 128, | ||
1202 | .gpio_irq = IRQ_DA8XX_GPIO0, | ||
1203 | .emac_pdata = &da8xx_emac_pdata, | 1209 | .emac_pdata = &da8xx_emac_pdata, |
1204 | }; | 1210 | }; |
1205 | 1211 | ||
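Here and in the SoC files below, the old gpio_* fields of davinci_soc_info are replaced by a davinci_gpio_platform_data handed to the new registration helpers. Judging from the fields this series fills in (ngpio, intc_irq_num, and gpio_unbanked on dm365), the structure declared in <linux/platform_data/gpio-davinci.h> is essentially the following; treat this as an abridged sketch rather than the verbatim header:

	struct davinci_gpio_platform_data {
		u32	ngpio;		/* number of GPIO lines provided by the SoC */
		u32	gpio_unbanked;	/* leading GPIOs with dedicated IRQ lines */
		u32	intc_irq_num;	/* IRQ count of the parent interrupt controller */
	};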
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index f56e5fbfa2fd..352984e1528a 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
19 | #include <linux/regulator/consumer.h> | 19 | #include <linux/regulator/consumer.h> |
20 | #include <linux/platform_data/gpio-davinci.h> | ||
20 | 21 | ||
21 | #include <asm/mach/map.h> | 22 | #include <asm/mach/map.h> |
22 | 23 | ||
@@ -28,7 +29,6 @@ | |||
28 | #include <mach/da8xx.h> | 29 | #include <mach/da8xx.h> |
29 | #include <mach/cpufreq.h> | 30 | #include <mach/cpufreq.h> |
30 | #include <mach/pm.h> | 31 | #include <mach/pm.h> |
31 | #include <mach/gpio-davinci.h> | ||
32 | 32 | ||
33 | #include "clock.h" | 33 | #include "clock.h" |
34 | #include "mux.h" | 34 | #include "mux.h" |
@@ -1281,6 +1281,16 @@ int __init da850_register_vpif_capture(struct vpif_capture_config | |||
1281 | return platform_device_register(&da850_vpif_capture_dev); | 1281 | return platform_device_register(&da850_vpif_capture_dev); |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | static struct davinci_gpio_platform_data da850_gpio_platform_data = { | ||
1285 | .ngpio = 144, | ||
1286 | .intc_irq_num = DA850_N_CP_INTC_IRQ, | ||
1287 | }; | ||
1288 | |||
1289 | int __init da850_register_gpio(void) | ||
1290 | { | ||
1291 | return da8xx_register_gpio(&da850_gpio_platform_data); | ||
1292 | } | ||
1293 | |||
1284 | static struct davinci_soc_info davinci_soc_info_da850 = { | 1294 | static struct davinci_soc_info davinci_soc_info_da850 = { |
1285 | .io_desc = da850_io_desc, | 1295 | .io_desc = da850_io_desc, |
1286 | .io_desc_num = ARRAY_SIZE(da850_io_desc), | 1296 | .io_desc_num = ARRAY_SIZE(da850_io_desc), |
@@ -1298,10 +1308,6 @@ static struct davinci_soc_info davinci_soc_info_da850 = { | |||
1298 | .intc_irq_prios = da850_default_priorities, | 1308 | .intc_irq_prios = da850_default_priorities, |
1299 | .intc_irq_num = DA850_N_CP_INTC_IRQ, | 1309 | .intc_irq_num = DA850_N_CP_INTC_IRQ, |
1300 | .timer_info = &da850_timer_info, | 1310 | .timer_info = &da850_timer_info, |
1301 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
1302 | .gpio_base = DA8XX_GPIO_BASE, | ||
1303 | .gpio_num = 144, | ||
1304 | .gpio_irq = IRQ_DA8XX_GPIO0, | ||
1305 | .emac_pdata = &da8xx_emac_pdata, | 1311 | .emac_pdata = &da8xx_emac_pdata, |
1306 | .sram_dma = DA8XX_SHARED_RAM_BASE, | 1312 | .sram_dma = DA8XX_SHARED_RAM_BASE, |
1307 | .sram_len = SZ_128K, | 1313 | .sram_len = SZ_128K, |
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 2ab5d577186f..2eebc4338802 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -53,6 +53,9 @@ extern void __iomem *davinci_sysmod_base; | |||
53 | #define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x)) | 53 | #define DAVINCI_SYSMOD_VIRT(x) (davinci_sysmod_base + (x)) |
54 | void davinci_map_sysmod(void); | 54 | void davinci_map_sysmod(void); |
55 | 55 | ||
56 | #define DAVINCI_GPIO_BASE 0x01C67000 | ||
57 | int davinci_gpio_register(struct resource *res, int size, void *pdata); | ||
58 | |||
56 | /* DM355 base addresses */ | 59 | /* DM355 base addresses */ |
57 | #define DM355_ASYNC_EMIF_CONTROL_BASE 0x01e10000 | 60 | #define DM355_ASYNC_EMIF_CONTROL_BASE 0x01e10000 |
58 | #define DM355_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 | 61 | #define DM355_ASYNC_EMIF_DATA_CE0_BASE 0x02000000 |
@@ -82,6 +85,7 @@ void dm355_init_spi0(unsigned chipselect_mask, | |||
82 | const struct spi_board_info *info, unsigned len); | 85 | const struct spi_board_info *info, unsigned len); |
83 | void dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata); | 86 | void dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata); |
84 | int dm355_init_video(struct vpfe_config *, struct vpbe_config *); | 87 | int dm355_init_video(struct vpfe_config *, struct vpbe_config *); |
88 | int dm355_gpio_register(void); | ||
85 | 89 | ||
86 | /* DM365 function declarations */ | 90 | /* DM365 function declarations */ |
87 | void dm365_init(void); | 91 | void dm365_init(void); |
@@ -92,11 +96,13 @@ void dm365_init_rtc(void); | |||
92 | void dm365_init_spi0(unsigned chipselect_mask, | 96 | void dm365_init_spi0(unsigned chipselect_mask, |
93 | const struct spi_board_info *info, unsigned len); | 97 | const struct spi_board_info *info, unsigned len); |
94 | int dm365_init_video(struct vpfe_config *, struct vpbe_config *); | 98 | int dm365_init_video(struct vpfe_config *, struct vpbe_config *); |
99 | int dm365_gpio_register(void); | ||
95 | 100 | ||
96 | /* DM644x function declarations */ | 101 | /* DM644x function declarations */ |
97 | void dm644x_init(void); | 102 | void dm644x_init(void); |
98 | void dm644x_init_asp(struct snd_platform_data *pdata); | 103 | void dm644x_init_asp(struct snd_platform_data *pdata); |
99 | int dm644x_init_video(struct vpfe_config *, struct vpbe_config *); | 104 | int dm644x_init_video(struct vpfe_config *, struct vpbe_config *); |
105 | int dm644x_gpio_register(void); | ||
100 | 106 | ||
101 | /* DM646x function declarations */ | 107 | /* DM646x function declarations */ |
102 | void dm646x_init(void); | 108 | void dm646x_init(void); |
@@ -106,6 +112,7 @@ int dm646x_init_edma(struct edma_rsv_info *rsv); | |||
106 | void dm646x_video_init(void); | 112 | void dm646x_video_init(void); |
107 | void dm646x_setup_vpif(struct vpif_display_config *, | 113 | void dm646x_setup_vpif(struct vpif_display_config *, |
108 | struct vpif_capture_config *); | 114 | struct vpif_capture_config *); |
115 | int dm646x_gpio_register(void); | ||
109 | 116 | ||
110 | extern struct platform_device dm365_serial_device[]; | 117 | extern struct platform_device dm365_serial_device[]; |
111 | extern struct platform_device dm355_serial_device[]; | 118 | extern struct platform_device dm355_serial_device[]; |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 2e473fefd71e..c46eccbbd512 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -665,6 +665,32 @@ int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata) | |||
665 | return platform_device_register(&da8xx_lcdc_device); | 665 | return platform_device_register(&da8xx_lcdc_device); |
666 | } | 666 | } |
667 | 667 | ||
668 | static struct resource da8xx_gpio_resources[] = { | ||
669 | { /* registers */ | ||
670 | .start = DA8XX_GPIO_BASE, | ||
671 | .end = DA8XX_GPIO_BASE + SZ_4K - 1, | ||
672 | .flags = IORESOURCE_MEM, | ||
673 | }, | ||
674 | { /* interrupt */ | ||
675 | .start = IRQ_DA8XX_GPIO0, | ||
676 | .end = IRQ_DA8XX_GPIO8, | ||
677 | .flags = IORESOURCE_IRQ, | ||
678 | }, | ||
679 | }; | ||
680 | |||
681 | static struct platform_device da8xx_gpio_device = { | ||
682 | .name = "davinci_gpio", | ||
683 | .id = -1, | ||
684 | .num_resources = ARRAY_SIZE(da8xx_gpio_resources), | ||
685 | .resource = da8xx_gpio_resources, | ||
686 | }; | ||
687 | |||
688 | int __init da8xx_register_gpio(void *pdata) | ||
689 | { | ||
690 | da8xx_gpio_device.dev.platform_data = pdata; | ||
691 | return platform_device_register(&da8xx_gpio_device); | ||
692 | } | ||
693 | |||
668 | static struct resource da8xx_mmcsd0_resources[] = { | 694 | static struct resource da8xx_mmcsd0_resources[] = { |
669 | { /* registers */ | 695 | { /* registers */ |
670 | .start = DA8XX_MMCSD0_BASE, | 696 | .start = DA8XX_MMCSD0_BASE, |
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 111573c0aad1..3996e98f52fb 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -318,6 +318,19 @@ static void davinci_init_wdt(void) | |||
318 | platform_device_register(&davinci_wdt_device); | 318 | platform_device_register(&davinci_wdt_device); |
319 | } | 319 | } |
320 | 320 | ||
321 | static struct platform_device davinci_gpio_device = { | ||
322 | .name = "davinci_gpio", | ||
323 | .id = -1, | ||
324 | }; | ||
325 | |||
326 | int davinci_gpio_register(struct resource *res, int size, void *pdata) | ||
327 | { | ||
328 | davinci_gpio_device.resource = res; | ||
329 | davinci_gpio_device.num_resources = size; | ||
330 | davinci_gpio_device.dev.platform_data = pdata; | ||
331 | return platform_device_register(&davinci_gpio_device); | ||
332 | } | ||
333 | |||
321 | /*-------------------------------------------------------------------------*/ | 334 | /*-------------------------------------------------------------------------*/ |
322 | 335 | ||
323 | /*-------------------------------------------------------------------------*/ | 336 | /*-------------------------------------------------------------------------*/ |
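The "davinci_gpio" platform device registered above (and its da8xx counterpart) is meant to be bound by the drivers/gpio/gpio-davinci.c driver, which consumes the MEM/IRQ resources and the platform data in the usual platform-bus way. A rough, hypothetical outline of that consumer side, not the actual driver source:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/gpio-davinci.h>

	static int davinci_gpio_probe(struct platform_device *pdev)
	{
		struct davinci_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
		struct resource *res;
		void __iomem *gpio_base;

		if (!pdata)
			return -EINVAL;

		/* resource 0: the GPIO register block (DAVINCI_GPIO_BASE / DA8XX_GPIO_BASE) */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		gpio_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(gpio_base))
			return PTR_ERR(gpio_base);

		/* resource 1: the bank interrupt range (e.g. IRQ_DM355_GPIOBNK0..6) */
		/* ... register a gpio_chip with pdata->ngpio lines and wire up bank IRQs ... */
		return 0;
	}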
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3eaa5f6b2160..ef9ff1fb6f52 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -13,8 +13,10 @@ | |||
13 | #include <linux/serial_8250.h> | 13 | #include <linux/serial_8250.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | |||
17 | #include <linux/spi/spi.h> | 16 | #include <linux/spi/spi.h> |
17 | #include <linux/platform_data/edma.h> | ||
18 | #include <linux/platform_data/gpio-davinci.h> | ||
19 | #include <linux/platform_data/spi-davinci.h> | ||
18 | 20 | ||
19 | #include <asm/mach/map.h> | 21 | #include <asm/mach/map.h> |
20 | 22 | ||
@@ -25,9 +27,6 @@ | |||
25 | #include <mach/time.h> | 27 | #include <mach/time.h> |
26 | #include <mach/serial.h> | 28 | #include <mach/serial.h> |
27 | #include <mach/common.h> | 29 | #include <mach/common.h> |
28 | #include <linux/platform_data/spi-davinci.h> | ||
29 | #include <mach/gpio-davinci.h> | ||
30 | #include <linux/platform_data/edma.h> | ||
31 | 30 | ||
32 | #include "davinci.h" | 31 | #include "davinci.h" |
33 | #include "clock.h" | 32 | #include "clock.h" |
@@ -886,6 +885,30 @@ static struct platform_device dm355_vpbe_dev = { | |||
886 | }, | 885 | }, |
887 | }; | 886 | }; |
888 | 887 | ||
888 | static struct resource dm355_gpio_resources[] = { | ||
889 | { /* registers */ | ||
890 | .start = DAVINCI_GPIO_BASE, | ||
891 | .end = DAVINCI_GPIO_BASE + SZ_4K - 1, | ||
892 | .flags = IORESOURCE_MEM, | ||
893 | }, | ||
894 | { /* interrupt */ | ||
895 | .start = IRQ_DM355_GPIOBNK0, | ||
896 | .end = IRQ_DM355_GPIOBNK6, | ||
897 | .flags = IORESOURCE_IRQ, | ||
898 | }, | ||
899 | }; | ||
900 | |||
901 | static struct davinci_gpio_platform_data dm355_gpio_platform_data = { | ||
902 | .ngpio = 104, | ||
903 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | ||
904 | }; | ||
905 | |||
906 | int __init dm355_gpio_register(void) | ||
907 | { | ||
908 | return davinci_gpio_register(dm355_gpio_resources, | ||
909 | sizeof(dm355_gpio_resources), | ||
910 | &dm355_gpio_platform_data); | ||
911 | } | ||
889 | /*----------------------------------------------------------------------*/ | 912 | /*----------------------------------------------------------------------*/ |
890 | 913 | ||
891 | static struct map_desc dm355_io_desc[] = { | 914 | static struct map_desc dm355_io_desc[] = { |
@@ -1005,10 +1028,6 @@ static struct davinci_soc_info davinci_soc_info_dm355 = { | |||
1005 | .intc_irq_prios = dm355_default_priorities, | 1028 | .intc_irq_prios = dm355_default_priorities, |
1006 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | 1029 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, |
1007 | .timer_info = &dm355_timer_info, | 1030 | .timer_info = &dm355_timer_info, |
1008 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
1009 | .gpio_base = DAVINCI_GPIO_BASE, | ||
1010 | .gpio_num = 104, | ||
1011 | .gpio_irq = IRQ_DM355_GPIOBNK0, | ||
1012 | .sram_dma = 0x00010000, | 1031 | .sram_dma = 0x00010000, |
1013 | .sram_len = SZ_32K, | 1032 | .sram_len = SZ_32K, |
1014 | }; | 1033 | }; |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index c29e324eb0bb..1511a0680f9a 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -19,6 +19,9 @@ | |||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
21 | #include <linux/platform_data/edma.h> | 21 | #include <linux/platform_data/edma.h> |
22 | #include <linux/platform_data/gpio-davinci.h> | ||
23 | #include <linux/platform_data/keyscan-davinci.h> | ||
24 | #include <linux/platform_data/spi-davinci.h> | ||
22 | 25 | ||
23 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
24 | 27 | ||
@@ -29,9 +32,6 @@ | |||
29 | #include <mach/time.h> | 32 | #include <mach/time.h> |
30 | #include <mach/serial.h> | 33 | #include <mach/serial.h> |
31 | #include <mach/common.h> | 34 | #include <mach/common.h> |
32 | #include <linux/platform_data/keyscan-davinci.h> | ||
33 | #include <linux/platform_data/spi-davinci.h> | ||
34 | #include <mach/gpio-davinci.h> | ||
35 | 35 | ||
36 | #include "davinci.h" | 36 | #include "davinci.h" |
37 | #include "clock.h" | 37 | #include "clock.h" |
@@ -698,6 +698,32 @@ void __init dm365_init_spi0(unsigned chipselect_mask, | |||
698 | platform_device_register(&dm365_spi0_device); | 698 | platform_device_register(&dm365_spi0_device); |
699 | } | 699 | } |
700 | 700 | ||
701 | static struct resource dm365_gpio_resources[] = { | ||
702 | { /* registers */ | ||
703 | .start = DAVINCI_GPIO_BASE, | ||
704 | .end = DAVINCI_GPIO_BASE + SZ_4K - 1, | ||
705 | .flags = IORESOURCE_MEM, | ||
706 | }, | ||
707 | { /* interrupt */ | ||
708 | .start = IRQ_DM365_GPIO0, | ||
709 | .end = IRQ_DM365_GPIO7, | ||
710 | .flags = IORESOURCE_IRQ, | ||
711 | }, | ||
712 | }; | ||
713 | |||
714 | static struct davinci_gpio_platform_data dm365_gpio_platform_data = { | ||
715 | .ngpio = 104, | ||
716 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | ||
717 | .gpio_unbanked = 8, | ||
718 | }; | ||
719 | |||
720 | int __init dm365_gpio_register(void) | ||
721 | { | ||
722 | return davinci_gpio_register(dm365_gpio_resources, | ||
723 | sizeof(dm365_gpio_resources), | ||
724 | &dm365_gpio_platform_data); | ||
725 | } | ||
726 | |||
701 | static struct emac_platform_data dm365_emac_pdata = { | 727 | static struct emac_platform_data dm365_emac_pdata = { |
702 | .ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET, | 728 | .ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET, |
703 | .ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET, | 729 | .ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET, |
@@ -1105,11 +1131,6 @@ static struct davinci_soc_info davinci_soc_info_dm365 = { | |||
1105 | .intc_irq_prios = dm365_default_priorities, | 1131 | .intc_irq_prios = dm365_default_priorities, |
1106 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | 1132 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, |
1107 | .timer_info = &dm365_timer_info, | 1133 | .timer_info = &dm365_timer_info, |
1108 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
1109 | .gpio_base = DAVINCI_GPIO_BASE, | ||
1110 | .gpio_num = 104, | ||
1111 | .gpio_irq = IRQ_DM365_GPIO0, | ||
1112 | .gpio_unbanked = 8, /* really 16 ... skip muxed GPIOs */ | ||
1113 | .emac_pdata = &dm365_emac_pdata, | 1134 | .emac_pdata = &dm365_emac_pdata, |
1114 | .sram_dma = 0x00010000, | 1135 | .sram_dma = 0x00010000, |
1115 | .sram_len = SZ_32K, | 1136 | .sram_len = SZ_32K, |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 4f74682293d6..143a3217e8ef 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/serial_8250.h> | 13 | #include <linux/serial_8250.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/platform_data/edma.h> | 15 | #include <linux/platform_data/edma.h> |
16 | #include <linux/platform_data/gpio-davinci.h> | ||
16 | 17 | ||
17 | #include <asm/mach/map.h> | 18 | #include <asm/mach/map.h> |
18 | 19 | ||
@@ -23,7 +24,6 @@ | |||
23 | #include <mach/time.h> | 24 | #include <mach/time.h> |
24 | #include <mach/serial.h> | 25 | #include <mach/serial.h> |
25 | #include <mach/common.h> | 26 | #include <mach/common.h> |
26 | #include <mach/gpio-davinci.h> | ||
27 | 27 | ||
28 | #include "davinci.h" | 28 | #include "davinci.h" |
29 | #include "clock.h" | 29 | #include "clock.h" |
@@ -771,6 +771,30 @@ static struct platform_device dm644x_vpbe_dev = { | |||
771 | }, | 771 | }, |
772 | }; | 772 | }; |
773 | 773 | ||
774 | static struct resource dm644_gpio_resources[] = { | ||
775 | { /* registers */ | ||
776 | .start = DAVINCI_GPIO_BASE, | ||
777 | .end = DAVINCI_GPIO_BASE + SZ_4K - 1, | ||
778 | .flags = IORESOURCE_MEM, | ||
779 | }, | ||
780 | { /* interrupt */ | ||
781 | .start = IRQ_GPIOBNK0, | ||
782 | .end = IRQ_GPIOBNK4, | ||
783 | .flags = IORESOURCE_IRQ, | ||
784 | }, | ||
785 | }; | ||
786 | |||
787 | static struct davinci_gpio_platform_data dm644_gpio_platform_data = { | ||
788 | .ngpio = 71, | ||
789 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | ||
790 | }; | ||
791 | |||
792 | int __init dm644x_gpio_register(void) | ||
793 | { | ||
794 | return davinci_gpio_register(dm644_gpio_resources, | ||
795 | sizeof(dm644_gpio_resources), | ||
796 | &dm644_gpio_platform_data); | ||
797 | } | ||
774 | /*----------------------------------------------------------------------*/ | 798 | /*----------------------------------------------------------------------*/ |
775 | 799 | ||
776 | static struct map_desc dm644x_io_desc[] = { | 800 | static struct map_desc dm644x_io_desc[] = { |
@@ -897,10 +921,6 @@ static struct davinci_soc_info davinci_soc_info_dm644x = { | |||
897 | .intc_irq_prios = dm644x_default_priorities, | 921 | .intc_irq_prios = dm644x_default_priorities, |
898 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | 922 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, |
899 | .timer_info = &dm644x_timer_info, | 923 | .timer_info = &dm644x_timer_info, |
900 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
901 | .gpio_base = DAVINCI_GPIO_BASE, | ||
902 | .gpio_num = 71, | ||
903 | .gpio_irq = IRQ_GPIOBNK0, | ||
904 | .emac_pdata = &dm644x_emac_pdata, | 924 | .emac_pdata = &dm644x_emac_pdata, |
905 | .sram_dma = 0x00008000, | 925 | .sram_dma = 0x00008000, |
906 | .sram_len = SZ_16K, | 926 | .sram_len = SZ_16K, |
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 68f8d1f1aca1..2a73f299c1d0 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/serial_8250.h> | 14 | #include <linux/serial_8250.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/platform_data/edma.h> | 16 | #include <linux/platform_data/edma.h> |
17 | #include <linux/platform_data/gpio-davinci.h> | ||
17 | 18 | ||
18 | #include <asm/mach/map.h> | 19 | #include <asm/mach/map.h> |
19 | 20 | ||
@@ -24,7 +25,6 @@ | |||
24 | #include <mach/time.h> | 25 | #include <mach/time.h> |
25 | #include <mach/serial.h> | 26 | #include <mach/serial.h> |
26 | #include <mach/common.h> | 27 | #include <mach/common.h> |
27 | #include <mach/gpio-davinci.h> | ||
28 | 28 | ||
29 | #include "davinci.h" | 29 | #include "davinci.h" |
30 | #include "clock.h" | 30 | #include "clock.h" |
@@ -748,6 +748,30 @@ static struct platform_device vpif_capture_dev = { | |||
748 | .num_resources = ARRAY_SIZE(vpif_capture_resource), | 748 | .num_resources = ARRAY_SIZE(vpif_capture_resource), |
749 | }; | 749 | }; |
750 | 750 | ||
751 | static struct resource dm646x_gpio_resources[] = { | ||
752 | { /* registers */ | ||
753 | .start = DAVINCI_GPIO_BASE, | ||
754 | .end = DAVINCI_GPIO_BASE + SZ_4K - 1, | ||
755 | .flags = IORESOURCE_MEM, | ||
756 | }, | ||
757 | { /* interrupt */ | ||
758 | .start = IRQ_DM646X_GPIOBNK0, | ||
759 | .end = IRQ_DM646X_GPIOBNK2, | ||
760 | .flags = IORESOURCE_IRQ, | ||
761 | }, | ||
762 | }; | ||
763 | |||
764 | static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { | ||
765 | .ngpio = 43, | ||
766 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | ||
767 | }; | ||
768 | |||
769 | int __init dm646x_gpio_register(void) | ||
770 | { | ||
771 | return davinci_gpio_register(dm646x_gpio_resources, | ||
772 | sizeof(dm646x_gpio_resources), | ||
773 | &dm646x_gpio_platform_data); | ||
774 | } | ||
751 | /*----------------------------------------------------------------------*/ | 775 | /*----------------------------------------------------------------------*/ |
752 | 776 | ||
753 | static struct map_desc dm646x_io_desc[] = { | 777 | static struct map_desc dm646x_io_desc[] = { |
@@ -874,10 +898,6 @@ static struct davinci_soc_info davinci_soc_info_dm646x = { | |||
874 | .intc_irq_prios = dm646x_default_priorities, | 898 | .intc_irq_prios = dm646x_default_priorities, |
875 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, | 899 | .intc_irq_num = DAVINCI_N_AINTC_IRQ, |
876 | .timer_info = &dm646x_timer_info, | 900 | .timer_info = &dm646x_timer_info, |
877 | .gpio_type = GPIO_TYPE_DAVINCI, | ||
878 | .gpio_base = DAVINCI_GPIO_BASE, | ||
879 | .gpio_num = 43, /* Only 33 usable */ | ||
880 | .gpio_irq = IRQ_DM646X_GPIOBNK0, | ||
881 | .emac_pdata = &dm646x_emac_pdata, | 901 | .emac_pdata = &dm646x_emac_pdata, |
882 | .sram_dma = 0x10010000, | 902 | .sram_dma = 0x10010000, |
883 | .sram_len = SZ_32K, | 903 | .sram_len = SZ_32K, |
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index aae53072c0eb..39e58b48e826 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -97,6 +97,7 @@ int da8xx_register_mmcsd0(struct davinci_mmc_config *config); | |||
97 | int da850_register_mmcsd1(struct davinci_mmc_config *config); | 97 | int da850_register_mmcsd1(struct davinci_mmc_config *config); |
98 | void da8xx_register_mcasp(int id, struct snd_platform_data *pdata); | 98 | void da8xx_register_mcasp(int id, struct snd_platform_data *pdata); |
99 | int da8xx_register_rtc(void); | 99 | int da8xx_register_rtc(void); |
100 | int da8xx_register_gpio(void *pdata); | ||
100 | int da850_register_cpufreq(char *async_clk); | 101 | int da850_register_cpufreq(char *async_clk); |
101 | int da8xx_register_cpuidle(void); | 102 | int da8xx_register_cpuidle(void); |
102 | void __iomem *da8xx_get_mem_ctlr(void); | 103 | void __iomem *da8xx_get_mem_ctlr(void); |
@@ -110,6 +111,8 @@ int da850_register_vpif_capture | |||
110 | void da8xx_restart(enum reboot_mode mode, const char *cmd); | 111 | void da8xx_restart(enum reboot_mode mode, const char *cmd); |
111 | void da8xx_rproc_reserve_cma(void); | 112 | void da8xx_rproc_reserve_cma(void); |
112 | int da8xx_register_rproc(void); | 113 | int da8xx_register_rproc(void); |
114 | int da850_register_gpio(void); | ||
115 | int da830_register_gpio(void); | ||
113 | 116 | ||
114 | extern struct platform_device da8xx_serial_device[]; | 117 | extern struct platform_device da8xx_serial_device[]; |
115 | extern struct emac_platform_data da8xx_emac_pdata; | 118 | extern struct emac_platform_data da8xx_emac_pdata; |
diff --git a/arch/arm/mach-davinci/include/mach/gpio-davinci.h b/arch/arm/mach-davinci/include/mach/gpio-davinci.h
deleted file mode 100644
index 1fdd1fd35448..000000000000
--- a/arch/arm/mach-davinci/include/mach/gpio-davinci.h
+++ /dev/null
@@ -1,91 +0,0 @@ | |||
1 | /* | ||
2 | * TI DaVinci GPIO Support | ||
3 | * | ||
4 | * Copyright (c) 2006 David Brownell | ||
5 | * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef __DAVINCI_DAVINCI_GPIO_H | ||
14 | #define __DAVINCI_DAVINCI_GPIO_H | ||
15 | |||
16 | #include <linux/io.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | |||
19 | #include <asm-generic/gpio.h> | ||
20 | |||
21 | #include <mach/irqs.h> | ||
22 | #include <mach/common.h> | ||
23 | |||
24 | #define DAVINCI_GPIO_BASE 0x01C67000 | ||
25 | |||
26 | enum davinci_gpio_type { | ||
27 | GPIO_TYPE_DAVINCI = 0, | ||
28 | GPIO_TYPE_TNETV107X, | ||
29 | }; | ||
30 | |||
31 | /* | ||
32 | * basic gpio routines | ||
33 | * | ||
34 | * board-specific init should be done by arch/.../.../board-XXX.c (maybe | ||
35 | * initializing banks together) rather than boot loaders; kexec() won't | ||
36 | * go through boot loaders. | ||
37 | * | ||
38 | * the gpio clock will be turned on when gpios are used, and you may also | ||
39 | * need to pay attention to PINMUX registers to be sure those pins are | ||
40 | * used as gpios, not with other peripherals. | ||
41 | * | ||
42 | * On-chip GPIOs are numbered 0..(DAVINCI_N_GPIO-1). For documentation, | ||
43 | * and maybe for later updates, code may write GPIO(N). These may be | ||
44 | * all 1.8V signals, all 3.3V ones, or a mix of the two. A given chip | ||
45 | * may not support all the GPIOs in that range. | ||
46 | * | ||
47 | * GPIOs can also be on external chips, numbered after the ones built-in | ||
48 | * to the DaVinci chip. For now, they won't be usable as IRQ sources. | ||
49 | */ | ||
50 | #define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */ | ||
51 | |||
52 | /* Convert GPIO signal to GPIO pin number */ | ||
53 | #define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) | ||
54 | |||
55 | struct davinci_gpio_controller { | ||
56 | struct gpio_chip chip; | ||
57 | int irq_base; | ||
58 | spinlock_t lock; | ||
59 | void __iomem *regs; | ||
60 | void __iomem *set_data; | ||
61 | void __iomem *clr_data; | ||
62 | void __iomem *in_data; | ||
63 | }; | ||
64 | |||
65 | /* The __gpio_to_controller() and __gpio_mask() functions inline to constants | ||
66 | * with constant parameters; or in outlined code they execute at runtime. | ||
67 | * | ||
68 | * You'd access the controller directly when reading or writing more than | ||
69 | * one gpio value at a time, and to support wired logic where the value | ||
70 | * being driven by the cpu need not match the value read back. | ||
71 | * | ||
72 | * These are NOT part of the cross-platform GPIO interface | ||
73 | */ | ||
74 | static inline struct davinci_gpio_controller * | ||
75 | __gpio_to_controller(unsigned gpio) | ||
76 | { | ||
77 | struct davinci_gpio_controller *ctlrs = davinci_soc_info.gpio_ctlrs; | ||
78 | int index = gpio / 32; | ||
79 | |||
80 | if (!ctlrs || index >= davinci_soc_info.gpio_ctlrs_num) | ||
81 | return NULL; | ||
82 | |||
83 | return ctlrs + index; | ||
84 | } | ||
85 | |||
86 | static inline u32 __gpio_mask(unsigned gpio) | ||
87 | { | ||
88 | return 1 << (gpio % 32); | ||
89 | } | ||
90 | |||
91 | #endif /* __DAVINCI_DAVINCI_GPIO_H */ | ||
diff --git a/arch/arm/mach-davinci/include/mach/gpio.h b/arch/arm/mach-davinci/include/mach/gpio.h
deleted file mode 100644
index 960e9de47e1e..000000000000
--- a/arch/arm/mach-davinci/include/mach/gpio.h
+++ /dev/null
@@ -1,88 +0,0 @@ | |||
1 | /* | ||
2 | * TI DaVinci GPIO Support | ||
3 | * | ||
4 | * Copyright (c) 2006 David Brownell | ||
5 | * Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef __DAVINCI_GPIO_H | ||
14 | #define __DAVINCI_GPIO_H | ||
15 | |||
16 | #include <asm-generic/gpio.h> | ||
17 | |||
18 | #define __ARM_GPIOLIB_COMPLEX | ||
19 | |||
20 | /* The inline versions use the static inlines in the driver header */ | ||
21 | #include "gpio-davinci.h" | ||
22 | |||
23 | /* | ||
24 | * The get/set/clear functions will inline when called with constant | ||
25 | * parameters referencing built-in GPIOs, for low-overhead bitbanging. | ||
26 | * | ||
27 | * gpio_set_value() will inline only on traditional Davinci style controllers | ||
28 | * with distinct set/clear registers. | ||
29 | * | ||
30 | * Otherwise, calls with variable parameters or referencing external | ||
31 | * GPIOs (e.g. on GPIO expander chips) use outlined functions. | ||
32 | */ | ||
33 | static inline void gpio_set_value(unsigned gpio, int value) | ||
34 | { | ||
35 | if (__builtin_constant_p(value) && gpio < davinci_soc_info.gpio_num) { | ||
36 | struct davinci_gpio_controller *ctlr; | ||
37 | u32 mask; | ||
38 | |||
39 | ctlr = __gpio_to_controller(gpio); | ||
40 | |||
41 | if (ctlr->set_data != ctlr->clr_data) { | ||
42 | mask = __gpio_mask(gpio); | ||
43 | if (value) | ||
44 | __raw_writel(mask, ctlr->set_data); | ||
45 | else | ||
46 | __raw_writel(mask, ctlr->clr_data); | ||
47 | return; | ||
48 | } | ||
49 | } | ||
50 | |||
51 | __gpio_set_value(gpio, value); | ||
52 | } | ||
53 | |||
54 | /* Returns zero or nonzero; works for gpios configured as inputs OR | ||
55 | * as outputs, at least for built-in GPIOs. | ||
56 | * | ||
57 | * NOTE: for built-in GPIOs, changes in reported values are synchronized | ||
58 | * to the GPIO clock. This is easily seen after calling gpio_set_value() | ||
59 | * and then immediately gpio_get_value(), where the gpio_get_value() will | ||
60 | * return the old value until the GPIO clock ticks and the new value gets | ||
61 | * latched. | ||
62 | */ | ||
63 | static inline int gpio_get_value(unsigned gpio) | ||
64 | { | ||
65 | struct davinci_gpio_controller *ctlr; | ||
66 | |||
67 | if (!__builtin_constant_p(gpio) || gpio >= davinci_soc_info.gpio_num) | ||
68 | return __gpio_get_value(gpio); | ||
69 | |||
70 | ctlr = __gpio_to_controller(gpio); | ||
71 | return __gpio_mask(gpio) & __raw_readl(ctlr->in_data); | ||
72 | } | ||
73 | |||
74 | static inline int gpio_cansleep(unsigned gpio) | ||
75 | { | ||
76 | if (__builtin_constant_p(gpio) && gpio < davinci_soc_info.gpio_num) | ||
77 | return 0; | ||
78 | else | ||
79 | return __gpio_cansleep(gpio); | ||
80 | } | ||
81 | |||
82 | static inline int irq_to_gpio(unsigned irq) | ||
83 | { | ||
84 | /* don't support the reverse mapping */ | ||
85 | return -ENOSYS; | ||
86 | } | ||
87 | |||
88 | #endif /* __DAVINCI_GPIO_H */ | ||
diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c index ddb86631f16a..49fa9abd09da 100644 --- a/arch/arm/mach-dove/board-dt.c +++ b/arch/arm/mach-dove/board-dt.c | |||
@@ -19,35 +19,6 @@ | |||
19 | #include <plat/common.h> | 19 | #include <plat/common.h> |
20 | #include "common.h" | 20 | #include "common.h" |
21 | 21 | ||
22 | /* | ||
23 | * There are still devices that doesn't even know about DT, | ||
24 | * get clock gates here and add a clock lookup. | ||
25 | */ | ||
26 | static void __init dove_legacy_clk_init(void) | ||
27 | { | ||
28 | struct device_node *np = of_find_compatible_node(NULL, NULL, | ||
29 | "marvell,dove-gating-clock"); | ||
30 | struct of_phandle_args clkspec; | ||
31 | |||
32 | clkspec.np = np; | ||
33 | clkspec.args_count = 1; | ||
34 | |||
35 | clkspec.args[0] = CLOCK_GATING_BIT_PCIE0; | ||
36 | orion_clkdev_add("0", "pcie", | ||
37 | of_clk_get_from_provider(&clkspec)); | ||
38 | |||
39 | clkspec.args[0] = CLOCK_GATING_BIT_PCIE1; | ||
40 | orion_clkdev_add("1", "pcie", | ||
41 | of_clk_get_from_provider(&clkspec)); | ||
42 | } | ||
43 | |||
44 | static void __init dove_dt_init_early(void) | ||
45 | { | ||
46 | mvebu_mbus_init("marvell,dove-mbus", | ||
47 | BRIDGE_WINS_BASE, BRIDGE_WINS_SZ, | ||
48 | DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ); | ||
49 | } | ||
50 | |||
51 | static void __init dove_dt_init(void) | 22 | static void __init dove_dt_init(void) |
52 | { | 23 | { |
53 | pr_info("Dove 88AP510 SoC\n"); | 24 | pr_info("Dove 88AP510 SoC\n"); |
@@ -55,14 +26,7 @@ static void __init dove_dt_init(void) | |||
55 | #ifdef CONFIG_CACHE_TAUROS2 | 26 | #ifdef CONFIG_CACHE_TAUROS2 |
56 | tauros2_init(0); | 27 | tauros2_init(0); |
57 | #endif | 28 | #endif |
58 | dove_setup_cpu_wins(); | 29 | BUG_ON(mvebu_mbus_dt_init()); |
59 | |||
60 | /* Setup clocks for legacy devices */ | ||
61 | dove_legacy_clk_init(); | ||
62 | |||
63 | /* Internal devices not ported to DT yet */ | ||
64 | dove_pcie_init(1, 1); | ||
65 | |||
66 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 30 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
67 | } | 31 | } |
68 | 32 | ||
@@ -73,7 +37,6 @@ static const char * const dove_dt_board_compat[] = { | |||
73 | 37 | ||
74 | DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)") | 38 | DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)") |
75 | .map_io = dove_map_io, | 39 | .map_io = dove_map_io, |
76 | .init_early = dove_dt_init_early, | ||
77 | .init_machine = dove_dt_init, | 40 | .init_machine = dove_dt_init, |
78 | .restart = dove_restart, | 41 | .restart = dove_restart, |
79 | .dt_compat = dove_dt_board_compat, | 42 | .dt_compat = dove_dt_board_compat, |
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index dba2173e70f3..8f1d327e0cd1 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig | |||
@@ -28,6 +28,7 @@ config CPU_S3C2410 | |||
28 | select CPU_ARM920T | 28 | select CPU_ARM920T |
29 | select CPU_LLSERIAL_S3C2410 | 29 | select CPU_LLSERIAL_S3C2410 |
30 | select S3C2410_CLOCK | 30 | select S3C2410_CLOCK |
31 | select S3C2410_DMA if S3C24XX_DMA | ||
31 | select ARM_S3C2410_CPUFREQ if ARM_S3C24XX_CPUFREQ | 32 | select ARM_S3C2410_CPUFREQ if ARM_S3C24XX_CPUFREQ |
32 | select S3C2410_PM if PM | 33 | select S3C2410_PM if PM |
33 | select SAMSUNG_WDT_RESET | 34 | select SAMSUNG_WDT_RESET |
@@ -70,6 +71,7 @@ config CPU_S3C2442 | |||
70 | select CPU_ARM920T | 71 | select CPU_ARM920T |
71 | select CPU_LLSERIAL_S3C2440 | 72 | select CPU_LLSERIAL_S3C2440 |
72 | select S3C2410_CLOCK | 73 | select S3C2410_CLOCK |
74 | select S3C2410_DMA if S3C24XX_DMA | ||
73 | select S3C2410_PM if PM | 75 | select S3C2410_PM if PM |
74 | help | 76 | help |
75 | Support for S3C2442 Samsung Mobile CPU based systems. | 77 | Support for S3C2442 Samsung Mobile CPU based systems. |
@@ -148,7 +150,6 @@ config S3C2410_DMA_DEBUG | |||
148 | config S3C2410_DMA | 150 | config S3C2410_DMA |
149 | bool | 151 | bool |
150 | depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442) | 152 | depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442) |
151 | default y if CPU_S3C2410 || CPU_S3C2442 | ||
152 | help | 153 | help |
153 | DMA device selection for S3C2410 and compatible CPUs | 154 | DMA device selection for S3C2410 and compatible CPUs |
154 | 155 | ||
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2412.c b/arch/arm/mach-s3c24xx/clock-s3c2412.c index d8f253f2b486..11b3b28457bb 100644 --- a/arch/arm/mach-s3c24xx/clock-s3c2412.c +++ b/arch/arm/mach-s3c24xx/clock-s3c2412.c | |||
@@ -484,22 +484,22 @@ static struct clk init_clocks_disable[] = { | |||
484 | 484 | ||
485 | static struct clk init_clocks[] = { | 485 | static struct clk init_clocks[] = { |
486 | { | 486 | { |
487 | .name = "dma", | 487 | .name = "dma.0", |
488 | .parent = &clk_h, | 488 | .parent = &clk_h, |
489 | .enable = s3c2412_clkcon_enable, | 489 | .enable = s3c2412_clkcon_enable, |
490 | .ctrlbit = S3C2412_CLKCON_DMA0, | 490 | .ctrlbit = S3C2412_CLKCON_DMA0, |
491 | }, { | 491 | }, { |
492 | .name = "dma", | 492 | .name = "dma.1", |
493 | .parent = &clk_h, | 493 | .parent = &clk_h, |
494 | .enable = s3c2412_clkcon_enable, | 494 | .enable = s3c2412_clkcon_enable, |
495 | .ctrlbit = S3C2412_CLKCON_DMA1, | 495 | .ctrlbit = S3C2412_CLKCON_DMA1, |
496 | }, { | 496 | }, { |
497 | .name = "dma", | 497 | .name = "dma.2", |
498 | .parent = &clk_h, | 498 | .parent = &clk_h, |
499 | .enable = s3c2412_clkcon_enable, | 499 | .enable = s3c2412_clkcon_enable, |
500 | .ctrlbit = S3C2412_CLKCON_DMA2, | 500 | .ctrlbit = S3C2412_CLKCON_DMA2, |
501 | }, { | 501 | }, { |
502 | .name = "dma", | 502 | .name = "dma.3", |
503 | .parent = &clk_h, | 503 | .parent = &clk_h, |
504 | .enable = s3c2412_clkcon_enable, | 504 | .enable = s3c2412_clkcon_enable, |
505 | .ctrlbit = S3C2412_CLKCON_DMA3, | 505 | .ctrlbit = S3C2412_CLKCON_DMA3, |
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c index f6b9f2ef01bd..65d3eef73090 100644 --- a/arch/arm/mach-s3c24xx/common-s3c2443.c +++ b/arch/arm/mach-s3c24xx/common-s3c2443.c | |||
@@ -438,32 +438,32 @@ static struct clk init_clocks_off[] = { | |||
438 | 438 | ||
439 | static struct clk init_clocks[] = { | 439 | static struct clk init_clocks[] = { |
440 | { | 440 | { |
441 | .name = "dma", | 441 | .name = "dma.0", |
442 | .parent = &clk_h, | 442 | .parent = &clk_h, |
443 | .enable = s3c2443_clkcon_enable_h, | 443 | .enable = s3c2443_clkcon_enable_h, |
444 | .ctrlbit = S3C2443_HCLKCON_DMA0, | 444 | .ctrlbit = S3C2443_HCLKCON_DMA0, |
445 | }, { | 445 | }, { |
446 | .name = "dma", | 446 | .name = "dma.1", |
447 | .parent = &clk_h, | 447 | .parent = &clk_h, |
448 | .enable = s3c2443_clkcon_enable_h, | 448 | .enable = s3c2443_clkcon_enable_h, |
449 | .ctrlbit = S3C2443_HCLKCON_DMA1, | 449 | .ctrlbit = S3C2443_HCLKCON_DMA1, |
450 | }, { | 450 | }, { |
451 | .name = "dma", | 451 | .name = "dma.2", |
452 | .parent = &clk_h, | 452 | .parent = &clk_h, |
453 | .enable = s3c2443_clkcon_enable_h, | 453 | .enable = s3c2443_clkcon_enable_h, |
454 | .ctrlbit = S3C2443_HCLKCON_DMA2, | 454 | .ctrlbit = S3C2443_HCLKCON_DMA2, |
455 | }, { | 455 | }, { |
456 | .name = "dma", | 456 | .name = "dma.3", |
457 | .parent = &clk_h, | 457 | .parent = &clk_h, |
458 | .enable = s3c2443_clkcon_enable_h, | 458 | .enable = s3c2443_clkcon_enable_h, |
459 | .ctrlbit = S3C2443_HCLKCON_DMA3, | 459 | .ctrlbit = S3C2443_HCLKCON_DMA3, |
460 | }, { | 460 | }, { |
461 | .name = "dma", | 461 | .name = "dma.4", |
462 | .parent = &clk_h, | 462 | .parent = &clk_h, |
463 | .enable = s3c2443_clkcon_enable_h, | 463 | .enable = s3c2443_clkcon_enable_h, |
464 | .ctrlbit = S3C2443_HCLKCON_DMA4, | 464 | .ctrlbit = S3C2443_HCLKCON_DMA4, |
465 | }, { | 465 | }, { |
466 | .name = "dma", | 466 | .name = "dma.5", |
467 | .parent = &clk_h, | 467 | .parent = &clk_h, |
468 | .enable = s3c2443_clkcon_enable_h, | 468 | .enable = s3c2443_clkcon_enable_h, |
469 | .ctrlbit = S3C2443_HCLKCON_DMA5, | 469 | .ctrlbit = S3C2443_HCLKCON_DMA5, |
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c index 457261c98433..4adaa4b43ffe 100644 --- a/arch/arm/mach-s3c24xx/common.c +++ b/arch/arm/mach-s3c24xx/common.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/platform_device.h> | 31 | #include <linux/platform_device.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/io.h> | 33 | #include <linux/io.h> |
34 | #include <linux/platform_data/dma-s3c24xx.h> | ||
34 | 35 | ||
35 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
36 | #include <mach/regs-clock.h> | 37 | #include <mach/regs-clock.h> |
@@ -44,6 +45,7 @@ | |||
44 | 45 | ||
45 | #include <mach/regs-gpio.h> | 46 | #include <mach/regs-gpio.h> |
46 | #include <plat/regs-serial.h> | 47 | #include <plat/regs-serial.h> |
48 | #include <mach/dma.h> | ||
47 | 49 | ||
48 | #include <plat/cpu.h> | 50 | #include <plat/cpu.h> |
49 | #include <plat/devs.h> | 51 | #include <plat/devs.h> |
@@ -329,3 +331,207 @@ void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk, | |||
329 | clk_p.rate = pclk; | 331 | clk_p.rate = pclk; |
330 | clk_f.rate = fclk; | 332 | clk_f.rate = fclk; |
331 | } | 333 | } |
334 | |||
335 | #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2412) || \ | ||
336 | defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2442) | ||
337 | static struct resource s3c2410_dma_resource[] = { | ||
338 | [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA), | ||
339 | [1] = DEFINE_RES_IRQ(IRQ_DMA0), | ||
340 | [2] = DEFINE_RES_IRQ(IRQ_DMA1), | ||
341 | [3] = DEFINE_RES_IRQ(IRQ_DMA2), | ||
342 | [4] = DEFINE_RES_IRQ(IRQ_DMA3), | ||
343 | }; | ||
344 | #endif | ||
345 | |||
346 | #if defined(CONFIG_CPU_S3C2410) || defined(CONFIG_CPU_S3C2442) | ||
347 | static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = { | ||
348 | [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), }, | ||
349 | [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), }, | ||
350 | [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) | | ||
351 | S3C24XX_DMA_CHANREQ(2, 2) | | ||
352 | S3C24XX_DMA_CHANREQ(1, 3), | ||
353 | }, | ||
354 | [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), }, | ||
355 | [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), }, | ||
356 | [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), }, | ||
357 | [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), }, | ||
358 | [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), }, | ||
359 | [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) | | ||
360 | S3C24XX_DMA_CHANREQ(3, 2) | | ||
361 | S3C24XX_DMA_CHANREQ(3, 3), | ||
362 | }, | ||
363 | [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) | | ||
364 | S3C24XX_DMA_CHANREQ(1, 2), | ||
365 | }, | ||
366 | [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 2), }, | ||
367 | [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), }, | ||
368 | [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), }, | ||
369 | [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), }, | ||
370 | [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, | ||
371 | }; | ||
372 | |||
373 | static struct s3c24xx_dma_platdata s3c2410_dma_platdata = { | ||
374 | .num_phy_channels = 4, | ||
375 | .channels = s3c2410_dma_channels, | ||
376 | .num_channels = DMACH_MAX, | ||
377 | }; | ||
378 | |||
379 | struct platform_device s3c2410_device_dma = { | ||
380 | .name = "s3c2410-dma", | ||
381 | .id = 0, | ||
382 | .num_resources = ARRAY_SIZE(s3c2410_dma_resource), | ||
383 | .resource = s3c2410_dma_resource, | ||
384 | .dev = { | ||
385 | .platform_data = &s3c2410_dma_platdata, | ||
386 | }, | ||
387 | }; | ||
388 | #endif | ||
389 | |||
390 | #ifdef CONFIG_CPU_S3C2412 | ||
391 | static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = { | ||
392 | [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 }, | ||
393 | [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 }, | ||
394 | [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 }, | ||
395 | [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 }, | ||
396 | [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 }, | ||
397 | [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 }, | ||
398 | [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 }, | ||
399 | [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 }, | ||
400 | [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 }, | ||
401 | [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 }, | ||
402 | [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 }, | ||
403 | [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 }, | ||
404 | [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 }, | ||
405 | [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 }, | ||
406 | [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 }, | ||
407 | [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 }, | ||
408 | [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, 13 }, | ||
409 | [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, 14 }, | ||
410 | [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, 15 }, | ||
411 | [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 }, | ||
412 | }; | ||
413 | |||
414 | static struct s3c24xx_dma_platdata s3c2412_dma_platdata = { | ||
415 | .num_phy_channels = 4, | ||
416 | .channels = s3c2412_dma_channels, | ||
417 | .num_channels = DMACH_MAX, | ||
418 | }; | ||
419 | |||
420 | struct platform_device s3c2412_device_dma = { | ||
421 | .name = "s3c2412-dma", | ||
422 | .id = 0, | ||
423 | .num_resources = ARRAY_SIZE(s3c2410_dma_resource), | ||
424 | .resource = s3c2410_dma_resource, | ||
425 | .dev = { | ||
426 | .platform_data = &s3c2412_dma_platdata, | ||
427 | }, | ||
428 | }; | ||
429 | #endif | ||
430 | |||
431 | #if defined(CONFIG_CPU_S3C2440) | ||
432 | static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = { | ||
433 | [DMACH_XD0] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 0), }, | ||
434 | [DMACH_XD1] = { S3C24XX_DMA_AHB, true, S3C24XX_DMA_CHANREQ(0, 1), }, | ||
435 | [DMACH_SDI] = { S3C24XX_DMA_APB, false, S3C24XX_DMA_CHANREQ(2, 0) | | ||
436 | S3C24XX_DMA_CHANREQ(6, 1) | | ||
437 | S3C24XX_DMA_CHANREQ(2, 2) | | ||
438 | S3C24XX_DMA_CHANREQ(1, 3), | ||
439 | }, | ||
440 | [DMACH_SPI0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 1), }, | ||
441 | [DMACH_SPI1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 3), }, | ||
442 | [DMACH_UART0] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 0), }, | ||
443 | [DMACH_UART1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(1, 1), }, | ||
444 | [DMACH_UART2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(0, 3), }, | ||
445 | [DMACH_TIMER] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(3, 0) | | ||
446 | S3C24XX_DMA_CHANREQ(3, 2) | | ||
447 | S3C24XX_DMA_CHANREQ(3, 3), | ||
448 | }, | ||
449 | [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(2, 1) | | ||
450 | S3C24XX_DMA_CHANREQ(1, 2), | ||
451 | }, | ||
452 | [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 0) | | ||
453 | S3C24XX_DMA_CHANREQ(0, 2), | ||
454 | }, | ||
455 | [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 0) | | ||
456 | S3C24XX_DMA_CHANREQ(5, 2), | ||
457 | }, | ||
458 | [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(5, 1) | | ||
459 | S3C24XX_DMA_CHANREQ(6, 3), | ||
460 | }, | ||
461 | [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(6, 2) | | ||
462 | S3C24XX_DMA_CHANREQ(5, 3), | ||
463 | }, | ||
464 | [DMACH_USB_EP1] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 0), }, | ||
465 | [DMACH_USB_EP2] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 1), }, | ||
466 | [DMACH_USB_EP3] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 2), }, | ||
467 | [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, | ||
468 | }; | ||
469 | |||
470 | static struct s3c24xx_dma_platdata s3c2440_dma_platdata = { | ||
471 | .num_phy_channels = 4, | ||
472 | .channels = s3c2440_dma_channels, | ||
473 | .num_channels = DMACH_MAX, | ||
474 | }; | ||
475 | |||
476 | struct platform_device s3c2440_device_dma = { | ||
477 | .name = "s3c2410-dma", | ||
478 | .id = 0, | ||
479 | .num_resources = ARRAY_SIZE(s3c2410_dma_resource), | ||
480 | .resource = s3c2410_dma_resource, | ||
481 | .dev = { | ||
482 | .platform_data = &s3c2440_dma_platdata, | ||
483 | }, | ||
484 | }; | ||
485 | #endif | ||
486 | |||
487 | #if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416) | ||
488 | static struct resource s3c2443_dma_resource[] = { | ||
489 | [0] = DEFINE_RES_MEM(S3C24XX_PA_DMA, S3C24XX_SZ_DMA), | ||
490 | [1] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA0), | ||
491 | [2] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA1), | ||
492 | [3] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA2), | ||
493 | [4] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA3), | ||
494 | [5] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA4), | ||
495 | [6] = DEFINE_RES_IRQ(IRQ_S3C2443_DMA5), | ||
496 | }; | ||
497 | |||
498 | static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = { | ||
499 | [DMACH_XD0] = { S3C24XX_DMA_AHB, true, 17 }, | ||
500 | [DMACH_XD1] = { S3C24XX_DMA_AHB, true, 18 }, | ||
501 | [DMACH_SDI] = { S3C24XX_DMA_APB, false, 10 }, | ||
502 | [DMACH_SPI0_RX] = { S3C24XX_DMA_APB, true, 1 }, | ||
503 | [DMACH_SPI0_TX] = { S3C24XX_DMA_APB, true, 0 }, | ||
504 | [DMACH_SPI1_RX] = { S3C24XX_DMA_APB, true, 3 }, | ||
505 | [DMACH_SPI1_TX] = { S3C24XX_DMA_APB, true, 2 }, | ||
506 | [DMACH_UART0] = { S3C24XX_DMA_APB, true, 19 }, | ||
507 | [DMACH_UART1] = { S3C24XX_DMA_APB, true, 21 }, | ||
508 | [DMACH_UART2] = { S3C24XX_DMA_APB, true, 23 }, | ||
509 | [DMACH_UART3] = { S3C24XX_DMA_APB, true, 25 }, | ||
510 | [DMACH_UART0_SRC2] = { S3C24XX_DMA_APB, true, 20 }, | ||
511 | [DMACH_UART1_SRC2] = { S3C24XX_DMA_APB, true, 22 }, | ||
512 | [DMACH_UART2_SRC2] = { S3C24XX_DMA_APB, true, 24 }, | ||
513 | [DMACH_UART3_SRC2] = { S3C24XX_DMA_APB, true, 26 }, | ||
514 | [DMACH_TIMER] = { S3C24XX_DMA_APB, true, 9 }, | ||
515 | [DMACH_I2S_IN] = { S3C24XX_DMA_APB, true, 5 }, | ||
516 | [DMACH_I2S_OUT] = { S3C24XX_DMA_APB, true, 4 }, | ||
517 | [DMACH_PCM_IN] = { S3C24XX_DMA_APB, true, 28 }, | ||
518 | [DMACH_PCM_OUT] = { S3C24XX_DMA_APB, true, 27 }, | ||
519 | [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 }, | ||
520 | }; | ||
521 | |||
522 | static struct s3c24xx_dma_platdata s3c2443_dma_platdata = { | ||
523 | .num_phy_channels = 6, | ||
524 | .channels = s3c2443_dma_channels, | ||
525 | .num_channels = DMACH_MAX, | ||
526 | }; | ||
527 | |||
528 | struct platform_device s3c2443_device_dma = { | ||
529 | .name = "s3c2443-dma", | ||
530 | .id = 0, | ||
531 | .num_resources = ARRAY_SIZE(s3c2443_dma_resource), | ||
532 | .resource = s3c2443_dma_resource, | ||
533 | .dev = { | ||
534 | .platform_data = &s3c2443_dma_platdata, | ||
535 | }, | ||
536 | }; | ||
537 | #endif | ||
diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h index 84b280654f4c..e46c10417216 100644 --- a/arch/arm/mach-s3c24xx/common.h +++ b/arch/arm/mach-s3c24xx/common.h | |||
@@ -109,4 +109,9 @@ extern void s3c2443_init_irq(void); | |||
109 | 109 | ||
110 | extern struct syscore_ops s3c24xx_irq_syscore_ops; | 110 | extern struct syscore_ops s3c24xx_irq_syscore_ops; |
111 | 111 | ||
112 | extern struct platform_device s3c2410_device_dma; | ||
113 | extern struct platform_device s3c2412_device_dma; | ||
114 | extern struct platform_device s3c2440_device_dma; | ||
115 | extern struct platform_device s3c2443_device_dma; | ||
116 | |||
112 | #endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */ | 117 | #endif /* __ARCH_ARM_MACH_S3C24XX_COMMON_H */ |
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index a45fcd8ccf79..43c23e220f5b 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c | |||
@@ -466,6 +466,7 @@ static struct platform_device *jive_devices[] __initdata = { | |||
466 | &jive_device_wm8750, | 466 | &jive_device_wm8750, |
467 | &s3c_device_nand, | 467 | &s3c_device_nand, |
468 | &s3c_device_usbgadget, | 468 | &s3c_device_usbgadget, |
469 | &s3c2412_device_dma, | ||
469 | }; | 470 | }; |
470 | 471 | ||
471 | static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { | 472 | static struct s3c2410_udc_mach_info jive_udc_cfg __initdata = { |
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2413.c b/arch/arm/mach-s3c24xx/mach-smdk2413.c index 8146e920f10d..c9d31ef28dd1 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2413.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2413.c | |||
@@ -89,6 +89,7 @@ static struct platform_device *smdk2413_devices[] __initdata = { | |||
89 | &s3c_device_i2c0, | 89 | &s3c_device_i2c0, |
90 | &s3c_device_iis, | 90 | &s3c_device_iis, |
91 | &s3c_device_usbgadget, | 91 | &s3c_device_usbgadget, |
92 | &s3c2412_device_dma, | ||
92 | }; | 93 | }; |
93 | 94 | ||
94 | static void __init smdk2413_fixup(struct tag *tags, char **cmdline, | 95 | static void __init smdk2413_fixup(struct tag *tags, char **cmdline, |
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c index cb46847c66b4..f88e672ad1e4 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2416.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c | |||
@@ -215,6 +215,7 @@ static struct platform_device *smdk2416_devices[] __initdata = { | |||
215 | &s3c_device_hsmmc0, | 215 | &s3c_device_hsmmc0, |
216 | &s3c_device_hsmmc1, | 216 | &s3c_device_hsmmc1, |
217 | &s3c_device_usb_hsudc, | 217 | &s3c_device_usb_hsudc, |
218 | &s3c2443_device_dma, | ||
218 | }; | 219 | }; |
219 | 220 | ||
220 | static void __init smdk2416_map_io(void) | 221 | static void __init smdk2416_map_io(void) |
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2443.c b/arch/arm/mach-s3c24xx/mach-smdk2443.c index 9435c3bef18a..d9933fcc6cc8 100644 --- a/arch/arm/mach-s3c24xx/mach-smdk2443.c +++ b/arch/arm/mach-s3c24xx/mach-smdk2443.c | |||
@@ -115,6 +115,7 @@ static struct platform_device *smdk2443_devices[] __initdata = { | |||
115 | #ifdef CONFIG_SND_SOC_SMDK2443_WM9710 | 115 | #ifdef CONFIG_SND_SOC_SMDK2443_WM9710 |
116 | &s3c_device_ac97, | 116 | &s3c_device_ac97, |
117 | #endif | 117 | #endif |
118 | &s3c2443_device_dma, | ||
118 | }; | 119 | }; |
119 | 120 | ||
120 | static void __init smdk2443_map_io(void) | 121 | static void __init smdk2443_map_io(void) |
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c index b66588428ec9..f7ec9c550787 100644 --- a/arch/arm/mach-s3c24xx/mach-vstms.c +++ b/arch/arm/mach-s3c24xx/mach-vstms.c | |||
@@ -126,6 +126,7 @@ static struct platform_device *vstms_devices[] __initdata = { | |||
126 | &s3c_device_iis, | 126 | &s3c_device_iis, |
127 | &s3c_device_rtc, | 127 | &s3c_device_rtc, |
128 | &s3c_device_nand, | 128 | &s3c_device_nand, |
129 | &s3c2412_device_dma, | ||
129 | }; | 130 | }; |
130 | 131 | ||
131 | static void __init vstms_fixup(struct tag *tags, char **cmdline, | 132 | static void __init vstms_fixup(struct tag *tags, char **cmdline, |
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 25f40c9b7f62..99a3590f0349 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/ioport.h> | 32 | #include <linux/ioport.h> |
33 | #include <linux/platform_data/s3c-hsudc.h> | 33 | #include <linux/platform_data/s3c-hsudc.h> |
34 | #include <linux/platform_data/s3c-hsotg.h> | 34 | #include <linux/platform_data/s3c-hsotg.h> |
35 | #include <linux/platform_data/dma-s3c24xx.h> | ||
35 | 36 | ||
36 | #include <media/s5p_hdmi.h> | 37 | #include <media/s5p_hdmi.h> |
37 | 38 | ||
@@ -1465,8 +1466,10 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, | |||
1465 | pd.num_cs = num_cs; | 1466 | pd.num_cs = num_cs; |
1466 | pd.src_clk_nr = src_clk_nr; | 1467 | pd.src_clk_nr = src_clk_nr; |
1467 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; | 1468 | pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio; |
1468 | #ifdef CONFIG_PL330_DMA | 1469 | #if defined(CONFIG_PL330_DMA) |
1469 | pd.filter = pl330_filter; | 1470 | pd.filter = pl330_filter; |
1471 | #elif defined(CONFIG_S3C24XX_DMAC) | ||
1472 | pd.filter = s3c24xx_dma_filter; | ||
1470 | #endif | 1473 | #endif |
1471 | 1474 | ||
1472 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); | 1475 | s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0); |
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 200926699778..bb5b90e8e768 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -18,11 +18,21 @@ | |||
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/of_address.h> | 20 | #include <linux/of_address.h> |
21 | #include <linux/of_irq.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | #include <linux/platform_device.h> | ||
21 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/spinlock.h> | ||
22 | 26 | ||
23 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
28 | #include <asm/irq_regs.h> | ||
29 | #include <asm/pmu.h> | ||
24 | #include <asm/smp_plat.h> | 30 | #include <asm/smp_plat.h> |
25 | 31 | ||
32 | #define DRIVER_NAME "CCI-400" | ||
33 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" | ||
34 | #define PMU_NAME "CCI_400" | ||
35 | |||
26 | #define CCI_PORT_CTRL 0x0 | 36 | #define CCI_PORT_CTRL 0x0 |
27 | #define CCI_CTRL_STATUS 0xc | 37 | #define CCI_CTRL_STATUS 0xc |
28 | 38 | ||
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports; | |||
54 | static void __iomem *cci_ctrl_base; | 64 | static void __iomem *cci_ctrl_base; |
55 | static unsigned long cci_ctrl_phys; | 65 | static unsigned long cci_ctrl_phys; |
56 | 66 | ||
67 | #ifdef CONFIG_HW_PERF_EVENTS | ||
68 | |||
69 | #define CCI_PMCR 0x0100 | ||
70 | #define CCI_PID2 0x0fe8 | ||
71 | |||
72 | #define CCI_PMCR_CEN 0x00000001 | ||
73 | #define CCI_PMCR_NCNT_MASK 0x0000f800 | ||
74 | #define CCI_PMCR_NCNT_SHIFT 11 | ||
75 | |||
76 | #define CCI_PID2_REV_MASK 0xf0 | ||
77 | #define CCI_PID2_REV_SHIFT 4 | ||
78 | |||
79 | /* Port ids */ | ||
80 | #define CCI_PORT_S0 0 | ||
81 | #define CCI_PORT_S1 1 | ||
82 | #define CCI_PORT_S2 2 | ||
83 | #define CCI_PORT_S3 3 | ||
84 | #define CCI_PORT_S4 4 | ||
85 | #define CCI_PORT_M0 5 | ||
86 | #define CCI_PORT_M1 6 | ||
87 | #define CCI_PORT_M2 7 | ||
88 | |||
89 | #define CCI_REV_R0 0 | ||
90 | #define CCI_REV_R1 1 | ||
91 | #define CCI_REV_R0_P4 4 | ||
92 | #define CCI_REV_R1_P2 6 | ||
93 | |||
94 | #define CCI_PMU_EVT_SEL 0x000 | ||
95 | #define CCI_PMU_CNTR 0x004 | ||
96 | #define CCI_PMU_CNTR_CTRL 0x008 | ||
97 | #define CCI_PMU_OVRFLW 0x00c | ||
98 | |||
99 | #define CCI_PMU_OVRFLW_FLAG 1 | ||
100 | |||
101 | #define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) | ||
102 | |||
103 | /* | ||
104 | * Instead of an event id to monitor CCI cycles, a dedicated counter is | ||
105 | * provided. Use 0xff to represent CCI cycles and hope that no future revisions | ||
106 | * make use of this event in hardware. | ||
107 | */ | ||
108 | enum cci400_perf_events { | ||
109 | CCI_PMU_CYCLES = 0xff | ||
110 | }; | ||
111 | |||
112 | #define CCI_PMU_EVENT_MASK 0xff | ||
113 | #define CCI_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7) | ||
114 | #define CCI_PMU_EVENT_CODE(event) (event & 0x1f) | ||
115 | |||
116 | #define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ | ||
117 | |||
118 | #define CCI_PMU_CYCLE_CNTR_IDX 0 | ||
119 | #define CCI_PMU_CNTR0_IDX 1 | ||
120 | #define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1) | ||
121 | |||
122 | /* | ||
123 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 | ||
124 | * ports and bits 4:0 are event codes. There are different event codes | ||
125 | * associated with each port type. | ||
126 | * | ||
127 | * Additionally, the range of events associated with the port types changed | ||
128 | * between Rev0 and Rev1. | ||
129 | * | ||
130 | * The constants below define the range of valid codes for each port type for | ||
131 | * the different revisions and are used to validate the event to be monitored. | ||
132 | */ | ||
133 | |||
134 | #define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00 | ||
135 | #define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13 | ||
136 | #define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14 | ||
137 | #define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a | ||
138 | |||
139 | #define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00 | ||
140 | #define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14 | ||
141 | #define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00 | ||
142 | #define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11 | ||
143 | |||
144 | struct pmu_port_event_ranges { | ||
145 | u8 slave_min; | ||
146 | u8 slave_max; | ||
147 | u8 master_min; | ||
148 | u8 master_max; | ||
149 | }; | ||
150 | |||
151 | static struct pmu_port_event_ranges port_event_range[] = { | ||
152 | [CCI_REV_R0] = { | ||
153 | .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV, | ||
154 | .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV, | ||
155 | .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV, | ||
156 | .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV, | ||
157 | }, | ||
158 | [CCI_REV_R1] = { | ||
159 | .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV, | ||
160 | .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV, | ||
161 | .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV, | ||
162 | .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV, | ||
163 | }, | ||
164 | }; | ||
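
Putting the encoding together: an event id is simply (port << 5) | code, which is exactly what CCI_PMU_EVENT_SOURCE()/CCI_PMU_EVENT_CODE() undo. A worked example (the event code value 0x03 is arbitrary, used only to show the arithmetic):

	u8 event = (CCI_PORT_S2 << 5) | 0x03;     /* 0x43 */
	u8 port  = CCI_PMU_EVENT_SOURCE(event);   /* 2, i.e. CCI_PORT_S2 */
	u8 code  = CCI_PMU_EVENT_CODE(event);     /* 0x03 */

pmu_validate_hw_event() below then checks the code against the slave or master range for the revision probed at runtime.
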
165 | |||
166 | struct cci_pmu_drv_data { | ||
167 | void __iomem *base; | ||
168 | struct arm_pmu *cci_pmu; | ||
169 | int nr_irqs; | ||
170 | int irqs[CCI_PMU_MAX_HW_EVENTS]; | ||
171 | unsigned long active_irqs; | ||
172 | struct perf_event *events[CCI_PMU_MAX_HW_EVENTS]; | ||
173 | unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)]; | ||
174 | struct pmu_port_event_ranges *port_ranges; | ||
175 | struct pmu_hw_events hw_events; | ||
176 | }; | ||
177 | static struct cci_pmu_drv_data *pmu; | ||
178 | |||
179 | static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < nr_irqs; i++) | ||
184 | if (irq == irqs[i]) | ||
185 | return true; | ||
186 | |||
187 | return false; | ||
188 | } | ||
189 | |||
190 | static int probe_cci_revision(void) | ||
191 | { | ||
192 | int rev; | ||
193 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; | ||
194 | rev >>= CCI_PID2_REV_SHIFT; | ||
195 | |||
196 | if (rev <= CCI_REV_R0_P4) | ||
197 | return CCI_REV_R0; | ||
198 | else if (rev <= CCI_REV_R1_P2) | ||
199 | return CCI_REV_R1; | ||
200 | |||
201 | return -ENOENT; | ||
202 | } | ||
203 | |||
204 | static struct pmu_port_event_ranges *port_range_by_rev(void) | ||
205 | { | ||
206 | int rev = probe_cci_revision(); | ||
207 | |||
208 | if (rev < 0) | ||
209 | return NULL; | ||
210 | |||
211 | return &port_event_range[rev]; | ||
212 | } | ||
213 | |||
214 | static int pmu_is_valid_slave_event(u8 ev_code) | ||
215 | { | ||
216 | return pmu->port_ranges->slave_min <= ev_code && | ||
217 | ev_code <= pmu->port_ranges->slave_max; | ||
218 | } | ||
219 | |||
220 | static int pmu_is_valid_master_event(u8 ev_code) | ||
221 | { | ||
222 | return pmu->port_ranges->master_min <= ev_code && | ||
223 | ev_code <= pmu->port_ranges->master_max; | ||
224 | } | ||
225 | |||
226 | static int pmu_validate_hw_event(u8 hw_event) | ||
227 | { | ||
228 | u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event); | ||
229 | u8 ev_code = CCI_PMU_EVENT_CODE(hw_event); | ||
230 | |||
231 | switch (ev_source) { | ||
232 | case CCI_PORT_S0: | ||
233 | case CCI_PORT_S1: | ||
234 | case CCI_PORT_S2: | ||
235 | case CCI_PORT_S3: | ||
236 | case CCI_PORT_S4: | ||
237 | /* Slave Interface */ | ||
238 | if (pmu_is_valid_slave_event(ev_code)) | ||
239 | return hw_event; | ||
240 | break; | ||
241 | case CCI_PORT_M0: | ||
242 | case CCI_PORT_M1: | ||
243 | case CCI_PORT_M2: | ||
244 | /* Master Interface */ | ||
245 | if (pmu_is_valid_master_event(ev_code)) | ||
246 | return hw_event; | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | return -ENOENT; | ||
251 | } | ||
252 | |||
253 | static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx) | ||
254 | { | ||
255 | return CCI_PMU_CYCLE_CNTR_IDX <= idx && | ||
256 | idx <= CCI_PMU_CNTR_LAST(cci_pmu); | ||
257 | } | ||
258 | |||
259 | static u32 pmu_read_register(int idx, unsigned int offset) | ||
260 | { | ||
261 | return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | ||
262 | } | ||
263 | |||
264 | static void pmu_write_register(u32 value, int idx, unsigned int offset) | ||
265 | { | ||
266 | return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | ||
267 | } | ||
268 | |||
269 | static void pmu_disable_counter(int idx) | ||
270 | { | ||
271 | pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL); | ||
272 | } | ||
273 | |||
274 | static void pmu_enable_counter(int idx) | ||
275 | { | ||
276 | pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL); | ||
277 | } | ||
278 | |||
279 | static void pmu_set_event(int idx, unsigned long event) | ||
280 | { | ||
281 | event &= CCI_PMU_EVENT_MASK; | ||
282 | pmu_write_register(event, idx, CCI_PMU_EVT_SEL); | ||
283 | } | ||
284 | |||
285 | static u32 pmu_get_max_counters(void) | ||
286 | { | ||
287 | u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) & | ||
288 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; | ||
289 | |||
290 | /* add 1 for cycle counter */ | ||
291 | return n_cnts + 1; | ||
292 | } | ||
293 | |||
294 | static struct pmu_hw_events *pmu_get_hw_events(void) | ||
295 | { | ||
296 | return &pmu->hw_events; | ||
297 | } | ||
298 | |||
299 | static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event) | ||
300 | { | ||
301 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
302 | struct hw_perf_event *hw_event = &event->hw; | ||
303 | unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK; | ||
304 | int idx; | ||
305 | |||
306 | if (cci_event == CCI_PMU_CYCLES) { | ||
307 | if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | ||
308 | return -EAGAIN; | ||
309 | |||
310 | return CCI_PMU_CYCLE_CNTR_IDX; | ||
311 | } | ||
312 | |||
313 | for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | ||
314 | if (!test_and_set_bit(idx, hw->used_mask)) | ||
315 | return idx; | ||
316 | |||
317 | /* No counters available */ | ||
318 | return -EAGAIN; | ||
319 | } | ||
320 | |||
321 | static int pmu_map_event(struct perf_event *event) | ||
322 | { | ||
323 | int mapping; | ||
324 | u8 config = event->attr.config & CCI_PMU_EVENT_MASK; | ||
325 | |||
326 | if (event->attr.type < PERF_TYPE_MAX) | ||
327 | return -ENOENT; | ||
328 | |||
329 | if (config == CCI_PMU_CYCLES) | ||
330 | mapping = config; | ||
331 | else | ||
332 | mapping = pmu_validate_hw_event(config); | ||
333 | |||
334 | return mapping; | ||
335 | } | ||
336 | |||
337 | static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler) | ||
338 | { | ||
339 | int i; | ||
340 | struct platform_device *pmu_device = cci_pmu->plat_device; | ||
341 | |||
342 | if (unlikely(!pmu_device)) | ||
343 | return -ENODEV; | ||
344 | |||
345 | if (pmu->nr_irqs < 1) { | ||
346 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); | ||
347 | return -ENODEV; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * Register all available CCI PMU interrupts. In the interrupt handler | ||
352 | * we iterate over the counters checking for interrupt source (the | ||
353 | * overflowing counter) and clear it. | ||
354 | * | ||
355 | * This should allow handling of non-unique interrupt for the counters. | ||
356 | */ | ||
357 | for (i = 0; i < pmu->nr_irqs; i++) { | ||
358 | int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED, | ||
359 | "arm-cci-pmu", cci_pmu); | ||
360 | if (err) { | ||
361 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", | ||
362 | pmu->irqs[i]); | ||
363 | return err; | ||
364 | } | ||
365 | |||
366 | set_bit(i, &pmu->active_irqs); | ||
367 | } | ||
368 | |||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | ||
373 | { | ||
374 | unsigned long flags; | ||
375 | struct arm_pmu *cci_pmu = (struct arm_pmu *)dev; | ||
376 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
377 | struct perf_sample_data data; | ||
378 | struct pt_regs *regs; | ||
379 | int idx, handled = IRQ_NONE; | ||
380 | |||
381 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
382 | regs = get_irq_regs(); | ||
383 | /* | ||
384 | * Iterate over counters and update the corresponding perf events. | ||
385 | * This should work regardless of whether we have per-counter overflow | ||
386 | * interrupt or a combined overflow interrupt. | ||
387 | */ | ||
388 | for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { | ||
389 | struct perf_event *event = events->events[idx]; | ||
390 | struct hw_perf_event *hw_counter; | ||
391 | |||
392 | if (!event) | ||
393 | continue; | ||
394 | |||
395 | hw_counter = &event->hw; | ||
396 | |||
397 | /* Did this counter overflow? */ | ||
398 | if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG)) | ||
399 | continue; | ||
400 | |||
401 | pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); | ||
402 | |||
403 | handled = IRQ_HANDLED; | ||
404 | |||
405 | armpmu_event_update(event); | ||
406 | perf_sample_data_init(&data, 0, hw_counter->last_period); | ||
407 | if (!armpmu_event_set_period(event)) | ||
408 | continue; | ||
409 | |||
410 | if (perf_event_overflow(event, &data, regs)) | ||
411 | cci_pmu->disable(event); | ||
412 | } | ||
413 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
414 | |||
415 | return IRQ_RETVAL(handled); | ||
416 | } | ||
417 | |||
418 | static void pmu_free_irq(struct arm_pmu *cci_pmu) | ||
419 | { | ||
420 | int i; | ||
421 | |||
422 | for (i = 0; i < pmu->nr_irqs; i++) { | ||
423 | if (!test_and_clear_bit(i, &pmu->active_irqs)) | ||
424 | continue; | ||
425 | |||
426 | free_irq(pmu->irqs[i], cci_pmu); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | static void pmu_enable_event(struct perf_event *event) | ||
431 | { | ||
432 | unsigned long flags; | ||
433 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
434 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
435 | struct hw_perf_event *hw_counter = &event->hw; | ||
436 | int idx = hw_counter->idx; | ||
437 | |||
438 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
439 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
440 | return; | ||
441 | } | ||
442 | |||
443 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
444 | |||
445 | /* Configure the event to count, unless you are counting cycles */ | ||
446 | if (idx != CCI_PMU_CYCLE_CNTR_IDX) | ||
447 | pmu_set_event(idx, hw_counter->config_base); | ||
448 | |||
449 | pmu_enable_counter(idx); | ||
450 | |||
451 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
452 | } | ||
453 | |||
454 | static void pmu_disable_event(struct perf_event *event) | ||
455 | { | ||
456 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
457 | struct hw_perf_event *hw_counter = &event->hw; | ||
458 | int idx = hw_counter->idx; | ||
459 | |||
460 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
461 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
462 | return; | ||
463 | } | ||
464 | |||
465 | pmu_disable_counter(idx); | ||
466 | } | ||
467 | |||
468 | static void pmu_start(struct arm_pmu *cci_pmu) | ||
469 | { | ||
470 | u32 val; | ||
471 | unsigned long flags; | ||
472 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
473 | |||
474 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
475 | |||
476 | /* Enable all the PMU counters. */ | ||
477 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; | ||
478 | writel(val, cci_ctrl_base + CCI_PMCR); | ||
479 | |||
480 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
481 | } | ||
482 | |||
483 | static void pmu_stop(struct arm_pmu *cci_pmu) | ||
484 | { | ||
485 | u32 val; | ||
486 | unsigned long flags; | ||
487 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
488 | |||
489 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
490 | |||
491 | /* Disable all the PMU counters. */ | ||
492 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; | ||
493 | writel(val, cci_ctrl_base + CCI_PMCR); | ||
494 | |||
495 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
496 | } | ||
497 | |||
498 | static u32 pmu_read_counter(struct perf_event *event) | ||
499 | { | ||
500 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
501 | struct hw_perf_event *hw_counter = &event->hw; | ||
502 | int idx = hw_counter->idx; | ||
503 | u32 value; | ||
504 | |||
505 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
506 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
507 | return 0; | ||
508 | } | ||
509 | value = pmu_read_register(idx, CCI_PMU_CNTR); | ||
510 | |||
511 | return value; | ||
512 | } | ||
513 | |||
514 | static void pmu_write_counter(struct perf_event *event, u32 value) | ||
515 | { | ||
516 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
517 | struct hw_perf_event *hw_counter = &event->hw; | ||
518 | int idx = hw_counter->idx; | ||
519 | |||
520 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) | ||
521 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
522 | else | ||
523 | pmu_write_register(value, idx, CCI_PMU_CNTR); | ||
524 | } | ||
525 | |||
526 | static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) | ||
527 | { | ||
528 | *cci_pmu = (struct arm_pmu){ | ||
529 | .name = PMU_NAME, | ||
530 | .max_period = (1LLU << 32) - 1, | ||
531 | .get_hw_events = pmu_get_hw_events, | ||
532 | .get_event_idx = pmu_get_event_idx, | ||
533 | .map_event = pmu_map_event, | ||
534 | .request_irq = pmu_request_irq, | ||
535 | .handle_irq = pmu_handle_irq, | ||
536 | .free_irq = pmu_free_irq, | ||
537 | .enable = pmu_enable_event, | ||
538 | .disable = pmu_disable_event, | ||
539 | .start = pmu_start, | ||
540 | .stop = pmu_stop, | ||
541 | .read_counter = pmu_read_counter, | ||
542 | .write_counter = pmu_write_counter, | ||
543 | }; | ||
544 | |||
545 | cci_pmu->plat_device = pdev; | ||
546 | cci_pmu->num_events = pmu_get_max_counters(); | ||
547 | |||
548 | return armpmu_register(cci_pmu, -1); | ||
549 | } | ||
550 | |||
551 | static const struct of_device_id arm_cci_pmu_matches[] = { | ||
552 | { | ||
553 | .compatible = "arm,cci-400-pmu", | ||
554 | }, | ||
555 | {}, | ||
556 | }; | ||
557 | |||
558 | static int cci_pmu_probe(struct platform_device *pdev) | ||
559 | { | ||
560 | struct resource *res; | ||
561 | int i, ret, irq; | ||
562 | |||
563 | pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); | ||
564 | if (!pmu) | ||
565 | return -ENOMEM; | ||
566 | |||
567 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
568 | pmu->base = devm_ioremap_resource(&pdev->dev, res); | ||
569 | if (IS_ERR(pmu->base)) | ||
570 | return -ENOMEM; | ||
571 | |||
572 | /* | ||
573 | * CCI PMU has 5 overflow signals - one per counter; but some may be tied | ||
574 | * together to a common interrupt. | ||
575 | */ | ||
576 | pmu->nr_irqs = 0; | ||
577 | for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) { | ||
578 | irq = platform_get_irq(pdev, i); | ||
579 | if (irq < 0) | ||
580 | break; | ||
581 | |||
582 | if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs)) | ||
583 | continue; | ||
584 | |||
585 | pmu->irqs[pmu->nr_irqs++] = irq; | ||
586 | } | ||
587 | |||
588 | /* | ||
589 | * Ensure that the device tree has as many interrupts as the number | ||
590 | * of counters. | ||
591 | */ | ||
592 | if (i < CCI_PMU_MAX_HW_EVENTS) { | ||
593 | dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n", | ||
594 | i, CCI_PMU_MAX_HW_EVENTS); | ||
595 | return -EINVAL; | ||
596 | } | ||
597 | |||
598 | pmu->port_ranges = port_range_by_rev(); | ||
599 | if (!pmu->port_ranges) { | ||
600 | dev_warn(&pdev->dev, "CCI PMU version not supported\n"); | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | |||
604 | pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL); | ||
605 | if (!pmu->cci_pmu) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | pmu->hw_events.events = pmu->events; | ||
609 | pmu->hw_events.used_mask = pmu->used_mask; | ||
610 | raw_spin_lock_init(&pmu->hw_events.pmu_lock); | ||
611 | |||
612 | ret = cci_pmu_init(pmu->cci_pmu, pdev); | ||
613 | if (ret) | ||
614 | return ret; | ||
615 | |||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | static int cci_platform_probe(struct platform_device *pdev) | ||
620 | { | ||
621 | if (!cci_probed()) | ||
622 | return -ENODEV; | ||
623 | |||
624 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | ||
625 | } | ||
626 | |||
627 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
628 | |||
57 | struct cpu_port { | 629 | struct cpu_port { |
58 | u64 mpidr; | 630 | u64 mpidr; |
59 | u32 port; | 631 | u32 port; |
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn) | |||
120 | } | 692 | } |
121 | EXPORT_SYMBOL_GPL(cci_ace_get_port); | 693 | EXPORT_SYMBOL_GPL(cci_ace_get_port); |
122 | 694 | ||
123 | static void __init cci_ace_init_ports(void) | 695 | static void cci_ace_init_ports(void) |
124 | { | 696 | { |
125 | int port, cpu; | 697 | int port, cpu; |
126 | struct device_node *cpun; | 698 | struct device_node *cpun; |
@@ -386,7 +958,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = { | |||
386 | {}, | 958 | {}, |
387 | }; | 959 | }; |
388 | 960 | ||
389 | static int __init cci_probe(void) | 961 | static int cci_probe(void) |
390 | { | 962 | { |
391 | struct cci_nb_ports const *cci_config; | 963 | struct cci_nb_ports const *cci_config; |
392 | int ret, i, nb_ace = 0, nb_ace_lite = 0; | 964 | int ret, i, nb_ace = 0, nb_ace_lite = 0; |
@@ -490,7 +1062,7 @@ memalloc_err: | |||
490 | static int cci_init_status = -EAGAIN; | 1062 | static int cci_init_status = -EAGAIN; |
491 | static DEFINE_MUTEX(cci_probing); | 1063 | static DEFINE_MUTEX(cci_probing); |
492 | 1064 | ||
493 | static int __init cci_init(void) | 1065 | static int cci_init(void) |
494 | { | 1066 | { |
495 | if (cci_init_status != -EAGAIN) | 1067 | if (cci_init_status != -EAGAIN) |
496 | return cci_init_status; | 1068 | return cci_init_status; |
@@ -502,18 +1074,55 @@ static int __init cci_init(void) | |||
502 | return cci_init_status; | 1074 | return cci_init_status; |
503 | } | 1075 | } |
504 | 1076 | ||
1077 | #ifdef CONFIG_HW_PERF_EVENTS | ||
1078 | static struct platform_driver cci_pmu_driver = { | ||
1079 | .driver = { | ||
1080 | .name = DRIVER_NAME_PMU, | ||
1081 | .of_match_table = arm_cci_pmu_matches, | ||
1082 | }, | ||
1083 | .probe = cci_pmu_probe, | ||
1084 | }; | ||
1085 | |||
1086 | static struct platform_driver cci_platform_driver = { | ||
1087 | .driver = { | ||
1088 | .name = DRIVER_NAME, | ||
1089 | .of_match_table = arm_cci_matches, | ||
1090 | }, | ||
1091 | .probe = cci_platform_probe, | ||
1092 | }; | ||
1093 | |||
1094 | static int __init cci_platform_init(void) | ||
1095 | { | ||
1096 | int ret; | ||
1097 | |||
1098 | ret = platform_driver_register(&cci_pmu_driver); | ||
1099 | if (ret) | ||
1100 | return ret; | ||
1101 | |||
1102 | return platform_driver_register(&cci_platform_driver); | ||
1103 | } | ||
1104 | |||
1105 | #else | ||
1106 | |||
1107 | static int __init cci_platform_init(void) | ||
1108 | { | ||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | #endif | ||
505 | /* | 1113 | /* |
506 | * To sort out early init calls ordering a helper function is provided to | 1114 | * To sort out early init calls ordering a helper function is provided to |
507 | * check if the CCI driver has been initialized. The function checks if the driver | 1115 | * check if the CCI driver has been initialized. The function checks if the driver |
508 | * has been initialized, if not it calls the init function that probes | 1116 | * has been initialized, if not it calls the init function that probes |
509 | * the driver and updates the return value. | 1117 | * the driver and updates the return value. |
510 | */ | 1118 | */ |
511 | bool __init cci_probed(void) | 1119 | bool cci_probed(void) |
512 | { | 1120 | { |
513 | return cci_init() == 0; | 1121 | return cci_init() == 0; |
514 | } | 1122 | } |
515 | EXPORT_SYMBOL_GPL(cci_probed); | 1123 | EXPORT_SYMBOL_GPL(cci_probed); |
516 | 1124 | ||
517 | early_initcall(cci_init); | 1125 | early_initcall(cci_init); |
1126 | core_initcall(cci_platform_init); | ||
518 | MODULE_LICENSE("GPL"); | 1127 | MODULE_LICENSE("GPL"); |
519 | MODULE_DESCRIPTION("ARM CCI support"); | 1128 | MODULE_DESCRIPTION("ARM CCI support"); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 56fe803adcb1..c61a6eccf169 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -154,6 +154,18 @@ config TEGRA20_APB_DMA | |||
154 | This DMA controller transfers data from memory to peripheral fifo | 154 | This DMA controller transfers data from memory to peripheral fifo |
155 | or vice versa. It does not support memory to memory data transfer. | 155 | or vice versa. It does not support memory to memory data transfer. |
156 | 156 | ||
157 | config S3C24XX_DMAC | ||
158 | tristate "Samsung S3C24XX DMA support" | ||
159 | depends on ARCH_S3C24XX && !S3C24XX_DMA | ||
160 | select DMA_ENGINE | ||
161 | select DMA_VIRTUAL_CHANNELS | ||
162 | help | ||
163 | Support for the Samsung S3C24XX DMA controller driver. The | ||
164 | DMA controller has multiple DMA channels which can be | ||
165 | configured for different peripherals like audio, UART, SPI. | ||
166 | The DMA controller can transfer data from memory to peripheral, | ||
167 | peripheral to memory, peripheral to peripheral and memory to memory. | ||
168 | |||
157 | source "drivers/dma/sh/Kconfig" | 169 | source "drivers/dma/sh/Kconfig" |
158 | 170 | ||
159 | config COH901318 | 171 | config COH901318 |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index db89035b3626..0ce2da97e429 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -30,6 +30,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | |||
30 | obj-$(CONFIG_TI_EDMA) += edma.o | 30 | obj-$(CONFIG_TI_EDMA) += edma.o |
31 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 31 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
32 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 32 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
33 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o | ||
33 | obj-$(CONFIG_PL330_DMA) += pl330.o | 34 | obj-$(CONFIG_PL330_DMA) += pl330.o |
34 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 35 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
35 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 36 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c new file mode 100644 index 000000000000..4cb127978636 --- /dev/null +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -0,0 +1,1350 @@ | |||
1 | /* | ||
2 | * S3C24XX DMA handling | ||
3 | * | ||
4 | * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> | ||
5 | * | ||
6 | * based on amba-pl08x.c | ||
7 | * | ||
8 | * Copyright (c) 2006 ARM Ltd. | ||
9 | * Copyright (c) 2010 ST-Ericsson SA | ||
10 | * | ||
11 | * Author: Peter Pearse <peter.pearse@arm.com> | ||
12 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | ||
15 | * under the terms of the GNU General Public License as published by the Free | ||
16 | * Software Foundation; either version 2 of the License, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals | ||
20 | * that can be routed to any of the 4 to 8 hardware-channels. | ||
21 | * | ||
22 | * Therefore on these DMA controllers the number of channels | ||
23 | * and the number of incoming DMA signals are two totally different things. | ||
24 | * It is usually not possible to theoretically handle all physical signals, | ||
25 | * so a multiplexing scheme with possible denial of use is necessary. | ||
26 | * | ||
27 | * Open items: | ||
28 | * - bursts | ||
29 | */ | ||
30 | |||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/dmaengine.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/clk.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/platform_data/dma-s3c24xx.h> | ||
40 | |||
41 | #include "dmaengine.h" | ||
42 | #include "virt-dma.h" | ||
43 | |||
44 | #define MAX_DMA_CHANNELS 8 | ||
45 | |||
46 | #define S3C24XX_DISRC 0x00 | ||
47 | #define S3C24XX_DISRCC 0x04 | ||
48 | #define S3C24XX_DISRCC_INC_INCREMENT 0 | ||
49 | #define S3C24XX_DISRCC_INC_FIXED BIT(0) | ||
50 | #define S3C24XX_DISRCC_LOC_AHB 0 | ||
51 | #define S3C24XX_DISRCC_LOC_APB BIT(1) | ||
52 | |||
53 | #define S3C24XX_DIDST 0x08 | ||
54 | #define S3C24XX_DIDSTC 0x0c | ||
55 | #define S3C24XX_DIDSTC_INC_INCREMENT 0 | ||
56 | #define S3C24XX_DIDSTC_INC_FIXED BIT(0) | ||
57 | #define S3C24XX_DIDSTC_LOC_AHB 0 | ||
58 | #define S3C24XX_DIDSTC_LOC_APB BIT(1) | ||
59 | #define S3C24XX_DIDSTC_INT_TC0 0 | ||
60 | #define S3C24XX_DIDSTC_INT_RELOAD BIT(2) | ||
61 | |||
62 | #define S3C24XX_DCON 0x10 | ||
63 | |||
64 | #define S3C24XX_DCON_TC_MASK 0xfffff | ||
65 | #define S3C24XX_DCON_DSZ_BYTE (0 << 20) | ||
66 | #define S3C24XX_DCON_DSZ_HALFWORD (1 << 20) | ||
67 | #define S3C24XX_DCON_DSZ_WORD (2 << 20) | ||
68 | #define S3C24XX_DCON_DSZ_MASK (3 << 20) | ||
69 | #define S3C24XX_DCON_DSZ_SHIFT 20 | ||
70 | #define S3C24XX_DCON_AUTORELOAD 0 | ||
71 | #define S3C24XX_DCON_NORELOAD BIT(22) | ||
72 | #define S3C24XX_DCON_HWTRIG BIT(23) | ||
73 | #define S3C24XX_DCON_HWSRC_SHIFT 24 | ||
74 | #define S3C24XX_DCON_SERV_SINGLE 0 | ||
75 | #define S3C24XX_DCON_SERV_WHOLE BIT(27) | ||
76 | #define S3C24XX_DCON_TSZ_UNIT 0 | ||
77 | #define S3C24XX_DCON_TSZ_BURST4 BIT(28) | ||
78 | #define S3C24XX_DCON_INT BIT(29) | ||
79 | #define S3C24XX_DCON_SYNC_PCLK 0 | ||
80 | #define S3C24XX_DCON_SYNC_HCLK BIT(30) | ||
81 | #define S3C24XX_DCON_DEMAND 0 | ||
82 | #define S3C24XX_DCON_HANDSHAKE BIT(31) | ||
83 | |||
84 | #define S3C24XX_DSTAT 0x14 | ||
85 | #define S3C24XX_DSTAT_STAT_BUSY BIT(20) | ||
86 | #define S3C24XX_DSTAT_CURRTC_MASK 0xfffff | ||
87 | |||
88 | #define S3C24XX_DMASKTRIG 0x20 | ||
89 | #define S3C24XX_DMASKTRIG_SWTRIG BIT(0) | ||
90 | #define S3C24XX_DMASKTRIG_ON BIT(1) | ||
91 | #define S3C24XX_DMASKTRIG_STOP BIT(2) | ||
92 | |||
93 | #define S3C24XX_DMAREQSEL 0x24 | ||
94 | #define S3C24XX_DMAREQSEL_HW BIT(0) | ||
95 | |||
96 | /* | ||
97 | * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel | ||
98 | * for a DMA source. Instead only specific channels are valid. | ||
99 | * All of these SoCs have 4 physical channels and the number of request | ||
100 | * source bits is 3. Additionally we need 1 bit to mark the channel | ||
101 | * as valid. | ||
102 | * Therefore we separate the chansel element of the channel data into 4 | ||
103 | * parts of 4 bits each, to hold the information if the channel is valid | ||
104 | * and the hw request source to use. | ||
105 | * | ||
106 | * Example: | ||
107 | * SDI is valid on channels 0, 2 and 3 - with varying hw request sources. | ||
108 | * For it the chansel field would look like | ||
109 | * | ||
110 | * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1 | ||
111 | * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2 | ||
112 | * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2 | ||
113 | */ | ||
114 | #define S3C24XX_CHANSEL_WIDTH 4 | ||
115 | #define S3C24XX_CHANSEL_VALID BIT(3) | ||
116 | #define S3C24XX_CHANSEL_REQ_MASK 7 | ||
117 | |||
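A brief illustrative sketch (not part of the driver source): the SDI example above, restated with the macros just defined, together with the way the driver later decodes the field for a given physical channel.

	/* SDI from the example above: valid on channels 3, 2 and 0 */
	u32 sdi_chansel =
		((S3C24XX_CHANSEL_VALID | 1) << (3 * S3C24XX_CHANSEL_WIDTH)) |
		((S3C24XX_CHANSEL_VALID | 2) << (2 * S3C24XX_CHANSEL_WIDTH)) |
		((S3C24XX_CHANSEL_VALID | 2) << (0 * S3C24XX_CHANSEL_WIDTH));

	/*
	 * Decode for physical channel 2, as s3c24xx_dma_phy_valid() and
	 * s3c24xx_dma_start_next_sg() do further down in this file.
	 */
	bool valid = (sdi_chansel >> (2 * S3C24XX_CHANSEL_WIDTH)) &
		     S3C24XX_CHANSEL_VALID;		/* true */
	u32 hwsrc  = (sdi_chansel >> (2 * S3C24XX_CHANSEL_WIDTH)) &
		     S3C24XX_CHANSEL_REQ_MASK;		/* 2 */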
118 | /* | ||
119 | * struct soc_data - vendor-specific config parameters for individual SoCs | ||
120 | * @stride: spacing between the registers of each channel | ||
121 | * @has_reqsel: does the controller use the newer request selection mechanism | ||
122 | * @has_clocks: are controllable dma-clocks present | ||
123 | */ | ||
124 | struct soc_data { | ||
125 | int stride; | ||
126 | bool has_reqsel; | ||
127 | bool has_clocks; | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * enum s3c24xx_dma_chan_state - holds the virtual channel states | ||
132 | * @S3C24XX_DMA_CHAN_IDLE: the channel is idle | ||
133 | * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport | ||
134 | * channel and is running a transfer on it | ||
135 | * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport | ||
136 | * channel to become available (only pertains to memcpy channels) | ||
137 | */ | ||
138 | enum s3c24xx_dma_chan_state { | ||
139 | S3C24XX_DMA_CHAN_IDLE, | ||
140 | S3C24XX_DMA_CHAN_RUNNING, | ||
141 | S3C24XX_DMA_CHAN_WAITING, | ||
142 | }; | ||
143 | |||
144 | /* | ||
145 | * struct s3c24xx_sg - structure containing data per sg | ||
146 | * @src_addr: src address of sg | ||
147 | * @dst_addr: dst address of sg | ||
148 | * @len: transfer len in bytes | ||
149 | * @node: node for txd's dsg_list | ||
150 | */ | ||
151 | struct s3c24xx_sg { | ||
152 | dma_addr_t src_addr; | ||
153 | dma_addr_t dst_addr; | ||
154 | size_t len; | ||
155 | struct list_head node; | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor | ||
160 | * @vd: virtual DMA descriptor | ||
161 | * @dsg_list: list of children sg's | ||
162 | * @at: sg currently being transferred | ||
163 | * @width: transfer width | ||
164 | * @disrcc: value for source control register | ||
165 | * @didstc: value for destination control register | ||
166 | * @dcon: base value for dcon register | ||
167 | */ | ||
168 | struct s3c24xx_txd { | ||
169 | struct virt_dma_desc vd; | ||
170 | struct list_head dsg_list; | ||
171 | struct list_head *at; | ||
172 | u8 width; | ||
173 | u32 disrcc; | ||
174 | u32 didstc; | ||
175 | u32 dcon; | ||
176 | }; | ||
177 | |||
178 | struct s3c24xx_dma_chan; | ||
179 | |||
180 | /* | ||
181 | * struct s3c24xx_dma_phy - holder for the physical channels | ||
182 | * @id: physical index to this channel | ||
183 | * @valid: does the channel have all required elements | ||
184 | * @base: virtual memory base (remapped) for this channel | ||
185 | * @irq: interrupt for this channel | ||
186 | * @clk: clock for this channel | ||
187 | * @lock: a lock to use when altering an instance of this struct | ||
188 | * @serving: virtual channel currently being served by this physical channel | ||
189 | * @host: a pointer to the host (internal use) | ||
190 | */ | ||
191 | struct s3c24xx_dma_phy { | ||
192 | unsigned int id; | ||
193 | bool valid; | ||
194 | void __iomem *base; | ||
195 | unsigned int irq; | ||
196 | struct clk *clk; | ||
197 | spinlock_t lock; | ||
198 | struct s3c24xx_dma_chan *serving; | ||
199 | struct s3c24xx_dma_engine *host; | ||
200 | }; | ||
201 | |||
202 | /* | ||
203 | * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel | ||
204 | * @id: the id of the channel | ||
205 | * @name: name of the channel | ||
206 | * @vc: wrapped virtual channel | ||
207 | * @phy: the physical channel utilized by this channel, if there is one | ||
208 | * @runtime_addr: address for RX/TX according to the runtime config | ||
209 | * @at: active transaction on this channel | ||
210 | * @lock: a lock for this channel data | ||
211 | * @host: a pointer to the host (internal use) | ||
212 | * @state: whether the channel is idle, running etc | ||
213 | * @slave: whether this channel is a device (slave) or for memcpy | ||
214 | */ | ||
215 | struct s3c24xx_dma_chan { | ||
216 | int id; | ||
217 | const char *name; | ||
218 | struct virt_dma_chan vc; | ||
219 | struct s3c24xx_dma_phy *phy; | ||
220 | struct dma_slave_config cfg; | ||
221 | struct s3c24xx_txd *at; | ||
222 | struct s3c24xx_dma_engine *host; | ||
223 | enum s3c24xx_dma_chan_state state; | ||
224 | bool slave; | ||
225 | }; | ||
226 | |||
227 | /* | ||
228 | * struct s3c24xx_dma_engine - the local state holder for the S3C24XX | ||
229 | * @pdev: the corresponding platform device | ||
230 | * @pdata: platform data passed in from the platform/machine | ||
231 | * @base: virtual memory base (remapped) | ||
232 | * @slave: slave engine for this instance | ||
233 | * @memcpy: memcpy engine for this instance | ||
234 | * @phy_chans: array of data for the physical channels | ||
235 | */ | ||
236 | struct s3c24xx_dma_engine { | ||
237 | struct platform_device *pdev; | ||
238 | const struct s3c24xx_dma_platdata *pdata; | ||
239 | struct soc_data *sdata; | ||
240 | void __iomem *base; | ||
241 | struct dma_device slave; | ||
242 | struct dma_device memcpy; | ||
243 | struct s3c24xx_dma_phy *phy_chans; | ||
244 | }; | ||
245 | |||
246 | /* | ||
247 | * Physical channel handling | ||
248 | */ | ||
249 | |||
250 | /* | ||
251 | * Check whether a certain channel is busy or not. | ||
252 | */ | ||
253 | static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy) | ||
254 | { | ||
255 | unsigned int val = readl(phy->base + S3C24XX_DSTAT); | ||
256 | return val & S3C24XX_DSTAT_STAT_BUSY; | ||
257 | } | ||
258 | |||
259 | static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan, | ||
260 | struct s3c24xx_dma_phy *phy) | ||
261 | { | ||
262 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
263 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
264 | struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; | ||
265 | int phyvalid; | ||
266 | |||
267 | /* every phy is valid for memcpy channels */ | ||
268 | if (!s3cchan->slave) | ||
269 | return true; | ||
270 | |||
271 | /* On newer variants all phys can be used for all virtual channels */ | ||
272 | if (s3cdma->sdata->has_reqsel) | ||
273 | return true; | ||
274 | |||
275 | phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH)); | ||
276 | return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Allocate a physical channel for a virtual channel | ||
281 | * | ||
282 | * Try to locate a physical channel to be used for this transfer. If all | ||
283 | * are taken return NULL and the requester will have to cope by using | ||
284 | * some fallback PIO mode or retrying later. | ||
285 | */ | ||
286 | static | ||
287 | struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan) | ||
288 | { | ||
289 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
290 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
291 | struct s3c24xx_dma_channel *cdata; | ||
292 | struct s3c24xx_dma_phy *phy = NULL; | ||
293 | unsigned long flags; | ||
294 | int i; | ||
295 | int ret; | ||
296 | |||
297 | if (s3cchan->slave) | ||
298 | cdata = &pdata->channels[s3cchan->id]; | ||
299 | |||
300 | for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { | ||
301 | phy = &s3cdma->phy_chans[i]; | ||
302 | |||
303 | if (!phy->valid) | ||
304 | continue; | ||
305 | |||
306 | if (!s3c24xx_dma_phy_valid(s3cchan, phy)) | ||
307 | continue; | ||
308 | |||
309 | spin_lock_irqsave(&phy->lock, flags); | ||
310 | |||
311 | if (!phy->serving) { | ||
312 | phy->serving = s3cchan; | ||
313 | spin_unlock_irqrestore(&phy->lock, flags); | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | spin_unlock_irqrestore(&phy->lock, flags); | ||
318 | } | ||
319 | |||
320 | /* No physical channel available, cope with it */ | ||
321 | if (i == s3cdma->pdata->num_phy_channels) { | ||
322 | dev_warn(&s3cdma->pdev->dev, "no phy channel available\n"); | ||
323 | return NULL; | ||
324 | } | ||
325 | |||
326 | /* start the phy clock */ | ||
327 | if (s3cdma->sdata->has_clocks) { | ||
328 | ret = clk_enable(phy->clk); | ||
329 | if (ret) { | ||
330 | dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n", | ||
331 | phy->id, ret); | ||
332 | phy->serving = NULL; | ||
333 | return NULL; | ||
334 | } | ||
335 | } | ||
336 | |||
337 | return phy; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * Mark the physical channel as free. | ||
342 | * | ||
343 | * This drops the link between the physical and virtual channel. | ||
344 | */ | ||
345 | static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy) | ||
346 | { | ||
347 | struct s3c24xx_dma_engine *s3cdma = phy->host; | ||
348 | |||
349 | if (s3cdma->sdata->has_clocks) | ||
350 | clk_disable(phy->clk); | ||
351 | |||
352 | phy->serving = NULL; | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Stops the channel by writing the stop bit. | ||
357 | * This should not be used for an on-going transfer, but as a method of | ||
358 | * shutting down a channel (e.g., when it's no longer used) or terminating a | ||
359 | * transfer. | ||
360 | */ | ||
361 | static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy) | ||
362 | { | ||
363 | writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG); | ||
364 | } | ||
365 | |||
366 | /* | ||
367 | * Virtual channel handling | ||
368 | */ | ||
369 | |||
370 | static inline | ||
371 | struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan) | ||
372 | { | ||
373 | return container_of(chan, struct s3c24xx_dma_chan, vc.chan); | ||
374 | } | ||
375 | |||
376 | static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan) | ||
377 | { | ||
378 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
379 | struct s3c24xx_txd *txd = s3cchan->at; | ||
380 | u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK; | ||
381 | |||
382 | return tc * txd->width; | ||
383 | } | ||
384 | |||
385 | static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, | ||
386 | struct dma_slave_config *config) | ||
387 | { | ||
388 | if (!s3cchan->slave) | ||
389 | return -EINVAL; | ||
390 | |||
391 | /* Reject definitely invalid configurations */ | ||
392 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | ||
393 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
394 | return -EINVAL; | ||
395 | |||
396 | s3cchan->cfg = *config; | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Transfer handling | ||
403 | */ | ||
404 | |||
405 | static inline | ||
406 | struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx) | ||
407 | { | ||
408 | return container_of(tx, struct s3c24xx_txd, vd.tx); | ||
409 | } | ||
410 | |||
411 | static struct s3c24xx_txd *s3c24xx_dma_get_txd(void) | ||
412 | { | ||
413 | struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | ||
414 | |||
415 | if (txd) { | ||
416 | INIT_LIST_HEAD(&txd->dsg_list); | ||
417 | txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD; | ||
418 | } | ||
419 | |||
420 | return txd; | ||
421 | } | ||
422 | |||
423 | static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd) | ||
424 | { | ||
425 | struct s3c24xx_sg *dsg, *_dsg; | ||
426 | |||
427 | list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { | ||
428 | list_del(&dsg->node); | ||
429 | kfree(dsg); | ||
430 | } | ||
431 | |||
432 | kfree(txd); | ||
433 | } | ||
434 | |||
435 | static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan, | ||
436 | struct s3c24xx_txd *txd) | ||
437 | { | ||
438 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
439 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
440 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
441 | struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node); | ||
442 | u32 dcon = txd->dcon; | ||
443 | u32 val; | ||
444 | |||
445 | /* transfer-size and -count from len and width */ | ||
446 | switch (txd->width) { | ||
447 | case 1: | ||
448 | dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len; | ||
449 | break; | ||
450 | case 2: | ||
451 | dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2); | ||
452 | break; | ||
453 | case 4: | ||
454 | dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4); | ||
455 | break; | ||
456 | } | ||
457 | |||
458 | if (s3cchan->slave) { | ||
459 | struct s3c24xx_dma_channel *cdata = | ||
460 | &pdata->channels[s3cchan->id]; | ||
461 | |||
462 | if (s3cdma->sdata->has_reqsel) { | ||
463 | writel_relaxed((cdata->chansel << 1) | | ||
464 | S3C24XX_DMAREQSEL_HW, | ||
465 | phy->base + S3C24XX_DMAREQSEL); | ||
466 | } else { | ||
467 | int csel = cdata->chansel >> (phy->id * | ||
468 | S3C24XX_CHANSEL_WIDTH); | ||
469 | |||
470 | csel &= S3C24XX_CHANSEL_REQ_MASK; | ||
471 | dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT; | ||
472 | dcon |= S3C24XX_DCON_HWTRIG; | ||
473 | } | ||
474 | } else { | ||
475 | if (s3cdma->sdata->has_reqsel) | ||
476 | writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL); | ||
477 | } | ||
478 | |||
479 | writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC); | ||
480 | writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC); | ||
481 | writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST); | ||
482 | writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC); | ||
483 | writel_relaxed(dcon, phy->base + S3C24XX_DCON); | ||
484 | |||
485 | val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG); | ||
486 | val &= ~S3C24XX_DMASKTRIG_STOP; | ||
487 | val |= S3C24XX_DMASKTRIG_ON; | ||
488 | |||
489 | /* trigger the dma operation for memcpy transfers */ | ||
490 | if (!s3cchan->slave) | ||
491 | val |= S3C24XX_DMASKTRIG_SWTRIG; | ||
492 | |||
493 | writel(val, phy->base + S3C24XX_DMASKTRIG); | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Set the initial DMA register values and start first sg. | ||
498 | */ | ||
499 | static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) | ||
500 | { | ||
501 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
502 | struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc); | ||
503 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); | ||
504 | |||
505 | list_del(&txd->vd.node); | ||
506 | |||
507 | s3cchan->at = txd; | ||
508 | |||
509 | /* Wait for channel inactive */ | ||
510 | while (s3c24xx_dma_phy_busy(phy)) | ||
511 | cpu_relax(); | ||
512 | |||
513 | /* point to the first element of the sg list */ | ||
514 | txd->at = txd->dsg_list.next; | ||
515 | s3c24xx_dma_start_next_sg(s3cchan, txd); | ||
516 | } | ||
517 | |||
518 | static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, | ||
519 | struct s3c24xx_dma_chan *s3cchan) | ||
520 | { | ||
521 | LIST_HEAD(head); | ||
522 | |||
523 | vchan_get_all_descriptors(&s3cchan->vc, &head); | ||
524 | vchan_dma_desc_free_list(&s3cchan->vc, &head); | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Try to allocate a physical channel. When successful, assign it to | ||
529 | * this virtual channel, and initiate the next descriptor. The | ||
530 | * virtual channel lock must be held at this point. | ||
531 | */ | ||
532 | static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan) | ||
533 | { | ||
534 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
535 | struct s3c24xx_dma_phy *phy; | ||
536 | |||
537 | phy = s3c24xx_dma_get_phy(s3cchan); | ||
538 | if (!phy) { | ||
539 | dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n", | ||
540 | s3cchan->name); | ||
541 | s3cchan->state = S3C24XX_DMA_CHAN_WAITING; | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n", | ||
546 | phy->id, s3cchan->name); | ||
547 | |||
548 | s3cchan->phy = phy; | ||
549 | s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; | ||
550 | |||
551 | s3c24xx_dma_start_next_txd(s3cchan); | ||
552 | } | ||
553 | |||
554 | static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy, | ||
555 | struct s3c24xx_dma_chan *s3cchan) | ||
556 | { | ||
557 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
558 | |||
559 | dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n", | ||
560 | phy->id, s3cchan->name); | ||
561 | |||
562 | /* | ||
563 | * We do this without taking the lock; we're really only concerned | ||
564 | * about whether this pointer is NULL or not, and we're guaranteed | ||
565 | * that this will only be called when it _already_ is non-NULL. | ||
566 | */ | ||
567 | phy->serving = s3cchan; | ||
568 | s3cchan->phy = phy; | ||
569 | s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; | ||
570 | s3c24xx_dma_start_next_txd(s3cchan); | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Free a physical DMA channel, potentially reallocating it to another | ||
575 | * virtual channel if we have any pending. | ||
576 | */ | ||
577 | static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan) | ||
578 | { | ||
579 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
580 | struct s3c24xx_dma_chan *p, *next; | ||
581 | |||
582 | retry: | ||
583 | next = NULL; | ||
584 | |||
585 | /* Find a waiting virtual channel for the next transfer. */ | ||
586 | list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node) | ||
587 | if (p->state == S3C24XX_DMA_CHAN_WAITING) { | ||
588 | next = p; | ||
589 | break; | ||
590 | } | ||
591 | |||
592 | if (!next) { | ||
593 | list_for_each_entry(p, &s3cdma->slave.channels, | ||
594 | vc.chan.device_node) | ||
595 | if (p->state == S3C24XX_DMA_CHAN_WAITING && | ||
596 | s3c24xx_dma_phy_valid(p, s3cchan->phy)) { | ||
597 | next = p; | ||
598 | break; | ||
599 | } | ||
600 | } | ||
601 | |||
602 | /* Ensure that the physical channel is stopped */ | ||
603 | s3c24xx_dma_terminate_phy(s3cchan->phy); | ||
604 | |||
605 | if (next) { | ||
606 | bool success; | ||
607 | |||
608 | /* | ||
609 | * Eww. We know this isn't going to deadlock | ||
610 | * but lockdep probably doesn't. | ||
611 | */ | ||
612 | spin_lock(&next->vc.lock); | ||
613 | /* Re-check the state now that we have the lock */ | ||
614 | success = next->state == S3C24XX_DMA_CHAN_WAITING; | ||
615 | if (success) | ||
616 | s3c24xx_dma_phy_reassign_start(s3cchan->phy, next); | ||
617 | spin_unlock(&next->vc.lock); | ||
618 | |||
619 | /* If the state changed, try to find another channel */ | ||
620 | if (!success) | ||
621 | goto retry; | ||
622 | } else { | ||
623 | /* No more jobs, so free up the physical channel */ | ||
624 | s3c24xx_dma_put_phy(s3cchan->phy); | ||
625 | } | ||
626 | |||
627 | s3cchan->phy = NULL; | ||
628 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | ||
629 | } | ||
630 | |||
631 | static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd) | ||
632 | { | ||
633 | struct device *dev = txd->vd.tx.chan->device->dev; | ||
634 | struct s3c24xx_sg *dsg; | ||
635 | |||
636 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
637 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
638 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
639 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | ||
640 | DMA_TO_DEVICE); | ||
641 | else { | ||
642 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
643 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
644 | DMA_TO_DEVICE); | ||
645 | } | ||
646 | } | ||
647 | |||
648 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
649 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
650 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
651 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
652 | DMA_FROM_DEVICE); | ||
653 | else | ||
654 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
655 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
656 | DMA_FROM_DEVICE); | ||
657 | } | ||
658 | } | ||
659 | |||
660 | static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) | ||
661 | { | ||
662 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); | ||
663 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); | ||
664 | |||
665 | if (!s3cchan->slave) | ||
666 | s3c24xx_dma_unmap_buffers(txd); | ||
667 | |||
668 | s3c24xx_dma_free_txd(txd); | ||
669 | } | ||
670 | |||
671 | static irqreturn_t s3c24xx_dma_irq(int irq, void *data) | ||
672 | { | ||
673 | struct s3c24xx_dma_phy *phy = data; | ||
674 | struct s3c24xx_dma_chan *s3cchan = phy->serving; | ||
675 | struct s3c24xx_txd *txd; | ||
676 | |||
677 | dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id); | ||
678 | |||
679 | /* | ||
680 | * Interrupts happen to notify the completion of a transfer and the | ||
681 | * channel should have moved into its stop state already on its own. | ||
682 | * Therefore interrupts on channels not bound to a virtual channel | ||
683 | * should never happen. Nevertheless send a terminate command to the | ||
684 | * channel if the unlikely case happens. | ||
685 | */ | ||
686 | if (unlikely(!s3cchan)) { | ||
687 | dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n", | ||
688 | phy->id); | ||
689 | |||
690 | s3c24xx_dma_terminate_phy(phy); | ||
691 | |||
692 | return IRQ_HANDLED; | ||
693 | } | ||
694 | |||
695 | spin_lock(&s3cchan->vc.lock); | ||
696 | txd = s3cchan->at; | ||
697 | if (txd) { | ||
698 | /* when more sg's are in this txd, start the next one */ | ||
699 | if (!list_is_last(txd->at, &txd->dsg_list)) { | ||
700 | txd->at = txd->at->next; | ||
701 | s3c24xx_dma_start_next_sg(s3cchan, txd); | ||
702 | } else { | ||
703 | s3cchan->at = NULL; | ||
704 | vchan_cookie_complete(&txd->vd); | ||
705 | |||
706 | /* | ||
707 | * And start the next descriptor (if any), | ||
708 | * otherwise free this channel. | ||
709 | */ | ||
710 | if (vchan_next_desc(&s3cchan->vc)) | ||
711 | s3c24xx_dma_start_next_txd(s3cchan); | ||
712 | else | ||
713 | s3c24xx_dma_phy_free(s3cchan); | ||
714 | } | ||
715 | } | ||
716 | spin_unlock(&s3cchan->vc.lock); | ||
717 | |||
718 | return IRQ_HANDLED; | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * The DMA ENGINE API | ||
723 | */ | ||
724 | |||
725 | static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
726 | unsigned long arg) | ||
727 | { | ||
728 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
729 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
730 | unsigned long flags; | ||
731 | int ret = 0; | ||
732 | |||
733 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
734 | |||
735 | switch (cmd) { | ||
736 | case DMA_SLAVE_CONFIG: | ||
737 | ret = s3c24xx_dma_set_runtime_config(s3cchan, | ||
738 | (struct dma_slave_config *)arg); | ||
739 | break; | ||
740 | case DMA_TERMINATE_ALL: | ||
741 | if (!s3cchan->phy && !s3cchan->at) { | ||
742 | dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", | ||
743 | s3cchan->id); | ||
744 | ret = -EINVAL; | ||
745 | break; | ||
746 | } | ||
747 | |||
748 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | ||
749 | |||
750 | /* Mark physical channel as free */ | ||
751 | if (s3cchan->phy) | ||
752 | s3c24xx_dma_phy_free(s3cchan); | ||
753 | |||
754 | /* Dequeue current job */ | ||
755 | if (s3cchan->at) { | ||
756 | s3c24xx_dma_desc_free(&s3cchan->at->vd); | ||
757 | s3cchan->at = NULL; | ||
758 | } | ||
759 | |||
760 | /* Dequeue jobs not yet fired as well */ | ||
761 | s3c24xx_dma_free_txd_list(s3cdma, s3cchan); | ||
762 | break; | ||
763 | default: | ||
764 | /* Unknown command */ | ||
765 | ret = -ENXIO; | ||
766 | break; | ||
767 | } | ||
768 | |||
769 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
770 | |||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) | ||
775 | { | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) | ||
780 | { | ||
781 | /* Ensure all queued descriptors are freed */ | ||
782 | vchan_free_chan_resources(to_virt_chan(chan)); | ||
783 | } | ||
784 | |||
785 | static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, | ||
786 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
787 | { | ||
788 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
789 | struct s3c24xx_txd *txd; | ||
790 | struct s3c24xx_sg *dsg; | ||
791 | struct virt_dma_desc *vd; | ||
792 | unsigned long flags; | ||
793 | enum dma_status ret; | ||
794 | size_t bytes = 0; | ||
795 | |||
796 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
797 | ret = dma_cookie_status(chan, cookie, txstate); | ||
798 | if (ret == DMA_SUCCESS) { | ||
799 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
800 | return ret; | ||
801 | } | ||
802 | |||
803 | /* | ||
804 | * There's no point calculating the residue if there's | ||
805 | * no txstate to store the value. | ||
806 | */ | ||
807 | if (!txstate) { | ||
808 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
809 | return ret; | ||
810 | } | ||
811 | |||
812 | vd = vchan_find_desc(&s3cchan->vc, cookie); | ||
813 | if (vd) { | ||
814 | /* On the issued list, so hasn't been processed yet */ | ||
815 | txd = to_s3c24xx_txd(&vd->tx); | ||
816 | |||
817 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
818 | bytes += dsg->len; | ||
819 | } else { | ||
820 | /* | ||
821 | * Currently running, so sum over the pending sg's and | ||
822 | * the currently active one. | ||
823 | */ | ||
824 | txd = s3cchan->at; | ||
825 | |||
826 | dsg = list_entry(txd->at, struct s3c24xx_sg, node); | ||
827 | list_for_each_entry_from(dsg, &txd->dsg_list, node) | ||
828 | bytes += dsg->len; | ||
829 | |||
830 | bytes += s3c24xx_dma_getbytes_chan(s3cchan); | ||
831 | } | ||
832 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
833 | |||
834 | /* | ||
835 | * This cookie is not complete yet. | ||
836 | * Get number of bytes left in the active transactions and queue | ||
837 | */ | ||
838 | dma_set_residue(txstate, bytes); | ||
839 | |||
840 | /* Whether waiting or running, we're in progress */ | ||
841 | return ret; | ||
842 | } | ||
843 | |||
844 | /* | ||
845 | * Initialize a descriptor to be used by memcpy submit | ||
846 | */ | ||
847 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( | ||
848 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
849 | size_t len, unsigned long flags) | ||
850 | { | ||
851 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
852 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
853 | struct s3c24xx_txd *txd; | ||
854 | struct s3c24xx_sg *dsg; | ||
855 | int src_mod, dest_mod; | ||
856 | |||
857 | dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n", | ||
858 | len, s3cchan->name); | ||
859 | |||
860 | if ((len & S3C24XX_DCON_TC_MASK) != len) { | ||
861 | dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len); | ||
862 | return NULL; | ||
863 | } | ||
864 | |||
865 | txd = s3c24xx_dma_get_txd(); | ||
866 | if (!txd) | ||
867 | return NULL; | ||
868 | |||
869 | dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); | ||
870 | if (!dsg) { | ||
871 | s3c24xx_dma_free_txd(txd); | ||
872 | return NULL; | ||
873 | } | ||
874 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
875 | |||
876 | dsg->src_addr = src; | ||
877 | dsg->dst_addr = dest; | ||
878 | dsg->len = len; | ||
879 | |||
880 | /* | ||
881 | * Determine a suitable transfer width. | ||
882 | * The DMA controller cannot fetch/store information which is not | ||
883 | * naturally aligned on the bus, i.e., a 4 byte fetch must start at | ||
884 | * an address divisible by 4 - more generally addr % width must be 0. | ||
885 | */ | ||
886 | src_mod = src % 4; | ||
887 | dest_mod = dest % 4; | ||
888 | switch (len % 4) { | ||
889 | case 0: | ||
890 | txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1; | ||
891 | break; | ||
892 | case 2: | ||
893 | txd->width = ((src_mod == 2 || src_mod == 0) && | ||
894 | (dest_mod == 2 || dest_mod == 0)) ? 2 : 1; | ||
895 | break; | ||
896 | default: | ||
897 | txd->width = 1; | ||
898 | break; | ||
899 | } | ||
900 | |||
901 | txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; | ||
902 | txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; | ||
903 | txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK | | ||
904 | S3C24XX_DCON_SERV_WHOLE; | ||
905 | |||
906 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | ||
907 | } | ||
908 | |||
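A worked illustration of the width selection in s3c24xx_dma_prep_memcpy() above, using hypothetical addresses: for src = 0x30000002, dest = 0x30010000 and len = 6, we get len % 4 == 2, src % 4 == 2 and dest % 4 == 0, so txd->width becomes 2 and s3c24xx_dma_start_next_sg() programs three halfword moves (dsg->len / 2). Had either address been odd, the fallback would be width 1, i.e. six byte-wide moves.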
909 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( | ||
910 | struct dma_chan *chan, struct scatterlist *sgl, | ||
911 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
912 | unsigned long flags, void *context) | ||
913 | { | ||
914 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
915 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
916 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
917 | struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; | ||
918 | struct s3c24xx_txd *txd; | ||
919 | struct s3c24xx_sg *dsg; | ||
920 | struct scatterlist *sg; | ||
921 | dma_addr_t slave_addr; | ||
922 | u32 hwcfg = 0; | ||
923 | int tmp; | ||
924 | |||
925 | dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n", | ||
926 | sg_dma_len(sgl), s3cchan->name); | ||
927 | |||
928 | txd = s3c24xx_dma_get_txd(); | ||
929 | if (!txd) | ||
930 | return NULL; | ||
931 | |||
932 | if (cdata->handshake) | ||
933 | txd->dcon |= S3C24XX_DCON_HANDSHAKE; | ||
934 | |||
935 | switch (cdata->bus) { | ||
936 | case S3C24XX_DMA_APB: | ||
937 | txd->dcon |= S3C24XX_DCON_SYNC_PCLK; | ||
938 | hwcfg |= S3C24XX_DISRCC_LOC_APB; | ||
939 | break; | ||
940 | case S3C24XX_DMA_AHB: | ||
941 | txd->dcon |= S3C24XX_DCON_SYNC_HCLK; | ||
942 | hwcfg |= S3C24XX_DISRCC_LOC_AHB; | ||
943 | break; | ||
944 | } | ||
945 | |||
946 | /* | ||
947 | * Always assume our peripheral destination is a fixed | ||
948 | * address in memory. | ||
949 | */ | ||
950 | hwcfg |= S3C24XX_DISRCC_INC_FIXED; | ||
951 | |||
952 | /* | ||
953 | * Individual dma operations are requested by the slave, | ||
954 | * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). | ||
955 | */ | ||
956 | txd->dcon |= S3C24XX_DCON_SERV_SINGLE; | ||
957 | |||
958 | if (direction == DMA_MEM_TO_DEV) { | ||
959 | txd->disrcc = S3C24XX_DISRCC_LOC_AHB | | ||
960 | S3C24XX_DISRCC_INC_INCREMENT; | ||
961 | txd->didstc = hwcfg; | ||
962 | slave_addr = s3cchan->cfg.dst_addr; | ||
963 | txd->width = s3cchan->cfg.dst_addr_width; | ||
964 | } else if (direction == DMA_DEV_TO_MEM) { | ||
965 | txd->disrcc = hwcfg; | ||
966 | txd->didstc = S3C24XX_DIDSTC_LOC_AHB | | ||
967 | S3C24XX_DIDSTC_INC_INCREMENT; | ||
968 | slave_addr = s3cchan->cfg.src_addr; | ||
969 | txd->width = s3cchan->cfg.src_addr_width; | ||
970 | } else { | ||
971 | s3c24xx_dma_free_txd(txd); | ||
972 | dev_err(&s3cdma->pdev->dev, | ||
973 | "direction %d unsupported\n", direction); | ||
974 | return NULL; | ||
975 | } | ||
976 | |||
977 | for_each_sg(sgl, sg, sg_len, tmp) { | ||
978 | dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); | ||
979 | if (!dsg) { | ||
980 | s3c24xx_dma_free_txd(txd); | ||
981 | return NULL; | ||
982 | } | ||
983 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
984 | |||
985 | dsg->len = sg_dma_len(sg); | ||
986 | if (direction == DMA_MEM_TO_DEV) { | ||
987 | dsg->src_addr = sg_dma_address(sg); | ||
988 | dsg->dst_addr = slave_addr; | ||
989 | } else { /* DMA_DEV_TO_MEM */ | ||
990 | dsg->src_addr = slave_addr; | ||
991 | dsg->dst_addr = sg_dma_address(sg); | ||
992 | } | ||
993 | break; | ||
994 | } | ||
995 | |||
996 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | ||
997 | } | ||
998 | |||
999 | /* | ||
1000 | * Slave transactions callback to the slave device to allow | ||
1001 | * synchronization of slave DMA signals with the DMAC enable | ||
1002 | */ | ||
1003 | static void s3c24xx_dma_issue_pending(struct dma_chan *chan) | ||
1004 | { | ||
1005 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
1006 | unsigned long flags; | ||
1007 | |||
1008 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
1009 | if (vchan_issue_pending(&s3cchan->vc)) { | ||
1010 | if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING) | ||
1011 | s3c24xx_dma_phy_alloc_and_start(s3cchan); | ||
1012 | } | ||
1013 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
1014 | } | ||
1015 | |||
1016 | /* | ||
1017 | * Bringup and teardown | ||
1018 | */ | ||
1019 | |||
1020 | /* | ||
1021 | * Initialise the DMAC memcpy/slave channels. | ||
1022 | * Make a local wrapper to hold required data | ||
1023 | */ | ||
1024 | static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, | ||
1025 | struct dma_device *dmadev, unsigned int channels, bool slave) | ||
1026 | { | ||
1027 | struct s3c24xx_dma_chan *chan; | ||
1028 | int i; | ||
1029 | |||
1030 | INIT_LIST_HEAD(&dmadev->channels); | ||
1031 | |||
1032 | /* | ||
1033 | * Register as many memcpy channels as we have physical channels, | ||
1034 | * we won't always be able to use all but the code will have | ||
1035 | * to cope with that situation. | ||
1036 | */ | ||
1037 | for (i = 0; i < channels; i++) { | ||
1038 | chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); | ||
1039 | if (!chan) { | ||
1040 | dev_err(dmadev->dev, | ||
1041 | "%s no memory for channel\n", __func__); | ||
1042 | return -ENOMEM; | ||
1043 | } | ||
1044 | |||
1045 | chan->id = i; | ||
1046 | chan->host = s3cdma; | ||
1047 | chan->state = S3C24XX_DMA_CHAN_IDLE; | ||
1048 | |||
1049 | if (slave) { | ||
1050 | chan->slave = true; | ||
1051 | chan->name = kasprintf(GFP_KERNEL, "slave%d", i); | ||
1052 | if (!chan->name) | ||
1053 | return -ENOMEM; | ||
1054 | } else { | ||
1055 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | ||
1056 | if (!chan->name) | ||
1057 | return -ENOMEM; | ||
1058 | } | ||
1059 | dev_dbg(dmadev->dev, | ||
1060 | "initialize virtual channel \"%s\"\n", | ||
1061 | chan->name); | ||
1062 | |||
1063 | chan->vc.desc_free = s3c24xx_dma_desc_free; | ||
1064 | vchan_init(&chan->vc, dmadev); | ||
1065 | } | ||
1066 | dev_info(dmadev->dev, "initialized %d virtual %s channels\n", | ||
1067 | i, slave ? "slave" : "memcpy"); | ||
1068 | return i; | ||
1069 | } | ||
1070 | |||
1071 | static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) | ||
1072 | { | ||
1073 | struct s3c24xx_dma_chan *chan = NULL; | ||
1074 | struct s3c24xx_dma_chan *next; | ||
1075 | |||
1076 | list_for_each_entry_safe(chan, | ||
1077 | next, &dmadev->channels, vc.chan.device_node) | ||
1078 | list_del(&chan->vc.chan.device_node); | ||
1079 | } | ||
1080 | |||
1081 | /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ | ||
1082 | static struct soc_data soc_s3c2410 = { | ||
1083 | .stride = 0x40, | ||
1084 | .has_reqsel = false, | ||
1085 | .has_clocks = false, | ||
1086 | }; | ||
1087 | |||
1088 | /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */ | ||
1089 | static struct soc_data soc_s3c2412 = { | ||
1090 | .stride = 0x40, | ||
1091 | .has_reqsel = true, | ||
1092 | .has_clocks = true, | ||
1093 | }; | ||
1094 | |||
1095 | /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */ | ||
1096 | static struct soc_data soc_s3c2443 = { | ||
1097 | .stride = 0x100, | ||
1098 | .has_reqsel = true, | ||
1099 | .has_clocks = true, | ||
1100 | }; | ||
1101 | |||
1102 | static struct platform_device_id s3c24xx_dma_driver_ids[] = { | ||
1103 | { | ||
1104 | .name = "s3c2410-dma", | ||
1105 | .driver_data = (kernel_ulong_t)&soc_s3c2410, | ||
1106 | }, { | ||
1107 | .name = "s3c2412-dma", | ||
1108 | .driver_data = (kernel_ulong_t)&soc_s3c2412, | ||
1109 | }, { | ||
1110 | .name = "s3c2443-dma", | ||
1111 | .driver_data = (kernel_ulong_t)&soc_s3c2443, | ||
1112 | }, | ||
1113 | { }, | ||
1114 | }; | ||
1115 | |||
1116 | static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev) | ||
1117 | { | ||
1118 | return (struct soc_data *) | ||
1119 | platform_get_device_id(pdev)->driver_data; | ||
1120 | } | ||
1121 | |||
1122 | static int s3c24xx_dma_probe(struct platform_device *pdev) | ||
1123 | { | ||
1124 | const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); | ||
1125 | struct s3c24xx_dma_engine *s3cdma; | ||
1126 | struct soc_data *sdata; | ||
1127 | struct resource *res; | ||
1128 | int ret; | ||
1129 | int i; | ||
1130 | |||
1131 | if (!pdata) { | ||
1132 | dev_err(&pdev->dev, "platform data missing\n"); | ||
1133 | return -ENODEV; | ||
1134 | } | ||
1135 | |||
1136 | /* Basic sanity check */ | ||
1137 | if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { | ||
1138 | dev_err(&pdev->dev, "too many dma channels %d, max %d\n", | ||
1139 | pdata->num_phy_channels, MAX_DMA_CHANNELS); | ||
1140 | return -EINVAL; | ||
1141 | } | ||
1142 | |||
1143 | sdata = s3c24xx_dma_get_soc_data(pdev); | ||
1144 | if (!sdata) | ||
1145 | return -EINVAL; | ||
1146 | |||
1147 | s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL); | ||
1148 | if (!s3cdma) | ||
1149 | return -ENOMEM; | ||
1150 | |||
1151 | s3cdma->pdev = pdev; | ||
1152 | s3cdma->pdata = pdata; | ||
1153 | s3cdma->sdata = sdata; | ||
1154 | |||
1155 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1156 | s3cdma->base = devm_ioremap_resource(&pdev->dev, res); | ||
1157 | if (IS_ERR(s3cdma->base)) | ||
1158 | return PTR_ERR(s3cdma->base); | ||
1159 | |||
1160 | s3cdma->phy_chans = devm_kzalloc(&pdev->dev, | ||
1161 | sizeof(struct s3c24xx_dma_phy) * | ||
1162 | pdata->num_phy_channels, | ||
1163 | GFP_KERNEL); | ||
1164 | if (!s3cdma->phy_chans) | ||
1165 | return -ENOMEM; | ||
1166 | |||
1167 | /* acquire irqs and clocks for all physical channels */ | ||
1168 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1169 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1170 | char clk_name[6]; | ||
1171 | |||
1172 | phy->id = i; | ||
1173 | phy->base = s3cdma->base + (i * sdata->stride); | ||
1174 | phy->host = s3cdma; | ||
1175 | |||
1176 | phy->irq = platform_get_irq(pdev, i); | ||
1177 | if (phy->irq < 0) { | ||
1178 | dev_err(&pdev->dev, "failed to get irq %d, err %d\n", | ||
1179 | i, phy->irq); | ||
1180 | continue; | ||
1181 | } | ||
1182 | |||
1183 | ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq, | ||
1184 | 0, pdev->name, phy); | ||
1185 | if (ret) { | ||
1186 | dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n", | ||
1187 | i, ret); | ||
1188 | continue; | ||
1189 | } | ||
1190 | |||
1191 | if (sdata->has_clocks) { | ||
1192 | sprintf(clk_name, "dma.%d", i); | ||
1193 | phy->clk = devm_clk_get(&pdev->dev, clk_name); | ||
1194 | if (IS_ERR(phy->clk)) { | ||
1195 | dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n", | ||
1196 | i, PTR_ERR(phy->clk)); | ||
1197 | continue; | ||
1198 | } | ||
1199 | |||
1200 | ret = clk_prepare(phy->clk); | ||
1201 | if (ret) { | ||
1202 | dev_err(&pdev->dev, "clock for phy %d failed, error %d\n", | ||
1203 | i, ret); | ||
1204 | continue; | ||
1205 | } | ||
1206 | } | ||
1207 | |||
1208 | spin_lock_init(&phy->lock); | ||
1209 | phy->valid = true; | ||
1210 | |||
1211 | dev_dbg(&pdev->dev, "physical channel %d is %s\n", | ||
1212 | i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE"); | ||
1213 | } | ||
1214 | |||
1215 | /* Initialize memcpy engine */ | ||
1216 | dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); | ||
1217 | dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); | ||
1218 | s3cdma->memcpy.dev = &pdev->dev; | ||
1219 | s3cdma->memcpy.device_alloc_chan_resources = | ||
1220 | s3c24xx_dma_alloc_chan_resources; | ||
1221 | s3cdma->memcpy.device_free_chan_resources = | ||
1222 | s3c24xx_dma_free_chan_resources; | ||
1223 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; | ||
1224 | s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; | ||
1225 | s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; | ||
1226 | s3cdma->memcpy.device_control = s3c24xx_dma_control; | ||
1227 | |||
1228 | /* Initialize slave engine for SoC internal dedicated peripherals */ | ||
1229 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); | ||
1230 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); | ||
1231 | s3cdma->slave.dev = &pdev->dev; | ||
1232 | s3cdma->slave.device_alloc_chan_resources = | ||
1233 | s3c24xx_dma_alloc_chan_resources; | ||
1234 | s3cdma->slave.device_free_chan_resources = | ||
1235 | s3c24xx_dma_free_chan_resources; | ||
1236 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; | ||
1237 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; | ||
1238 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; | ||
1239 | s3cdma->slave.device_control = s3c24xx_dma_control; | ||
1240 | |||
1241 | /* Register as many memcpy channels as there are physical channels */ | ||
1242 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, | ||
1243 | pdata->num_phy_channels, false); | ||
1244 | if (ret <= 0) { | ||
1245 | dev_warn(&pdev->dev, | ||
1246 | "%s failed to enumerate memcpy channels - %d\n", | ||
1247 | __func__, ret); | ||
1248 | goto err_memcpy; | ||
1249 | } | ||
1250 | |||
1251 | /* Register slave channels */ | ||
1252 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave, | ||
1253 | pdata->num_channels, true); | ||
1254 | if (ret <= 0) { | ||
1255 | dev_warn(&pdev->dev, | ||
1256 | "%s failed to enumerate slave channels - %d\n", | ||
1257 | __func__, ret); | ||
1258 | goto err_slave; | ||
1259 | } | ||
1260 | |||
1261 | ret = dma_async_device_register(&s3cdma->memcpy); | ||
1262 | if (ret) { | ||
1263 | dev_warn(&pdev->dev, | ||
1264 | "%s failed to register memcpy as an async device - %d\n", | ||
1265 | __func__, ret); | ||
1266 | goto err_memcpy_reg; | ||
1267 | } | ||
1268 | |||
1269 | ret = dma_async_device_register(&s3cdma->slave); | ||
1270 | if (ret) { | ||
1271 | dev_warn(&pdev->dev, | ||
1272 | "%s failed to register slave as an async device - %d\n", | ||
1273 | __func__, ret); | ||
1274 | goto err_slave_reg; | ||
1275 | } | ||
1276 | |||
1277 | platform_set_drvdata(pdev, s3cdma); | ||
1278 | dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n", | ||
1279 | pdata->num_phy_channels); | ||
1280 | |||
1281 | return 0; | ||
1282 | |||
1283 | err_slave_reg: | ||
1284 | dma_async_device_unregister(&s3cdma->memcpy); | ||
1285 | err_memcpy_reg: | ||
1286 | s3c24xx_dma_free_virtual_channels(&s3cdma->slave); | ||
1287 | err_slave: | ||
1288 | s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); | ||
1289 | err_memcpy: | ||
1290 | if (sdata->has_clocks) | ||
1291 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1292 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1293 | if (phy->valid) | ||
1294 | clk_unprepare(phy->clk); | ||
1295 | } | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1300 | static int s3c24xx_dma_remove(struct platform_device *pdev) | ||
1301 | { | ||
1302 | const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); | ||
1303 | struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev); | ||
1304 | struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev); | ||
1305 | int i; | ||
1306 | |||
1307 | dma_async_device_unregister(&s3cdma->slave); | ||
1308 | dma_async_device_unregister(&s3cdma->memcpy); | ||
1309 | |||
1310 | s3c24xx_dma_free_virtual_channels(&s3cdma->slave); | ||
1311 | s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); | ||
1312 | |||
1313 | if (sdata->has_clocks) | ||
1314 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1315 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1316 | if (phy->valid) | ||
1317 | clk_unprepare(phy->clk); | ||
1318 | } | ||
1319 | |||
1320 | return 0; | ||
1321 | } | ||
1322 | |||
1323 | static struct platform_driver s3c24xx_dma_driver = { | ||
1324 | .driver = { | ||
1325 | .name = "s3c24xx-dma", | ||
1326 | .owner = THIS_MODULE, | ||
1327 | }, | ||
1328 | .id_table = s3c24xx_dma_driver_ids, | ||
1329 | .probe = s3c24xx_dma_probe, | ||
1330 | .remove = s3c24xx_dma_remove, | ||
1331 | }; | ||
1332 | |||
1333 | module_platform_driver(s3c24xx_dma_driver); | ||
1334 | |||
1335 | bool s3c24xx_dma_filter(struct dma_chan *chan, void *param) | ||
1336 | { | ||
1337 | struct s3c24xx_dma_chan *s3cchan; | ||
1338 | |||
1339 | if (chan->device->dev->driver != &s3c24xx_dma_driver.driver) | ||
1340 | return false; | ||
1341 | |||
1342 | s3cchan = to_s3c24xx_dma_chan(chan); | ||
1343 | |||
1344 | return s3cchan->id == (int)param; | ||
1345 | } | ||
1346 | EXPORT_SYMBOL(s3c24xx_dma_filter); | ||
1347 | |||
1348 | MODULE_DESCRIPTION("S3C24XX DMA Driver"); | ||
1349 | MODULE_AUTHOR("Heiko Stuebner"); | ||
1350 | MODULE_LICENSE("GPL v2"); | ||
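As a usage sketch (not part of this patch; names and values are placeholders): a client driver that knows the virtual slave channel id of its peripheral would typically obtain and configure a channel through the standard dmaengine API, using the filter exported above. Where the s3c24xx_dma_filter() prototype lives is assumed here; the rest of the call sequence uses only generic dmaengine helpers.

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* matches the export above; the real prototype is assumed to live in a header */
	extern bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);

	static struct dma_chan *example_request_rx_chan(dma_addr_t fifo_addr)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo_addr,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		int id = 0;	/* placeholder: virtual slave channel id of the peripheral */

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* s3c24xx_dma_filter() compares the channel id against 'param' */
		chan = dma_request_channel(mask, s3c24xx_dma_filter,
					   (void *)(unsigned long)id);
		if (!chan)
			return NULL;

		/* ends up in s3c24xx_dma_control(DMA_SLAVE_CONFIG) above */
		if (dmaengine_slave_config(chan, &cfg)) {
			dma_release_channel(chan);
			return NULL;
		}

		return chan;
	}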
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 17df6db5dca7..8847adf392b7 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c | |||
@@ -15,8 +15,9 @@ | |||
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | 18 | #include <linux/irq.h> | |
19 | #include <asm/mach/irq.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/platform_data/gpio-davinci.h> | ||
20 | 21 | ||
21 | struct davinci_gpio_regs { | 22 | struct davinci_gpio_regs { |
22 | u32 dir; | 23 | u32 dir; |
@@ -31,13 +32,14 @@ struct davinci_gpio_regs { | |||
31 | u32 intstat; | 32 | u32 intstat; |
32 | }; | 33 | }; |
33 | 34 | ||
35 | #define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */ | ||
36 | |||
34 | #define chip2controller(chip) \ | 37 | #define chip2controller(chip) \ |
35 | container_of(chip, struct davinci_gpio_controller, chip) | 38 | container_of(chip, struct davinci_gpio_controller, chip) |
36 | 39 | ||
37 | static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)]; | ||
38 | static void __iomem *gpio_base; | 40 | static void __iomem *gpio_base; |
39 | 41 | ||
40 | static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio) | 42 | static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio) |
41 | { | 43 | { |
42 | void __iomem *ptr; | 44 | void __iomem *ptr; |
43 | 45 | ||
@@ -65,7 +67,7 @@ static inline struct davinci_gpio_regs __iomem *irq2regs(int irq) | |||
65 | return g; | 67 | return g; |
66 | } | 68 | } |
67 | 69 | ||
68 | static int __init davinci_gpio_irq_setup(void); | 70 | static int davinci_gpio_irq_setup(struct platform_device *pdev); |
69 | 71 | ||
70 | /*--------------------------------------------------------------------------*/ | 72 | /*--------------------------------------------------------------------------*/ |
71 | 73 | ||
@@ -131,33 +133,53 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
131 | __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data); | 133 | __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data); |
132 | } | 134 | } |
133 | 135 | ||
134 | static int __init davinci_gpio_setup(void) | 136 | static int davinci_gpio_probe(struct platform_device *pdev) |
135 | { | 137 | { |
136 | int i, base; | 138 | int i, base; |
137 | unsigned ngpio; | 139 | unsigned ngpio; |
138 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 140 | struct davinci_gpio_controller *chips; |
139 | struct davinci_gpio_regs *regs; | 141 | struct davinci_gpio_platform_data *pdata; |
140 | 142 | struct davinci_gpio_regs __iomem *regs; | |
141 | if (soc_info->gpio_type != GPIO_TYPE_DAVINCI) | 143 | struct device *dev = &pdev->dev; |
142 | return 0; | 144 | struct resource *res; |
145 | |||
146 | pdata = dev->platform_data; | ||
147 | if (!pdata) { | ||
148 | dev_err(dev, "No platform data found\n"); | ||
149 | return -EINVAL; | ||
150 | } | ||
143 | 151 | ||
144 | /* | 152 | /* |
145 | * The gpio banks conceptually expose a segmented bitmap, | 153 | * The gpio banks conceptually expose a segmented bitmap, |
146 | * and "ngpio" is one more than the largest zero-based | 154 | * and "ngpio" is one more than the largest zero-based |
147 | * bit index that's valid. | 155 | * bit index that's valid. |
148 | */ | 156 | */ |
149 | ngpio = soc_info->gpio_num; | 157 | ngpio = pdata->ngpio; |
150 | if (ngpio == 0) { | 158 | if (ngpio == 0) { |
151 | pr_err("GPIO setup: how many GPIOs?\n"); | 159 | dev_err(dev, "How many GPIOs?\n"); |
152 | return -EINVAL; | 160 | return -EINVAL; |
153 | } | 161 | } |
154 | 162 | ||
155 | if (WARN_ON(DAVINCI_N_GPIO < ngpio)) | 163 | if (WARN_ON(DAVINCI_N_GPIO < ngpio)) |
156 | ngpio = DAVINCI_N_GPIO; | 164 | ngpio = DAVINCI_N_GPIO; |
157 | 165 | ||
158 | gpio_base = ioremap(soc_info->gpio_base, SZ_4K); | 166 | chips = devm_kzalloc(dev, |
159 | if (WARN_ON(!gpio_base)) | 167 | ngpio * sizeof(struct davinci_gpio_controller), |
168 | GFP_KERNEL); | ||
169 | if (!chips) { | ||
170 | dev_err(dev, "Memory allocation failed\n"); | ||
160 | return -ENOMEM; | 171 | return -ENOMEM; |
172 | } | ||
173 | |||
174 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
175 | if (!res) { | ||
176 | dev_err(dev, "Invalid memory resource\n"); | ||
177 | return -EBUSY; | ||
178 | } | ||
179 | |||
180 | gpio_base = devm_ioremap_resource(dev, res); | ||
181 | if (IS_ERR(gpio_base)) | ||
182 | return PTR_ERR(gpio_base); | ||
161 | 183 | ||
162 | for (i = 0, base = 0; base < ngpio; i++, base += 32) { | 184 | for (i = 0, base = 0; base < ngpio; i++, base += 32) { |
163 | chips[i].chip.label = "DaVinci"; | 185 | chips[i].chip.label = "DaVinci"; |
@@ -183,13 +205,10 @@ static int __init davinci_gpio_setup(void) | |||
183 | gpiochip_add(&chips[i].chip); | 205 | gpiochip_add(&chips[i].chip); |
184 | } | 206 | } |
185 | 207 | ||
186 | soc_info->gpio_ctlrs = chips; | 208 | platform_set_drvdata(pdev, chips); |
187 | soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32); | 209 | davinci_gpio_irq_setup(pdev); |
188 | |||
189 | davinci_gpio_irq_setup(); | ||
190 | return 0; | 210 | return 0; |
191 | } | 211 | } |
192 | pure_initcall(davinci_gpio_setup); | ||
193 | 212 | ||
194 | /*--------------------------------------------------------------------------*/ | 213 | /*--------------------------------------------------------------------------*/ |
195 | /* | 214 | /* |
@@ -302,13 +321,14 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset) | |||
302 | 321 | ||
303 | static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) | 322 | static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) |
304 | { | 323 | { |
305 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 324 | struct davinci_gpio_controller *d = chip2controller(chip); |
306 | 325 | ||
307 | /* NOTE: we assume for now that only irqs in the first gpio_chip | 326 | /* |
327 | * NOTE: we assume for now that only irqs in the first gpio_chip | ||
308 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). | 328 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). |
309 | */ | 329 | */ |
310 | if (offset < soc_info->gpio_unbanked) | 330 | if (offset < d->irq_base) |
311 | return soc_info->gpio_irq + offset; | 331 | return d->gpio_irq + offset; |
312 | else | 332 | else |
313 | return -ENODEV; | 333 | return -ENODEV; |
314 | } | 334 | } |
@@ -317,12 +337,11 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) | |||
317 | { | 337 | { |
318 | struct davinci_gpio_controller *d; | 338 | struct davinci_gpio_controller *d; |
319 | struct davinci_gpio_regs __iomem *g; | 339 | struct davinci_gpio_regs __iomem *g; |
320 | struct davinci_soc_info *soc_info = &davinci_soc_info; | ||
321 | u32 mask; | 340 | u32 mask; |
322 | 341 | ||
323 | d = (struct davinci_gpio_controller *)data->handler_data; | 342 | d = (struct davinci_gpio_controller *)data->handler_data; |
324 | g = (struct davinci_gpio_regs __iomem *)d->regs; | 343 | g = (struct davinci_gpio_regs __iomem *)d->regs; |
325 | mask = __gpio_mask(data->irq - soc_info->gpio_irq); | 344 | mask = __gpio_mask(data->irq - d->gpio_irq); |
326 | 345 | ||
327 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 346 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
328 | return -EINVAL; | 347 | return -EINVAL; |
@@ -343,24 +362,33 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) | |||
343 | * (dm6446) can be set appropriately for GPIOV33 pins. | 362 | * (dm6446) can be set appropriately for GPIOV33 pins. |
344 | */ | 363 | */ |
345 | 364 | ||
346 | static int __init davinci_gpio_irq_setup(void) | 365 | static int davinci_gpio_irq_setup(struct platform_device *pdev) |
347 | { | 366 | { |
348 | unsigned gpio, irq, bank; | 367 | unsigned gpio, irq, bank; |
349 | struct clk *clk; | 368 | struct clk *clk; |
350 | u32 binten = 0; | 369 | u32 binten = 0; |
351 | unsigned ngpio, bank_irq; | 370 | unsigned ngpio, bank_irq; |
352 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 371 | struct device *dev = &pdev->dev; |
353 | struct davinci_gpio_regs __iomem *g; | 372 | struct resource *res; |
373 | struct davinci_gpio_controller *chips = platform_get_drvdata(pdev); | ||
374 | struct davinci_gpio_platform_data *pdata = dev->platform_data; | ||
375 | struct davinci_gpio_regs __iomem *g; | ||
354 | 376 | ||
355 | ngpio = soc_info->gpio_num; | 377 | ngpio = pdata->ngpio; |
378 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
379 | if (!res) { | ||
380 | dev_err(dev, "Invalid IRQ resource\n"); | ||
381 | return -EBUSY; | ||
382 | } | ||
356 | 383 | ||
357 | bank_irq = soc_info->gpio_irq; | 384 | bank_irq = res->start; |
358 | if (bank_irq == 0) { | 385 | |
359 | printk(KERN_ERR "Don't know first GPIO bank IRQ.\n"); | 386 | if (!bank_irq) { |
360 | return -EINVAL; | 387 | dev_err(dev, "Invalid IRQ resource\n"); |
388 | return -ENODEV; | ||
361 | } | 389 | } |
362 | 390 | ||
363 | clk = clk_get(NULL, "gpio"); | 391 | clk = devm_clk_get(dev, "gpio"); |
364 | if (IS_ERR(clk)) { | 392 | if (IS_ERR(clk)) { |
365 | printk(KERN_ERR "Error %ld getting gpio clock?\n", | 393 | printk(KERN_ERR "Error %ld getting gpio clock?\n", |
366 | PTR_ERR(clk)); | 394 | PTR_ERR(clk)); |
@@ -368,16 +396,17 @@ static int __init davinci_gpio_irq_setup(void) | |||
368 | } | 396 | } |
369 | clk_prepare_enable(clk); | 397 | clk_prepare_enable(clk); |
370 | 398 | ||
371 | /* Arrange gpio_to_irq() support, handling either direct IRQs or | 399 | /* |
400 | * Arrange gpio_to_irq() support, handling either direct IRQs or | ||
372 | * banked IRQs. Having GPIOs in the first GPIO bank use direct | 401 | * banked IRQs. Having GPIOs in the first GPIO bank use direct |
373 | * IRQs, while the others use banked IRQs, would need some setup | 402 | * IRQs, while the others use banked IRQs, would need some setup |
374 | * tweaks to recognize hardware which can do that. | 403 | * tweaks to recognize hardware which can do that. |
375 | */ | 404 | */ |
376 | for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) { | 405 | for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) { |
377 | chips[bank].chip.to_irq = gpio_to_irq_banked; | 406 | chips[bank].chip.to_irq = gpio_to_irq_banked; |
378 | chips[bank].irq_base = soc_info->gpio_unbanked | 407 | chips[bank].irq_base = pdata->gpio_unbanked |
379 | ? -EINVAL | 408 | ? -EINVAL |
380 | : (soc_info->intc_irq_num + gpio); | 409 | : (pdata->intc_irq_num + gpio); |
381 | } | 410 | } |
382 | 411 | ||
383 | /* | 412 | /* |
@@ -385,7 +414,7 @@ static int __init davinci_gpio_irq_setup(void) | |||
385 | * controller only handling trigger modes. We currently assume no | 414 | * controller only handling trigger modes. We currently assume no |
386 | * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. | 415 | * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. |
387 | */ | 416 | */ |
388 | if (soc_info->gpio_unbanked) { | 417 | if (pdata->gpio_unbanked) { |
389 | static struct irq_chip_type gpio_unbanked; | 418 | static struct irq_chip_type gpio_unbanked; |
390 | 419 | ||
391 | /* pass "bank 0" GPIO IRQs to AINTC */ | 420 | /* pass "bank 0" GPIO IRQs to AINTC */ |
@@ -405,7 +434,7 @@ static int __init davinci_gpio_irq_setup(void) | |||
405 | __raw_writel(~0, &g->set_rising); | 434 | __raw_writel(~0, &g->set_rising); |
406 | 435 | ||
407 | /* set the direct IRQs up to use that irqchip */ | 436 | /* set the direct IRQs up to use that irqchip */ |
408 | for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) { | 437 | for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) { |
409 | irq_set_chip(irq, &gpio_unbanked.chip); | 438 | irq_set_chip(irq, &gpio_unbanked.chip); |
410 | irq_set_handler_data(irq, &chips[gpio / 32]); | 439 | irq_set_handler_data(irq, &chips[gpio / 32]); |
411 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); | 440 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); |
@@ -450,12 +479,31 @@ static int __init davinci_gpio_irq_setup(void) | |||
450 | } | 479 | } |
451 | 480 | ||
452 | done: | 481 | done: |
453 | /* BINTEN -- per-bank interrupt enable. genirq would also let these | 482 | /* |
483 | * BINTEN -- per-bank interrupt enable. genirq would also let these | ||
454 | * bits be set/cleared dynamically. | 484 | * bits be set/cleared dynamically. |
455 | */ | 485 | */ |
456 | __raw_writel(binten, gpio_base + 0x08); | 486 | __raw_writel(binten, gpio_base + BINTEN); |
457 | 487 | ||
458 | printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0)); | 488 | printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0)); |
459 | 489 | ||
460 | return 0; | 490 | return 0; |
461 | } | 491 | } |
492 | |||
493 | static struct platform_driver davinci_gpio_driver = { | ||
494 | .probe = davinci_gpio_probe, | ||
495 | .driver = { | ||
496 | .name = "davinci_gpio", | ||
497 | .owner = THIS_MODULE, | ||
498 | }, | ||
499 | }; | ||
500 | |||
501 | /** | ||
502 | * GPIO driver registration needs to be done before machine_init functions | ||
503 | * access GPIO. Hence davinci_gpio_drv_reg() is a postcore_initcall. | ||
504 | */ | ||
505 | static int __init davinci_gpio_drv_reg(void) | ||
506 | { | ||
507 | return platform_driver_register(&davinci_gpio_driver); | ||
508 | } | ||
509 | postcore_initcall(davinci_gpio_drv_reg); | ||
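For context, here is a hedged board-file sketch (not part of this patch) of how machine code might now hand the GPIO block to the new "davinci_gpio" platform driver. Only the driver name and the davinci_gpio_platform_data fields come from the patch; the register base, IRQ number and counts below are placeholders.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/platform_data/gpio-davinci.h>

static struct resource board_gpio_resources[] = {
	{
		/* placeholder register window for the GPIO controller */
		.start	= 0x01c67000,
		.end	= 0x01c67000 + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		/* placeholder bank 0 IRQ, read back as IORESOURCE_IRQ 0 above */
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct davinci_gpio_platform_data board_gpio_pdata = {
	.ngpio		= 64,	/* placeholder GPIO count */
	.gpio_unbanked	= 0,	/* no unbanked (direct) GPIO IRQs */
	.intc_irq_num	= 128,	/* placeholder: banked GPIO IRQs numbered after the AINTC range */
};

static int __init board_gpio_register(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_resndata(NULL, "davinci_gpio", -1,
			board_gpio_resources, ARRAY_SIZE(board_gpio_resources),
			&board_gpio_pdata, sizeof(board_gpio_pdata));

	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}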
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c index 3fa3e2867e19..58445bb69106 100644 --- a/drivers/gpio/gpio-tnetv107x.c +++ b/drivers/gpio/gpio-tnetv107x.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
18 | #include <linux/platform_data/gpio-davinci.h> | ||
18 | 19 | ||
19 | #include <mach/common.h> | 20 | #include <mach/common.h> |
20 | #include <mach/tnetv107x.h> | 21 | #include <mach/tnetv107x.h> |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index bb328a366122..433cc8568dec 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -21,7 +21,10 @@ | |||
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
23 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
24 | #include <linux/of_pci.h> | ||
24 | #include <linux/irqdomain.h> | 25 | #include <linux/irqdomain.h> |
26 | #include <linux/slab.h> | ||
27 | #include <linux/msi.h> | ||
25 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
26 | #include <asm/exception.h> | 29 | #include <asm/exception.h> |
27 | #include <asm/smp_plat.h> | 30 | #include <asm/smp_plat.h> |
@@ -51,12 +54,22 @@ | |||
51 | #define IPI_DOORBELL_START (0) | 54 | #define IPI_DOORBELL_START (0) |
52 | #define IPI_DOORBELL_END (8) | 55 | #define IPI_DOORBELL_END (8) |
53 | #define IPI_DOORBELL_MASK 0xFF | 56 | #define IPI_DOORBELL_MASK 0xFF |
57 | #define PCI_MSI_DOORBELL_START (16) | ||
58 | #define PCI_MSI_DOORBELL_NR (16) | ||
59 | #define PCI_MSI_DOORBELL_END (32) | ||
60 | #define PCI_MSI_DOORBELL_MASK 0xFFFF0000 | ||
54 | 61 | ||
55 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | 62 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
56 | 63 | ||
57 | static void __iomem *per_cpu_int_base; | 64 | static void __iomem *per_cpu_int_base; |
58 | static void __iomem *main_int_base; | 65 | static void __iomem *main_int_base; |
59 | static struct irq_domain *armada_370_xp_mpic_domain; | 66 | static struct irq_domain *armada_370_xp_mpic_domain; |
67 | #ifdef CONFIG_PCI_MSI | ||
68 | static struct irq_domain *armada_370_xp_msi_domain; | ||
69 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | ||
70 | static DEFINE_MUTEX(msi_used_lock); | ||
71 | static phys_addr_t msi_doorbell_addr; | ||
72 | #endif | ||
60 | 73 | ||
61 | /* | 74 | /* |
62 | * In SMP mode: | 75 | * In SMP mode: |
@@ -87,6 +100,144 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) | |||
87 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | 100 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); |
88 | } | 101 | } |
89 | 102 | ||
103 | #ifdef CONFIG_PCI_MSI | ||
104 | |||
105 | static int armada_370_xp_alloc_msi(void) | ||
106 | { | ||
107 | int hwirq; | ||
108 | |||
109 | mutex_lock(&msi_used_lock); | ||
110 | hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR); | ||
111 | if (hwirq >= PCI_MSI_DOORBELL_NR) | ||
112 | hwirq = -ENOSPC; | ||
113 | else | ||
114 | set_bit(hwirq, msi_used); | ||
115 | mutex_unlock(&msi_used_lock); | ||
116 | |||
117 | return hwirq; | ||
118 | } | ||
119 | |||
120 | static void armada_370_xp_free_msi(int hwirq) | ||
121 | { | ||
122 | mutex_lock(&msi_used_lock); | ||
123 | if (!test_bit(hwirq, msi_used)) | ||
124 | pr_err("trying to free unused MSI#%d\n", hwirq); | ||
125 | else | ||
126 | clear_bit(hwirq, msi_used); | ||
127 | mutex_unlock(&msi_used_lock); | ||
128 | } | ||
129 | |||
130 | static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, | ||
131 | struct pci_dev *pdev, | ||
132 | struct msi_desc *desc) | ||
133 | { | ||
134 | struct msi_msg msg; | ||
135 | irq_hw_number_t hwirq; | ||
136 | int virq; | ||
137 | |||
138 | hwirq = armada_370_xp_alloc_msi(); | ||
139 | if (hwirq < 0) | ||
140 | return hwirq; | ||
141 | |||
142 | virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq); | ||
143 | if (!virq) { | ||
144 | armada_370_xp_free_msi(hwirq); | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | |||
148 | irq_set_msi_desc(virq, desc); | ||
149 | |||
150 | msg.address_lo = msi_doorbell_addr; | ||
151 | msg.address_hi = 0; | ||
152 | msg.data = 0xf00 | (hwirq + 16); | ||
153 | |||
154 | write_msi_msg(virq, &msg); | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, | ||
159 | unsigned int irq) | ||
160 | { | ||
161 | struct irq_data *d = irq_get_irq_data(irq); | ||
162 | irq_dispose_mapping(irq); | ||
163 | armada_370_xp_free_msi(d->hwirq); | ||
164 | } | ||
165 | |||
166 | static struct irq_chip armada_370_xp_msi_irq_chip = { | ||
167 | .name = "armada_370_xp_msi_irq", | ||
168 | .irq_enable = unmask_msi_irq, | ||
169 | .irq_disable = mask_msi_irq, | ||
170 | .irq_mask = mask_msi_irq, | ||
171 | .irq_unmask = unmask_msi_irq, | ||
172 | }; | ||
173 | |||
174 | static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, | ||
175 | irq_hw_number_t hw) | ||
176 | { | ||
177 | irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, | ||
178 | handle_simple_irq); | ||
179 | set_irq_flags(virq, IRQF_VALID); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { | ||
185 | .map = armada_370_xp_msi_map, | ||
186 | }; | ||
187 | |||
188 | static int armada_370_xp_msi_init(struct device_node *node, | ||
189 | phys_addr_t main_int_phys_base) | ||
190 | { | ||
191 | struct msi_chip *msi_chip; | ||
192 | u32 reg; | ||
193 | int ret; | ||
194 | |||
195 | msi_doorbell_addr = main_int_phys_base + | ||
196 | ARMADA_370_XP_SW_TRIG_INT_OFFS; | ||
197 | |||
198 | msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL); | ||
199 | if (!msi_chip) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | msi_chip->setup_irq = armada_370_xp_setup_msi_irq; | ||
203 | msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; | ||
204 | msi_chip->of_node = node; | ||
205 | |||
206 | armada_370_xp_msi_domain = | ||
207 | irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, | ||
208 | &armada_370_xp_msi_irq_ops, | ||
209 | NULL); | ||
210 | if (!armada_370_xp_msi_domain) { | ||
211 | kfree(msi_chip); | ||
212 | return -ENOMEM; | ||
213 | } | ||
214 | |||
215 | ret = of_pci_msi_chip_add(msi_chip); | ||
216 | if (ret < 0) { | ||
217 | irq_domain_remove(armada_370_xp_msi_domain); | ||
218 | kfree(msi_chip); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) | ||
223 | | PCI_MSI_DOORBELL_MASK; | ||
224 | |||
225 | writel(reg, per_cpu_int_base + | ||
226 | ARMADA_370_XP_IN_DRBEL_MSK_OFFS); | ||
227 | |||
228 | /* Unmask IPI interrupt */ | ||
229 | writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | #else | ||
234 | static inline int armada_370_xp_msi_init(struct device_node *node, | ||
235 | phys_addr_t main_int_phys_base) | ||
236 | { | ||
237 | return 0; | ||
238 | } | ||
239 | #endif | ||
240 | |||
90 | #ifdef CONFIG_SMP | 241 | #ifdef CONFIG_SMP |
91 | static int armada_xp_set_affinity(struct irq_data *d, | 242 | static int armada_xp_set_affinity(struct irq_data *d, |
92 | const struct cpumask *mask_val, bool force) | 243 | const struct cpumask *mask_val, bool force) |
@@ -214,12 +365,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs) | |||
214 | if (irqnr > 1022) | 365 | if (irqnr > 1022) |
215 | break; | 366 | break; |
216 | 367 | ||
217 | if (irqnr > 0) { | 368 | if (irqnr > 1) { |
218 | irqnr = irq_find_mapping(armada_370_xp_mpic_domain, | 369 | irqnr = irq_find_mapping(armada_370_xp_mpic_domain, |
219 | irqnr); | 370 | irqnr); |
220 | handle_IRQ(irqnr, regs); | 371 | handle_IRQ(irqnr, regs); |
221 | continue; | 372 | continue; |
222 | } | 373 | } |
374 | |||
375 | #ifdef CONFIG_PCI_MSI | ||
376 | /* MSI handling */ | ||
377 | if (irqnr == 1) { | ||
378 | u32 msimask, msinr; | ||
379 | |||
380 | msimask = readl_relaxed(per_cpu_int_base + | ||
381 | ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) | ||
382 | & PCI_MSI_DOORBELL_MASK; | ||
383 | |||
384 | writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + | ||
385 | ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); | ||
386 | |||
387 | for (msinr = PCI_MSI_DOORBELL_START; | ||
388 | msinr < PCI_MSI_DOORBELL_END; msinr++) { | ||
389 | int irq; | ||
390 | |||
391 | if (!(msimask & BIT(msinr))) | ||
392 | continue; | ||
393 | |||
394 | irq = irq_find_mapping(armada_370_xp_msi_domain, | ||
395 | msinr - 16); | ||
396 | handle_IRQ(irq, regs); | ||
397 | } | ||
398 | } | ||
399 | #endif | ||
400 | |||
223 | #ifdef CONFIG_SMP | 401 | #ifdef CONFIG_SMP |
224 | /* IPI Handling */ | 402 | /* IPI Handling */ |
225 | if (irqnr == 0) { | 403 | if (irqnr == 0) { |
@@ -248,12 +426,25 @@ armada_370_xp_handle_irq(struct pt_regs *regs) | |||
248 | static int __init armada_370_xp_mpic_of_init(struct device_node *node, | 426 | static int __init armada_370_xp_mpic_of_init(struct device_node *node, |
249 | struct device_node *parent) | 427 | struct device_node *parent) |
250 | { | 428 | { |
429 | struct resource main_int_res, per_cpu_int_res; | ||
251 | u32 control; | 430 | u32 control; |
252 | 431 | ||
253 | main_int_base = of_iomap(node, 0); | 432 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
254 | per_cpu_int_base = of_iomap(node, 1); | 433 | BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res)); |
434 | |||
435 | BUG_ON(!request_mem_region(main_int_res.start, | ||
436 | resource_size(&main_int_res), | ||
437 | node->full_name)); | ||
438 | BUG_ON(!request_mem_region(per_cpu_int_res.start, | ||
439 | resource_size(&per_cpu_int_res), | ||
440 | node->full_name)); | ||
255 | 441 | ||
442 | main_int_base = ioremap(main_int_res.start, | ||
443 | resource_size(&main_int_res)); | ||
256 | BUG_ON(!main_int_base); | 444 | BUG_ON(!main_int_base); |
445 | |||
446 | per_cpu_int_base = ioremap(per_cpu_int_res.start, | ||
447 | resource_size(&per_cpu_int_res)); | ||
257 | BUG_ON(!per_cpu_int_base); | 448 | BUG_ON(!per_cpu_int_base); |
258 | 449 | ||
259 | control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); | 450 | control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); |
@@ -262,8 +453,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
262 | irq_domain_add_linear(node, (control >> 2) & 0x3ff, | 453 | irq_domain_add_linear(node, (control >> 2) & 0x3ff, |
263 | &armada_370_xp_mpic_irq_ops, NULL); | 454 | &armada_370_xp_mpic_irq_ops, NULL); |
264 | 455 | ||
265 | if (!armada_370_xp_mpic_domain) | 456 | BUG_ON(!armada_370_xp_mpic_domain); |
266 | panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n"); | ||
267 | 457 | ||
268 | irq_set_default_host(armada_370_xp_mpic_domain); | 458 | irq_set_default_host(armada_370_xp_mpic_domain); |
269 | 459 | ||
@@ -280,6 +470,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
280 | 470 | ||
281 | #endif | 471 | #endif |
282 | 472 | ||
473 | armada_370_xp_msi_init(node, main_int_res.start); | ||
474 | |||
283 | set_handle_irq(armada_370_xp_handle_irq); | 475 | set_handle_irq(armada_370_xp_handle_irq); |
284 | 476 | ||
285 | return 0; | 477 | return 0; |
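On the consumer side, a sketch (not in the patch) of how an ordinary PCIe endpoint driver ends up using this doorbell-based MSI path: with the msi_chip above registered and referenced through the "msi-parent" DT property, the driver only calls the generic PCI MSI API. example_probe() and example_handler() are hypothetical names.

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *data)
{
	/* entered when the device writes its MSI message to the MPIC doorbell */
	return IRQ_HANDLED;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_msi(pdev);	/* serviced by armada_370_xp_setup_msi_irq() */
	if (ret)
		return ret;

	return request_irq(pdev->irq, example_handler, 0, "example", pdev);
}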
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 3d9504811126..43186feb4294 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -3,7 +3,7 @@ menu "PCI host controller drivers" | |||
3 | 3 | ||
4 | config PCI_MVEBU | 4 | config PCI_MVEBU |
5 | bool "Marvell EBU PCIe controller" | 5 | bool "Marvell EBU PCIe controller" |
6 | depends on ARCH_MVEBU || ARCH_KIRKWOOD | 6 | depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD |
7 | depends on OF | 7 | depends on OF |
8 | 8 | ||
9 | config PCIE_DW | 9 | config PCIE_DW |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 729d5a101d62..80b2250ea19a 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -9,13 +9,17 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/delay.h> | ||
13 | #include <linux/gpio.h> | ||
12 | #include <linux/module.h> | 14 | #include <linux/module.h> |
13 | #include <linux/mbus.h> | 15 | #include <linux/mbus.h> |
16 | #include <linux/msi.h> | ||
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
15 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
16 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
17 | #include <linux/of_pci.h> | ||
18 | #include <linux/of_irq.h> | 20 | #include <linux/of_irq.h> |
21 | #include <linux/of_gpio.h> | ||
22 | #include <linux/of_pci.h> | ||
19 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
20 | 24 | ||
21 | /* | 25 | /* |
@@ -103,6 +107,7 @@ struct mvebu_pcie_port; | |||
103 | struct mvebu_pcie { | 107 | struct mvebu_pcie { |
104 | struct platform_device *pdev; | 108 | struct platform_device *pdev; |
105 | struct mvebu_pcie_port *ports; | 109 | struct mvebu_pcie_port *ports; |
110 | struct msi_chip *msi; | ||
106 | struct resource io; | 111 | struct resource io; |
107 | struct resource realio; | 112 | struct resource realio; |
108 | struct resource mem; | 113 | struct resource mem; |
@@ -115,7 +120,6 @@ struct mvebu_pcie_port { | |||
115 | char *name; | 120 | char *name; |
116 | void __iomem *base; | 121 | void __iomem *base; |
117 | spinlock_t conf_lock; | 122 | spinlock_t conf_lock; |
118 | int haslink; | ||
119 | u32 port; | 123 | u32 port; |
120 | u32 lane; | 124 | u32 lane; |
121 | int devfn; | 125 | int devfn; |
@@ -124,6 +128,9 @@ struct mvebu_pcie_port { | |||
124 | unsigned int io_target; | 128 | unsigned int io_target; |
125 | unsigned int io_attr; | 129 | unsigned int io_attr; |
126 | struct clk *clk; | 130 | struct clk *clk; |
131 | int reset_gpio; | ||
132 | int reset_active_low; | ||
133 | char *reset_name; | ||
127 | struct mvebu_sw_pci_bridge bridge; | 134 | struct mvebu_sw_pci_bridge bridge; |
128 | struct device_node *dn; | 135 | struct device_node *dn; |
129 | struct mvebu_pcie *pcie; | 136 | struct mvebu_pcie *pcie; |
@@ -133,29 +140,39 @@ struct mvebu_pcie_port { | |||
133 | size_t iowin_size; | 140 | size_t iowin_size; |
134 | }; | 141 | }; |
135 | 142 | ||
143 | static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) | ||
144 | { | ||
145 | writel(val, port->base + reg); | ||
146 | } | ||
147 | |||
148 | static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg) | ||
149 | { | ||
150 | return readl(port->base + reg); | ||
151 | } | ||
152 | |||
136 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) | 153 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) |
137 | { | 154 | { |
138 | return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); | 155 | return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); |
139 | } | 156 | } |
140 | 157 | ||
141 | static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) | 158 | static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) |
142 | { | 159 | { |
143 | u32 stat; | 160 | u32 stat; |
144 | 161 | ||
145 | stat = readl(port->base + PCIE_STAT_OFF); | 162 | stat = mvebu_readl(port, PCIE_STAT_OFF); |
146 | stat &= ~PCIE_STAT_BUS; | 163 | stat &= ~PCIE_STAT_BUS; |
147 | stat |= nr << 8; | 164 | stat |= nr << 8; |
148 | writel(stat, port->base + PCIE_STAT_OFF); | 165 | mvebu_writel(port, stat, PCIE_STAT_OFF); |
149 | } | 166 | } |
150 | 167 | ||
151 | static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) | 168 | static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) |
152 | { | 169 | { |
153 | u32 stat; | 170 | u32 stat; |
154 | 171 | ||
155 | stat = readl(port->base + PCIE_STAT_OFF); | 172 | stat = mvebu_readl(port, PCIE_STAT_OFF); |
156 | stat &= ~PCIE_STAT_DEV; | 173 | stat &= ~PCIE_STAT_DEV; |
157 | stat |= nr << 16; | 174 | stat |= nr << 16; |
158 | writel(stat, port->base + PCIE_STAT_OFF); | 175 | mvebu_writel(port, stat, PCIE_STAT_OFF); |
159 | } | 176 | } |
160 | 177 | ||
161 | /* | 178 | /* |
@@ -163,7 +180,7 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) | |||
163 | * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks | 180 | * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks |
164 | * WIN[0-3] -> DRAM bank[0-3] | 181 | * WIN[0-3] -> DRAM bank[0-3] |
165 | */ | 182 | */ |
166 | static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | 183 | static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) |
167 | { | 184 | { |
168 | const struct mbus_dram_target_info *dram; | 185 | const struct mbus_dram_target_info *dram; |
169 | u32 size; | 186 | u32 size; |
@@ -173,33 +190,34 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | |||
173 | 190 | ||
174 | /* First, disable and clear BARs and windows. */ | 191 | /* First, disable and clear BARs and windows. */ |
175 | for (i = 1; i < 3; i++) { | 192 | for (i = 1; i < 3; i++) { |
176 | writel(0, port->base + PCIE_BAR_CTRL_OFF(i)); | 193 | mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); |
177 | writel(0, port->base + PCIE_BAR_LO_OFF(i)); | 194 | mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); |
178 | writel(0, port->base + PCIE_BAR_HI_OFF(i)); | 195 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); |
179 | } | 196 | } |
180 | 197 | ||
181 | for (i = 0; i < 5; i++) { | 198 | for (i = 0; i < 5; i++) { |
182 | writel(0, port->base + PCIE_WIN04_CTRL_OFF(i)); | 199 | mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); |
183 | writel(0, port->base + PCIE_WIN04_BASE_OFF(i)); | 200 | mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); |
184 | writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); | 201 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); |
185 | } | 202 | } |
186 | 203 | ||
187 | writel(0, port->base + PCIE_WIN5_CTRL_OFF); | 204 | mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); |
188 | writel(0, port->base + PCIE_WIN5_BASE_OFF); | 205 | mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); |
189 | writel(0, port->base + PCIE_WIN5_REMAP_OFF); | 206 | mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); |
190 | 207 | ||
191 | /* Setup windows for DDR banks. Count total DDR size on the fly. */ | 208 | /* Setup windows for DDR banks. Count total DDR size on the fly. */ |
192 | size = 0; | 209 | size = 0; |
193 | for (i = 0; i < dram->num_cs; i++) { | 210 | for (i = 0; i < dram->num_cs; i++) { |
194 | const struct mbus_dram_window *cs = dram->cs + i; | 211 | const struct mbus_dram_window *cs = dram->cs + i; |
195 | 212 | ||
196 | writel(cs->base & 0xffff0000, | 213 | mvebu_writel(port, cs->base & 0xffff0000, |
197 | port->base + PCIE_WIN04_BASE_OFF(i)); | 214 | PCIE_WIN04_BASE_OFF(i)); |
198 | writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); | 215 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); |
199 | writel(((cs->size - 1) & 0xffff0000) | | 216 | mvebu_writel(port, |
200 | (cs->mbus_attr << 8) | | 217 | ((cs->size - 1) & 0xffff0000) | |
201 | (dram->mbus_dram_target_id << 4) | 1, | 218 | (cs->mbus_attr << 8) | |
202 | port->base + PCIE_WIN04_CTRL_OFF(i)); | 219 | (dram->mbus_dram_target_id << 4) | 1, |
220 | PCIE_WIN04_CTRL_OFF(i)); | ||
203 | 221 | ||
204 | size += cs->size; | 222 | size += cs->size; |
205 | } | 223 | } |
@@ -209,41 +227,40 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | |||
209 | size = 1 << fls(size); | 227 | size = 1 << fls(size); |
210 | 228 | ||
211 | /* Setup BAR[1] to all DRAM banks. */ | 229 | /* Setup BAR[1] to all DRAM banks. */ |
212 | writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1)); | 230 | mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); |
213 | writel(0, port->base + PCIE_BAR_HI_OFF(1)); | 231 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); |
214 | writel(((size - 1) & 0xffff0000) | 1, | 232 | mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, |
215 | port->base + PCIE_BAR_CTRL_OFF(1)); | 233 | PCIE_BAR_CTRL_OFF(1)); |
216 | } | 234 | } |
217 | 235 | ||
218 | static void __init mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) | 236 | static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) |
219 | { | 237 | { |
220 | u16 cmd; | 238 | u32 cmd, mask; |
221 | u32 mask; | ||
222 | 239 | ||
223 | /* Point PCIe unit MBUS decode windows to DRAM space. */ | 240 | /* Point PCIe unit MBUS decode windows to DRAM space. */ |
224 | mvebu_pcie_setup_wins(port); | 241 | mvebu_pcie_setup_wins(port); |
225 | 242 | ||
226 | /* Master + slave enable. */ | 243 | /* Master + slave enable. */ |
227 | cmd = readw(port->base + PCIE_CMD_OFF); | 244 | cmd = mvebu_readl(port, PCIE_CMD_OFF); |
228 | cmd |= PCI_COMMAND_IO; | 245 | cmd |= PCI_COMMAND_IO; |
229 | cmd |= PCI_COMMAND_MEMORY; | 246 | cmd |= PCI_COMMAND_MEMORY; |
230 | cmd |= PCI_COMMAND_MASTER; | 247 | cmd |= PCI_COMMAND_MASTER; |
231 | writew(cmd, port->base + PCIE_CMD_OFF); | 248 | mvebu_writel(port, cmd, PCIE_CMD_OFF); |
232 | 249 | ||
233 | /* Enable interrupt lines A-D. */ | 250 | /* Enable interrupt lines A-D. */ |
234 | mask = readl(port->base + PCIE_MASK_OFF); | 251 | mask = mvebu_readl(port, PCIE_MASK_OFF); |
235 | mask |= PCIE_MASK_ENABLE_INTS; | 252 | mask |= PCIE_MASK_ENABLE_INTS; |
236 | writel(mask, port->base + PCIE_MASK_OFF); | 253 | mvebu_writel(port, mask, PCIE_MASK_OFF); |
237 | } | 254 | } |
238 | 255 | ||
239 | static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, | 256 | static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, |
240 | struct pci_bus *bus, | 257 | struct pci_bus *bus, |
241 | u32 devfn, int where, int size, u32 *val) | 258 | u32 devfn, int where, int size, u32 *val) |
242 | { | 259 | { |
243 | writel(PCIE_CONF_ADDR(bus->number, devfn, where), | 260 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), |
244 | port->base + PCIE_CONF_ADDR_OFF); | 261 | PCIE_CONF_ADDR_OFF); |
245 | 262 | ||
246 | *val = readl(port->base + PCIE_CONF_DATA_OFF); | 263 | *val = mvebu_readl(port, PCIE_CONF_DATA_OFF); |
247 | 264 | ||
248 | if (size == 1) | 265 | if (size == 1) |
249 | *val = (*val >> (8 * (where & 3))) & 0xff; | 266 | *val = (*val >> (8 * (where & 3))) & 0xff; |
@@ -257,21 +274,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, | |||
257 | struct pci_bus *bus, | 274 | struct pci_bus *bus, |
258 | u32 devfn, int where, int size, u32 val) | 275 | u32 devfn, int where, int size, u32 val) |
259 | { | 276 | { |
260 | int ret = PCIBIOS_SUCCESSFUL; | 277 | u32 _val, shift = 8 * (where & 3); |
261 | 278 | ||
262 | writel(PCIE_CONF_ADDR(bus->number, devfn, where), | 279 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), |
263 | port->base + PCIE_CONF_ADDR_OFF); | 280 | PCIE_CONF_ADDR_OFF); |
281 | _val = mvebu_readl(port, PCIE_CONF_DATA_OFF); | ||
264 | 282 | ||
265 | if (size == 4) | 283 | if (size == 4) |
266 | writel(val, port->base + PCIE_CONF_DATA_OFF); | 284 | _val = val; |
267 | else if (size == 2) | 285 | else if (size == 2) |
268 | writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); | 286 | _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift); |
269 | else if (size == 1) | 287 | else if (size == 1) |
270 | writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); | 288 | _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift); |
271 | else | 289 | else |
272 | ret = PCIBIOS_BAD_REGISTER_NUMBER; | 290 | return PCIBIOS_BAD_REGISTER_NUMBER; |
273 | 291 | ||
274 | return ret; | 292 | mvebu_writel(port, _val, PCIE_CONF_DATA_OFF); |
293 | |||
294 | return PCIBIOS_SUCCESSFUL; | ||
275 | } | 295 | } |
276 | 296 | ||
277 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | 297 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) |
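As a small self-contained illustration (not from the patch) of the sub-word arithmetic in the two accessors above: reads extract the addressed byte or halfword from the 32-bit PCIE_CONF_DATA value, and writes now merge the new field into a freshly read dword instead of issuing the earlier writeb()/writew() to an offset register.

#include <linux/types.h>

/* extract a size-byte field at config offset 'where' from a 32-bit read */
static u32 conf_extract(u32 dword, int where, int size)
{
	u32 shift = 8 * (where & 3);

	if (size == 1)
		return (dword >> shift) & 0xff;
	if (size == 2)
		return (dword >> shift) & 0xffff;
	return dword;
}

/* merge a size-byte write at offset 'where' into the previously read dword */
static u32 conf_merge(u32 dword, int where, int size, u32 val)
{
	u32 shift = 8 * (where & 3);

	if (size == 4)
		return val;
	if (size == 2)
		return (dword & ~(0xffff << shift)) | ((val & 0xffff) << shift);
	return (dword & ~(0xff << shift)) | ((val & 0xff) << shift);
}

/*
 * Example: conf_merge(0x11223344, 0x3e, 1, 0xab) == 0x11ab3344
 *          conf_extract(0x11223344, 0x3e, 1)     == 0x22
 */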
@@ -552,7 +572,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | |||
552 | if (bus->number == 0) | 572 | if (bus->number == 0) |
553 | return mvebu_sw_pci_bridge_write(port, where, size, val); | 573 | return mvebu_sw_pci_bridge_write(port, where, size, val); |
554 | 574 | ||
555 | if (!port->haslink) | 575 | if (!mvebu_pcie_link_up(port)) |
556 | return PCIBIOS_DEVICE_NOT_FOUND; | 576 | return PCIBIOS_DEVICE_NOT_FOUND; |
557 | 577 | ||
558 | /* | 578 | /* |
@@ -594,7 +614,7 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | |||
594 | if (bus->number == 0) | 614 | if (bus->number == 0) |
595 | return mvebu_sw_pci_bridge_read(port, where, size, val); | 615 | return mvebu_sw_pci_bridge_read(port, where, size, val); |
596 | 616 | ||
597 | if (!port->haslink) { | 617 | if (!mvebu_pcie_link_up(port)) { |
598 | *val = 0xffffffff; | 618 | *val = 0xffffffff; |
599 | return PCIBIOS_DEVICE_NOT_FOUND; | 619 | return PCIBIOS_DEVICE_NOT_FOUND; |
600 | } | 620 | } |
@@ -626,7 +646,7 @@ static struct pci_ops mvebu_pcie_ops = { | |||
626 | .write = mvebu_pcie_wr_conf, | 646 | .write = mvebu_pcie_wr_conf, |
627 | }; | 647 | }; |
628 | 648 | ||
629 | static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | 649 | static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) |
630 | { | 650 | { |
631 | struct mvebu_pcie *pcie = sys_to_pcie(sys); | 651 | struct mvebu_pcie *pcie = sys_to_pcie(sys); |
632 | int i; | 652 | int i; |
@@ -645,7 +665,7 @@ static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | |||
645 | return 1; | 665 | return 1; |
646 | } | 666 | } |
647 | 667 | ||
648 | static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 668 | static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
649 | { | 669 | { |
650 | struct of_irq oirq; | 670 | struct of_irq oirq; |
651 | int ret; | 671 | int ret; |
@@ -673,11 +693,17 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) | |||
673 | return bus; | 693 | return bus; |
674 | } | 694 | } |
675 | 695 | ||
676 | resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | 696 | static void mvebu_pcie_add_bus(struct pci_bus *bus) |
677 | const struct resource *res, | 697 | { |
678 | resource_size_t start, | 698 | struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); |
679 | resource_size_t size, | 699 | bus->msi = pcie->msi; |
680 | resource_size_t align) | 700 | } |
701 | |||
702 | static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | ||
703 | const struct resource *res, | ||
704 | resource_size_t start, | ||
705 | resource_size_t size, | ||
706 | resource_size_t align) | ||
681 | { | 707 | { |
682 | if (dev->bus->number != 0) | 708 | if (dev->bus->number != 0) |
683 | return start; | 709 | return start; |
@@ -696,7 +722,7 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | |||
696 | return start; | 722 | return start; |
697 | } | 723 | } |
698 | 724 | ||
699 | static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | 725 | static void mvebu_pcie_enable(struct mvebu_pcie *pcie) |
700 | { | 726 | { |
701 | struct hw_pci hw; | 727 | struct hw_pci hw; |
702 | 728 | ||
@@ -709,6 +735,7 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | |||
709 | hw.map_irq = mvebu_pcie_map_irq; | 735 | hw.map_irq = mvebu_pcie_map_irq; |
710 | hw.ops = &mvebu_pcie_ops; | 736 | hw.ops = &mvebu_pcie_ops; |
711 | hw.align_resource = mvebu_pcie_align_resource; | 737 | hw.align_resource = mvebu_pcie_align_resource; |
738 | hw.add_bus = mvebu_pcie_add_bus; | ||
712 | 739 | ||
713 | pci_common_init(&hw); | 740 | pci_common_init(&hw); |
714 | } | 741 | } |
@@ -718,10 +745,8 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | |||
718 | * <...> property for one that matches the given port/lane. Once | 745 | * <...> property for one that matches the given port/lane. Once |
719 | * found, maps it. | 746 | * found, maps it. |
720 | */ | 747 | */ |
721 | static void __iomem * __init | 748 | static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, |
722 | mvebu_pcie_map_registers(struct platform_device *pdev, | 749 | struct device_node *np, struct mvebu_pcie_port *port) |
723 | struct device_node *np, | ||
724 | struct mvebu_pcie_port *port) | ||
725 | { | 750 | { |
726 | struct resource regs; | 751 | struct resource regs; |
727 | int ret = 0; | 752 | int ret = 0; |
@@ -777,7 +802,22 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, | |||
777 | return -ENOENT; | 802 | return -ENOENT; |
778 | } | 803 | } |
779 | 804 | ||
780 | static int __init mvebu_pcie_probe(struct platform_device *pdev) | 805 | static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) |
806 | { | ||
807 | struct device_node *msi_node; | ||
808 | |||
809 | msi_node = of_parse_phandle(pcie->pdev->dev.of_node, | ||
810 | "msi-parent", 0); | ||
811 | if (!msi_node) | ||
812 | return; | ||
813 | |||
814 | pcie->msi = of_pci_find_msi_chip_by_node(msi_node); | ||
815 | |||
816 | if (pcie->msi) | ||
817 | pcie->msi->dev = &pcie->pdev->dev; | ||
818 | } | ||
819 | |||
820 | static int mvebu_pcie_probe(struct platform_device *pdev) | ||
781 | { | 821 | { |
782 | struct mvebu_pcie *pcie; | 822 | struct mvebu_pcie *pcie; |
783 | struct device_node *np = pdev->dev.of_node; | 823 | struct device_node *np = pdev->dev.of_node; |
@@ -790,6 +830,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
790 | return -ENOMEM; | 830 | return -ENOMEM; |
791 | 831 | ||
792 | pcie->pdev = pdev; | 832 | pcie->pdev = pdev; |
833 | platform_set_drvdata(pdev, pcie); | ||
793 | 834 | ||
794 | /* Get the PCIe memory and I/O aperture */ | 835 | /* Get the PCIe memory and I/O aperture */ |
795 | mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); | 836 | mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); |
@@ -818,13 +859,14 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
818 | return ret; | 859 | return ret; |
819 | } | 860 | } |
820 | 861 | ||
862 | i = 0; | ||
821 | for_each_child_of_node(pdev->dev.of_node, child) { | 863 | for_each_child_of_node(pdev->dev.of_node, child) { |
822 | if (!of_device_is_available(child)) | 864 | if (!of_device_is_available(child)) |
823 | continue; | 865 | continue; |
824 | pcie->nports++; | 866 | i++; |
825 | } | 867 | } |
826 | 868 | ||
827 | pcie->ports = devm_kzalloc(&pdev->dev, pcie->nports * | 869 | pcie->ports = devm_kzalloc(&pdev->dev, i * |
828 | sizeof(struct mvebu_pcie_port), | 870 | sizeof(struct mvebu_pcie_port), |
829 | GFP_KERNEL); | 871 | GFP_KERNEL); |
830 | if (!pcie->ports) | 872 | if (!pcie->ports) |
@@ -833,6 +875,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
833 | i = 0; | 875 | i = 0; |
834 | for_each_child_of_node(pdev->dev.of_node, child) { | 876 | for_each_child_of_node(pdev->dev.of_node, child) { |
835 | struct mvebu_pcie_port *port = &pcie->ports[i]; | 877 | struct mvebu_pcie_port *port = &pcie->ports[i]; |
878 | enum of_gpio_flags flags; | ||
836 | 879 | ||
837 | if (!of_device_is_available(child)) | 880 | if (!of_device_is_available(child)) |
838 | continue; | 881 | continue; |
@@ -873,45 +916,68 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
873 | continue; | 916 | continue; |
874 | } | 917 | } |
875 | 918 | ||
919 | port->reset_gpio = of_get_named_gpio_flags(child, | ||
920 | "reset-gpios", 0, &flags); | ||
921 | if (gpio_is_valid(port->reset_gpio)) { | ||
922 | u32 reset_udelay = 20000; | ||
923 | |||
924 | port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; | ||
925 | port->reset_name = kasprintf(GFP_KERNEL, | ||
926 | "pcie%d.%d-reset", port->port, port->lane); | ||
927 | of_property_read_u32(child, "reset-delay-us", | ||
928 | &reset_udelay); | ||
929 | |||
930 | ret = devm_gpio_request_one(&pdev->dev, | ||
931 | port->reset_gpio, GPIOF_DIR_OUT, port->reset_name); | ||
932 | if (ret) { | ||
933 | if (ret == -EPROBE_DEFER) | ||
934 | return ret; | ||
935 | continue; | ||
936 | } | ||
937 | |||
938 | gpio_set_value(port->reset_gpio, | ||
939 | (port->reset_active_low) ? 1 : 0); | ||
940 | msleep(reset_udelay/1000); | ||
941 | } | ||
942 | |||
943 | port->clk = of_clk_get_by_name(child, NULL); | ||
944 | if (IS_ERR(port->clk)) { | ||
945 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", | ||
946 | port->port, port->lane); | ||
947 | continue; | ||
948 | } | ||
949 | |||
950 | ret = clk_prepare_enable(port->clk); | ||
951 | if (ret) | ||
952 | continue; | ||
953 | |||
876 | port->base = mvebu_pcie_map_registers(pdev, child, port); | 954 | port->base = mvebu_pcie_map_registers(pdev, child, port); |
877 | if (IS_ERR(port->base)) { | 955 | if (IS_ERR(port->base)) { |
878 | dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", | 956 | dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", |
879 | port->port, port->lane); | 957 | port->port, port->lane); |
880 | port->base = NULL; | 958 | port->base = NULL; |
959 | clk_disable_unprepare(port->clk); | ||
881 | continue; | 960 | continue; |
882 | } | 961 | } |
883 | 962 | ||
884 | mvebu_pcie_set_local_dev_nr(port, 1); | 963 | mvebu_pcie_set_local_dev_nr(port, 1); |
885 | 964 | ||
886 | if (mvebu_pcie_link_up(port)) { | ||
887 | port->haslink = 1; | ||
888 | dev_info(&pdev->dev, "PCIe%d.%d: link up\n", | ||
889 | port->port, port->lane); | ||
890 | } else { | ||
891 | port->haslink = 0; | ||
892 | dev_info(&pdev->dev, "PCIe%d.%d: link down\n", | ||
893 | port->port, port->lane); | ||
894 | } | ||
895 | |||
896 | port->clk = of_clk_get_by_name(child, NULL); | 965 | port->clk = of_clk_get_by_name(child, NULL); |
897 | if (IS_ERR(port->clk)) { | 966 | if (IS_ERR(port->clk)) { |
898 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", | 967 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", |
899 | port->port, port->lane); | 968 | port->port, port->lane); |
900 | iounmap(port->base); | 969 | iounmap(port->base); |
901 | port->haslink = 0; | ||
902 | continue; | 970 | continue; |
903 | } | 971 | } |
904 | 972 | ||
905 | port->dn = child; | 973 | port->dn = child; |
906 | |||
907 | clk_prepare_enable(port->clk); | ||
908 | spin_lock_init(&port->conf_lock); | 974 | spin_lock_init(&port->conf_lock); |
909 | |||
910 | mvebu_sw_pci_bridge_init(port); | 975 | mvebu_sw_pci_bridge_init(port); |
911 | |||
912 | i++; | 976 | i++; |
913 | } | 977 | } |
914 | 978 | ||
979 | pcie->nports = i; | ||
980 | mvebu_pcie_msi_enable(pcie); | ||
915 | mvebu_pcie_enable(pcie); | 981 | mvebu_pcie_enable(pcie); |
916 | 982 | ||
917 | return 0; | 983 | return 0; |
@@ -920,6 +986,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
920 | static const struct of_device_id mvebu_pcie_of_match_table[] = { | 986 | static const struct of_device_id mvebu_pcie_of_match_table[] = { |
921 | { .compatible = "marvell,armada-xp-pcie", }, | 987 | { .compatible = "marvell,armada-xp-pcie", }, |
922 | { .compatible = "marvell,armada-370-pcie", }, | 988 | { .compatible = "marvell,armada-370-pcie", }, |
989 | { .compatible = "marvell,dove-pcie", }, | ||
923 | { .compatible = "marvell,kirkwood-pcie", }, | 990 | { .compatible = "marvell,kirkwood-pcie", }, |
924 | {}, | 991 | {}, |
925 | }; | 992 | }; |
@@ -931,16 +998,12 @@ static struct platform_driver mvebu_pcie_driver = { | |||
931 | .name = "mvebu-pcie", | 998 | .name = "mvebu-pcie", |
932 | .of_match_table = | 999 | .of_match_table = |
933 | of_match_ptr(mvebu_pcie_of_match_table), | 1000 | of_match_ptr(mvebu_pcie_of_match_table), |
1001 | /* driver unloading/unbinding currently not supported */ | ||
1002 | .suppress_bind_attrs = true, | ||
934 | }, | 1003 | }, |
1004 | .probe = mvebu_pcie_probe, | ||
935 | }; | 1005 | }; |
936 | 1006 | module_platform_driver(mvebu_pcie_driver); | |
937 | static int __init mvebu_pcie_init(void) | ||
938 | { | ||
939 | return platform_driver_probe(&mvebu_pcie_driver, | ||
940 | mvebu_pcie_probe); | ||
941 | } | ||
942 | |||
943 | subsys_initcall(mvebu_pcie_init); | ||
944 | 1007 | ||
945 | MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); | 1008 | MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); |
946 | MODULE_DESCRIPTION("Marvell EBU PCIe driver"); | 1009 | MODULE_DESCRIPTION("Marvell EBU PCIe driver"); |
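For reference, the module_platform_driver() one-liner adopted above expands to roughly the boilerplate it replaces (a sketch of the generic macro, not code from this patch):

static int __init mvebu_pcie_driver_init(void)
{
	return platform_driver_register(&mvebu_pcie_driver);
}
module_init(mvebu_pcie_driver_init);

static void __exit mvebu_pcie_driver_exit(void)
{
	platform_driver_unregister(&mvebu_pcie_driver);
}
module_exit(mvebu_pcie_driver_exit);

Compared with the removed subsys_initcall()/platform_driver_probe() pair, the practical differences are a later (device_initcall-level) registration and a normal .probe path, which is what allows -EPROBE_DEFER to be returned from the reset-gpios handling above.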
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h new file mode 100644 index 000000000000..89ba1b0c90e4 --- /dev/null +++ b/include/linux/platform_data/dma-s3c24xx.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * S3C24XX DMA handling | ||
3 | * | ||
4 | * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the Free | ||
8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
9 | * any later version. | ||
10 | */ | ||
11 | |||
12 | /* Helper to encode the source selection constraints for early s3c socs. */ | ||
13 | #define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4) | ||
14 | |||
15 | enum s3c24xx_dma_bus { | ||
16 | S3C24XX_DMA_APB, | ||
17 | S3C24XX_DMA_AHB, | ||
18 | }; | ||
19 | |||
20 | /** | ||
21 | * @bus: on which bus does the peripheral reside - AHB or APB. | ||
22 | * @handshake: is a handshake with the peripheral necessary | ||
23 | * @chansel: channel selection information, depending on variant; reqsel for | ||
24 | * s3c2443 and later and channel-selection map for earlier SoCs | ||
25 | * see CHANSEL doc in s3c2443-dma.c | ||
26 | */ | ||
27 | struct s3c24xx_dma_channel { | ||
28 | enum s3c24xx_dma_bus bus; | ||
29 | bool handshake; | ||
30 | u16 chansel; | ||
31 | }; | ||
32 | |||
33 | /** | ||
34 | * struct s3c24xx_dma_platdata - platform specific settings | ||
35 | * @num_phy_channels: number of physical channels | ||
36 | * @channels: array of virtual channel descriptions | ||
37 | * @num_channels: number of virtual channels | ||
38 | */ | ||
39 | struct s3c24xx_dma_platdata { | ||
40 | int num_phy_channels; | ||
41 | struct s3c24xx_dma_channel *channels; | ||
42 | int num_channels; | ||
43 | }; | ||
44 | |||
45 | struct dma_chan; | ||
46 | bool s3c24xx_dma_filter(struct dma_chan *chan, void *param); | ||
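A hypothetical board-file sketch of how these structures might be populated; the channel indices, request sources and counts are invented, only the types and the S3C24XX_DMA_CHANREQ() helper come from this header.

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/platform_data/dma-s3c24xx.h>

static struct s3c24xx_dma_channel example_dma_channels[] = {
	[0] = {
		.bus       = S3C24XX_DMA_APB,
		.handshake = true,
		/* early-SoC style map: request source 2 allowed on channel 0 */
		.chansel   = S3C24XX_DMA_CHANREQ(2, 0),
	},
	[1] = {
		.bus       = S3C24XX_DMA_AHB,
		.handshake = false,
		/* same scheme: request source 5 allowed on channel 1 */
		.chansel   = S3C24XX_DMA_CHANREQ(5, 1),
	},
};

static struct s3c24xx_dma_platdata example_dma_platdata = {
	.num_phy_channels = 4,
	.channels         = example_dma_channels,
	.num_channels     = ARRAY_SIZE(example_dma_channels),
};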
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h new file mode 100644 index 000000000000..6efd20264585 --- /dev/null +++ b/include/linux/platform_data/gpio-davinci.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * DaVinci GPIO Platform Related Defines | ||
3 | * | ||
4 | * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef __DAVINCI_GPIO_PLATFORM_H | ||
17 | #define __DAVINCI_GPIO_PLATFORM_H | ||
18 | |||
19 | #include <linux/io.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | |||
22 | #include <asm-generic/gpio.h> | ||
23 | |||
24 | enum davinci_gpio_type { | ||
25 | GPIO_TYPE_TNETV107X = 0, | ||
26 | }; | ||
27 | |||
28 | struct davinci_gpio_platform_data { | ||
29 | u32 ngpio; | ||
30 | u32 gpio_unbanked; | ||
31 | u32 intc_irq_num; | ||
32 | }; | ||
33 | |||
34 | |||
35 | struct davinci_gpio_controller { | ||
36 | struct gpio_chip chip; | ||
37 | int irq_base; | ||
38 | /* Serialize access to GPIO registers */ | ||
39 | spinlock_t lock; | ||
40 | void __iomem *regs; | ||
41 | void __iomem *set_data; | ||
42 | void __iomem *clr_data; | ||
43 | void __iomem *in_data; | ||
44 | int gpio_unbanked; | ||
45 | unsigned gpio_irq; | ||
46 | }; | ||
47 | |||
48 | /* | ||
49 | * basic gpio routines | ||
50 | */ | ||
51 | #define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */ | ||
52 | |||
53 | /* Convert GPIO signal to GPIO pin number */ | ||
54 | #define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) | ||
55 | |||
56 | static inline u32 __gpio_mask(unsigned gpio) | ||
57 | { | ||
58 | return 1 << (gpio % 32); | ||
59 | } | ||
60 | #endif | ||
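A quick worked example (arbitrary pin numbers) of the helpers above: GPIO_TO_PIN() maps a 16-pin named bank to a linear GPIO number, and __gpio_mask() returns that GPIO's bit within its 32-bit register bank.

#include <linux/bitops.h>
#include <linux/platform_data/gpio-davinci.h>

static bool __maybe_unused davinci_gpio_helper_example(void)
{
	unsigned int pin = GPIO_TO_PIN(2, 5);		/* bank 2, pin 5 -> GPIO 37 */

	return __gpio_mask(pin) == BIT(37 % 32);	/* i.e. BIT(5): true */
}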
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index 2eea1840315d..37459dfd168d 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig | |||
@@ -2,7 +2,7 @@ config SND_SOC_SAMSUNG | |||
2 | tristate "ASoC support for Samsung" | 2 | tristate "ASoC support for Samsung" |
3 | depends on PLAT_SAMSUNG | 3 | depends on PLAT_SAMSUNG |
4 | select S3C64XX_DMA if ARCH_S3C64XX | 4 | select S3C64XX_DMA if ARCH_S3C64XX |
5 | select S3C2410_DMA if ARCH_S3C24XX | 5 | select S3C24XX_DMA if ARCH_S3C24XX |
6 | help | 6 | help |
7 | Say Y or M if you want to add support for codecs attached to | 7 | Say Y or M if you want to add support for codecs attached to |
8 | the Samsung SoCs' Audio interfaces. You will also need to | 8 | the Samsung SoCs' Audio interfaces. You will also need to |