58 files changed, 2605 insertions, 1062 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-brcmstb-gisb-arb b/Documentation/ABI/testing/sysfs-platform-brcmstb-gisb-arb new file mode 100644 index 000000000000..f1bad92bbe27 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-brcmstb-gisb-arb | |||
@@ -0,0 +1,8 @@ | |||
1 | What: /sys/devices/../../gisb_arb_timeout | ||
2 | Date: May 2014 | ||
3 | KernelVersion: 3.17 | ||
4 | Contact: Florian Fainelli <f.fainelli@gmail.com> | ||
5 | Description: | ||
6 | Returns the currently configured raw timeout value of the | ||
7 | Broadcom Set Top Box internal GISB bus arbiter. Minimum value | ||
8 | is 1, and maximum value is 0xffffffff. | ||
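For illustration only (not part of this patch), a userspace tool could read and update this raw timeout roughly as sketched below. The full sysfs path is hypothetical; the ABI entry above deliberately abbreviates it.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical expansion of /sys/devices/../../gisb_arb_timeout */
	const char *attr =
		"/sys/devices/platform/gisb-arb/gisb_arb_timeout";
	unsigned long timeout;
	FILE *f = fopen(attr, "r");

	if (!f)
		return EXIT_FAILURE;
	if (fscanf(f, "%lu", &timeout) == 1)
		printf("current GISB arbiter timeout: %lu\n", timeout);
	fclose(f);

	/* write back any value between 1 and 0xffffffff */
	f = fopen(attr, "w");
	if (!f)
		return EXIT_FAILURE;
	fprintf(f, "%lu\n", 0x4000000UL);
	fclose(f);
	return EXIT_SUCCESS;
}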
diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt index c0105de55cbd..974624ea68f6 100644 --- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt +++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt | |||
@@ -6,6 +6,8 @@ provided by Arteris. | |||
6 | Required properties: | 6 | Required properties: |
7 | - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family | 7 | - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family |
8 | Should be "ti,omap4-l3-noc" for OMAP4 family | 8 | Should be "ti,omap4-l3-noc" for OMAP4 family |
9 | Should be "ti,dra7-l3-noc" for DRA7 family | ||
10 | Should be "ti,am4372-l3-noc" for AM43 family | ||
9 | - reg: Contains L3 register address range for each noc domain. | 11 | - reg: Contains L3 register address range for each noc domain. |
10 | - ti,hwmods: "l3_main_1", ... One hwmod for each noc domain. | 12 | - ti,hwmods: "l3_main_1", ... One hwmod for each noc domain. |
11 | 13 | ||
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt new file mode 100644 index 000000000000..e2d501d20c9a --- /dev/null +++ b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt | |||
@@ -0,0 +1,30 @@ | |||
1 | Broadcom GISB bus Arbiter controller | ||
2 | |||
3 | Required properties: | ||
4 | |||
5 | - compatible: should be "brcm,gisb-arb" | ||
6 | - reg: specifies the base physical address and size of the registers | ||
7 | - interrupt-parent: specifies the phandle to the parent interrupt controller | ||
8 | this arbiter gets its interrupt lines from | ||
9 | - interrupts: specifies the two interrupts (timeout and TEA) to be used from | ||
10 | the parent interrupt controller | ||
11 | |||
12 | Optional properties: | ||
13 | |||
14 | - brcm,gisb-arb-master-mask: 32-bits wide bitmask used to specify which GISB | ||
15 | masters are valid at the system level | ||
16 | - brcm,gisb-arb-master-names: string list of the literal names of the GISB | ||
17 | masters. Should match the number of bits set in brcm,gisb-arb-master-mask | ||
18 | and the order in which they appear | ||
19 | |||
20 | Example: | ||
21 | |||
22 | gisb-arb@f0400000 { | ||
23 | compatible = "brcm,gisb-arb"; | ||
24 | reg = <0xf0400000 0x800>; | ||
25 | interrupts = <0>, <2>; | ||
26 | interrupt-parent = <&sun_l2_intc>; | ||
27 | |||
28 | brcm,gisb-arb-master-mask = <0x7>; | ||
29 | brcm,gisb-arb-master-names = "bsp_0", "scpu_0", "cpu_0"; | ||
30 | }; | ||
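A minimal sketch (not the actual GISB driver) of how the optional master mask/name pair above could be consumed, using standard OF helpers; the function name is made up:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/of.h>

/* illustration only -- not the brcmstb GISB arbiter driver */
static int gisb_arb_parse_masters(struct device *dev, struct device_node *np)
{
	unsigned long mask;
	unsigned int bit, idx = 0;
	const char *name;
	u32 val;

	if (of_property_read_u32(np, "brcm,gisb-arb-master-mask", &val))
		return 0;	/* both properties are optional */

	mask = val;
	for_each_set_bit(bit, &mask, 32) {
		if (of_property_read_string_index(np,
				"brcm,gisb-arb-master-names", idx++, &name))
			break;
		dev_info(dev, "GISB master %u is \"%s\"\n", bit, name);
	}
	return 0;
}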
diff --git a/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt b/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt new file mode 100644 index 000000000000..3e6a81e99804 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/ti-keystone-pllctrl.txt | |||
@@ -0,0 +1,20 @@ | |||
1 | * Device tree bindings for Texas Instruments keystone pll controller | ||
2 | |||
3 | The main PLL, which drives the C66x CorePacs, the switch fabric, and a | ||
4 | majority of the peripheral clocks (all but the ARM CorePacs, DDR3 and the | ||
5 | NETCP modules), requires a PLL controller to manage the various clock | ||
6 | divisions, gating, and synchronization. | ||
7 | |||
8 | Required properties: | ||
9 | |||
10 | - compatible: "ti,keystone-pllctrl", "syscon" | ||
11 | |||
12 | - reg: contains the offset/length value for the pll controller | ||
13 | register space. | ||
14 | |||
15 | Example: | ||
16 | |||
17 | pllctrl: pll-controller@02310000 { | ||
18 | compatible = "ti,keystone-pllctrl", "syscon"; | ||
19 | reg = <0x02310000 0x200>; | ||
20 | }; | ||
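Because the node is also a "syscon", other drivers can reach these registers through a shared regmap. A minimal sketch (not from this patch) of such a lookup:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* sketch only: read one pll controller register through the syscon regmap */
static int read_pllctrl_reg(unsigned int offset, unsigned int *val)
{
	struct regmap *map;

	/* resolves the node by its compatible string */
	map = syscon_regmap_lookup_by_compatible("ti,keystone-pllctrl");
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_read(map, offset, val);
}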
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index 68ff2137bae7..5ba525a10035 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt | |||
@@ -2,11 +2,8 @@ TI EDMA | |||
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible : "ti,edma3" | 4 | - compatible : "ti,edma3" |
5 | - ti,edma-regions: Number of regions | ||
6 | - ti,edma-slots: Number of slots | ||
7 | - #dma-cells: Should be set to <1> | 5 | - #dma-cells: Should be set to <1> |
8 | Clients should use a single channel number per DMA request. | 6 | Clients should use a single channel number per DMA request. |
9 | - dma-channels: Specify total DMA channels per CC | ||
10 | - reg: Memory map for accessing module | 7 | - reg: Memory map for accessing module |
11 | - interrupt-parent: Interrupt controller the interrupt is routed through | 8 | - interrupt-parent: Interrupt controller the interrupt is routed through |
12 | - interrupts: Exactly 3 interrupts need to be specified in the order: | 9 | - interrupts: Exactly 3 interrupts need to be specified in the order: |
@@ -17,6 +14,13 @@ Optional properties: | |||
17 | - ti,hwmods: Name of the hwmods associated to the EDMA | 14 | - ti,hwmods: Name of the hwmods associated to the EDMA |
18 | - ti,edma-xbar-event-map: Crossbar event to channel map | 15 | - ti,edma-xbar-event-map: Crossbar event to channel map |
19 | 16 | ||
17 | Deprecated properties: | ||
18 | Listed here in case one wants to boot an old kernel with a new DTB. These | ||
19 | properties may then need to be kept in the new DTS files. | ||
20 | - ti,edma-regions: Number of regions | ||
21 | - ti,edma-slots: Number of slots | ||
22 | - dma-channels: Specify total DMA channels per CC | ||
23 | |||
20 | Example: | 24 | Example: |
21 | 25 | ||
22 | edma: edma@49000000 { | 26 | edma: edma@49000000 { |
@@ -26,9 +30,6 @@ edma: edma@49000000 { | |||
26 | compatible = "ti,edma3"; | 30 | compatible = "ti,edma3"; |
27 | ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; | 31 | ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; |
28 | #dma-cells = <1>; | 32 | #dma-cells = <1>; |
29 | dma-channels = <64>; | ||
30 | ti,edma-regions = <4>; | ||
31 | ti,edma-slots = <256>; | ||
32 | ti,edma-xbar-event-map = /bits/ 16 <1 12 | 33 | ti,edma-xbar-event-map = /bits/ 16 <1 12 |
33 | 2 13>; | 34 | 2 13>; |
34 | }; | 35 | }; |
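The properties above became unnecessary because the driver (see arch/arm/common/edma.c below) now decodes the same information from the controller's CCCFG register. A standalone sketch of that decoding, using a hypothetical CCCFG value chosen so that it matches the figures previously hard-coded in am33xx.dtsi:

#include <stdio.h>

int main(void)
{
	unsigned int cccfg = 0x00224005;	/* hypothetical example value */

	/* same bit fields as the GET_NUM_* macros added to edma.c */
	unsigned int channels = 1u << ((cccfg & 0x7) + 1);	  /* bits 0-2  */
	unsigned int slots = 1u << (((cccfg >> 12) & 0x7) + 4);  /* bits 12-14 */
	unsigned int queues = ((cccfg >> 16) & 0x7) + 1;	  /* bits 16-18 */
	unsigned int regions = 1u << ((cccfg >> 20) & 0x3);	  /* bits 20-21 */

	/* prints: channels=64 slots=256 queues=3 regions=4 */
	printf("channels=%u slots=%u queues=%u regions=%u\n",
	       channels, slots, queues, regions);
	return 0;
}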
diff --git a/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt index 653c90c34a71..1ee3bc09f319 100644 --- a/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt +++ b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt | |||
@@ -6,10 +6,11 @@ The actual devices are instantiated from the child nodes of a Device Bus node. | |||
6 | 6 | ||
7 | Required properties: | 7 | Required properties: |
8 | 8 | ||
9 | - compatible: Currently only Armada 370/XP SoC are supported, | 9 | - compatible: Armada 370/XP SoCs are supported using the |
10 | with this compatible string: | 10 | "marvell,mvebu-devbus" compatible string. |
11 | 11 | ||
12 | marvell,mvebu-devbus | 12 | Orion5x SoCs are supported using the |
13 | "marvell,orion-devbus" compatible string. | ||
13 | 14 | ||
14 | - reg: A resource specifier for the register space. | 15 | - reg: A resource specifier for the register space. |
15 | This is the base address of a chip select within | 16 | This is the base address of a chip select within |
@@ -22,7 +23,14 @@ Required properties: | |||
22 | integer values for each chip-select line in use: | 23 | integer values for each chip-select line in use: |
23 | 0 <physical address of mapping> <size> | 24 | 0 <physical address of mapping> <size> |
24 | 25 | ||
25 | Mandatory timing properties for child nodes: | 26 | Optional properties: |
27 | |||
28 | - devbus,keep-config This property can optionally be used to keep | ||
29 | using the timing parameters set by the | ||
30 | bootloader. It makes all the timing properties | ||
31 | described below unused. | ||
32 | |||
33 | Timing properties for child nodes: | ||
26 | 34 | ||
27 | Read parameters: | 35 | Read parameters: |
28 | 36 | ||
@@ -30,21 +38,26 @@ Read parameters: | |||
30 | drive the AD bus after the completion of a device read. | 38 | drive the AD bus after the completion of a device read. |
31 | This prevents contentions on the Device Bus after a read | 39 | This prevents contentions on the Device Bus after a read |
32 | cycle from a slow device. | 40 | cycle from a slow device. |
41 | Mandatory, except if devbus,keep-config is used. | ||
33 | 42 | ||
34 | - devbus,bus-width: Defines the bus width (e.g. <16>) | 43 | - devbus,bus-width: Defines the bus width, in bits (e.g. <16>). |
44 | Mandatory, except if devbus,keep-config is used. | ||
35 | 45 | ||
36 | - devbus,badr-skew-ps: Defines the time delay from A[2:0] toggle, | 46 | - devbus,badr-skew-ps: Defines the time delay from A[2:0] toggle, |
37 | to read data sample. This parameter is useful for | 47 | to read data sample. This parameter is useful for |
38 | synchronous pipelined devices, where the address | 48 | synchronous pipelined devices, where the address |
39 | precedes the read data by one or two cycles. | 49 | precedes the read data by one or two cycles. |
50 | Mandatory, except if devbus,keep-config is used. | ||
40 | 51 | ||
41 | - devbus,acc-first-ps: Defines the time delay from the negation of | 52 | - devbus,acc-first-ps: Defines the time delay from the negation of |
42 | ALE[0] to the cycle that the first read data is sampled | 53 | ALE[0] to the cycle that the first read data is sampled |
43 | by the controller. | 54 | by the controller. |
55 | Mandatory, except if devbus,keep-config is used. | ||
44 | 56 | ||
45 | - devbus,acc-next-ps: Defines the time delay between the cycle that | 57 | - devbus,acc-next-ps: Defines the time delay between the cycle that |
46 | samples data N and the cycle that samples data N+1 | 58 | samples data N and the cycle that samples data N+1 |
47 | (in burst accesses). | 59 | (in burst accesses). |
60 | Mandatory, except if devbus,keep-config is used. | ||
48 | 61 | ||
49 | - devbus,rd-setup-ps: Defines the time delay between DEV_CSn assertion to | 62 | - devbus,rd-setup-ps: Defines the time delay between DEV_CSn assertion to |
50 | DEV_OEn assertion. If set to 0 (default), | 63 | DEV_OEn assertion. If set to 0 (default), |
@@ -52,6 +65,8 @@ Read parameters: | |||
52 | This parameter has no effect on the <acc-first-ps> parameter | 65 | This parameter has no effect on the <acc-first-ps> parameter |
53 | (no effect on the first data sample). Set <rd-setup-ps> | 66 | (no effect on the first data sample). Set <rd-setup-ps> |
54 | to a value smaller than <acc-first-ps>. | 67 | to a value smaller than <acc-first-ps>. |
68 | Mandatory for "marvell,mvebu-devbus" compatible string, | ||
69 | except if devbus,keep-config is used. | ||
55 | 70 | ||
56 | - devbus,rd-hold-ps: Defines the time between the last data sample to the | 71 | - devbus,rd-hold-ps: Defines the time between the last data sample to the |
57 | de-assertion of DEV_CSn. If set to 0 (default), | 72 | de-assertion of DEV_CSn. If set to 0 (default), |
@@ -62,16 +77,20 @@ Read parameters: | |||
62 | last data sampled. Also this parameter has no | 77 | last data sampled. Also this parameter has no |
63 | effect on the <turn-off-ps> parameter. | 78 | effect on the <turn-off-ps> parameter. |
64 | Set <rd-hold-ps> to a value smaller than <turn-off-ps>. | 79 | Set <rd-hold-ps> to a value smaller than <turn-off-ps>. |
80 | Mandatory for "marvell,mvebu-devbus" compatible string, | ||
81 | except if devbus,keep-config is used. | ||
65 | 82 | ||
66 | Write parameters: | 83 | Write parameters: |
67 | 84 | ||
68 | - devbus,ale-wr-ps: Defines the time delay from the ALE[0] negation cycle | 85 | - devbus,ale-wr-ps: Defines the time delay from the ALE[0] negation cycle |
69 | to the DEV_WEn assertion. | 86 | to the DEV_WEn assertion. |
87 | Mandatory. | ||
70 | 88 | ||
71 | - devbus,wr-low-ps: Defines the time during which DEV_WEn is active. | 89 | - devbus,wr-low-ps: Defines the time during which DEV_WEn is active. |
72 | A[2:0] and Data are kept valid as long as DEV_WEn | 90 | A[2:0] and Data are kept valid as long as DEV_WEn |
73 | is active. This parameter defines the setup time of | 91 | is active. This parameter defines the setup time of |
74 | address and data to DEV_WEn rise. | 92 | address and data to DEV_WEn rise. |
93 | Mandatory. | ||
75 | 94 | ||
76 | - devbus,wr-high-ps: Defines the time during which DEV_WEn is kept | 95 | - devbus,wr-high-ps: Defines the time during which DEV_WEn is kept |
77 | inactive (high) between data beats of a burst write. | 96 | inactive (high) between data beats of a burst write. |
@@ -79,10 +98,13 @@ Write parameters: | |||
79 | <wr-high-ps> - <tick> ps. | 98 | <wr-high-ps> - <tick> ps. |
80 | This parameter defines the hold time of address and | 99 | This parameter defines the hold time of address and |
81 | data after DEV_WEn rise. | 100 | data after DEV_WEn rise. |
101 | Mandatory. | ||
82 | 102 | ||
83 | - devbus,sync-enable: Synchronous device enable. | 103 | - devbus,sync-enable: Synchronous device enable. |
84 | 1: True | 104 | 1: True |
85 | 0: False | 105 | 0: False |
106 | Mandatory for "marvell,mvebu-devbus" compatible string, | ||
107 | except if devbus,keep-config is used. | ||
86 | 108 | ||
87 | An example for an Armada XP GP board, with a 16 MiB NOR device as child | 109 | An example for an Armada XP GP board, with a 16 MiB NOR device as child |
88 | is shown below. Note that the Device Bus driver is in charge of allocating | 110 |
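A minimal sketch (not the actual mvebu-devbus driver) of how devbus,keep-config changes the parsing logic described above; the function name is made up:

#include <linux/of.h>

static int devbus_get_timing(struct device_node *np, u32 *bus_width)
{
	/* keep the timings already programmed by the bootloader */
	if (of_property_read_bool(np, "devbus,keep-config"))
		return 0;

	/* otherwise the timing properties become mandatory */
	if (of_property_read_u32(np, "devbus,bus-width", bus_width))
		return -EINVAL;

	/* ...read the remaining devbus,*-ps properties the same way... */
	return 0;
}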
diff --git a/Documentation/devicetree/bindings/power/reset/keystone-reset.txt b/Documentation/devicetree/bindings/power/reset/keystone-reset.txt new file mode 100644 index 000000000000..c82f12e2d85c --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/keystone-reset.txt | |||
@@ -0,0 +1,67 @@ | |||
1 | * Device tree bindings for Texas Instruments keystone reset | ||
2 | |||
3 | This node is intended to allow SoC reset in case of software reset | ||
4 | of selected watchdogs. | ||
5 | |||
6 | Keystone SoCs can contain up to 4 watchdog timers able to reset the | ||
7 | SoC. Each watchdog timer event input is connected to the Reset Mux | ||
8 | block, which can be configured to either cause a reset or ignore the event. | ||
9 | |||
10 | Additionally, a soft or hard reset can be selected. | ||
11 | |||
12 | Required properties: | ||
13 | |||
14 | - compatible: ti,keystone-reset | ||
15 | |||
16 | - ti,syscon-pll: phandle/offset pair. The phandle to the syscon used to | ||
17 | access the pll controller registers and the offset of the | ||
18 | reset control registers within that space. | ||
19 | |||
20 | - ti,syscon-dev: phandle/offset pair. The phandle to the syscon used to | ||
21 | access the device state control registers and the offset | ||
22 | of the reset mux block registers for all watchdogs. | ||
23 | |||
24 | Optional properties: | ||
25 | |||
26 | - ti,soft-reset: Boolean option indicating soft reset. | ||
27 | By default hard reset is used. | ||
28 | |||
29 | - ti,wdt-list: List of WDTs that can cause a SoC reset. It is not | ||
30 | related to the WDT driver; it is only needed to enable a | ||
31 | SoC-level reset triggered by one of the listed WDTs. The | ||
32 | list is in the format: <0>, <2>; entries can appear in any | ||
33 | order and range from 0 to 3, as Keystone can contain up to | ||
34 | 4 SoC reset watchdogs. | ||
35 | |||
36 | Example 1: | ||
37 | Set up the Keystone reset so that a software reset or a | ||
38 | WDT0 event issues a hard reset of the SoC. | ||
39 | |||
40 | pllctrl: pll-controller@02310000 { | ||
41 | compatible = "ti,keystone-pllctrl", "syscon"; | ||
42 | reg = <0x02310000 0x200>; | ||
43 | }; | ||
44 | |||
45 | devctrl: device-state-control@02620000 { | ||
46 | compatible = "ti,keystone-devctrl", "syscon"; | ||
47 | reg = <0x02620000 0x1000>; | ||
48 | }; | ||
49 | |||
50 | rstctrl: reset-controller { | ||
51 | compatible = "ti,keystone-reset"; | ||
52 | ti,syscon-pll = <&pllctrl 0xe4>; | ||
53 | ti,syscon-dev = <&devctrl 0x328>; | ||
54 | ti,wdt-list = <0>; | ||
55 | }; | ||
56 | |||
57 | Example 2: | ||
58 | Set up the Keystone reset so that a software reset, a WDT0 | ||
59 | or a WDT2 event issues a soft reset of the SoC. | ||
60 | |||
61 | rstctrl: reset-controller { | ||
62 | compatible = "ti,keystone-reset"; | ||
63 | ti,syscon-pll = <&pllctrl 0xe4>; | ||
64 | ti,syscon-dev = <&devctrl 0x328>; | ||
65 | ti,wdt-list = <0>, <2>; | ||
66 | ti,soft-reset; | ||
67 | }; | ||
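A sketch (not necessarily the final driver code) of how a phandle/offset pair such as ti,syscon-pll is typically consumed; the function name is made up:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int keystone_reset_get_pll_regs(struct device_node *np,
					struct regmap **map, u32 *offset)
{
	/* cell 0 is the syscon phandle, cell 1 the register offset */
	*map = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll");
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	if (of_property_read_u32_index(np, "ti,syscon-pll", 1, offset))
		return -EINVAL;

	/* the reset control registers start at *offset, e.g. 0xe4 above */
	return 0;
}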
diff --git a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt new file mode 100644 index 000000000000..c8f775714887 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt | |||
@@ -0,0 +1,21 @@ | |||
1 | Allwinner sunxi Peripheral Reset Controller | ||
2 | =========================================== | ||
3 | |||
4 | Please also refer to reset.txt in this directory for common reset | ||
5 | controller binding usage. | ||
6 | |||
7 | Required properties: | ||
8 | - compatible: Should be one of the following: | ||
9 | "allwinner,sun6i-a31-ahb1-reset" | ||
10 | "allwinner,sun6i-a31-clock-reset" | ||
11 | - reg: should be register base and length as documented in the | ||
12 | datasheet | ||
13 | - #reset-cells: 1, see below | ||
14 | |||
15 | example: | ||
16 | |||
17 | ahb1_rst: reset@01c202c0 { | ||
18 | #reset-cells = <1>; | ||
19 | compatible = "allwinner,sun6i-a31-ahb1-reset"; | ||
20 | reg = <0x01c202c0 0xc>; | ||
21 | }; | ||
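With #reset-cells = <1>, a peripheral node references one of these lines as resets = <&ahb1_rst N>; and its driver deasserts it through the common reset API. A minimal consumer sketch for a hypothetical device (not part of this patch):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int sunxi_periph_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* release the peripheral from reset before touching it */
	return reset_control_deassert(rstc);
}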
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 586397cf6e9c..9f53e824b037 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi | |||
@@ -147,9 +147,6 @@ | |||
147 | <0x44e10f90 0x40>; | 147 | <0x44e10f90 0x40>; |
148 | interrupts = <12 13 14>; | 148 | interrupts = <12 13 14>; |
149 | #dma-cells = <1>; | 149 | #dma-cells = <1>; |
150 | dma-channels = <64>; | ||
151 | ti,edma-regions = <4>; | ||
152 | ti,edma-slots = <256>; | ||
153 | }; | 150 | }; |
154 | 151 | ||
155 | gpio0: gpio@44e07000 { | 152 | gpio0: gpio@44e07000 { |
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 1704e853f163..794c73e5c4e4 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi | |||
@@ -112,9 +112,6 @@ | |||
112 | <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, | 112 | <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, |
113 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; | 113 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; |
114 | #dma-cells = <1>; | 114 | #dma-cells = <1>; |
115 | dma-channels = <64>; | ||
116 | ti,edma-regions = <4>; | ||
117 | ti,edma-slots = <256>; | ||
118 | }; | 115 | }; |
119 | 116 | ||
120 | uart0: serial@44e09000 { | 117 | uart0: serial@44e09000 { |
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index 5339009b3c0c..485be42519b9 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c | |||
@@ -102,7 +102,13 @@ | |||
102 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | 102 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) |
103 | 103 | ||
104 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | 104 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ |
105 | #define CHMAP_EXIST BIT(24) | 105 | |
106 | /* CCCFG register */ | ||
107 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | ||
108 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | ||
109 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | ||
110 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | ||
111 | #define CHMAP_EXIST BIT(24) | ||
106 | 112 | ||
107 | #define EDMA_MAX_DMACH 64 | 113 | #define EDMA_MAX_DMACH 64 |
108 | #define EDMA_MAX_PARAMENTRY 512 | 114 | #define EDMA_MAX_PARAMENTRY 512 |
@@ -233,7 +239,6 @@ struct edma { | |||
233 | unsigned num_region; | 239 | unsigned num_region; |
234 | unsigned num_slots; | 240 | unsigned num_slots; |
235 | unsigned num_tc; | 241 | unsigned num_tc; |
236 | unsigned num_cc; | ||
237 | enum dma_event_q default_queue; | 242 | enum dma_event_q default_queue; |
238 | 243 | ||
239 | /* list of channels with no event trigger; terminated by "-1" */ | 244 |
@@ -290,12 +295,6 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no, | |||
290 | ~(0x7 << bit), queue_no << bit); | 295 | ~(0x7 << bit), queue_no << bit); |
291 | } | 296 | } |
292 | 297 | ||
293 | static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no) | ||
294 | { | ||
295 | int bit = queue_no * 4; | ||
296 | edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); | ||
297 | } | ||
298 | |||
299 | static void __init assign_priority_to_queue(unsigned ctlr, int queue_no, | 298 | static void __init assign_priority_to_queue(unsigned ctlr, int queue_no, |
300 | int priority) | 299 | int priority) |
301 | { | 300 | { |
@@ -994,29 +993,23 @@ void edma_set_dest(unsigned slot, dma_addr_t dest_port, | |||
994 | EXPORT_SYMBOL(edma_set_dest); | 993 | EXPORT_SYMBOL(edma_set_dest); |
995 | 994 | ||
996 | /** | 995 | /** |
997 | * edma_get_position - returns the current transfer points | 996 | * edma_get_position - returns the current transfer point |
998 | * @slot: parameter RAM slot being examined | 997 | * @slot: parameter RAM slot being examined |
999 | * @src: pointer to source port position | 998 | * @dst: true selects the dest position, false the source |
1000 | * @dst: pointer to destination port position | ||
1001 | * | 999 | * |
1002 | * Returns current source and destination addresses for a particular | 1000 | * Returns the position of the current active slot |
1003 | * parameter RAM slot. Its channel should not be active when this is called. | ||
1004 | */ | 1001 | */ |
1005 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) | 1002 | dma_addr_t edma_get_position(unsigned slot, bool dst) |
1006 | { | 1003 | { |
1007 | struct edmacc_param temp; | 1004 | u32 offs, ctlr = EDMA_CTLR(slot); |
1008 | unsigned ctlr; | ||
1009 | 1005 | ||
1010 | ctlr = EDMA_CTLR(slot); | ||
1011 | slot = EDMA_CHAN_SLOT(slot); | 1006 | slot = EDMA_CHAN_SLOT(slot); |
1012 | 1007 | ||
1013 | edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp); | 1008 | offs = PARM_OFFSET(slot); |
1014 | if (src != NULL) | 1009 | offs += dst ? PARM_DST : PARM_SRC; |
1015 | *src = temp.src; | 1010 | |
1016 | if (dst != NULL) | 1011 | return edma_read(ctlr, offs); |
1017 | *dst = temp.dst; | ||
1018 | } | 1012 | } |
1019 | EXPORT_SYMBOL(edma_get_position); | ||
1020 | 1013 | ||
1021 | /** | 1014 | /** |
1022 | * edma_set_src_index - configure DMA source address indexing | 1015 | * edma_set_src_index - configure DMA source address indexing |
@@ -1421,6 +1414,67 @@ void edma_clear_event(unsigned channel) | |||
1421 | } | 1414 | } |
1422 | EXPORT_SYMBOL(edma_clear_event); | 1415 | EXPORT_SYMBOL(edma_clear_event); |
1423 | 1416 | ||
1417 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | ||
1418 | struct edma *edma_cc) | ||
1419 | { | ||
1420 | int i; | ||
1421 | u32 value, cccfg; | ||
1422 | s8 (*queue_priority_map)[2]; | ||
1423 | |||
1424 | /* Decode the eDMA3 configuration from CCCFG register */ | ||
1425 | cccfg = edma_read(0, EDMA_CCCFG); | ||
1426 | |||
1427 | value = GET_NUM_REGN(cccfg); | ||
1428 | edma_cc->num_region = BIT(value); | ||
1429 | |||
1430 | value = GET_NUM_DMACH(cccfg); | ||
1431 | edma_cc->num_channels = BIT(value + 1); | ||
1432 | |||
1433 | value = GET_NUM_PAENTRY(cccfg); | ||
1434 | edma_cc->num_slots = BIT(value + 4); | ||
1435 | |||
1436 | value = GET_NUM_EVQUE(cccfg); | ||
1437 | edma_cc->num_tc = value + 1; | ||
1438 | |||
1439 | dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg); | ||
1440 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); | ||
1441 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); | ||
1442 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); | ||
1443 | dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc); | ||
1444 | |||
1445 | /* Nothing needs to be done if the queue priority mapping is provided */ | ||
1446 | if (pdata->queue_priority_mapping) | ||
1447 | return 0; | ||
1448 | |||
1449 | /* | ||
1450 | * Configure TC/queue priority as follows: | ||
1451 | * Q0 - priority 0 | ||
1452 | * Q1 - priority 1 | ||
1453 | * Q2 - priority 2 | ||
1454 | * ... | ||
1455 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1456 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1457 | * the lowest priority. | ||
1458 | */ | ||
1459 | queue_priority_map = devm_kzalloc(dev, | ||
1460 | (edma_cc->num_tc + 1) * sizeof(s8), | ||
1461 | GFP_KERNEL); | ||
1462 | if (!queue_priority_map) | ||
1463 | return -ENOMEM; | ||
1464 | |||
1465 | for (i = 0; i < edma_cc->num_tc; i++) { | ||
1466 | queue_priority_map[i][0] = i; | ||
1467 | queue_priority_map[i][1] = i; | ||
1468 | } | ||
1469 | queue_priority_map[i][0] = -1; | ||
1470 | queue_priority_map[i][1] = -1; | ||
1471 | |||
1472 | pdata->queue_priority_mapping = queue_priority_map; | ||
1473 | pdata->default_queue = 0; | ||
1474 | |||
1475 | return 0; | ||
1476 | } | ||
1477 | |||
1424 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) | 1478 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) |
1425 | 1479 | ||
1426 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, | 1480 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, |
@@ -1471,65 +1525,16 @@ static int edma_of_parse_dt(struct device *dev, | |||
1471 | struct device_node *node, | 1525 | struct device_node *node, |
1472 | struct edma_soc_info *pdata) | 1526 | struct edma_soc_info *pdata) |
1473 | { | 1527 | { |
1474 | int ret = 0, i; | 1528 | int ret = 0; |
1475 | u32 value; | ||
1476 | struct property *prop; | 1529 | struct property *prop; |
1477 | size_t sz; | 1530 | size_t sz; |
1478 | struct edma_rsv_info *rsv_info; | 1531 | struct edma_rsv_info *rsv_info; |
1479 | s8 (*queue_tc_map)[2], (*queue_priority_map)[2]; | ||
1480 | |||
1481 | memset(pdata, 0, sizeof(struct edma_soc_info)); | ||
1482 | |||
1483 | ret = of_property_read_u32(node, "dma-channels", &value); | ||
1484 | if (ret < 0) | ||
1485 | return ret; | ||
1486 | pdata->n_channel = value; | ||
1487 | |||
1488 | ret = of_property_read_u32(node, "ti,edma-regions", &value); | ||
1489 | if (ret < 0) | ||
1490 | return ret; | ||
1491 | pdata->n_region = value; | ||
1492 | |||
1493 | ret = of_property_read_u32(node, "ti,edma-slots", &value); | ||
1494 | if (ret < 0) | ||
1495 | return ret; | ||
1496 | pdata->n_slot = value; | ||
1497 | |||
1498 | pdata->n_cc = 1; | ||
1499 | 1532 | ||
1500 | rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); | 1533 | rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); |
1501 | if (!rsv_info) | 1534 | if (!rsv_info) |
1502 | return -ENOMEM; | 1535 | return -ENOMEM; |
1503 | pdata->rsv = rsv_info; | 1536 | pdata->rsv = rsv_info; |
1504 | 1537 | ||
1505 | queue_tc_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL); | ||
1506 | if (!queue_tc_map) | ||
1507 | return -ENOMEM; | ||
1508 | |||
1509 | for (i = 0; i < 3; i++) { | ||
1510 | queue_tc_map[i][0] = i; | ||
1511 | queue_tc_map[i][1] = i; | ||
1512 | } | ||
1513 | queue_tc_map[i][0] = -1; | ||
1514 | queue_tc_map[i][1] = -1; | ||
1515 | |||
1516 | pdata->queue_tc_mapping = queue_tc_map; | ||
1517 | |||
1518 | queue_priority_map = devm_kzalloc(dev, 8*sizeof(s8), GFP_KERNEL); | ||
1519 | if (!queue_priority_map) | ||
1520 | return -ENOMEM; | ||
1521 | |||
1522 | for (i = 0; i < 3; i++) { | ||
1523 | queue_priority_map[i][0] = i; | ||
1524 | queue_priority_map[i][1] = i; | ||
1525 | } | ||
1526 | queue_priority_map[i][0] = -1; | ||
1527 | queue_priority_map[i][1] = -1; | ||
1528 | |||
1529 | pdata->queue_priority_mapping = queue_priority_map; | ||
1530 | |||
1531 | pdata->default_queue = 0; | ||
1532 | |||
1533 | prop = of_find_property(node, "ti,edma-xbar-event-map", &sz); | 1538 | prop = of_find_property(node, "ti,edma-xbar-event-map", &sz); |
1534 | if (prop) | 1539 | if (prop) |
1535 | ret = edma_xbar_event_map(dev, node, pdata, sz); | 1540 | ret = edma_xbar_event_map(dev, node, pdata, sz); |
@@ -1556,6 +1561,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | |||
1556 | return ERR_PTR(ret); | 1561 | return ERR_PTR(ret); |
1557 | 1562 | ||
1558 | dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap); | 1563 | dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap); |
1564 | dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap); | ||
1559 | of_dma_controller_register(dev->of_node, of_dma_simple_xlate, | 1565 | of_dma_controller_register(dev->of_node, of_dma_simple_xlate, |
1560 | &edma_filter_info); | 1566 | &edma_filter_info); |
1561 | 1567 | ||
@@ -1574,7 +1580,6 @@ static int edma_probe(struct platform_device *pdev) | |||
1574 | struct edma_soc_info **info = pdev->dev.platform_data; | 1580 | struct edma_soc_info **info = pdev->dev.platform_data; |
1575 | struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL}; | 1581 | struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL}; |
1576 | s8 (*queue_priority_mapping)[2]; | 1582 | s8 (*queue_priority_mapping)[2]; |
1577 | s8 (*queue_tc_mapping)[2]; | ||
1578 | int i, j, off, ln, found = 0; | 1583 | int i, j, off, ln, found = 0; |
1579 | int status = -1; | 1584 | int status = -1; |
1580 | const s16 (*rsv_chans)[2]; | 1585 | const s16 (*rsv_chans)[2]; |
@@ -1585,7 +1590,6 @@ static int edma_probe(struct platform_device *pdev) | |||
1585 | struct resource *r[EDMA_MAX_CC] = {NULL}; | 1590 | struct resource *r[EDMA_MAX_CC] = {NULL}; |
1586 | struct resource res[EDMA_MAX_CC]; | 1591 | struct resource res[EDMA_MAX_CC]; |
1587 | char res_name[10]; | 1592 | char res_name[10]; |
1588 | char irq_name[10]; | ||
1589 | struct device_node *node = pdev->dev.of_node; | 1593 | struct device_node *node = pdev->dev.of_node; |
1590 | struct device *dev = &pdev->dev; | 1594 | struct device *dev = &pdev->dev; |
1591 | int ret; | 1595 | int ret; |
@@ -1650,12 +1654,10 @@ static int edma_probe(struct platform_device *pdev) | |||
1650 | if (!edma_cc[j]) | 1654 | if (!edma_cc[j]) |
1651 | return -ENOMEM; | 1655 | return -ENOMEM; |
1652 | 1656 | ||
1653 | edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel, | 1657 | /* Get eDMA3 configuration from IP */ |
1654 | EDMA_MAX_DMACH); | 1658 | ret = edma_setup_from_hw(dev, info[j], edma_cc[j]); |
1655 | edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot, | 1659 | if (ret) |
1656 | EDMA_MAX_PARAMENTRY); | 1660 | return ret; |
1657 | edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc, | ||
1658 | EDMA_MAX_CC); | ||
1659 | 1661 | ||
1660 | edma_cc[j]->default_queue = info[j]->default_queue; | 1662 | edma_cc[j]->default_queue = info[j]->default_queue; |
1661 | 1663 | ||
@@ -1707,14 +1709,21 @@ static int edma_probe(struct platform_device *pdev) | |||
1707 | 1709 | ||
1708 | if (node) { | 1710 | if (node) { |
1709 | irq[j] = irq_of_parse_and_map(node, 0); | 1711 | irq[j] = irq_of_parse_and_map(node, 0); |
1712 | err_irq[j] = irq_of_parse_and_map(node, 2); | ||
1710 | } else { | 1713 | } else { |
1714 | char irq_name[10]; | ||
1715 | |||
1711 | sprintf(irq_name, "edma%d", j); | 1716 | sprintf(irq_name, "edma%d", j); |
1712 | irq[j] = platform_get_irq_byname(pdev, irq_name); | 1717 | irq[j] = platform_get_irq_byname(pdev, irq_name); |
1718 | |||
1719 | sprintf(irq_name, "edma%d_err", j); | ||
1720 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1713 | } | 1721 | } |
1714 | edma_cc[j]->irq_res_start = irq[j]; | 1722 | edma_cc[j]->irq_res_start = irq[j]; |
1715 | status = devm_request_irq(&pdev->dev, irq[j], | 1723 | edma_cc[j]->irq_res_end = err_irq[j]; |
1716 | dma_irq_handler, 0, "edma", | 1724 | |
1717 | &pdev->dev); | 1725 | status = devm_request_irq(dev, irq[j], dma_irq_handler, 0, |
1726 | "edma", dev); | ||
1718 | if (status < 0) { | 1727 | if (status < 0) { |
1719 | dev_dbg(&pdev->dev, | 1728 | dev_dbg(&pdev->dev, |
1720 | "devm_request_irq %d failed --> %d\n", | 1729 | "devm_request_irq %d failed --> %d\n", |
@@ -1722,16 +1731,8 @@ static int edma_probe(struct platform_device *pdev) | |||
1722 | return status; | 1731 | return status; |
1723 | } | 1732 | } |
1724 | 1733 | ||
1725 | if (node) { | 1734 | status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0, |
1726 | err_irq[j] = irq_of_parse_and_map(node, 2); | 1735 | "edma_error", dev); |
1727 | } else { | ||
1728 | sprintf(irq_name, "edma%d_err", j); | ||
1729 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1730 | } | ||
1731 | edma_cc[j]->irq_res_end = err_irq[j]; | ||
1732 | status = devm_request_irq(&pdev->dev, err_irq[j], | ||
1733 | dma_ccerr_handler, 0, | ||
1734 | "edma_error", &pdev->dev); | ||
1735 | if (status < 0) { | 1736 | if (status < 0) { |
1736 | dev_dbg(&pdev->dev, | 1737 | dev_dbg(&pdev->dev, |
1737 | "devm_request_irq %d failed --> %d\n", | 1738 | "devm_request_irq %d failed --> %d\n", |
@@ -1742,14 +1743,8 @@ static int edma_probe(struct platform_device *pdev) | |||
1742 | for (i = 0; i < edma_cc[j]->num_channels; i++) | 1743 | for (i = 0; i < edma_cc[j]->num_channels; i++) |
1743 | map_dmach_queue(j, i, info[j]->default_queue); | 1744 | map_dmach_queue(j, i, info[j]->default_queue); |
1744 | 1745 | ||
1745 | queue_tc_mapping = info[j]->queue_tc_mapping; | ||
1746 | queue_priority_mapping = info[j]->queue_priority_mapping; | 1746 | queue_priority_mapping = info[j]->queue_priority_mapping; |
1747 | 1747 | ||
1748 | /* Event queue to TC mapping */ | ||
1749 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) | ||
1750 | map_queue_tc(j, queue_tc_mapping[i][0], | ||
1751 | queue_tc_mapping[i][1]); | ||
1752 | |||
1753 | /* Event queue priority mapping */ | 1748 | /* Event queue priority mapping */ |
1754 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | 1749 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) |
1755 | assign_priority_to_queue(j, | 1750 | assign_priority_to_queue(j, |
@@ -1762,7 +1757,7 @@ static int edma_probe(struct platform_device *pdev) | |||
1762 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | 1757 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) |
1763 | map_dmach_param(j); | 1758 | map_dmach_param(j); |
1764 | 1759 | ||
1765 | for (i = 0; i < info[j]->n_region; i++) { | 1760 | for (i = 0; i < edma_cc[j]->num_region; i++) { |
1766 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); | 1761 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); |
1767 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); | 1762 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); |
1768 | edma_write_array(j, EDMA_QRAE, i, 0x0); | 1763 | edma_write_array(j, EDMA_QRAE, i, 0x0); |
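Since edma_get_position() now returns a single address selected by the bool argument instead of filling two pointers, callers migrate roughly as in the sketch below (hypothetical caller, not part of this hunk; assumes the prototype exported from the eDMA platform header):

#include <linux/platform_data/edma.h>
#include <linux/printk.h>
#include <linux/types.h>

static void show_slot_position(unsigned slot)
{
	dma_addr_t src_pos, dst_pos;

	/* old API: edma_get_position(slot, &src_pos, &dst_pos); */
	src_pos = edma_get_position(slot, false);	/* current source address */
	dst_pos = edma_get_position(slot, true);	/* current destination address */

	pr_debug("slot %u: src %pad dst %pad\n", slot, &src_pos, &dst_pos);
}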
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 56ea41d5f849..b85b781b05fd 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -134,13 +134,6 @@ struct platform_device da8xx_serial_device[] = { | |||
134 | } | 134 | } |
135 | }; | 135 | }; |
136 | 136 | ||
137 | static s8 da8xx_queue_tc_mapping[][2] = { | ||
138 | /* {event queue no, TC no} */ | ||
139 | {0, 0}, | ||
140 | {1, 1}, | ||
141 | {-1, -1} | ||
142 | }; | ||
143 | |||
144 | static s8 da8xx_queue_priority_mapping[][2] = { | 137 | static s8 da8xx_queue_priority_mapping[][2] = { |
145 | /* {event queue no, Priority} */ | 138 | /* {event queue no, Priority} */ |
146 | {0, 3}, | 139 | {0, 3}, |
@@ -148,12 +141,6 @@ static s8 da8xx_queue_priority_mapping[][2] = { | |||
148 | {-1, -1} | 141 | {-1, -1} |
149 | }; | 142 | }; |
150 | 143 | ||
151 | static s8 da850_queue_tc_mapping[][2] = { | ||
152 | /* {event queue no, TC no} */ | ||
153 | {0, 0}, | ||
154 | {-1, -1} | ||
155 | }; | ||
156 | |||
157 | static s8 da850_queue_priority_mapping[][2] = { | 144 | static s8 da850_queue_priority_mapping[][2] = { |
158 | /* {event queue no, Priority} */ | 145 | /* {event queue no, Priority} */ |
159 | {0, 3}, | 146 | {0, 3}, |
@@ -161,12 +148,6 @@ static s8 da850_queue_priority_mapping[][2] = { | |||
161 | }; | 148 | }; |
162 | 149 | ||
163 | static struct edma_soc_info da830_edma_cc0_info = { | 150 | static struct edma_soc_info da830_edma_cc0_info = { |
164 | .n_channel = 32, | ||
165 | .n_region = 4, | ||
166 | .n_slot = 128, | ||
167 | .n_tc = 2, | ||
168 | .n_cc = 1, | ||
169 | .queue_tc_mapping = da8xx_queue_tc_mapping, | ||
170 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 151 | .queue_priority_mapping = da8xx_queue_priority_mapping, |
171 | .default_queue = EVENTQ_1, | 152 | .default_queue = EVENTQ_1, |
172 | }; | 153 | }; |
@@ -177,22 +158,10 @@ static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = { | |||
177 | 158 | ||
178 | static struct edma_soc_info da850_edma_cc_info[] = { | 159 | static struct edma_soc_info da850_edma_cc_info[] = { |
179 | { | 160 | { |
180 | .n_channel = 32, | ||
181 | .n_region = 4, | ||
182 | .n_slot = 128, | ||
183 | .n_tc = 2, | ||
184 | .n_cc = 1, | ||
185 | .queue_tc_mapping = da8xx_queue_tc_mapping, | ||
186 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 161 | .queue_priority_mapping = da8xx_queue_priority_mapping, |
187 | .default_queue = EVENTQ_1, | 162 | .default_queue = EVENTQ_1, |
188 | }, | 163 | }, |
189 | { | 164 | { |
190 | .n_channel = 32, | ||
191 | .n_region = 4, | ||
192 | .n_slot = 128, | ||
193 | .n_tc = 1, | ||
194 | .n_cc = 1, | ||
195 | .queue_tc_mapping = da850_queue_tc_mapping, | ||
196 | .queue_priority_mapping = da850_queue_priority_mapping, | 165 | .queue_priority_mapping = da850_queue_priority_mapping, |
197 | .default_queue = EVENTQ_0, | 166 | .default_queue = EVENTQ_0, |
198 | }, | 167 | }, |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 07381d8cea62..2f3ed3a58d57 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -569,14 +569,6 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
569 | /*----------------------------------------------------------------------*/ | 569 | /*----------------------------------------------------------------------*/ |
570 | 570 | ||
571 | static s8 | 571 | static s8 |
572 | queue_tc_mapping[][2] = { | ||
573 | /* {event queue no, TC no} */ | ||
574 | {0, 0}, | ||
575 | {1, 1}, | ||
576 | {-1, -1}, | ||
577 | }; | ||
578 | |||
579 | static s8 | ||
580 | queue_priority_mapping[][2] = { | 572 | queue_priority_mapping[][2] = { |
581 | /* {event queue no, Priority} */ | 573 | /* {event queue no, Priority} */ |
582 | {0, 3}, | 574 | {0, 3}, |
@@ -585,12 +577,6 @@ queue_priority_mapping[][2] = { | |||
585 | }; | 577 | }; |
586 | 578 | ||
587 | static struct edma_soc_info edma_cc0_info = { | 579 | static struct edma_soc_info edma_cc0_info = { |
588 | .n_channel = 64, | ||
589 | .n_region = 4, | ||
590 | .n_slot = 128, | ||
591 | .n_tc = 2, | ||
592 | .n_cc = 1, | ||
593 | .queue_tc_mapping = queue_tc_mapping, | ||
594 | .queue_priority_mapping = queue_priority_mapping, | 580 | .queue_priority_mapping = queue_priority_mapping, |
595 | .default_queue = EVENTQ_1, | 581 | .default_queue = EVENTQ_1, |
596 | }; | 582 | }; |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 08a61b938333..0ae8114f5cc9 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -853,16 +853,6 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
853 | 853 | ||
854 | /* Four Transfer Controllers on DM365 */ | 854 | /* Four Transfer Controllers on DM365 */ |
855 | static s8 | 855 | static s8 |
856 | dm365_queue_tc_mapping[][2] = { | ||
857 | /* {event queue no, TC no} */ | ||
858 | {0, 0}, | ||
859 | {1, 1}, | ||
860 | {2, 2}, | ||
861 | {3, 3}, | ||
862 | {-1, -1}, | ||
863 | }; | ||
864 | |||
865 | static s8 | ||
866 | dm365_queue_priority_mapping[][2] = { | 856 | dm365_queue_priority_mapping[][2] = { |
867 | /* {event queue no, Priority} */ | 857 | /* {event queue no, Priority} */ |
868 | {0, 7}, | 858 | {0, 7}, |
@@ -873,12 +863,6 @@ dm365_queue_priority_mapping[][2] = { | |||
873 | }; | 863 | }; |
874 | 864 | ||
875 | static struct edma_soc_info edma_cc0_info = { | 865 | static struct edma_soc_info edma_cc0_info = { |
876 | .n_channel = 64, | ||
877 | .n_region = 4, | ||
878 | .n_slot = 256, | ||
879 | .n_tc = 4, | ||
880 | .n_cc = 1, | ||
881 | .queue_tc_mapping = dm365_queue_tc_mapping, | ||
882 | .queue_priority_mapping = dm365_queue_priority_mapping, | 866 | .queue_priority_mapping = dm365_queue_priority_mapping, |
883 | .default_queue = EVENTQ_3, | 867 | .default_queue = EVENTQ_3, |
884 | }; | 868 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 5debffba4b24..dc52657909c4 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -499,14 +499,6 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
499 | /*----------------------------------------------------------------------*/ | 499 | /*----------------------------------------------------------------------*/ |
500 | 500 | ||
501 | static s8 | 501 | static s8 |
502 | queue_tc_mapping[][2] = { | ||
503 | /* {event queue no, TC no} */ | ||
504 | {0, 0}, | ||
505 | {1, 1}, | ||
506 | {-1, -1}, | ||
507 | }; | ||
508 | |||
509 | static s8 | ||
510 | queue_priority_mapping[][2] = { | 502 | queue_priority_mapping[][2] = { |
511 | /* {event queue no, Priority} */ | 503 | /* {event queue no, Priority} */ |
512 | {0, 3}, | 504 | {0, 3}, |
@@ -515,12 +507,6 @@ queue_priority_mapping[][2] = { | |||
515 | }; | 507 | }; |
516 | 508 | ||
517 | static struct edma_soc_info edma_cc0_info = { | 509 | static struct edma_soc_info edma_cc0_info = { |
518 | .n_channel = 64, | ||
519 | .n_region = 4, | ||
520 | .n_slot = 128, | ||
521 | .n_tc = 2, | ||
522 | .n_cc = 1, | ||
523 | .queue_tc_mapping = queue_tc_mapping, | ||
524 | .queue_priority_mapping = queue_priority_mapping, | 510 | .queue_priority_mapping = queue_priority_mapping, |
525 | .default_queue = EVENTQ_1, | 511 | .default_queue = EVENTQ_1, |
526 | }; | 512 | }; |
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 332d00d24dc2..6c3bbea7d77d 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
@@ -533,16 +533,6 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
533 | 533 | ||
534 | /* Four Transfer Controllers on DM646x */ | 534 | /* Four Transfer Controllers on DM646x */ |
535 | static s8 | 535 | static s8 |
536 | dm646x_queue_tc_mapping[][2] = { | ||
537 | /* {event queue no, TC no} */ | ||
538 | {0, 0}, | ||
539 | {1, 1}, | ||
540 | {2, 2}, | ||
541 | {3, 3}, | ||
542 | {-1, -1}, | ||
543 | }; | ||
544 | |||
545 | static s8 | ||
546 | dm646x_queue_priority_mapping[][2] = { | 536 | dm646x_queue_priority_mapping[][2] = { |
547 | /* {event queue no, Priority} */ | 537 | /* {event queue no, Priority} */ |
548 | {0, 4}, | 538 | {0, 4}, |
@@ -553,12 +543,6 @@ dm646x_queue_priority_mapping[][2] = { | |||
553 | }; | 543 | }; |
554 | 544 | ||
555 | static struct edma_soc_info edma_cc0_info = { | 545 | static struct edma_soc_info edma_cc0_info = { |
556 | .n_channel = 64, | ||
557 | .n_region = 6, /* 0-1, 4-7 */ | ||
558 | .n_slot = 512, | ||
559 | .n_tc = 4, | ||
560 | .n_cc = 1, | ||
561 | .queue_tc_mapping = dm646x_queue_tc_mapping, | ||
562 | .queue_priority_mapping = dm646x_queue_priority_mapping, | 546 | .queue_priority_mapping = dm646x_queue_priority_mapping, |
563 | .default_queue = EVENTQ_1, | 547 | .default_queue = EVENTQ_1, |
564 | }; | 548 | }; |
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile index 9f3258880949..788f26d21141 100644 --- a/arch/arm/mach-exynos/Makefile +++ b/arch/arm/mach-exynos/Makefile | |||
@@ -18,7 +18,6 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos.o pmu.o exynos-smc.o firmware.o | |||
18 | 18 | ||
19 | obj-$(CONFIG_PM_SLEEP) += pm.o sleep.o | 19 | obj-$(CONFIG_PM_SLEEP) += pm.o sleep.o |
20 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o | 20 | obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o |
21 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o | ||
22 | 21 | ||
23 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o | 22 | obj-$(CONFIG_SMP) += platsmp.o headsmp.o |
24 | 23 | ||
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 5dba5a1ee6c2..80b90e346ca0 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h | |||
@@ -115,6 +115,7 @@ void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1); | |||
115 | 115 | ||
116 | struct map_desc; | 116 | struct map_desc; |
117 | extern void __iomem *sysram_ns_base_addr; | 117 | extern void __iomem *sysram_ns_base_addr; |
118 | extern void __iomem *sysram_base_addr; | ||
118 | void exynos_init_io(void); | 119 | void exynos_init_io(void); |
119 | void exynos_restart(enum reboot_mode mode, const char *cmd); | 120 | void exynos_restart(enum reboot_mode mode, const char *cmd); |
120 | void exynos_cpuidle_init(void); | 121 | void exynos_cpuidle_init(void); |
@@ -165,6 +166,7 @@ extern int exynos_cpu_power_state(int cpu); | |||
165 | extern void exynos_cluster_power_down(int cluster); | 166 | extern void exynos_cluster_power_down(int cluster); |
166 | extern void exynos_cluster_power_up(int cluster); | 167 | extern void exynos_cluster_power_up(int cluster); |
167 | extern int exynos_cluster_power_state(int cluster); | 168 | extern int exynos_cluster_power_state(int cluster); |
169 | extern void exynos_enter_aftr(void); | ||
168 | 170 | ||
169 | extern void s5p_init_cpu(void __iomem *cpuid_addr); | 171 | extern void s5p_init_cpu(void __iomem *cpuid_addr); |
170 | extern unsigned int samsung_rev(void); | 172 | extern unsigned int samsung_rev(void); |
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c deleted file mode 100644 index 3dd385ebf195..000000000000 --- a/arch/arm/mach-exynos/cpuidle.c +++ /dev/null | |||
@@ -1,255 +0,0 @@ | |||
1 | /* linux/arch/arm/mach-exynos4/cpuidle.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/cpuidle.h> | ||
14 | #include <linux/cpu_pm.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/export.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/time.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | #include <asm/proc-fns.h> | ||
22 | #include <asm/smp_scu.h> | ||
23 | #include <asm/suspend.h> | ||
24 | #include <asm/unified.h> | ||
25 | #include <asm/cpuidle.h> | ||
26 | |||
27 | #include <plat/pm.h> | ||
28 | |||
29 | #include <mach/map.h> | ||
30 | |||
31 | #include "common.h" | ||
32 | #include "regs-pmu.h" | ||
33 | |||
34 | #define REG_DIRECTGO_ADDR (samsung_rev() == EXYNOS4210_REV_1_1 ? \ | ||
35 | S5P_INFORM7 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \ | ||
36 | (S5P_VA_SYSRAM + 0x24) : S5P_INFORM0)) | ||
37 | #define REG_DIRECTGO_FLAG (samsung_rev() == EXYNOS4210_REV_1_1 ? \ | ||
38 | S5P_INFORM6 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \ | ||
39 | (S5P_VA_SYSRAM + 0x20) : S5P_INFORM1)) | ||
40 | |||
41 | #define S5P_CHECK_AFTR 0xFCBA0D10 | ||
42 | |||
43 | #define EXYNOS5_PWR_CTRL1 (S5P_VA_CMU + 0x01020) | ||
44 | #define EXYNOS5_PWR_CTRL2 (S5P_VA_CMU + 0x01024) | ||
45 | |||
46 | #define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28) | ||
47 | #define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16) | ||
48 | #define PWR_CTRL1_DIV2_DOWN_EN (1 << 9) | ||
49 | #define PWR_CTRL1_DIV1_DOWN_EN (1 << 8) | ||
50 | #define PWR_CTRL1_USE_CORE1_WFE (1 << 5) | ||
51 | #define PWR_CTRL1_USE_CORE0_WFE (1 << 4) | ||
52 | #define PWR_CTRL1_USE_CORE1_WFI (1 << 1) | ||
53 | #define PWR_CTRL1_USE_CORE0_WFI (1 << 0) | ||
54 | |||
55 | #define PWR_CTRL2_DIV2_UP_EN (1 << 25) | ||
56 | #define PWR_CTRL2_DIV1_UP_EN (1 << 24) | ||
57 | #define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16) | ||
58 | #define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8) | ||
59 | #define PWR_CTRL2_CORE2_UP_RATIO (1 << 4) | ||
60 | #define PWR_CTRL2_CORE1_UP_RATIO (1 << 0) | ||
61 | |||
62 | static int exynos4_enter_lowpower(struct cpuidle_device *dev, | ||
63 | struct cpuidle_driver *drv, | ||
64 | int index); | ||
65 | |||
66 | static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device); | ||
67 | |||
68 | static struct cpuidle_driver exynos4_idle_driver = { | ||
69 | .name = "exynos4_idle", | ||
70 | .owner = THIS_MODULE, | ||
71 | .states = { | ||
72 | [0] = ARM_CPUIDLE_WFI_STATE, | ||
73 | [1] = { | ||
74 | .enter = exynos4_enter_lowpower, | ||
75 | .exit_latency = 300, | ||
76 | .target_residency = 100000, | ||
77 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
78 | .name = "C1", | ||
79 | .desc = "ARM power down", | ||
80 | }, | ||
81 | }, | ||
82 | .state_count = 2, | ||
83 | .safe_state_index = 0, | ||
84 | }; | ||
85 | |||
86 | /* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ | ||
87 | static void exynos4_set_wakeupmask(void) | ||
88 | { | ||
89 | __raw_writel(0x0000ff3e, S5P_WAKEUP_MASK); | ||
90 | } | ||
91 | |||
92 | static unsigned int g_pwr_ctrl, g_diag_reg; | ||
93 | |||
94 | static void save_cpu_arch_register(void) | ||
95 | { | ||
96 | /*read power control register*/ | ||
97 | asm("mrc p15, 0, %0, c15, c0, 0" : "=r"(g_pwr_ctrl) : : "cc"); | ||
98 | /*read diagnostic register*/ | ||
99 | asm("mrc p15, 0, %0, c15, c0, 1" : "=r"(g_diag_reg) : : "cc"); | ||
100 | return; | ||
101 | } | ||
102 | |||
103 | static void restore_cpu_arch_register(void) | ||
104 | { | ||
105 | /*write power control register*/ | ||
106 | asm("mcr p15, 0, %0, c15, c0, 0" : : "r"(g_pwr_ctrl) : "cc"); | ||
107 | /*write diagnostic register*/ | ||
108 | asm("mcr p15, 0, %0, c15, c0, 1" : : "r"(g_diag_reg) : "cc"); | ||
109 | return; | ||
110 | } | ||
111 | |||
112 | static int idle_finisher(unsigned long flags) | ||
113 | { | ||
114 | cpu_do_idle(); | ||
115 | return 1; | ||
116 | } | ||
117 | |||
118 | static int exynos4_enter_core0_aftr(struct cpuidle_device *dev, | ||
119 | struct cpuidle_driver *drv, | ||
120 | int index) | ||
121 | { | ||
122 | unsigned long tmp; | ||
123 | |||
124 | exynos4_set_wakeupmask(); | ||
125 | |||
126 | /* Set value of power down register for aftr mode */ | ||
127 | exynos_sys_powerdown_conf(SYS_AFTR); | ||
128 | |||
129 | __raw_writel(virt_to_phys(exynos_cpu_resume), REG_DIRECTGO_ADDR); | ||
130 | __raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG); | ||
131 | |||
132 | save_cpu_arch_register(); | ||
133 | |||
134 | /* Setting Central Sequence Register for power down mode */ | ||
135 | tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); | ||
136 | tmp &= ~S5P_CENTRAL_LOWPWR_CFG; | ||
137 | __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); | ||
138 | |||
139 | cpu_pm_enter(); | ||
140 | cpu_suspend(0, idle_finisher); | ||
141 | |||
142 | #ifdef CONFIG_SMP | ||
143 | if (!soc_is_exynos5250()) | ||
144 | scu_enable(S5P_VA_SCU); | ||
145 | #endif | ||
146 | cpu_pm_exit(); | ||
147 | |||
148 | restore_cpu_arch_register(); | ||
149 | |||
150 | /* | ||
151 | * If PMU failed while entering sleep mode, WFI will be | ||
152 | * ignored by PMU and then exiting cpu_do_idle(). | ||
153 | * S5P_CENTRAL_LOWPWR_CFG bit will not be set automatically | ||
154 | * in this situation. | ||
155 | */ | ||
156 | tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); | ||
157 | if (!(tmp & S5P_CENTRAL_LOWPWR_CFG)) { | ||
158 | tmp |= S5P_CENTRAL_LOWPWR_CFG; | ||
159 | __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); | ||
160 | } | ||
161 | |||
162 | /* Clear wakeup state register */ | ||
163 | __raw_writel(0x0, S5P_WAKEUP_STAT); | ||
164 | |||
165 | return index; | ||
166 | } | ||
167 | |||
168 | static int exynos4_enter_lowpower(struct cpuidle_device *dev, | ||
169 | struct cpuidle_driver *drv, | ||
170 | int index) | ||
171 | { | ||
172 | int new_index = index; | ||
173 | |||
174 | /* AFTR can only be entered when cores other than CPU0 are offline */ | ||
175 | if (num_online_cpus() > 1 || dev->cpu != 0) | ||
176 | new_index = drv->safe_state_index; | ||
177 | |||
178 | if (new_index == 0) | ||
179 | return arm_cpuidle_simple_enter(dev, drv, new_index); | ||
180 | else | ||
181 | return exynos4_enter_core0_aftr(dev, drv, new_index); | ||
182 | } | ||
183 | |||
184 | static void __init exynos5_core_down_clk(void) | ||
185 | { | ||
186 | unsigned int tmp; | ||
187 | |||
188 | /* | ||
189 | * Enable arm clock down (in idle) and set arm divider | ||
190 | * ratios in WFI/WFE state. | ||
191 | */ | ||
192 | tmp = PWR_CTRL1_CORE2_DOWN_RATIO | \ | ||
193 | PWR_CTRL1_CORE1_DOWN_RATIO | \ | ||
194 | PWR_CTRL1_DIV2_DOWN_EN | \ | ||
195 | PWR_CTRL1_DIV1_DOWN_EN | \ | ||
196 | PWR_CTRL1_USE_CORE1_WFE | \ | ||
197 | PWR_CTRL1_USE_CORE0_WFE | \ | ||
198 | PWR_CTRL1_USE_CORE1_WFI | \ | ||
199 | PWR_CTRL1_USE_CORE0_WFI; | ||
200 | __raw_writel(tmp, EXYNOS5_PWR_CTRL1); | ||
201 | |||
202 | /* | ||
203 | * Enable arm clock up (on exiting idle). Set arm divider | ||
204 | * ratios when not in idle along with the standby duration | ||
205 | * ratios. | ||
206 | */ | ||
207 | tmp = PWR_CTRL2_DIV2_UP_EN | \ | ||
208 | PWR_CTRL2_DIV1_UP_EN | \ | ||
209 | PWR_CTRL2_DUR_STANDBY2_VAL | \ | ||
210 | PWR_CTRL2_DUR_STANDBY1_VAL | \ | ||
211 | PWR_CTRL2_CORE2_UP_RATIO | \ | ||
212 | PWR_CTRL2_CORE1_UP_RATIO; | ||
213 | __raw_writel(tmp, EXYNOS5_PWR_CTRL2); | ||
214 | } | ||
215 | |||
216 | static int exynos_cpuidle_probe(struct platform_device *pdev) | ||
217 | { | ||
218 | int cpu_id, ret; | ||
219 | struct cpuidle_device *device; | ||
220 | |||
221 | if (soc_is_exynos5250()) | ||
222 | exynos5_core_down_clk(); | ||
223 | |||
224 | if (soc_is_exynos5440()) | ||
225 | exynos4_idle_driver.state_count = 1; | ||
226 | |||
227 | ret = cpuidle_register_driver(&exynos4_idle_driver); | ||
228 | if (ret) { | ||
229 | dev_err(&pdev->dev, "failed to register cpuidle driver\n"); | ||
230 | return ret; | ||
231 | } | ||
232 | |||
233 | for_each_online_cpu(cpu_id) { | ||
234 | device = &per_cpu(exynos4_cpuidle_device, cpu_id); | ||
235 | device->cpu = cpu_id; | ||
236 | |||
237 | ret = cpuidle_register_device(device); | ||
238 | if (ret) { | ||
239 | dev_err(&pdev->dev, "failed to register cpuidle device\n"); | ||
240 | return ret; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static struct platform_driver exynos_cpuidle_driver = { | ||
248 | .probe = exynos_cpuidle_probe, | ||
249 | .driver = { | ||
250 | .name = "exynos_cpuidle", | ||
251 | .owner = THIS_MODULE, | ||
252 | }, | ||
253 | }; | ||
254 | |||
255 | module_platform_driver(exynos_cpuidle_driver); | ||
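The AFTR entry sequence removed here moves into pm.c (further below) and is handed to the new generic driver through platform_data. A sketch of how that hook is expected to be picked up (hypothetical probe, not from this series):

#include <linux/platform_device.h>

static void (*exynos_aftr_hook)(void);

static int exynos_cpuidle_probe(struct platform_device *pdev)
{
	/* exynos.c registers the device with
	 * .dev.platform_data = exynos_enter_aftr */
	exynos_aftr_hook = dev_get_platdata(&pdev->dev);
	if (!exynos_aftr_hook)
		return -ENODEV;

	/* the AFTR idle state's enter callback would call
	 * exynos_aftr_hook() before cpu_suspend() */
	return 0;
}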
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 4800b1ce3d71..bc43e22693b7 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c | |||
@@ -169,12 +169,16 @@ void exynos_restart(enum reboot_mode mode, const char *cmd) | |||
169 | } | 169 | } |
170 | 170 | ||
171 | static struct platform_device exynos_cpuidle = { | 171 | static struct platform_device exynos_cpuidle = { |
172 | .name = "exynos_cpuidle", | 172 | .name = "exynos_cpuidle", |
173 | .id = -1, | 173 | .dev.platform_data = exynos_enter_aftr, |
174 | .id = -1, | ||
174 | }; | 175 | }; |
175 | 176 | ||
176 | void __init exynos_cpuidle_init(void) | 177 | void __init exynos_cpuidle_init(void) |
177 | { | 178 | { |
179 | if (soc_is_exynos5440()) | ||
180 | return; | ||
181 | |||
178 | platform_device_register(&exynos_cpuidle); | 182 | platform_device_register(&exynos_cpuidle); |
179 | } | 183 | } |
180 | 184 | ||
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 112bc66927a1..ec02422e8499 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | extern void exynos4_secondary_startup(void); | 33 | extern void exynos4_secondary_startup(void); |
34 | 34 | ||
35 | static void __iomem *sysram_base_addr; | 35 | void __iomem *sysram_base_addr; |
36 | void __iomem *sysram_ns_base_addr; | 36 | void __iomem *sysram_ns_base_addr; |
37 | 37 | ||
38 | static void __init exynos_smp_prepare_sysram(void) | 38 | static void __init exynos_smp_prepare_sysram(void) |
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c index aba2ff6e443d..87c0d34c7fba 100644 --- a/arch/arm/mach-exynos/pm.c +++ b/arch/arm/mach-exynos/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/suspend.h> | 17 | #include <linux/suspend.h> |
18 | #include <linux/syscore_ops.h> | 18 | #include <linux/syscore_ops.h> |
19 | #include <linux/cpu_pm.h> | ||
19 | #include <linux/io.h> | 20 | #include <linux/io.h> |
20 | #include <linux/irqchip/arm-gic.h> | 21 | #include <linux/irqchip/arm-gic.h> |
21 | #include <linux/err.h> | 22 | #include <linux/err.h> |
@@ -165,9 +166,75 @@ int exynos_cluster_power_state(int cluster) | |||
165 | S5P_CORE_LOCAL_PWR_EN); | 166 | S5P_CORE_LOCAL_PWR_EN); |
166 | } | 167 | } |
167 | 168 | ||
169 | #define EXYNOS_BOOT_VECTOR_ADDR (samsung_rev() == EXYNOS4210_REV_1_1 ? \ | ||
170 | S5P_INFORM7 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \ | ||
171 | (sysram_base_addr + 0x24) : S5P_INFORM0)) | ||
172 | #define EXYNOS_BOOT_VECTOR_FLAG (samsung_rev() == EXYNOS4210_REV_1_1 ? \ | ||
173 | S5P_INFORM6 : (samsung_rev() == EXYNOS4210_REV_1_0 ? \ | ||
174 | (sysram_base_addr + 0x20) : S5P_INFORM1)) | ||
175 | |||
176 | #define S5P_CHECK_AFTR 0xFCBA0D10 | ||
177 | #define S5P_CHECK_SLEEP 0x00000BAD | ||
178 | |||
179 | /* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ | ||
180 | static void exynos_set_wakeupmask(long mask) | ||
181 | { | ||
182 | __raw_writel(mask, S5P_WAKEUP_MASK); | ||
183 | } | ||
184 | |||
185 | static void exynos_cpu_set_boot_vector(long flags) | ||
186 | { | ||
187 | __raw_writel(virt_to_phys(exynos_cpu_resume), EXYNOS_BOOT_VECTOR_ADDR); | ||
188 | __raw_writel(flags, EXYNOS_BOOT_VECTOR_FLAG); | ||
189 | } | ||
190 | |||
191 | void exynos_enter_aftr(void) | ||
192 | { | ||
193 | exynos_set_wakeupmask(0x0000ff3e); | ||
194 | exynos_cpu_set_boot_vector(S5P_CHECK_AFTR); | ||
195 | /* Set value of power down register for aftr mode */ | ||
196 | exynos_sys_powerdown_conf(SYS_AFTR); | ||
197 | } | ||
198 | |||
168 | /* For Cortex-A9 Diagnostic and Power control register */ | 199 | /* For Cortex-A9 Diagnostic and Power control register */ |
169 | static unsigned int save_arm_register[2]; | 200 | static unsigned int save_arm_register[2]; |
170 | 201 | ||
202 | static void exynos_cpu_save_register(void) | ||
203 | { | ||
204 | unsigned long tmp; | ||
205 | |||
206 | /* Save Power control register */ | ||
207 | asm ("mrc p15, 0, %0, c15, c0, 0" | ||
208 | : "=r" (tmp) : : "cc"); | ||
209 | |||
210 | save_arm_register[0] = tmp; | ||
211 | |||
212 | /* Save Diagnostic register */ | ||
213 | asm ("mrc p15, 0, %0, c15, c0, 1" | ||
214 | : "=r" (tmp) : : "cc"); | ||
215 | |||
216 | save_arm_register[1] = tmp; | ||
217 | } | ||
218 | |||
219 | static void exynos_cpu_restore_register(void) | ||
220 | { | ||
221 | unsigned long tmp; | ||
222 | |||
223 | /* Restore Power control register */ | ||
224 | tmp = save_arm_register[0]; | ||
225 | |||
226 | asm volatile ("mcr p15, 0, %0, c15, c0, 0" | ||
227 | : : "r" (tmp) | ||
228 | : "cc"); | ||
229 | |||
230 | /* Restore Diagnostic register */ | ||
231 | tmp = save_arm_register[1]; | ||
232 | |||
233 | asm volatile ("mcr p15, 0, %0, c15, c0, 1" | ||
234 | : : "r" (tmp) | ||
235 | : "cc"); | ||
236 | } | ||
237 | |||
171 | static int exynos_cpu_suspend(unsigned long arg) | 238 | static int exynos_cpu_suspend(unsigned long arg) |
172 | { | 239 | { |
173 | #ifdef CONFIG_CACHE_L2X0 | 240 | #ifdef CONFIG_CACHE_L2X0 |
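exynos_enter_aftr() only programs the wakeup mask, the boot vector and the power-down configuration; something still has to suspend the core. Below is a sketch of the expected call order in a cpuidle AFTR path, with cpu_suspend() saving the context that exynos_cpu_resume later restores. The function names and the surrounding cpuidle plumbing are assumptions, not part of this hunk.

/* Sketch of an AFTR entry path built on the helpers above (names are
 * illustrative). cpu_pm_enter()/cpu_pm_exit() drive the CPU PM notifiers. */
static int exynos_aftr_finisher_sketch(unsigned long flags)
{
        exynos_enter_aftr();    /* wakeup mask, boot vector, SYS_AFTR config */
        cpu_do_idle();          /* WFI: the core is powered down here */

        return 1;
}

static int exynos_enter_core0_aftr_sketch(struct cpuidle_device *dev,
                                          struct cpuidle_driver *drv,
                                          int index)
{
        cpu_pm_enter();
        cpu_suspend(0, exynos_aftr_finisher_sketch);
        cpu_pm_exit();

        return index;
}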
@@ -212,37 +279,34 @@ static void exynos_pm_prepare(void) | |||
212 | __raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); | 279 | __raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); |
213 | } | 280 | } |
214 | 281 | ||
215 | static int exynos_pm_suspend(void) | 282 | static void exynos_pm_central_suspend(void) |
216 | { | 283 | { |
217 | unsigned long tmp; | 284 | unsigned long tmp; |
218 | 285 | ||
219 | /* Setting Central Sequence Register for power down mode */ | 286 | /* Setting Central Sequence Register for power down mode */ |
220 | |||
221 | tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); | 287 | tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); |
222 | tmp &= ~S5P_CENTRAL_LOWPWR_CFG; | 288 | tmp &= ~S5P_CENTRAL_LOWPWR_CFG; |
223 | __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); | 289 | __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); |
290 | } | ||
291 | |||
292 | static int exynos_pm_suspend(void) | ||
293 | { | ||
294 | unsigned long tmp; | ||
295 | |||
296 | exynos_pm_central_suspend(); | ||
224 | 297 | ||
225 | /* Setting SEQ_OPTION register */ | 298 | /* Setting SEQ_OPTION register */ |
226 | 299 | ||
227 | tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); | 300 | tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); |
228 | __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); | 301 | __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); |
229 | 302 | ||
230 | if (!soc_is_exynos5250()) { | 303 | if (!soc_is_exynos5250()) |
231 | /* Save Power control register */ | 304 | exynos_cpu_save_register(); |
232 | asm ("mrc p15, 0, %0, c15, c0, 0" | ||
233 | : "=r" (tmp) : : "cc"); | ||
234 | save_arm_register[0] = tmp; | ||
235 | |||
236 | /* Save Diagnostic register */ | ||
237 | asm ("mrc p15, 0, %0, c15, c0, 1" | ||
238 | : "=r" (tmp) : : "cc"); | ||
239 | save_arm_register[1] = tmp; | ||
240 | } | ||
241 | 305 | ||
242 | return 0; | 306 | return 0; |
243 | } | 307 | } |
244 | 308 | ||
245 | static void exynos_pm_resume(void) | 309 | static int exynos_pm_central_resume(void) |
246 | { | 310 | { |
247 | unsigned long tmp; | 311 | unsigned long tmp; |
248 | 312 | ||
@@ -259,22 +323,20 @@ static void exynos_pm_resume(void) | |||
259 | /* clear the wakeup state register */ | 323 | /* clear the wakeup state register */ |
260 | __raw_writel(0x0, S5P_WAKEUP_STAT); | 324 | __raw_writel(0x0, S5P_WAKEUP_STAT); |
261 | /* No need to perform below restore code */ | 325 | /* No need to perform below restore code */ |
262 | goto early_wakeup; | 326 | return -1; |
263 | } | ||
264 | if (!soc_is_exynos5250()) { | ||
265 | /* Restore Power control register */ | ||
266 | tmp = save_arm_register[0]; | ||
267 | asm volatile ("mcr p15, 0, %0, c15, c0, 0" | ||
268 | : : "r" (tmp) | ||
269 | : "cc"); | ||
270 | |||
271 | /* Restore Diagnostic register */ | ||
272 | tmp = save_arm_register[1]; | ||
273 | asm volatile ("mcr p15, 0, %0, c15, c0, 1" | ||
274 | : : "r" (tmp) | ||
275 | : "cc"); | ||
276 | } | 327 | } |
277 | 328 | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | static void exynos_pm_resume(void) | ||
333 | { | ||
334 | if (exynos_pm_central_resume()) | ||
335 | goto early_wakeup; | ||
336 | |||
337 | if (!soc_is_exynos5250()) | ||
338 | exynos_cpu_restore_register(); | ||
339 | |||
278 | /* For release retention */ | 340 | /* For release retention */ |
279 | 341 | ||
280 | __raw_writel((1 << 28), S5P_PAD_RET_MAUDIO_OPTION); | 342 | __raw_writel((1 << 28), S5P_PAD_RET_MAUDIO_OPTION); |
@@ -291,7 +353,7 @@ static void exynos_pm_resume(void) | |||
291 | 353 | ||
292 | s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); | 354 | s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); |
293 | 355 | ||
294 | if (IS_ENABLED(CONFIG_SMP) && !soc_is_exynos5250()) | 356 | if (!soc_is_exynos5250()) |
295 | scu_enable(S5P_VA_SCU); | 357 | scu_enable(S5P_VA_SCU); |
296 | 358 | ||
297 | early_wakeup: | 359 | early_wakeup: |
@@ -369,10 +431,42 @@ static const struct platform_suspend_ops exynos_suspend_ops = { | |||
369 | .valid = suspend_valid_only_mem, | 431 | .valid = suspend_valid_only_mem, |
370 | }; | 432 | }; |
371 | 433 | ||
434 | static int exynos_cpu_pm_notifier(struct notifier_block *self, | ||
435 | unsigned long cmd, void *v) | ||
436 | { | ||
437 | int cpu = smp_processor_id(); | ||
438 | |||
439 | switch (cmd) { | ||
440 | case CPU_PM_ENTER: | ||
441 | if (cpu == 0) { | ||
442 | exynos_pm_central_suspend(); | ||
443 | exynos_cpu_save_register(); | ||
444 | } | ||
445 | break; | ||
446 | |||
447 | case CPU_PM_EXIT: | ||
448 | if (cpu == 0) { | ||
449 | if (!soc_is_exynos5250()) | ||
450 | scu_enable(S5P_VA_SCU); | ||
451 | exynos_cpu_restore_register(); | ||
452 | exynos_pm_central_resume(); | ||
453 | } | ||
454 | break; | ||
455 | } | ||
456 | |||
457 | return NOTIFY_OK; | ||
458 | } | ||
459 | |||
460 | static struct notifier_block exynos_cpu_pm_notifier_block = { | ||
461 | .notifier_call = exynos_cpu_pm_notifier, | ||
462 | }; | ||
463 | |||
372 | void __init exynos_pm_init(void) | 464 | void __init exynos_pm_init(void) |
373 | { | 465 | { |
374 | u32 tmp; | 466 | u32 tmp; |
375 | 467 | ||
468 | cpu_pm_register_notifier(&exynos_cpu_pm_notifier_block); | ||
469 | |||
376 | /* Platform-specific GIC callback */ | 470 | /* Platform-specific GIC callback */ |
377 | gic_arch_extn.irq_set_wake = exynos_irq_set_wake; | 471 | gic_arch_extn.irq_set_wake = exynos_irq_set_wake; |
378 | 472 | ||
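The notifier above only reacts to CPU_PM_ENTER and CPU_PM_EXIT. The cpu_pm framework can also broadcast CPU_PM_ENTER_FAILED when a later notifier aborts the transition; treating that case like an exit keeps the central sequence and the cp15 registers consistent. The variant below is only a sketch of how the switch could be extended, not something this patch does.

/* Sketch: folding CPU_PM_ENTER_FAILED into the same restore path. */
static int exynos_cpu_pm_notifier_sketch(struct notifier_block *self,
                                         unsigned long cmd, void *v)
{
        int cpu = smp_processor_id();

        switch (cmd) {
        case CPU_PM_ENTER:
                if (cpu == 0) {
                        exynos_pm_central_suspend();
                        exynos_cpu_save_register();
                }
                break;

        case CPU_PM_ENTER_FAILED:       /* low-power entry was vetoed mid-way */
        case CPU_PM_EXIT:
                if (cpu == 0) {
                        if (!soc_is_exynos5250())
                                scu_enable(S5P_VA_SCU);
                        exynos_cpu_restore_register();
                        exynos_pm_central_resume();
                }
                break;
        }

        return NOTIFY_OK;
}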
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h index 4179f6a6d595..1d13b08708f0 100644 --- a/arch/arm/mach-exynos/regs-pmu.h +++ b/arch/arm/mach-exynos/regs-pmu.h | |||
@@ -129,8 +129,6 @@ | |||
129 | #define S5P_CORE_LOCAL_PWR_EN 0x3 | 129 | #define S5P_CORE_LOCAL_PWR_EN 0x3 |
130 | #define S5P_INT_LOCAL_PWR_EN 0x7 | 130 | #define S5P_INT_LOCAL_PWR_EN 0x7 |
131 | 131 | ||
132 | #define S5P_CHECK_SLEEP 0x00000BAD | ||
133 | |||
134 | /* Only for EXYNOS4210 */ | 132 | /* Only for EXYNOS4210 */ |
135 | #define S5P_CMU_CLKSTOP_LCD1_LOWPWR S5P_PMUREG(0x1154) | 133 | #define S5P_CMU_CLKSTOP_LCD1_LOWPWR S5P_PMUREG(0x1154) |
136 | #define S5P_CMU_RESET_LCD1_LOWPWR S5P_PMUREG(0x1174) | 134 | #define S5P_CMU_RESET_LCD1_LOWPWR S5P_PMUREG(0x1174) |
diff --git a/drivers/Kconfig b/drivers/Kconfig index 0a0a90f52d26..0e87a34b6472 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -132,6 +132,8 @@ source "drivers/staging/Kconfig" | |||
132 | 132 | ||
133 | source "drivers/platform/Kconfig" | 133 | source "drivers/platform/Kconfig" |
134 | 134 | ||
135 | source "drivers/soc/Kconfig" | ||
136 | |||
135 | source "drivers/clk/Kconfig" | 137 | source "drivers/clk/Kconfig" |
136 | 138 | ||
137 | source "drivers/hwspinlock/Kconfig" | 139 | source "drivers/hwspinlock/Kconfig" |
diff --git a/drivers/Makefile b/drivers/Makefile index 7183b6af5dac..1a1790e4de6a 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -33,6 +33,9 @@ obj-y += amba/ | |||
33 | # really early. | 33 | # really early. |
34 | obj-$(CONFIG_DMADEVICES) += dma/ | 34 | obj-$(CONFIG_DMADEVICES) += dma/ |
35 | 35 | ||
36 | # SOC specific infrastructure drivers. | ||
37 | obj-y += soc/ | ||
38 | |||
36 | obj-$(CONFIG_VIRTIO) += virtio/ | 39 | obj-$(CONFIG_VIRTIO) += virtio/ |
37 | obj-$(CONFIG_XEN) += xen/ | 40 | obj-$(CONFIG_XEN) += xen/ |
38 | 41 | ||
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 286342778884..a118ec1650fa 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig | |||
@@ -4,6 +4,14 @@ | |||
4 | 4 | ||
5 | menu "Bus devices" | 5 | menu "Bus devices" |
6 | 6 | ||
7 | config BRCMSTB_GISB_ARB | ||
8 | bool "Broadcom STB GISB bus arbiter" | ||
9 | depends on ARM | ||
10 | help | ||
11 | Driver for the Broadcom Set Top Box System-on-a-chip internal bus | ||
12 | arbiter. This driver provides timeout and target abort error handling | ||
13 | and internal bus master decoding. | ||
14 | |||
7 | config IMX_WEIM | 15 | config IMX_WEIM |
8 | bool "Freescale EIM DRIVER" | 16 | bool "Freescale EIM DRIVER" |
9 | depends on ARCH_MXC | 17 | depends on ARCH_MXC |
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index f095aa771de9..6a4ea7e4af1a 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile | |||
@@ -2,6 +2,7 @@ | |||
2 | # Makefile for the bus drivers. | 2 | # Makefile for the bus drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o | ||
5 | obj-$(CONFIG_IMX_WEIM) += imx-weim.o | 6 | obj-$(CONFIG_IMX_WEIM) += imx-weim.o |
6 | obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o | 7 | obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o |
7 | obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o | 8 | obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o |
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c new file mode 100644 index 000000000000..6159b7752a64 --- /dev/null +++ b/drivers/bus/brcmstb_gisb.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Broadcom Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/sysfs.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/list.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/bitops.h> | ||
26 | |||
27 | #include <asm/bug.h> | ||
28 | #include <asm/signal.h> | ||
29 | |||
30 | #define ARB_TIMER 0x008 | ||
31 | #define ARB_ERR_CAP_CLR 0x7e4 | ||
32 | #define ARB_ERR_CAP_CLEAR (1 << 0) | ||
33 | #define ARB_ERR_CAP_HI_ADDR 0x7e8 | ||
34 | #define ARB_ERR_CAP_ADDR 0x7ec | ||
35 | #define ARB_ERR_CAP_DATA 0x7f0 | ||
36 | #define ARB_ERR_CAP_STATUS 0x7f4 | ||
37 | #define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) | ||
38 | #define ARB_ERR_CAP_STATUS_TEA (1 << 11) | ||
39 | #define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) | ||
40 | #define ARB_ERR_CAP_STATUS_BS_MASK 0x3c | ||
41 | #define ARB_ERR_CAP_STATUS_WRITE (1 << 1) | ||
42 | #define ARB_ERR_CAP_STATUS_VALID (1 << 0) | ||
43 | #define ARB_ERR_CAP_MASTER 0x7f8 | ||
44 | |||
45 | struct brcmstb_gisb_arb_device { | ||
46 | void __iomem *base; | ||
47 | struct mutex lock; | ||
48 | struct list_head next; | ||
49 | u32 valid_mask; | ||
50 | const char *master_names[sizeof(u32) * BITS_PER_BYTE]; | ||
51 | }; | ||
52 | |||
53 | static LIST_HEAD(brcmstb_gisb_arb_device_list); | ||
54 | |||
55 | static ssize_t gisb_arb_get_timeout(struct device *dev, | ||
56 | struct device_attribute *attr, | ||
57 | char *buf) | ||
58 | { | ||
59 | struct platform_device *pdev = to_platform_device(dev); | ||
60 | struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); | ||
61 | u32 timeout; | ||
62 | |||
63 | mutex_lock(&gdev->lock); | ||
64 | timeout = ioread32(gdev->base + ARB_TIMER); | ||
65 | mutex_unlock(&gdev->lock); | ||
66 | |||
67 | return sprintf(buf, "%d", timeout); | ||
68 | } | ||
69 | |||
70 | static ssize_t gisb_arb_set_timeout(struct device *dev, | ||
71 | struct device_attribute *attr, | ||
72 | const char *buf, size_t count) | ||
73 | { | ||
74 | struct platform_device *pdev = to_platform_device(dev); | ||
75 | struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); | ||
76 | int val, ret; | ||
77 | |||
78 | ret = kstrtoint(buf, 10, &val); | ||
79 | if (ret < 0) | ||
80 | return ret; | ||
81 | |||
82 | if (val == 0 || val >= 0xffffffff) | ||
83 | return -EINVAL; | ||
84 | |||
85 | mutex_lock(&gdev->lock); | ||
86 | iowrite32(val, gdev->base + ARB_TIMER); | ||
87 | mutex_unlock(&gdev->lock); | ||
88 | |||
89 | return count; | ||
90 | } | ||
91 | |||
92 | static const char * | ||
93 | brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev, | ||
94 | u32 masters) | ||
95 | { | ||
96 | u32 mask = gdev->valid_mask & masters; | ||
97 | |||
98 | if (hweight_long(mask) != 1) | ||
99 | return NULL; | ||
100 | |||
101 | return gdev->master_names[ffs(mask) - 1]; | ||
102 | } | ||
103 | |||
104 | static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, | ||
105 | const char *reason) | ||
106 | { | ||
107 | u32 cap_status; | ||
108 | unsigned long arb_addr; | ||
109 | u32 master; | ||
110 | const char *m_name; | ||
111 | char m_fmt[11]; | ||
112 | |||
113 | cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS); | ||
114 | |||
115 | /* Invalid captured address, bail out */ | ||
116 | if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) | ||
117 | return 1; | ||
118 | |||
119 | /* Read the address and master */ | ||
120 | arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff; | ||
121 | #if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) | ||
122 | arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32; | ||
123 | #endif | ||
124 | master = ioread32(gdev->base + ARB_ERR_CAP_MASTER); | ||
125 | |||
126 | m_name = brcmstb_gisb_master_to_str(gdev, master); | ||
127 | if (!m_name) { | ||
128 | snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master); | ||
129 | m_name = m_fmt; | ||
130 | } | ||
131 | |||
132 | pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", | ||
133 | __func__, reason, arb_addr, | ||
134 | cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', | ||
135 | cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", | ||
136 | m_name); | ||
137 | |||
138 | /* clear the GISB error */ | ||
139 | iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr, | ||
145 | struct pt_regs *regs) | ||
146 | { | ||
147 | int ret = 0; | ||
148 | struct brcmstb_gisb_arb_device *gdev; | ||
149 | |||
150 | /* iterate over each registered GISB arbiter handler */ | ||
151 | list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) | ||
152 | ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error"); | ||
153 | /* | ||
154 | * If it was an imprecise abort, then we need to correct the | ||
155 | * return address to be _after_ the instruction. | ||
156 | */ | ||
157 | if (fsr & (1 << 10)) | ||
158 | regs->ARM_pc += 4; | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | void __init brcmstb_hook_fault_code(void) | ||
164 | { | ||
165 | hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0, | ||
166 | "imprecise external abort"); | ||
167 | } | ||
168 | |||
169 | static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id) | ||
170 | { | ||
171 | brcmstb_gisb_arb_decode_addr(dev_id, "timeout"); | ||
172 | |||
173 | return IRQ_HANDLED; | ||
174 | } | ||
175 | |||
176 | static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id) | ||
177 | { | ||
178 | brcmstb_gisb_arb_decode_addr(dev_id, "target abort"); | ||
179 | |||
180 | return IRQ_HANDLED; | ||
181 | } | ||
182 | |||
183 | static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO, | ||
184 | gisb_arb_get_timeout, gisb_arb_set_timeout); | ||
185 | |||
186 | static struct attribute *gisb_arb_sysfs_attrs[] = { | ||
187 | &dev_attr_gisb_arb_timeout.attr, | ||
188 | NULL, | ||
189 | }; | ||
190 | |||
191 | static struct attribute_group gisb_arb_sysfs_attr_group = { | ||
192 | .attrs = gisb_arb_sysfs_attrs, | ||
193 | }; | ||
194 | |||
195 | static int brcmstb_gisb_arb_probe(struct platform_device *pdev) | ||
196 | { | ||
197 | struct device_node *dn = pdev->dev.of_node; | ||
198 | struct brcmstb_gisb_arb_device *gdev; | ||
199 | struct resource *r; | ||
200 | int err, timeout_irq, tea_irq; | ||
201 | unsigned int num_masters, j = 0; | ||
202 | int i, first, last; | ||
203 | |||
204 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
205 | timeout_irq = platform_get_irq(pdev, 0); | ||
206 | tea_irq = platform_get_irq(pdev, 1); | ||
207 | |||
208 | gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL); | ||
209 | if (!gdev) | ||
210 | return -ENOMEM; | ||
211 | |||
212 | mutex_init(&gdev->lock); | ||
213 | INIT_LIST_HEAD(&gdev->next); | ||
214 | |||
215 | gdev->base = devm_request_and_ioremap(&pdev->dev, r); | ||
216 | if (!gdev->base) | ||
217 | return -ENOMEM; | ||
218 | |||
219 | err = devm_request_irq(&pdev->dev, timeout_irq, | ||
220 | brcmstb_gisb_timeout_handler, 0, pdev->name, | ||
221 | gdev); | ||
222 | if (err < 0) | ||
223 | return err; | ||
224 | |||
225 | err = devm_request_irq(&pdev->dev, tea_irq, | ||
226 | brcmstb_gisb_tea_handler, 0, pdev->name, | ||
227 | gdev); | ||
228 | if (err < 0) | ||
229 | return err; | ||
230 | |||
231 | /* If we do not have a valid mask, assume all masters are enabled */ | ||
232 | if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask", | ||
233 | &gdev->valid_mask)) | ||
234 | gdev->valid_mask = 0xffffffff; | ||
235 | |||
236 | /* Proceed with reading the literal names if we agree on the | ||
237 | * number of masters | ||
238 | */ | ||
239 | num_masters = of_property_count_strings(dn, | ||
240 | "brcm,gisb-arb-master-names"); | ||
241 | if (hweight_long(gdev->valid_mask) == num_masters) { | ||
242 | first = ffs(gdev->valid_mask) - 1; | ||
243 | last = fls(gdev->valid_mask) - 1; | ||
244 | |||
245 | for (i = first; i <= last; i++) { | ||
246 | if (!(gdev->valid_mask & BIT(i))) | ||
247 | continue; | ||
248 | |||
249 | of_property_read_string_index(dn, | ||
250 | "brcm,gisb-arb-master-names", j, | ||
251 | &gdev->master_names[i]); | ||
252 | j++; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group); | ||
257 | if (err) | ||
258 | return err; | ||
259 | |||
260 | platform_set_drvdata(pdev, gdev); | ||
261 | |||
262 | list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); | ||
263 | |||
264 | dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n", | ||
265 | gdev->base, timeout_irq, tea_irq); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static const struct of_device_id brcmstb_gisb_arb_of_match[] = { | ||
271 | { .compatible = "brcm,gisb-arb" }, | ||
272 | { }, | ||
273 | }; | ||
274 | |||
275 | static struct platform_driver brcmstb_gisb_arb_driver = { | ||
276 | .probe = brcmstb_gisb_arb_probe, | ||
277 | .driver = { | ||
278 | .name = "brcm-gisb-arb", | ||
279 | .owner = THIS_MODULE, | ||
280 | .of_match_table = brcmstb_gisb_arb_of_match, | ||
281 | }, | ||
282 | }; | ||
283 | |||
284 | static int __init brcm_gisb_driver_init(void) | ||
285 | { | ||
286 | return platform_driver_register(&brcmstb_gisb_arb_driver); | ||
287 | } | ||
288 | |||
289 | module_init(brcm_gisb_driver_init); | ||
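brcmstb_gisb_arb_probe() wires the two interrupts up itself, but brcmstb_hook_fault_code() is left for the platform code to call so that ARM imprecise external aborts (fault code 22) are also routed through the error-capture decoder. A sketch of that call from a machine descriptor follows; the machine name, compatible string and init hook are purely illustrative.

/* Sketch: machine init calling into the driver above (illustrative only). */
#include <asm/mach/arch.h>

extern void brcmstb_hook_fault_code(void);

static void __init brcmstb_init_early_sketch(void)
{
        /* Report imprecise external aborts via the GISB error capture logic */
        brcmstb_hook_fault_code();
}

static const char *const brcmstb_match_sketch[] __initconst = {
        "brcm,brcmstb",         /* hypothetical board compatible */
        NULL,
};

DT_MACHINE_START(BRCMSTB_SKETCH, "Broadcom STB (sketch)")
        .dt_compat      = brcmstb_match_sketch,
        .init_early     = brcmstb_init_early_sketch,
MACHINE_END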
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index feeecae623f6..531ae591783b 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c | |||
@@ -1,43 +1,45 @@ | |||
1 | /* | 1 | /* |
2 | * OMAP4XXX L3 Interconnect error handling driver | 2 | * OMAP L3 Interconnect error handling driver |
3 | * | 3 | * |
4 | * Copyright (C) 2011 Texas Corporation | 4 | * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ |
5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | 5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
6 | * Sricharan <r.sricharan@ti.com> | 6 | * Sricharan <r.sricharan@ti.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * published by the Free Software Foundation. |
11 | * (at your option) any later version. | ||
12 | * | 11 | * |
13 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * kind, whether express or implied; without even the implied warranty |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
21 | * USA | ||
22 | */ | 16 | */ |
23 | #include <linux/module.h> | ||
24 | #include <linux/init.h> | 17 | #include <linux/init.h> |
25 | #include <linux/io.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/io.h> | ||
28 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/of_device.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/platform_device.h> | ||
29 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
30 | 26 | ||
31 | #include "omap_l3_noc.h" | 27 | #include "omap_l3_noc.h" |
32 | 28 | ||
33 | /* | 29 | /** |
34 | * Interrupt Handler for L3 error detection. | 30 | * l3_handle_target() - Handle Target specific parse and reporting |
35 | * 1) Identify the L3 clockdomain partition to which the error belongs to. | 31 | * @l3: pointer to l3 struct |
36 | * 2) Identify the slave where the error information is logged | 32 | * @base: base address of clkdm |
37 | * 3) Print the logged information. | 33 | * @flag_mux: flagmux corresponding to the event |
38 | * 4) Add dump stack to provide kernel trace. | 34 | * @err_src: error source index of the slave (target) |
39 | * | 35 | * |
40 | * Two Types of errors : | 36 | * This does the second part of the error interrupt handling: |
37 | * 3) Parse in the slave information | ||
38 | * 4) Print the logged information. | ||
39 | * 5) Add dump stack to provide kernel trace. | ||
40 | * 6) Clear the source if known. | ||
41 | * | ||
42 | * This handles two types of errors: | ||
41 | * 1) Custom errors in L3 : | 43 | * 1) Custom errors in L3 : |
42 | * Target like DMM/FW/EMIF generates SRESP=ERR error | 44 | * Target like DMM/FW/EMIF generates SRESP=ERR error |
43 | * 2) Standard L3 error: | 45 | * 2) Standard L3 error: |
@@ -53,214 +55,264 @@ | |||
53 | * can be trapped as well. But the trapping is implemented as part | 55 | * can be trapped as well. But the trapping is implemented as part |
54 | * secure software and hence need not be implemented here. | 56 | * secure software and hence need not be implemented here. |
55 | */ | 57 | */ |
56 | static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | 58 | static int l3_handle_target(struct omap_l3 *l3, void __iomem *base, |
59 | struct l3_flagmux_data *flag_mux, int err_src) | ||
57 | { | 60 | { |
61 | int k; | ||
62 | u32 std_err_main, clear, masterid; | ||
63 | u8 op_code, m_req_info; | ||
64 | void __iomem *l3_targ_base; | ||
65 | void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr; | ||
66 | void __iomem *l3_targ_hdr, *l3_targ_info; | ||
67 | struct l3_target_data *l3_targ_inst; | ||
68 | struct l3_masters_data *master; | ||
69 | char *target_name, *master_name = "UN IDENTIFIED"; | ||
70 | char *err_description; | ||
71 | char err_string[30] = { 0 }; | ||
72 | char info_string[60] = { 0 }; | ||
73 | |||
74 | /* We do NOT expect err_src to go out of bounds */ | ||
75 | BUG_ON(err_src > MAX_CLKDM_TARGETS); | ||
76 | |||
77 | if (err_src < flag_mux->num_targ_data) { | ||
78 | l3_targ_inst = &flag_mux->l3_targ[err_src]; | ||
79 | target_name = l3_targ_inst->name; | ||
80 | l3_targ_base = base + l3_targ_inst->offset; | ||
81 | } else { | ||
82 | target_name = L3_TARGET_NOT_SUPPORTED; | ||
83 | } | ||
58 | 84 | ||
59 | struct omap4_l3 *l3 = _l3; | 85 | if (target_name == L3_TARGET_NOT_SUPPORTED) |
60 | int inttype, i, k; | 86 | return -ENODEV; |
87 | |||
88 | /* Read the stderrlog_main_source from clk domain */ | ||
89 | l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN; | ||
90 | l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB; | ||
91 | |||
92 | std_err_main = readl_relaxed(l3_targ_stderr); | ||
93 | |||
94 | switch (std_err_main & CUSTOM_ERROR) { | ||
95 | case STANDARD_ERROR: | ||
96 | err_description = "Standard"; | ||
97 | snprintf(err_string, sizeof(err_string), | ||
98 | ": At Address: 0x%08X ", | ||
99 | readl_relaxed(l3_targ_slvofslsb)); | ||
100 | |||
101 | l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR; | ||
102 | l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR; | ||
103 | l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO; | ||
104 | break; | ||
105 | |||
106 | case CUSTOM_ERROR: | ||
107 | err_description = "Custom"; | ||
108 | |||
109 | l3_targ_mstaddr = l3_targ_base + | ||
110 | L3_TARG_STDERRLOG_CINFO_MSTADDR; | ||
111 | l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE; | ||
112 | l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO; | ||
113 | break; | ||
114 | |||
115 | default: | ||
116 | /* Nothing to be handled here as of now */ | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /* STDERRLOG_MSTADDR Stores the NTTP master address. */ | ||
121 | masterid = (readl_relaxed(l3_targ_mstaddr) & | ||
122 | l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask); | ||
123 | |||
124 | for (k = 0, master = l3->l3_masters; k < l3->num_masters; | ||
125 | k++, master++) { | ||
126 | if (masterid == master->id) { | ||
127 | master_name = master->name; | ||
128 | break; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | op_code = readl_relaxed(l3_targ_hdr) & 0x7; | ||
133 | |||
134 | m_req_info = readl_relaxed(l3_targ_info) & 0xF; | ||
135 | snprintf(info_string, sizeof(info_string), | ||
136 | ": %s in %s mode during %s access", | ||
137 | (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access", | ||
138 | (m_req_info & BIT(1)) ? "Supervisor" : "User", | ||
139 | (m_req_info & BIT(3)) ? "Debug" : "Functional"); | ||
140 | |||
141 | WARN(true, | ||
142 | "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n", | ||
143 | dev_name(l3->dev), | ||
144 | err_description, | ||
145 | master_name, target_name, | ||
146 | l3_transaction_type[op_code], | ||
147 | err_string, info_string); | ||
148 | |||
149 | /* clear the std error log*/ | ||
150 | clear = std_err_main | CLEAR_STDERR_LOG; | ||
151 | writel_relaxed(clear, l3_targ_stderr); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * l3_interrupt_handler() - interrupt handler for l3 events | ||
158 | * @irq: irq number | ||
159 | * @_l3: pointer to l3 structure | ||
160 | * | ||
161 | * Interrupt Handler for L3 error detection. | ||
162 | * 1) Identify the L3 clockdomain partition to which the error belongs. | ||
163 | * 2) Identify the slave where the error information is logged | ||
164 | * 3-6) handle the slave event via l3_handle_target() | ||
165 | * 7) If the slave is unknown, mask out the slave. | ||
166 | */ | ||
167 | static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | ||
168 | { | ||
169 | struct omap_l3 *l3 = _l3; | ||
170 | int inttype, i, ret; | ||
61 | int err_src = 0; | 171 | int err_src = 0; |
62 | u32 std_err_main, err_reg, clear, masterid; | 172 | u32 err_reg, mask_val; |
63 | void __iomem *base, *l3_targ_base; | 173 | void __iomem *base, *mask_reg; |
64 | char *target_name, *master_name = "UN IDENTIFIED"; | 174 | struct l3_flagmux_data *flag_mux; |
65 | 175 | ||
66 | /* Get the Type of interrupt */ | 176 | /* Get the Type of interrupt */ |
67 | inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; | 177 | inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; |
68 | 178 | ||
69 | for (i = 0; i < L3_MODULES; i++) { | 179 | for (i = 0; i < l3->num_modules; i++) { |
70 | /* | 180 | /* |
71 | * Read the regerr register of the clock domain | 181 | * Read the regerr register of the clock domain |
72 | * to determine the source | 182 | * to determine the source |
73 | */ | 183 | */ |
74 | base = l3->l3_base[i]; | 184 | base = l3->l3_base[i]; |
75 | err_reg = __raw_readl(base + l3_flagmux[i] + | 185 | flag_mux = l3->l3_flagmux[i]; |
76 | + L3_FLAGMUX_REGERR0 + (inttype << 3)); | 186 | err_reg = readl_relaxed(base + flag_mux->offset + |
187 | L3_FLAGMUX_REGERR0 + (inttype << 3)); | ||
188 | |||
189 | err_reg &= ~(inttype ? flag_mux->mask_app_bits : | ||
190 | flag_mux->mask_dbg_bits); | ||
77 | 191 | ||
78 | /* Get the corresponding error and analyse */ | 192 | /* Get the corresponding error and analyse */ |
79 | if (err_reg) { | 193 | if (err_reg) { |
80 | /* Identify the source from control status register */ | 194 | /* Identify the source from control status register */ |
81 | err_src = __ffs(err_reg); | 195 | err_src = __ffs(err_reg); |
82 | 196 | ||
83 | /* Read the stderrlog_main_source from clk domain */ | 197 | ret = l3_handle_target(l3, base, flag_mux, err_src); |
84 | l3_targ_base = base + *(l3_targ[i] + err_src); | 198 | |
85 | std_err_main = __raw_readl(l3_targ_base + | 199 | /* |
86 | L3_TARG_STDERRLOG_MAIN); | 200 | * Certain platforms may have "undocumented" status |
87 | masterid = __raw_readl(l3_targ_base + | 201 | * pending on boot. So don't generate a severe warning |
88 | L3_TARG_STDERRLOG_MSTADDR); | 202 | * here. Just mask it off to prevent the error from |
89 | 203 | * recurring and locking up the system. | |
90 | switch (std_err_main & CUSTOM_ERROR) { | 204 | */ |
91 | case STANDARD_ERROR: | 205 | if (ret) { |
92 | target_name = | 206 | dev_err(l3->dev, |
93 | l3_targ_inst_name[i][err_src]; | 207 | "L3 %s error: target %d mod:%d %s\n", |
94 | WARN(true, "L3 standard error: TARGET:%s at address 0x%x\n", | 208 | inttype ? "debug" : "application", |
95 | target_name, | 209 | err_src, i, "(unclearable)"); |
96 | __raw_readl(l3_targ_base + | 210 | |
97 | L3_TARG_STDERRLOG_SLVOFSLSB)); | 211 | mask_reg = base + flag_mux->offset + |
98 | /* clear the std error log*/ | 212 | L3_FLAGMUX_MASK0 + (inttype << 3); |
99 | clear = std_err_main | CLEAR_STDERR_LOG; | 213 | mask_val = readl_relaxed(mask_reg); |
100 | writel(clear, l3_targ_base + | 214 | mask_val &= ~(1 << err_src); |
101 | L3_TARG_STDERRLOG_MAIN); | 215 | writel_relaxed(mask_val, mask_reg); |
102 | break; | 216 | |
103 | 217 | /* Mark these bits as to be ignored */ | |
104 | case CUSTOM_ERROR: | 218 | if (inttype) |
105 | target_name = | 219 | flag_mux->mask_app_bits |= 1 << err_src; |
106 | l3_targ_inst_name[i][err_src]; | 220 | else |
107 | for (k = 0; k < NUM_OF_L3_MASTERS; k++) { | 221 | flag_mux->mask_dbg_bits |= 1 << err_src; |
108 | if (masterid == l3_masters[k].id) | ||
109 | master_name = | ||
110 | l3_masters[k].name; | ||
111 | } | ||
112 | WARN(true, "L3 custom error: MASTER:%s TARGET:%s\n", | ||
113 | master_name, target_name); | ||
114 | /* clear the std error log*/ | ||
115 | clear = std_err_main | CLEAR_STDERR_LOG; | ||
116 | writel(clear, l3_targ_base + | ||
117 | L3_TARG_STDERRLOG_MAIN); | ||
118 | break; | ||
119 | |||
120 | default: | ||
121 | /* Nothing to be handled here as of now */ | ||
122 | break; | ||
123 | } | 222 | } |
124 | /* Error found so break the for loop */ | 223 | |
125 | break; | 224 | /* Error found so break the for loop */ |
225 | break; | ||
126 | } | 226 | } |
127 | } | 227 | } |
128 | return IRQ_HANDLED; | 228 | return IRQ_HANDLED; |
129 | } | 229 | } |
130 | 230 | ||
131 | static int omap4_l3_probe(struct platform_device *pdev) | 231 | static const struct of_device_id l3_noc_match[] = { |
232 | {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data}, | ||
233 | {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, | ||
234 | {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, | ||
235 | {}, | ||
236 | }; | ||
237 | MODULE_DEVICE_TABLE(of, l3_noc_match); | ||
238 | |||
239 | static int omap_l3_probe(struct platform_device *pdev) | ||
132 | { | 240 | { |
133 | static struct omap4_l3 *l3; | 241 | const struct of_device_id *of_id; |
134 | struct resource *res; | 242 | static struct omap_l3 *l3; |
135 | int ret; | 243 | int ret, i, res_idx; |
244 | |||
245 | of_id = of_match_device(l3_noc_match, &pdev->dev); | ||
246 | if (!of_id) { | ||
247 | dev_err(&pdev->dev, "OF data missing\n"); | ||
248 | return -EINVAL; | ||
249 | } | ||
136 | 250 | ||
137 | l3 = kzalloc(sizeof(*l3), GFP_KERNEL); | 251 | l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL); |
138 | if (!l3) | 252 | if (!l3) |
139 | return -ENOMEM; | 253 | return -ENOMEM; |
140 | 254 | ||
255 | memcpy(l3, of_id->data, sizeof(*l3)); | ||
256 | l3->dev = &pdev->dev; | ||
141 | platform_set_drvdata(pdev, l3); | 257 | platform_set_drvdata(pdev, l3); |
142 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
143 | if (!res) { | ||
144 | dev_err(&pdev->dev, "couldn't find resource 0\n"); | ||
145 | ret = -ENODEV; | ||
146 | goto err0; | ||
147 | } | ||
148 | |||
149 | l3->l3_base[0] = ioremap(res->start, resource_size(res)); | ||
150 | if (!l3->l3_base[0]) { | ||
151 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
152 | ret = -ENOMEM; | ||
153 | goto err0; | ||
154 | } | ||
155 | |||
156 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
157 | if (!res) { | ||
158 | dev_err(&pdev->dev, "couldn't find resource 1\n"); | ||
159 | ret = -ENODEV; | ||
160 | goto err1; | ||
161 | } | ||
162 | |||
163 | l3->l3_base[1] = ioremap(res->start, resource_size(res)); | ||
164 | if (!l3->l3_base[1]) { | ||
165 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
166 | ret = -ENOMEM; | ||
167 | goto err1; | ||
168 | } | ||
169 | 258 | ||
170 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | 259 | /* Get mem resources */ |
171 | if (!res) { | 260 | for (i = 0, res_idx = 0; i < l3->num_modules; i++) { |
172 | dev_err(&pdev->dev, "couldn't find resource 2\n"); | 261 | struct resource *res; |
173 | ret = -ENODEV; | ||
174 | goto err2; | ||
175 | } | ||
176 | 262 | ||
177 | l3->l3_base[2] = ioremap(res->start, resource_size(res)); | 263 | if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) { |
178 | if (!l3->l3_base[2]) { | 264 | /* First entry cannot be submodule */ |
179 | dev_err(&pdev->dev, "ioremap failed\n"); | 265 | BUG_ON(i == 0); |
180 | ret = -ENOMEM; | 266 | l3->l3_base[i] = l3->l3_base[i - 1]; |
181 | goto err2; | 267 | continue; |
268 | } | ||
269 | res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx); | ||
270 | l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res); | ||
271 | if (IS_ERR(l3->l3_base[i])) { | ||
272 | dev_err(l3->dev, "ioremap %d failed\n", i); | ||
273 | return PTR_ERR(l3->l3_base[i]); | ||
274 | } | ||
275 | res_idx++; | ||
182 | } | 276 | } |
183 | 277 | ||
184 | /* | 278 | /* |
185 | * Setup interrupt Handlers | 279 | * Setup interrupt Handlers |
186 | */ | 280 | */ |
187 | l3->debug_irq = platform_get_irq(pdev, 0); | 281 | l3->debug_irq = platform_get_irq(pdev, 0); |
188 | ret = request_irq(l3->debug_irq, | 282 | ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, |
189 | l3_interrupt_handler, | 283 | IRQF_DISABLED, "l3-dbg-irq", l3); |
190 | IRQF_DISABLED, "l3-dbg-irq", l3); | ||
191 | if (ret) { | 284 | if (ret) { |
192 | pr_crit("L3: request_irq failed to register for 0x%x\n", | 285 | dev_err(l3->dev, "request_irq failed for %d\n", |
193 | l3->debug_irq); | 286 | l3->debug_irq); |
194 | goto err3; | 287 | return ret; |
195 | } | 288 | } |
196 | 289 | ||
197 | l3->app_irq = platform_get_irq(pdev, 1); | 290 | l3->app_irq = platform_get_irq(pdev, 1); |
198 | ret = request_irq(l3->app_irq, | 291 | ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, |
199 | l3_interrupt_handler, | 292 | IRQF_DISABLED, "l3-app-irq", l3); |
200 | IRQF_DISABLED, "l3-app-irq", l3); | 293 | if (ret) |
201 | if (ret) { | 294 | dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); |
202 | pr_crit("L3: request_irq failed to register for 0x%x\n", | ||
203 | l3->app_irq); | ||
204 | goto err4; | ||
205 | } | ||
206 | 295 | ||
207 | return 0; | ||
208 | |||
209 | err4: | ||
210 | free_irq(l3->debug_irq, l3); | ||
211 | err3: | ||
212 | iounmap(l3->l3_base[2]); | ||
213 | err2: | ||
214 | iounmap(l3->l3_base[1]); | ||
215 | err1: | ||
216 | iounmap(l3->l3_base[0]); | ||
217 | err0: | ||
218 | kfree(l3); | ||
219 | return ret; | 296 | return ret; |
220 | } | 297 | } |
221 | 298 | ||
222 | static int omap4_l3_remove(struct platform_device *pdev) | 299 | static struct platform_driver omap_l3_driver = { |
223 | { | 300 | .probe = omap_l3_probe, |
224 | struct omap4_l3 *l3 = platform_get_drvdata(pdev); | ||
225 | |||
226 | free_irq(l3->app_irq, l3); | ||
227 | free_irq(l3->debug_irq, l3); | ||
228 | iounmap(l3->l3_base[0]); | ||
229 | iounmap(l3->l3_base[1]); | ||
230 | iounmap(l3->l3_base[2]); | ||
231 | kfree(l3); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | #if defined(CONFIG_OF) | ||
237 | static const struct of_device_id l3_noc_match[] = { | ||
238 | {.compatible = "ti,omap4-l3-noc", }, | ||
239 | {}, | ||
240 | }; | ||
241 | MODULE_DEVICE_TABLE(of, l3_noc_match); | ||
242 | #else | ||
243 | #define l3_noc_match NULL | ||
244 | #endif | ||
245 | |||
246 | static struct platform_driver omap4_l3_driver = { | ||
247 | .probe = omap4_l3_probe, | ||
248 | .remove = omap4_l3_remove, | ||
249 | .driver = { | 301 | .driver = { |
250 | .name = "omap_l3_noc", | 302 | .name = "omap_l3_noc", |
251 | .owner = THIS_MODULE, | 303 | .owner = THIS_MODULE, |
252 | .of_match_table = l3_noc_match, | 304 | .of_match_table = of_match_ptr(l3_noc_match), |
253 | }, | 305 | }, |
254 | }; | 306 | }; |
255 | 307 | ||
256 | static int __init omap4_l3_init(void) | 308 | static int __init omap_l3_init(void) |
257 | { | 309 | { |
258 | return platform_driver_register(&omap4_l3_driver); | 310 | return platform_driver_register(&omap_l3_driver); |
259 | } | 311 | } |
260 | postcore_initcall_sync(omap4_l3_init); | 312 | postcore_initcall_sync(omap_l3_init); |
261 | 313 | ||
262 | static void __exit omap4_l3_exit(void) | 314 | static void __exit omap_l3_exit(void) |
263 | { | 315 | { |
264 | platform_driver_unregister(&omap4_l3_driver); | 316 | platform_driver_unregister(&omap_l3_driver); |
265 | } | 317 | } |
266 | module_exit(omap4_l3_exit); | 318 | module_exit(omap_l3_exit); |
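The per-SoC omap_l3 templates encode how the initiator is recovered from STDERRLOG_MSTADDR: the register value is masked with mst_addr_mask and shifted down by the mask's lowest set bit. A small worked example of that extraction follows; the raw register value is made up for illustration.

/* Worked example of the master-ID decode used in l3_handle_target(). */
static u32 l3_decode_masterid_example(void)
{
        u32 mst_addr_mask = 0xFC;       /* 6 MSBs identify the initiator */
        u32 raw_mstaddr = 0x6B;         /* hypothetical STDERRLOG_MSTADDR value */

        /* (0x6B & 0xFC) >> 2 == 0x1A, which the DRA7 table maps to "SDMA" */
        return (raw_mstaddr & mst_addr_mask) >> __ffs(mst_addr_mask);
}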
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index a6ce34dc4814..551e01061434 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h | |||
@@ -1,29 +1,25 @@ | |||
1 | /* | 1 | /* |
2 | * OMAP4XXX L3 Interconnect error handling driver header | 2 | * OMAP L3 Interconnect error handling driver header |
3 | * | 3 | * |
4 | * Copyright (C) 2011 Texas Corporation | 4 | * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ |
5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | 5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
6 | * sricharan <r.sricharan@ti.com> | 6 | * sricharan <r.sricharan@ti.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License version 2 as |
10 | * the Free Software Foundation; either version 2 of the License, or | 10 | * published by the Free Software Foundation. |
11 | * (at your option) any later version. | ||
12 | * | 11 | * |
13 | * This program is distributed in the hope that it will be useful, | 12 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * kind, whether express or implied; without even the implied warranty |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
21 | * USA | ||
22 | */ | 16 | */ |
23 | #ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H | 17 | #ifndef __OMAP_L3_NOC_H |
24 | #define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H | 18 | #define __OMAP_L3_NOC_H |
19 | |||
20 | #define MAX_L3_MODULES 3 | ||
21 | #define MAX_CLKDM_TARGETS 31 | ||
25 | 22 | ||
26 | #define L3_MODULES 3 | ||
27 | #define CLEAR_STDERR_LOG (1 << 31) | 23 | #define CLEAR_STDERR_LOG (1 << 31) |
28 | #define CUSTOM_ERROR 0x2 | 24 | #define CUSTOM_ERROR 0x2 |
29 | #define STANDARD_ERROR 0x0 | 25 | #define STANDARD_ERROR 0x0 |
@@ -33,63 +29,165 @@ | |||
33 | 29 | ||
34 | /* L3 TARG register offsets */ | 30 | /* L3 TARG register offsets */ |
35 | #define L3_TARG_STDERRLOG_MAIN 0x48 | 31 | #define L3_TARG_STDERRLOG_MAIN 0x48 |
32 | #define L3_TARG_STDERRLOG_HDR 0x4c | ||
33 | #define L3_TARG_STDERRLOG_MSTADDR 0x50 | ||
34 | #define L3_TARG_STDERRLOG_INFO 0x58 | ||
36 | #define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c | 35 | #define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c |
37 | #define L3_TARG_STDERRLOG_MSTADDR 0x68 | 36 | #define L3_TARG_STDERRLOG_CINFO_INFO 0x64 |
37 | #define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68 | ||
38 | #define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c | ||
38 | #define L3_FLAGMUX_REGERR0 0xc | 39 | #define L3_FLAGMUX_REGERR0 0xc |
40 | #define L3_FLAGMUX_MASK0 0x8 | ||
41 | |||
42 | #define L3_TARGET_NOT_SUPPORTED NULL | ||
43 | |||
44 | #define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0)) | ||
45 | |||
46 | static const char * const l3_transaction_type[] = { | ||
47 | /* 0 0 0 */ "Idle", | ||
48 | /* 0 0 1 */ "Write", | ||
49 | /* 0 1 0 */ "Read", | ||
50 | /* 0 1 1 */ "ReadEx", | ||
51 | /* 1 0 0 */ "Read Link", | ||
52 | /* 1 0 1 */ "Write Non-Posted", | ||
53 | /* 1 1 0 */ "Write Conditional", | ||
54 | /* 1 1 1 */ "Write Broadcast", | ||
55 | }; | ||
39 | 56 | ||
40 | #define NUM_OF_L3_MASTERS (sizeof(l3_masters)/sizeof(l3_masters[0])) | 57 | /** |
41 | 58 | * struct l3_masters_data - L3 Master information | |
42 | static u32 l3_flagmux[L3_MODULES] = { | 59 | * @id: ID of the L3 Master |
43 | 0x500, | 60 | * @name: master name |
44 | 0x1000, | 61 | */ |
45 | 0X0200 | 62 | struct l3_masters_data { |
46 | }; | ||
47 | |||
48 | /* L3 Target standard Error register offsets */ | ||
49 | static u32 l3_targ_inst_clk1[] = { | ||
50 | 0x100, /* DMM1 */ | ||
51 | 0x200, /* DMM2 */ | ||
52 | 0x300, /* ABE */ | ||
53 | 0x400, /* L4CFG */ | ||
54 | 0x600, /* CLK2 PWR DISC */ | ||
55 | 0x0, /* Host CLK1 */ | ||
56 | 0x900 /* L4 Wakeup */ | ||
57 | }; | ||
58 | |||
59 | static u32 l3_targ_inst_clk2[] = { | ||
60 | 0x500, /* CORTEX M3 */ | ||
61 | 0x300, /* DSS */ | ||
62 | 0x100, /* GPMC */ | ||
63 | 0x400, /* ISS */ | ||
64 | 0x700, /* IVAHD */ | ||
65 | 0xD00, /* missing in TRM corresponds to AES1*/ | ||
66 | 0x900, /* L4 PER0*/ | ||
67 | 0x200, /* OCMRAM */ | ||
68 | 0x100, /* missing in TRM corresponds to GPMC sERROR*/ | ||
69 | 0x600, /* SGX */ | ||
70 | 0x800, /* SL2 */ | ||
71 | 0x1600, /* C2C */ | ||
72 | 0x1100, /* missing in TRM corresponds to PWR DISC CLK1*/ | ||
73 | 0xF00, /* missing in TRM corresponds to SHA1*/ | ||
74 | 0xE00, /* missing in TRM corresponds to AES2*/ | ||
75 | 0xC00, /* L4 PER3 */ | ||
76 | 0xA00, /* L4 PER1*/ | ||
77 | 0xB00, /* L4 PER2*/ | ||
78 | 0x0, /* HOST CLK2 */ | ||
79 | 0x1800, /* CAL */ | ||
80 | 0x1700 /* LLI */ | ||
81 | }; | ||
82 | |||
83 | static u32 l3_targ_inst_clk3[] = { | ||
84 | 0x0100 /* EMUSS */, | ||
85 | 0x0300, /* DEBUGSS_CT_TBR */ | ||
86 | 0x0 /* HOST CLK3 */ | ||
87 | }; | ||
88 | |||
89 | static struct l3_masters_data { | ||
90 | u32 id; | 63 | u32 id; |
91 | char name[10]; | 64 | char *name; |
92 | } l3_masters[] = { | 65 | }; |
66 | |||
67 | /** | ||
68 | * struct l3_target_data - L3 Target information | ||
69 | * @offset: Offset from base for L3 Target | ||
70 | * @name: Target name | ||
71 | * | ||
72 | * Target information is organized and indexed by bit field definitions. | ||
73 | */ | ||
74 | struct l3_target_data { | ||
75 | u32 offset; | ||
76 | char *name; | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * struct l3_flagmux_data - Flag Mux information | ||
81 | * @offset: offset from base for flagmux register | ||
82 | * @l3_targ: array indexed by flagmux index (bit offset) pointing to the | ||
83 | * target data. Unsupported ones are marked with | ||
84 | * L3_TARGET_NOT_SUPPORTED | ||
85 | * @num_targ_data: number of entries in target data | ||
86 | * @mask_app_bits: ignore these from raw application irq status | ||
87 | * @mask_dbg_bits: ignore these from raw debug irq status | ||
88 | */ | ||
89 | struct l3_flagmux_data { | ||
90 | u32 offset; | ||
91 | struct l3_target_data *l3_targ; | ||
92 | u8 num_targ_data; | ||
93 | u32 mask_app_bits; | ||
94 | u32 mask_dbg_bits; | ||
95 | }; | ||
96 | |||
97 | |||
98 | /** | ||
99 | * struct omap_l3 - Description of data relevant for L3 bus. | ||
100 | * @dev: device representing the bus (populated runtime) | ||
101 | * @l3_base: base addresses of modules (populated runtime if 0) | ||
102 | * if set to L3_BASE_IS_SUBMODULE, then uses previous | ||
103 | * module index as the base address | ||
104 | * @l3_flagmux: array containing flag mux data per module, | ||
105 | * offset from the corresponding module base, indexed per | ||
106 | * module. | ||
107 | * @num_modules: number of clock domains / modules. | ||
108 | * @l3_masters: array pointing to master data containing name and register | ||
109 | * offset for the master. | ||
110 | * @num_masters: number of masters | ||
111 | * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet | ||
112 | * @debug_irq: irq number of the debug interrupt (populated runtime) | ||
113 | * @app_irq: irq number of the application interrupt (populated runtime) | ||
114 | */ | ||
115 | struct omap_l3 { | ||
116 | struct device *dev; | ||
117 | |||
118 | void __iomem *l3_base[MAX_L3_MODULES]; | ||
119 | struct l3_flagmux_data **l3_flagmux; | ||
120 | int num_modules; | ||
121 | |||
122 | struct l3_masters_data *l3_masters; | ||
123 | int num_masters; | ||
124 | u32 mst_addr_mask; | ||
125 | |||
126 | int debug_irq; | ||
127 | int app_irq; | ||
128 | }; | ||
129 | |||
130 | static struct l3_target_data omap_l3_target_data_clk1[] = { | ||
131 | {0x100, "DMM1",}, | ||
132 | {0x200, "DMM2",}, | ||
133 | {0x300, "ABE",}, | ||
134 | {0x400, "L4CFG",}, | ||
135 | {0x600, "CLK2PWRDISC",}, | ||
136 | {0x0, "HOSTCLK1",}, | ||
137 | {0x900, "L4WAKEUP",}, | ||
138 | }; | ||
139 | |||
140 | static struct l3_flagmux_data omap_l3_flagmux_clk1 = { | ||
141 | .offset = 0x500, | ||
142 | .l3_targ = omap_l3_target_data_clk1, | ||
143 | .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1), | ||
144 | }; | ||
145 | |||
146 | |||
147 | static struct l3_target_data omap_l3_target_data_clk2[] = { | ||
148 | {0x500, "CORTEXM3",}, | ||
149 | {0x300, "DSS",}, | ||
150 | {0x100, "GPMC",}, | ||
151 | {0x400, "ISS",}, | ||
152 | {0x700, "IVAHD",}, | ||
153 | {0xD00, "AES1",}, | ||
154 | {0x900, "L4PER0",}, | ||
155 | {0x200, "OCMRAM",}, | ||
156 | {0x100, "GPMCsERROR",}, | ||
157 | {0x600, "SGX",}, | ||
158 | {0x800, "SL2",}, | ||
159 | {0x1600, "C2C",}, | ||
160 | {0x1100, "PWRDISCCLK1",}, | ||
161 | {0xF00, "SHA1",}, | ||
162 | {0xE00, "AES2",}, | ||
163 | {0xC00, "L4PER3",}, | ||
164 | {0xA00, "L4PER1",}, | ||
165 | {0xB00, "L4PER2",}, | ||
166 | {0x0, "HOSTCLK2",}, | ||
167 | {0x1800, "CAL",}, | ||
168 | {0x1700, "LLI",}, | ||
169 | }; | ||
170 | |||
171 | static struct l3_flagmux_data omap_l3_flagmux_clk2 = { | ||
172 | .offset = 0x1000, | ||
173 | .l3_targ = omap_l3_target_data_clk2, | ||
174 | .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2), | ||
175 | }; | ||
176 | |||
177 | |||
178 | static struct l3_target_data omap_l3_target_data_clk3[] = { | ||
179 | {0x0100, "EMUSS",}, | ||
180 | {0x0300, "DEBUG SOURCE",}, | ||
181 | {0x0, "HOST CLK3",}, | ||
182 | }; | ||
183 | |||
184 | static struct l3_flagmux_data omap_l3_flagmux_clk3 = { | ||
185 | .offset = 0x0200, | ||
186 | .l3_targ = omap_l3_target_data_clk3, | ||
187 | .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3), | ||
188 | }; | ||
189 | |||
190 | static struct l3_masters_data omap_l3_masters[] = { | ||
93 | { 0x0 , "MPU"}, | 191 | { 0x0 , "MPU"}, |
94 | { 0x10, "CS_ADP"}, | 192 | { 0x10, "CS_ADP"}, |
95 | { 0x14, "xxx"}, | 193 | { 0x14, "xxx"}, |
@@ -117,60 +215,261 @@ static struct l3_masters_data { | |||
117 | { 0xC8, "USBHOSTFS"} | 215 | { 0xC8, "USBHOSTFS"} |
118 | }; | 216 | }; |
119 | 217 | ||
120 | static char *l3_targ_inst_name[L3_MODULES][21] = { | 218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { |
121 | { | 219 | &omap_l3_flagmux_clk1, |
122 | "DMM1", | 220 | &omap_l3_flagmux_clk2, |
123 | "DMM2", | 221 | &omap_l3_flagmux_clk3, |
124 | "ABE", | 222 | }; |
125 | "L4CFG", | 223 | |
126 | "CLK2 PWR DISC", | 224 | static const struct omap_l3 omap_l3_data = { |
127 | "HOST CLK1", | 225 | .l3_flagmux = omap_l3_flagmux, |
128 | "L4 WAKEUP" | 226 | .num_modules = ARRAY_SIZE(omap_l3_flagmux), |
129 | }, | 227 | .l3_masters = omap_l3_masters, |
130 | { | 228 | .num_masters = ARRAY_SIZE(omap_l3_masters), |
131 | "CORTEX M3" , | 229 | /* The 6 MSBs of register field used to distinguish initiator */ |
132 | "DSS ", | 230 | .mst_addr_mask = 0xFC, |
133 | "GPMC ", | 231 | }; |
134 | "ISS ", | ||
135 | "IVAHD ", | ||
136 | "AES1", | ||
137 | "L4 PER0", | ||
138 | "OCMRAM ", | ||
139 | "GPMC sERROR", | ||
140 | "SGX ", | ||
141 | "SL2 ", | ||
142 | "C2C ", | ||
143 | "PWR DISC CLK1", | ||
144 | "SHA1", | ||
145 | "AES2", | ||
146 | "L4 PER3", | ||
147 | "L4 PER1", | ||
148 | "L4 PER2", | ||
149 | "HOST CLK2", | ||
150 | "CAL", | ||
151 | "LLI" | ||
152 | }, | ||
153 | { | ||
154 | "EMUSS", | ||
155 | "DEBUG SOURCE", | ||
156 | "HOST CLK3" | ||
157 | }, | ||
158 | }; | ||
159 | |||
160 | static u32 *l3_targ[L3_MODULES] = { | ||
161 | l3_targ_inst_clk1, | ||
162 | l3_targ_inst_clk2, | ||
163 | l3_targ_inst_clk3, | ||
164 | }; | ||
165 | |||
166 | struct omap4_l3 { | ||
167 | struct device *dev; | ||
168 | struct clk *ick; | ||
169 | 232 | ||
170 | /* memory base */ | 233 | /* DRA7 data */ |
171 | void __iomem *l3_base[L3_MODULES]; | 234 | static struct l3_target_data dra_l3_target_data_clk1[] = { |
235 | {0x2a00, "AES1",}, | ||
236 | {0x0200, "DMM_P1",}, | ||
237 | {0x0600, "DSP2_SDMA",}, | ||
238 | {0x0b00, "EVE2",}, | ||
239 | {0x1300, "DMM_P2",}, | ||
240 | {0x2c00, "AES2",}, | ||
241 | {0x0300, "DSP1_SDMA",}, | ||
242 | {0x0a00, "EVE1",}, | ||
243 | {0x0c00, "EVE3",}, | ||
244 | {0x0d00, "EVE4",}, | ||
245 | {0x2900, "DSS",}, | ||
246 | {0x0100, "GPMC",}, | ||
247 | {0x3700, "PCIE1",}, | ||
248 | {0x1600, "IVA_CONFIG",}, | ||
249 | {0x1800, "IVA_SL2IF",}, | ||
250 | {0x0500, "L4_CFG",}, | ||
251 | {0x1d00, "L4_WKUP",}, | ||
252 | {0x3800, "PCIE2",}, | ||
253 | {0x3300, "SHA2_1",}, | ||
254 | {0x1200, "GPU",}, | ||
255 | {0x1000, "IPU1",}, | ||
256 | {0x1100, "IPU2",}, | ||
257 | {0x2000, "TPCC_EDMA",}, | ||
258 | {0x2e00, "TPTC1_EDMA",}, | ||
259 | {0x2b00, "TPTC2_EDMA",}, | ||
260 | {0x0700, "VCP1",}, | ||
261 | {0x2500, "L4_PER2_P3",}, | ||
262 | {0x0e00, "L4_PER3_P3",}, | ||
263 | {0x2200, "MMU1",}, | ||
264 | {0x1400, "PRUSS1",}, | ||
265 | {0x1500, "PRUSS2"}, | ||
266 | {0x0800, "VCP1",}, | ||
267 | }; | ||
172 | 268 | ||
173 | int debug_irq; | 269 | static struct l3_flagmux_data dra_l3_flagmux_clk1 = { |
174 | int app_irq; | 270 | .offset = 0x803500, |
271 | .l3_targ = dra_l3_target_data_clk1, | ||
272 | .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1), | ||
273 | }; | ||
274 | |||
275 | static struct l3_target_data dra_l3_target_data_clk2[] = { | ||
276 | {0x0, "HOST CLK1",}, | ||
277 | {0x0, "HOST CLK2",}, | ||
278 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, | ||
279 | {0x3400, "SHA2_2",}, | ||
280 | {0x0900, "BB2D",}, | ||
281 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, | ||
282 | {0x2100, "L4_PER1_P3",}, | ||
283 | {0x1c00, "L4_PER1_P1",}, | ||
284 | {0x1f00, "L4_PER1_P2",}, | ||
285 | {0x2300, "L4_PER2_P1",}, | ||
286 | {0x2400, "L4_PER2_P2",}, | ||
287 | {0x2600, "L4_PER3_P1",}, | ||
288 | {0x2700, "L4_PER3_P2",}, | ||
289 | {0x2f00, "MCASP1",}, | ||
290 | {0x3000, "MCASP2",}, | ||
291 | {0x3100, "MCASP3",}, | ||
292 | {0x2800, "MMU2",}, | ||
293 | {0x0f00, "OCMC_RAM1",}, | ||
294 | {0x1700, "OCMC_RAM2",}, | ||
295 | {0x1900, "OCMC_RAM3",}, | ||
296 | {0x1e00, "OCMC_ROM",}, | ||
297 | {0x3900, "QSPI",}, | ||
298 | }; | ||
299 | |||
300 | static struct l3_flagmux_data dra_l3_flagmux_clk2 = { | ||
301 | .offset = 0x803600, | ||
302 | .l3_targ = dra_l3_target_data_clk2, | ||
303 | .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2), | ||
304 | }; | ||
305 | |||
306 | static struct l3_target_data dra_l3_target_data_clk3[] = { | ||
307 | {0x0100, "L3_INSTR"}, | ||
308 | {0x0300, "DEBUGSS_CT_TBR"}, | ||
309 | {0x0, "HOST CLK3"}, | ||
310 | }; | ||
311 | |||
312 | static struct l3_flagmux_data dra_l3_flagmux_clk3 = { | ||
313 | .offset = 0x200, | ||
314 | .l3_targ = dra_l3_target_data_clk3, | ||
315 | .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3), | ||
316 | }; | ||
317 | |||
318 | static struct l3_masters_data dra_l3_masters[] = { | ||
319 | { 0x0, "MPU" }, | ||
320 | { 0x4, "CS_DAP" }, | ||
321 | { 0x5, "IEEE1500_2_OCP" }, | ||
322 | { 0x8, "DSP1_MDMA" }, | ||
323 | { 0x9, "DSP1_CFG" }, | ||
324 | { 0xA, "DSP1_DMA" }, | ||
325 | { 0xB, "DSP2_MDMA" }, | ||
326 | { 0xC, "DSP2_CFG" }, | ||
327 | { 0xD, "DSP2_DMA" }, | ||
328 | { 0xE, "IVA" }, | ||
329 | { 0x10, "EVE1_P1" }, | ||
330 | { 0x11, "EVE2_P1" }, | ||
331 | { 0x12, "EVE3_P1" }, | ||
332 | { 0x13, "EVE4_P1" }, | ||
333 | { 0x14, "PRUSS1 PRU1" }, | ||
334 | { 0x15, "PRUSS1 PRU2" }, | ||
335 | { 0x16, "PRUSS2 PRU1" }, | ||
336 | { 0x17, "PRUSS2 PRU2" }, | ||
337 | { 0x18, "IPU1" }, | ||
338 | { 0x19, "IPU2" }, | ||
339 | { 0x1A, "SDMA" }, | ||
340 | { 0x1B, "CDMA" }, | ||
341 | { 0x1C, "TC1_EDMA" }, | ||
342 | { 0x1D, "TC2_EDMA" }, | ||
343 | { 0x20, "DSS" }, | ||
344 | { 0x21, "MMU1" }, | ||
345 | { 0x22, "PCIE1" }, | ||
346 | { 0x23, "MMU2" }, | ||
347 | { 0x24, "VIP1" }, | ||
348 | { 0x25, "VIP2" }, | ||
349 | { 0x26, "VIP3" }, | ||
350 | { 0x27, "VPE" }, | ||
351 | { 0x28, "GPU_P1" }, | ||
352 | { 0x29, "BB2D" }, | ||
353 | { 0x29, "GPU_P2" }, | ||
354 | { 0x2B, "GMAC_SW" }, | ||
355 | { 0x2C, "USB3" }, | ||
356 | { 0x2D, "USB2_SS" }, | ||
357 | { 0x2E, "USB2_ULPI_SS1" }, | ||
358 | { 0x2F, "USB2_ULPI_SS2" }, | ||
359 | { 0x30, "CSI2_1" }, | ||
360 | { 0x31, "CSI2_2" }, | ||
361 | { 0x33, "SATA" }, | ||
362 | { 0x34, "EVE1_P2" }, | ||
363 | { 0x35, "EVE2_P2" }, | ||
364 | { 0x36, "EVE3_P2" }, | ||
365 | { 0x37, "EVE4_P2" } | ||
175 | }; | 366 | }; |
176 | #endif | 367 | |
368 | static struct l3_flagmux_data *dra_l3_flagmux[] = { | ||
369 | &dra_l3_flagmux_clk1, | ||
370 | &dra_l3_flagmux_clk2, | ||
371 | &dra_l3_flagmux_clk3, | ||
372 | }; | ||
373 | |||
374 | static const struct omap_l3 dra_l3_data = { | ||
375 | .l3_base = { [1] = L3_BASE_IS_SUBMODULE }, | ||
376 | .l3_flagmux = dra_l3_flagmux, | ||
377 | .num_modules = ARRAY_SIZE(dra_l3_flagmux), | ||
378 | .l3_masters = dra_l3_masters, | ||
379 | .num_masters = ARRAY_SIZE(dra_l3_masters), | ||
380 | /* The 6 MSBs of register field used to distinguish initiator */ | ||
381 | .mst_addr_mask = 0xFC, | ||
382 | }; | ||
383 | |||
384 | /* AM4372 data */ | ||
385 | static struct l3_target_data am4372_l3_target_data_200f[] = { | ||
386 | {0xf00, "EMIF",}, | ||
387 | {0x1200, "DES",}, | ||
388 | {0x400, "OCMCRAM",}, | ||
389 | {0x700, "TPTC0",}, | ||
390 | {0x800, "TPTC1",}, | ||
391 | {0x900, "TPTC2"}, | ||
392 | {0xb00, "TPCC",}, | ||
393 | {0xd00, "DEBUGSS",}, | ||
394 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, | ||
395 | {0x200, "SHA",}, | ||
396 | {0xc00, "SGX530",}, | ||
397 | {0x500, "AES0",}, | ||
398 | {0xa00, "L4_FAST",}, | ||
399 | {0x300, "MPUSS_L2_RAM",}, | ||
400 | {0x100, "ICSS",}, | ||
401 | }; | ||
402 | |||
403 | static struct l3_flagmux_data am4372_l3_flagmux_200f = { | ||
404 | .offset = 0x1000, | ||
405 | .l3_targ = am4372_l3_target_data_200f, | ||
406 | .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f), | ||
407 | }; | ||
408 | |||
409 | static struct l3_target_data am4372_l3_target_data_100s[] = { | ||
410 | {0x100, "L4_PER_0",}, | ||
411 | {0x200, "L4_PER_1",}, | ||
412 | {0x300, "L4_PER_2",}, | ||
413 | {0x400, "L4_PER_3",}, | ||
414 | {0x800, "McASP0",}, | ||
415 | {0x900, "McASP1",}, | ||
416 | {0xC00, "MMCHS2",}, | ||
417 | {0x700, "GPMC",}, | ||
418 | {0xD00, "L4_FW",}, | ||
419 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, | ||
420 | {0x500, "ADCTSC",}, | ||
421 | {0xE00, "L4_WKUP",}, | ||
422 | {0xA00, "MAG_CARD",}, | ||
423 | }; | ||
424 | |||
425 | static struct l3_flagmux_data am4372_l3_flagmux_100s = { | ||
426 | .offset = 0x600, | ||
427 | .l3_targ = am4372_l3_target_data_100s, | ||
428 | .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s), | ||
429 | }; | ||
430 | |||
431 | static struct l3_masters_data am4372_l3_masters[] = { | ||
432 | { 0x0, "M1 (128-bit)"}, | ||
433 | { 0x1, "M2 (64-bit)"}, | ||
434 | { 0x4, "DAP"}, | ||
435 | { 0x5, "P1500"}, | ||
436 | { 0xC, "ICSS0"}, | ||
437 | { 0xD, "ICSS1"}, | ||
438 | { 0x14, "Wakeup Processor"}, | ||
439 | { 0x18, "TPTC0 Read"}, | ||
440 | { 0x19, "TPTC0 Write"}, | ||
441 | { 0x1A, "TPTC1 Read"}, | ||
442 | { 0x1B, "TPTC1 Write"}, | ||
443 | { 0x1C, "TPTC2 Read"}, | ||
444 | { 0x1D, "TPTC2 Write"}, | ||
445 | { 0x20, "SGX530"}, | ||
446 | { 0x21, "OCP WP Traffic Probe"}, | ||
447 | { 0x22, "OCP WP DMA Profiling"}, | ||
448 | { 0x23, "OCP WP Event Trace"}, | ||
449 | { 0x25, "DSS"}, | ||
450 | { 0x28, "Crypto DMA RD"}, | ||
451 | { 0x29, "Crypto DMA WR"}, | ||
452 | { 0x2C, "VPFE0"}, | ||
453 | { 0x2D, "VPFE1"}, | ||
454 | { 0x30, "GEMAC"}, | ||
455 | { 0x34, "USB0 RD"}, | ||
456 | { 0x35, "USB0 WR"}, | ||
457 | { 0x36, "USB1 RD"}, | ||
458 | { 0x37, "USB1 WR"}, | ||
459 | }; | ||
460 | |||
461 | static struct l3_flagmux_data *am4372_l3_flagmux[] = { | ||
462 | &am4372_l3_flagmux_200f, | ||
463 | &am4372_l3_flagmux_100s, | ||
464 | }; | ||
465 | |||
466 | static const struct omap_l3 am4372_l3_data = { | ||
467 | .l3_flagmux = am4372_l3_flagmux, | ||
468 | .num_modules = ARRAY_SIZE(am4372_l3_flagmux), | ||
469 | .l3_masters = am4372_l3_masters, | ||
470 | .num_masters = ARRAY_SIZE(am4372_l3_masters), | ||
471 | /* All 6 bits of register field used to distinguish initiator */ | ||
472 | .mst_addr_mask = 0x3F, | ||
473 | }; | ||
474 | |||
475 | #endif /* __OMAP_L3_NOC_H */ | ||
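The mst_addr_mask comments above hint at how a logged error gets attributed to a bus initiator: the masked field is matched against the l3_masters_data table. A minimal sketch of that lookup, assuming the table fields are named id/name and that the mask's lowest set bit gives the shift (neither is spelled out in this header):

/* Illustrative only; __ffs() comes from <linux/bitops.h>. */
static const char *l3_master_name(const struct omap_l3 *l3, u32 stderrlog_mstaddr)
{
	u32 id = (stderrlog_mstaddr & l3->mst_addr_mask) >>
		 __ffs(l3->mst_addr_mask);
	int i;

	for (i = 0; i < l3->num_masters; i++)
		if (l3->l3_masters[i].id == id)
			return l3->l3_masters[i].name;

	return "unknown initiator";
}

With the DRA7 mask of 0xFC this keeps bits [7:2], matching the master IDs listed above; with the AM4372 mask of 0x3F the field is used as-is.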
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index 870e18b9a687..1fad4c5e3f5d 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #define APLL_CON0 0x100 | 24 | #define APLL_CON0 0x100 |
25 | #define SRC_CPU 0x200 | 25 | #define SRC_CPU 0x200 |
26 | #define DIV_CPU0 0x500 | 26 | #define DIV_CPU0 0x500 |
27 | #define PWR_CTRL1 0x1020 | ||
28 | #define PWR_CTRL2 0x1024 | ||
27 | #define MPLL_LOCK 0x4000 | 29 | #define MPLL_LOCK 0x4000 |
28 | #define MPLL_CON0 0x4100 | 30 | #define MPLL_CON0 0x4100 |
29 | #define SRC_CORE1 0x4204 | 31 | #define SRC_CORE1 0x4204 |
@@ -84,6 +86,23 @@ | |||
84 | #define SRC_CDREX 0x20200 | 86 | #define SRC_CDREX 0x20200 |
85 | #define PLL_DIV2_SEL 0x20a24 | 87 | #define PLL_DIV2_SEL 0x20a24 |
86 | 88 | ||
89 | /* Below definitions are used for PWR_CTRL settings */ | ||
90 | #define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28) | ||
91 | #define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16) | ||
92 | #define PWR_CTRL1_DIV2_DOWN_EN (1 << 9) | ||
93 | #define PWR_CTRL1_DIV1_DOWN_EN (1 << 8) | ||
94 | #define PWR_CTRL1_USE_CORE1_WFE (1 << 5) | ||
95 | #define PWR_CTRL1_USE_CORE0_WFE (1 << 4) | ||
96 | #define PWR_CTRL1_USE_CORE1_WFI (1 << 1) | ||
97 | #define PWR_CTRL1_USE_CORE0_WFI (1 << 0) | ||
98 | |||
99 | #define PWR_CTRL2_DIV2_UP_EN (1 << 25) | ||
100 | #define PWR_CTRL2_DIV1_UP_EN (1 << 24) | ||
101 | #define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16) | ||
102 | #define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8) | ||
103 | #define PWR_CTRL2_CORE2_UP_RATIO (1 << 4) | ||
104 | #define PWR_CTRL2_CORE1_UP_RATIO (1 << 0) | ||
105 | |||
87 | /* list of PLLs to be registered */ | 106 | /* list of PLLs to be registered */ |
88 | enum exynos5250_plls { | 107 | enum exynos5250_plls { |
89 | apll, mpll, cpll, epll, vpll, gpll, bpll, | 108 | apll, mpll, cpll, epll, vpll, gpll, bpll, |
@@ -102,6 +121,8 @@ static struct samsung_clk_reg_dump *exynos5250_save; | |||
102 | static unsigned long exynos5250_clk_regs[] __initdata = { | 121 | static unsigned long exynos5250_clk_regs[] __initdata = { |
103 | SRC_CPU, | 122 | SRC_CPU, |
104 | DIV_CPU0, | 123 | DIV_CPU0, |
124 | PWR_CTRL1, | ||
125 | PWR_CTRL2, | ||
105 | SRC_CORE1, | 126 | SRC_CORE1, |
106 | SRC_TOP0, | 127 | SRC_TOP0, |
107 | SRC_TOP1, | 128 | SRC_TOP1, |
@@ -736,6 +757,7 @@ static struct of_device_id ext_clk_match[] __initdata = { | |||
736 | static void __init exynos5250_clk_init(struct device_node *np) | 757 | static void __init exynos5250_clk_init(struct device_node *np) |
737 | { | 758 | { |
738 | struct samsung_clk_provider *ctx; | 759 | struct samsung_clk_provider *ctx; |
760 | unsigned int tmp; | ||
739 | 761 | ||
740 | if (np) { | 762 | if (np) { |
741 | reg_base = of_iomap(np, 0); | 763 | reg_base = of_iomap(np, 0); |
@@ -776,6 +798,26 @@ static void __init exynos5250_clk_init(struct device_node *np) | |||
776 | samsung_clk_register_gate(ctx, exynos5250_gate_clks, | 798 | samsung_clk_register_gate(ctx, exynos5250_gate_clks, |
777 | ARRAY_SIZE(exynos5250_gate_clks)); | 799 | ARRAY_SIZE(exynos5250_gate_clks)); |
778 | 800 | ||
801 | /* | ||
802 | * Enable arm clock down (in idle) and set arm divider | ||
803 | * ratios in WFI/WFE state. | ||
804 | */ | ||
805 | tmp = (PWR_CTRL1_CORE2_DOWN_RATIO | PWR_CTRL1_CORE1_DOWN_RATIO | | ||
806 | PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN | | ||
807 | PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE | | ||
808 | PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI); | ||
809 | __raw_writel(tmp, reg_base + PWR_CTRL1); | ||
810 | |||
811 | /* | ||
812 | * Enable arm clock up (on exiting idle). Set arm divider | ||
813 | * ratios when not in idle along with the standby duration | ||
814 | * ratios. | ||
815 | */ | ||
816 | tmp = (PWR_CTRL2_DIV2_UP_EN | PWR_CTRL2_DIV1_UP_EN | | ||
817 | PWR_CTRL2_DUR_STANDBY2_VAL | PWR_CTRL2_DUR_STANDBY1_VAL | | ||
818 | PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO); | ||
819 | __raw_writel(tmp, reg_base + PWR_CTRL2); | ||
820 | |||
779 | exynos5250_clk_sleep_init(); | 821 | exynos5250_clk_sleep_init(); |
780 | 822 | ||
781 | pr_info("Exynos5250: clock setup completed, armclk=%ld\n", | 823 | pr_info("Exynos5250: clock setup completed, armclk=%ld\n", |
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index acf5a329d538..8d6420013a04 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/of_irq.h> | 24 | #include <linux/of_irq.h> |
25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
26 | #include <linux/clocksource.h> | 26 | #include <linux/clocksource.h> |
27 | #include <linux/sched_clock.h> | ||
27 | 28 | ||
28 | #define EXYNOS4_MCTREG(x) (x) | 29 | #define EXYNOS4_MCTREG(x) (x) |
29 | #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) | 30 | #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) |
@@ -192,12 +193,19 @@ struct clocksource mct_frc = { | |||
192 | .resume = exynos4_frc_resume, | 193 | .resume = exynos4_frc_resume, |
193 | }; | 194 | }; |
194 | 195 | ||
196 | static u64 notrace exynos4_read_sched_clock(void) | ||
197 | { | ||
198 | return exynos4_frc_read(&mct_frc); | ||
199 | } | ||
200 | |||
195 | static void __init exynos4_clocksource_init(void) | 201 | static void __init exynos4_clocksource_init(void) |
196 | { | 202 | { |
197 | exynos4_mct_frc_start(0, 0); | 203 | exynos4_mct_frc_start(0, 0); |
198 | 204 | ||
199 | if (clocksource_register_hz(&mct_frc, clk_rate)) | 205 | if (clocksource_register_hz(&mct_frc, clk_rate)) |
200 | panic("%s: can't register clocksource\n", mct_frc.name); | 206 | panic("%s: can't register clocksource\n", mct_frc.name); |
207 | |||
208 | sched_clock_register(exynos4_read_sched_clock, 64, clk_rate); | ||
201 | } | 209 | } |
202 | 210 | ||
203 | static void exynos4_mct_comp0_stop(void) | 211 | static void exynos4_mct_comp0_stop(void) |
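The new sched_clock hook simply forwards to exynos4_frc_read(), which is not part of this hunk. On this block it is typically a lo/hi read of the 64-bit free-running counter; a sketch under that assumption (EXYNOS4_MCT_G_CNT_U, the upper-word register, is assumed to sit alongside EXYNOS4_MCT_G_CNT_L shown in the context above):

/* Illustrative 64-bit counter read; re-read the upper word until stable. */
static u64 mct_frc_read64(void __iomem *reg_base)
{
	u32 lo, hi, hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}

Registering it as a 64-bit source means sched_clock will not wrap for the lifetime of the system at any realistic clk_rate.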
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 580503513f0f..d2c7b4b8ffd5 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ | |||
30 | 30 | ||
31 | config ARM_EXYNOS4210_CPUFREQ | 31 | config ARM_EXYNOS4210_CPUFREQ |
32 | bool "SAMSUNG EXYNOS4210" | 32 | bool "SAMSUNG EXYNOS4210" |
33 | depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM | 33 | depends on CPU_EXYNOS4210 |
34 | default y | 34 | default y |
35 | select ARM_EXYNOS_CPUFREQ | 35 | select ARM_EXYNOS_CPUFREQ |
36 | help | 36 | help |
@@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ | |||
41 | 41 | ||
42 | config ARM_EXYNOS4X12_CPUFREQ | 42 | config ARM_EXYNOS4X12_CPUFREQ |
43 | bool "SAMSUNG EXYNOS4x12" | 43 | bool "SAMSUNG EXYNOS4x12" |
44 | depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM | 44 | depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 |
45 | default y | 45 | default y |
46 | select ARM_EXYNOS_CPUFREQ | 46 | select ARM_EXYNOS_CPUFREQ |
47 | help | 47 | help |
@@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ | |||
52 | 52 | ||
53 | config ARM_EXYNOS5250_CPUFREQ | 53 | config ARM_EXYNOS5250_CPUFREQ |
54 | bool "SAMSUNG EXYNOS5250" | 54 | bool "SAMSUNG EXYNOS5250" |
55 | depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM | 55 | depends on SOC_EXYNOS5250 |
56 | default y | 56 | default y |
57 | select ARM_EXYNOS_CPUFREQ | 57 | select ARM_EXYNOS_CPUFREQ |
58 | help | 58 | help |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index e8a4a7ed38c1..348c8bafe436 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -19,8 +19,6 @@ | |||
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | 21 | ||
22 | #include <plat/cpu.h> | ||
23 | |||
24 | #include "exynos-cpufreq.h" | 22 | #include "exynos-cpufreq.h" |
25 | 23 | ||
26 | static struct exynos_dvfs_info *exynos_info; | 24 | static struct exynos_dvfs_info *exynos_info; |
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h index f189547bb447..51af42e1b7fe 100644 --- a/drivers/cpufreq/exynos-cpufreq.h +++ b/drivers/cpufreq/exynos-cpufreq.h | |||
@@ -49,6 +49,7 @@ struct exynos_dvfs_info { | |||
49 | struct cpufreq_frequency_table *freq_table; | 49 | struct cpufreq_frequency_table *freq_table; |
50 | void (*set_freq)(unsigned int, unsigned int); | 50 | void (*set_freq)(unsigned int, unsigned int); |
51 | bool (*need_apll_change)(unsigned int, unsigned int); | 51 | bool (*need_apll_change)(unsigned int, unsigned int); |
52 | void __iomem *cmu_regs; | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | #ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ | 55 | #ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ |
@@ -76,24 +77,21 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) | |||
76 | } | 77 | } |
77 | #endif | 78 | #endif |
78 | 79 | ||
79 | #include <plat/cpu.h> | 80 | #define EXYNOS4_CLKSRC_CPU 0x14200 |
80 | #include <mach/map.h> | 81 | #define EXYNOS4_CLKMUX_STATCPU 0x14400 |
81 | 82 | ||
82 | #define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200) | 83 | #define EXYNOS4_CLKDIV_CPU 0x14500 |
83 | #define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400) | 84 | #define EXYNOS4_CLKDIV_CPU1 0x14504 |
84 | 85 | #define EXYNOS4_CLKDIV_STATCPU 0x14600 | |
85 | #define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500) | 86 | #define EXYNOS4_CLKDIV_STATCPU1 0x14604 |
86 | #define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504) | ||
87 | #define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600) | ||
88 | #define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604) | ||
89 | 87 | ||
90 | #define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) | 88 | #define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) |
91 | #define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) | 89 | #define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) |
92 | 90 | ||
93 | #define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000) | 91 | #define EXYNOS5_APLL_LOCK 0x00000 |
94 | #define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100) | 92 | #define EXYNOS5_APLL_CON0 0x00100 |
95 | #define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400) | 93 | #define EXYNOS5_CLKMUX_STATCPU 0x00400 |
96 | #define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500) | 94 | #define EXYNOS5_CLKDIV_CPU0 0x00500 |
97 | #define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504) | 95 | #define EXYNOS5_CLKDIV_CPU1 0x00504 |
98 | #define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600) | 96 | #define EXYNOS5_CLKDIV_STATCPU0 0x00600 |
99 | #define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604) | 97 | #define EXYNOS5_CLKDIV_STATCPU1 0x00604 |
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 6384e5b9a347..61a54310a1b9 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
19 | #include <linux/of.h> | ||
20 | #include <linux/of_address.h> | ||
19 | 21 | ||
20 | #include "exynos-cpufreq.h" | 22 | #include "exynos-cpufreq.h" |
21 | 23 | ||
@@ -23,6 +25,7 @@ static struct clk *cpu_clk; | |||
23 | static struct clk *moutcore; | 25 | static struct clk *moutcore; |
24 | static struct clk *mout_mpll; | 26 | static struct clk *mout_mpll; |
25 | static struct clk *mout_apll; | 27 | static struct clk *mout_apll; |
28 | static struct exynos_dvfs_info *cpufreq; | ||
26 | 29 | ||
27 | static unsigned int exynos4210_volt_table[] = { | 30 | static unsigned int exynos4210_volt_table[] = { |
28 | 1250000, 1150000, 1050000, 975000, 950000, | 31 | 1250000, 1150000, 1050000, 975000, 950000, |
@@ -60,20 +63,20 @@ static void exynos4210_set_clkdiv(unsigned int div_index) | |||
60 | 63 | ||
61 | tmp = apll_freq_4210[div_index].clk_div_cpu0; | 64 | tmp = apll_freq_4210[div_index].clk_div_cpu0; |
62 | 65 | ||
63 | __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); | 66 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); |
64 | 67 | ||
65 | do { | 68 | do { |
66 | tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU); | 69 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU); |
67 | } while (tmp & 0x1111111); | 70 | } while (tmp & 0x1111111); |
68 | 71 | ||
69 | /* Change Divider - CPU1 */ | 72 | /* Change Divider - CPU1 */ |
70 | 73 | ||
71 | tmp = apll_freq_4210[div_index].clk_div_cpu1; | 74 | tmp = apll_freq_4210[div_index].clk_div_cpu1; |
72 | 75 | ||
73 | __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); | 76 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); |
74 | 77 | ||
75 | do { | 78 | do { |
76 | tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); | 79 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); |
77 | } while (tmp & 0x11); | 80 | } while (tmp & 0x11); |
78 | } | 81 | } |
79 | 82 | ||
@@ -85,7 +88,7 @@ static void exynos4210_set_apll(unsigned int index) | |||
85 | clk_set_parent(moutcore, mout_mpll); | 88 | clk_set_parent(moutcore, mout_mpll); |
86 | 89 | ||
87 | do { | 90 | do { |
88 | tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) | 91 | tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) |
89 | >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); | 92 | >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); |
90 | tmp &= 0x7; | 93 | tmp &= 0x7; |
91 | } while (tmp != 0x2); | 94 | } while (tmp != 0x2); |
@@ -96,7 +99,7 @@ static void exynos4210_set_apll(unsigned int index) | |||
96 | clk_set_parent(moutcore, mout_apll); | 99 | clk_set_parent(moutcore, mout_apll); |
97 | 100 | ||
98 | do { | 101 | do { |
99 | tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); | 102 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); |
100 | tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; | 103 | tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; |
101 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 104 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
102 | } | 105 | } |
@@ -115,8 +118,30 @@ static void exynos4210_set_frequency(unsigned int old_index, | |||
115 | 118 | ||
116 | int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) | 119 | int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) |
117 | { | 120 | { |
121 | struct device_node *np; | ||
118 | unsigned long rate; | 122 | unsigned long rate; |
119 | 123 | ||
124 | /* | ||
125 | * HACK: This is a temporary workaround to get access to clock | ||
126 | * controller registers directly and remove static mappings and | ||
127 | * dependencies on platform headers. It is necessary to enable | ||
128 | * Exynos multi-platform support and will be removed together with | ||
129 | * this whole driver as soon as Exynos gets migrated to use | ||
130 | * cpufreq-cpu0 driver. | ||
131 | */ | ||
132 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); | ||
133 | if (!np) { | ||
134 | pr_err("%s: failed to find clock controller DT node\n", | ||
135 | __func__); | ||
136 | return -ENODEV; | ||
137 | } | ||
138 | |||
139 | info->cmu_regs = of_iomap(np, 0); | ||
140 | if (!info->cmu_regs) { | ||
141 | pr_err("%s: failed to map CMU registers\n", __func__); | ||
142 | return -EFAULT; | ||
143 | } | ||
144 | |||
120 | cpu_clk = clk_get(NULL, "armclk"); | 145 | cpu_clk = clk_get(NULL, "armclk"); |
121 | if (IS_ERR(cpu_clk)) | 146 | if (IS_ERR(cpu_clk)) |
122 | return PTR_ERR(cpu_clk); | 147 | return PTR_ERR(cpu_clk); |
@@ -143,6 +168,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) | |||
143 | info->freq_table = exynos4210_freq_table; | 168 | info->freq_table = exynos4210_freq_table; |
144 | info->set_freq = exynos4210_set_frequency; | 169 | info->set_freq = exynos4210_set_frequency; |
145 | 170 | ||
171 | cpufreq = info; | ||
172 | |||
146 | return 0; | 173 | return 0; |
147 | 174 | ||
148 | err_mout_apll: | 175 | err_mout_apll: |
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 63a3907ce578..351a2074cfea 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
19 | #include <linux/of.h> | ||
20 | #include <linux/of_address.h> | ||
19 | 21 | ||
20 | #include "exynos-cpufreq.h" | 22 | #include "exynos-cpufreq.h" |
21 | 23 | ||
@@ -23,6 +25,7 @@ static struct clk *cpu_clk; | |||
23 | static struct clk *moutcore; | 25 | static struct clk *moutcore; |
24 | static struct clk *mout_mpll; | 26 | static struct clk *mout_mpll; |
25 | static struct clk *mout_apll; | 27 | static struct clk *mout_apll; |
28 | static struct exynos_dvfs_info *cpufreq; | ||
26 | 29 | ||
27 | static unsigned int exynos4x12_volt_table[] = { | 30 | static unsigned int exynos4x12_volt_table[] = { |
28 | 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, | 31 | 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, |
@@ -105,19 +108,20 @@ static void exynos4x12_set_clkdiv(unsigned int div_index) | |||
105 | 108 | ||
106 | tmp = apll_freq_4x12[div_index].clk_div_cpu0; | 109 | tmp = apll_freq_4x12[div_index].clk_div_cpu0; |
107 | 110 | ||
108 | __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); | 111 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); |
109 | 112 | ||
110 | while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111) | 113 | while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU) |
114 | & 0x11111111) | ||
111 | cpu_relax(); | 115 | cpu_relax(); |
112 | 116 | ||
113 | /* Change Divider - CPU1 */ | 117 | /* Change Divider - CPU1 */ |
114 | tmp = apll_freq_4x12[div_index].clk_div_cpu1; | 118 | tmp = apll_freq_4x12[div_index].clk_div_cpu1; |
115 | 119 | ||
116 | __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); | 120 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); |
117 | 121 | ||
118 | do { | 122 | do { |
119 | cpu_relax(); | 123 | cpu_relax(); |
120 | tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); | 124 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); |
121 | } while (tmp != 0x0); | 125 | } while (tmp != 0x0); |
122 | } | 126 | } |
123 | 127 | ||
@@ -130,7 +134,7 @@ static void exynos4x12_set_apll(unsigned int index) | |||
130 | 134 | ||
131 | do { | 135 | do { |
132 | cpu_relax(); | 136 | cpu_relax(); |
133 | tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) | 137 | tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) |
134 | >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); | 138 | >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); |
135 | tmp &= 0x7; | 139 | tmp &= 0x7; |
136 | } while (tmp != 0x2); | 140 | } while (tmp != 0x2); |
@@ -142,7 +146,7 @@ static void exynos4x12_set_apll(unsigned int index) | |||
142 | 146 | ||
143 | do { | 147 | do { |
144 | cpu_relax(); | 148 | cpu_relax(); |
145 | tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); | 149 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); |
146 | tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; | 150 | tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; |
147 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); | 151 | } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); |
148 | } | 152 | } |
@@ -161,8 +165,30 @@ static void exynos4x12_set_frequency(unsigned int old_index, | |||
161 | 165 | ||
162 | int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) | 166 | int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) |
163 | { | 167 | { |
168 | struct device_node *np; | ||
164 | unsigned long rate; | 169 | unsigned long rate; |
165 | 170 | ||
171 | /* | ||
172 | * HACK: This is a temporary workaround to get access to clock | ||
173 | * controller registers directly and remove static mappings and | ||
174 | * dependencies on platform headers. It is necessary to enable | ||
175 | * Exynos multi-platform support and will be removed together with | ||
176 | * this whole driver as soon as Exynos gets migrated to use | ||
177 | * cpufreq-cpu0 driver. | ||
178 | */ | ||
179 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); | ||
180 | if (!np) { | ||
181 | pr_err("%s: failed to find clock controller DT node\n", | ||
182 | __func__); | ||
183 | return -ENODEV; | ||
184 | } | ||
185 | |||
186 | info->cmu_regs = of_iomap(np, 0); | ||
187 | if (!info->cmu_regs) { | ||
188 | pr_err("%s: failed to map CMU registers\n", __func__); | ||
189 | return -EFAULT; | ||
190 | } | ||
191 | |||
166 | cpu_clk = clk_get(NULL, "armclk"); | 192 | cpu_clk = clk_get(NULL, "armclk"); |
167 | if (IS_ERR(cpu_clk)) | 193 | if (IS_ERR(cpu_clk)) |
168 | return PTR_ERR(cpu_clk); | 194 | return PTR_ERR(cpu_clk); |
@@ -194,6 +220,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) | |||
194 | info->freq_table = exynos4x12_freq_table; | 220 | info->freq_table = exynos4x12_freq_table; |
195 | info->set_freq = exynos4x12_set_frequency; | 221 | info->set_freq = exynos4x12_set_frequency; |
196 | 222 | ||
223 | cpufreq = info; | ||
224 | |||
197 | return 0; | 225 | return 0; |
198 | 226 | ||
199 | err_mout_apll: | 227 | err_mout_apll: |
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index 363a0b3fe1b1..c91ce69dc631 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c | |||
@@ -16,8 +16,8 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/cpufreq.h> | 18 | #include <linux/cpufreq.h> |
19 | 19 | #include <linux/of.h> | |
20 | #include <mach/map.h> | 20 | #include <linux/of_address.h> |
21 | 21 | ||
22 | #include "exynos-cpufreq.h" | 22 | #include "exynos-cpufreq.h" |
23 | 23 | ||
@@ -25,6 +25,7 @@ static struct clk *cpu_clk; | |||
25 | static struct clk *moutcore; | 25 | static struct clk *moutcore; |
26 | static struct clk *mout_mpll; | 26 | static struct clk *mout_mpll; |
27 | static struct clk *mout_apll; | 27 | static struct clk *mout_apll; |
28 | static struct exynos_dvfs_info *cpufreq; | ||
28 | 29 | ||
29 | static unsigned int exynos5250_volt_table[] = { | 30 | static unsigned int exynos5250_volt_table[] = { |
30 | 1300000, 1250000, 1225000, 1200000, 1150000, | 31 | 1300000, 1250000, 1225000, 1200000, 1150000, |
@@ -87,17 +88,18 @@ static void set_clkdiv(unsigned int div_index) | |||
87 | 88 | ||
88 | tmp = apll_freq_5250[div_index].clk_div_cpu0; | 89 | tmp = apll_freq_5250[div_index].clk_div_cpu0; |
89 | 90 | ||
90 | __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); | 91 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0); |
91 | 92 | ||
92 | while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111) | 93 | while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0) |
94 | & 0x11111111) | ||
93 | cpu_relax(); | 95 | cpu_relax(); |
94 | 96 | ||
95 | /* Change Divider - CPU1 */ | 97 | /* Change Divider - CPU1 */ |
96 | tmp = apll_freq_5250[div_index].clk_div_cpu1; | 98 | tmp = apll_freq_5250[div_index].clk_div_cpu1; |
97 | 99 | ||
98 | __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); | 100 | __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1); |
99 | 101 | ||
100 | while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11) | 102 | while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11) |
101 | cpu_relax(); | 103 | cpu_relax(); |
102 | } | 104 | } |
103 | 105 | ||
@@ -111,7 +113,8 @@ static void set_apll(unsigned int index) | |||
111 | 113 | ||
112 | do { | 114 | do { |
113 | cpu_relax(); | 115 | cpu_relax(); |
114 | tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16); | 116 | tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU) |
117 | >> 16); | ||
115 | tmp &= 0x7; | 118 | tmp &= 0x7; |
116 | } while (tmp != 0x2); | 119 | } while (tmp != 0x2); |
117 | 120 | ||
@@ -122,7 +125,7 @@ static void set_apll(unsigned int index) | |||
122 | 125 | ||
123 | do { | 126 | do { |
124 | cpu_relax(); | 127 | cpu_relax(); |
125 | tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU); | 128 | tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU); |
126 | tmp &= (0x7 << 16); | 129 | tmp &= (0x7 << 16); |
127 | } while (tmp != (0x1 << 16)); | 130 | } while (tmp != (0x1 << 16)); |
128 | } | 131 | } |
@@ -141,8 +144,30 @@ static void exynos5250_set_frequency(unsigned int old_index, | |||
141 | 144 | ||
142 | int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) | 145 | int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) |
143 | { | 146 | { |
147 | struct device_node *np; | ||
144 | unsigned long rate; | 148 | unsigned long rate; |
145 | 149 | ||
150 | /* | ||
151 | * HACK: This is a temporary workaround to get access to clock | ||
152 | * controller registers directly and remove static mappings and | ||
153 | * dependencies on platform headers. It is necessary to enable | ||
154 | * Exynos multi-platform support and will be removed together with | ||
155 | * this whole driver as soon as Exynos gets migrated to use | ||
156 | * cpufreq-cpu0 driver. | ||
157 | */ | ||
158 | np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); | ||
159 | if (!np) { | ||
160 | pr_err("%s: failed to find clock controller DT node\n", | ||
161 | __func__); | ||
162 | return -ENODEV; | ||
163 | } | ||
164 | |||
165 | info->cmu_regs = of_iomap(np, 0); | ||
166 | if (!info->cmu_regs) { | ||
167 | pr_err("%s: failed to map CMU registers\n", __func__); | ||
168 | return -EFAULT; | ||
169 | } | ||
170 | |||
146 | cpu_clk = clk_get(NULL, "armclk"); | 171 | cpu_clk = clk_get(NULL, "armclk"); |
147 | if (IS_ERR(cpu_clk)) | 172 | if (IS_ERR(cpu_clk)) |
148 | return PTR_ERR(cpu_clk); | 173 | return PTR_ERR(cpu_clk); |
@@ -169,6 +194,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) | |||
169 | info->freq_table = exynos5250_freq_table; | 194 | info->freq_table = exynos5250_freq_table; |
170 | info->set_freq = exynos5250_set_frequency; | 195 | info->set_freq = exynos5250_set_frequency; |
171 | 196 | ||
197 | cpufreq = info; | ||
198 | |||
172 | return 0; | 199 | return 0; |
173 | 200 | ||
174 | err_mout_apll: | 201 | err_mout_apll: |
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 5bb94780d377..ae1d78ea7df7 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -49,3 +49,9 @@ config ARM_AT91_CPUIDLE | |||
49 | depends on ARCH_AT91 | 49 | depends on ARCH_AT91 |
50 | help | 50 | help |
51 | Select this to enable cpuidle for AT91 processors | 51 | Select this to enable cpuidle for AT91 processors |
52 | |||
53 | config ARM_EXYNOS_CPUIDLE | ||
54 | bool "Cpu Idle Driver for the Exynos processors" | ||
55 | depends on ARCH_EXYNOS | ||
56 | help | ||
57 | Select this to enable cpuidle for Exynos processors | ||
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 9902d052bd87..cd3ab59f8461 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o | |||
14 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o | 14 | obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o |
15 | obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o | 15 | obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o |
16 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o | 16 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o |
17 | obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o | ||
17 | 18 | ||
18 | ############################################################################### | 19 | ############################################################################### |
19 | # POWERPC drivers | 20 | # POWERPC drivers |
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c new file mode 100644 index 000000000000..7c0151263828 --- /dev/null +++ b/drivers/cpuidle/cpuidle-exynos.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* drivers/cpuidle/cpuidle-exynos.c | ||
2 | * | ||
3 | * Copyright (c) 2011 Samsung Electronics Co., Ltd. | ||
4 | * http://www.samsung.com | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/cpuidle.h> | ||
12 | #include <linux/cpu_pm.h> | ||
13 | #include <linux/export.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | |||
17 | #include <asm/proc-fns.h> | ||
18 | #include <asm/suspend.h> | ||
19 | #include <asm/cpuidle.h> | ||
20 | |||
21 | static void (*exynos_enter_aftr)(void); | ||
22 | |||
23 | static int idle_finisher(unsigned long flags) | ||
24 | { | ||
25 | exynos_enter_aftr(); | ||
26 | cpu_do_idle(); | ||
27 | |||
28 | return 1; | ||
29 | } | ||
30 | |||
31 | static int exynos_enter_core0_aftr(struct cpuidle_device *dev, | ||
32 | struct cpuidle_driver *drv, | ||
33 | int index) | ||
34 | { | ||
35 | cpu_pm_enter(); | ||
36 | cpu_suspend(0, idle_finisher); | ||
37 | cpu_pm_exit(); | ||
38 | |||
39 | return index; | ||
40 | } | ||
41 | |||
42 | static int exynos_enter_lowpower(struct cpuidle_device *dev, | ||
43 | struct cpuidle_driver *drv, | ||
44 | int index) | ||
45 | { | ||
46 | int new_index = index; | ||
47 | |||
48 | /* AFTR can only be entered when cores other than CPU0 are offline */ | ||
49 | if (num_online_cpus() > 1 || dev->cpu != 0) | ||
50 | new_index = drv->safe_state_index; | ||
51 | |||
52 | if (new_index == 0) | ||
53 | return arm_cpuidle_simple_enter(dev, drv, new_index); | ||
54 | else | ||
55 | return exynos_enter_core0_aftr(dev, drv, new_index); | ||
56 | } | ||
57 | |||
58 | static struct cpuidle_driver exynos_idle_driver = { | ||
59 | .name = "exynos_idle", | ||
60 | .owner = THIS_MODULE, | ||
61 | .states = { | ||
62 | [0] = ARM_CPUIDLE_WFI_STATE, | ||
63 | [1] = { | ||
64 | .enter = exynos_enter_lowpower, | ||
65 | .exit_latency = 300, | ||
66 | .target_residency = 100000, | ||
67 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
68 | .name = "C1", | ||
69 | .desc = "ARM power down", | ||
70 | }, | ||
71 | }, | ||
72 | .state_count = 2, | ||
73 | .safe_state_index = 0, | ||
74 | }; | ||
75 | |||
76 | static int exynos_cpuidle_probe(struct platform_device *pdev) | ||
77 | { | ||
78 | int ret; | ||
79 | |||
80 | exynos_enter_aftr = (void *)(pdev->dev.platform_data); | ||
81 | |||
82 | ret = cpuidle_register(&exynos_idle_driver, NULL); | ||
83 | if (ret) { | ||
84 | dev_err(&pdev->dev, "failed to register cpuidle driver\n"); | ||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static struct platform_driver exynos_cpuidle_driver = { | ||
92 | .probe = exynos_cpuidle_probe, | ||
93 | .driver = { | ||
94 | .name = "exynos_cpuidle", | ||
95 | .owner = THIS_MODULE, | ||
96 | }, | ||
97 | }; | ||
98 | |||
99 | module_platform_driver(exynos_cpuidle_driver); | ||
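The driver above expects its AFTR entry hook to arrive through platform_data, so some machine code has to register a matching "exynos_cpuidle" device. A minimal sketch of that side (the board callback name and init hook are assumptions, not part of this patch):

/* Machine-side sketch: hand the SoC's AFTR entry sequence to the driver. */
static void exynos_board_enter_aftr(void)
{
	/* SoC-specific sequence that drops CPU0 into AFTR would go here. */
}

static struct platform_device exynos_cpuidle_pdev = {
	.name			= "exynos_cpuidle",
	.dev.platform_data	= exynos_board_enter_aftr,
	.id			= -1,
};

static void __init exynos_board_cpuidle_init(void)
{
	platform_device_register(&exynos_cpuidle_pdev);
}

exynos_cpuidle_probe() then recovers the callback from pdev->dev.platform_data, as seen above.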
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 926360c2db6a..d08c4dedef35 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -57,14 +57,48 @@ | |||
57 | #define EDMA_MAX_SLOTS MAX_NR_SG | 57 | #define EDMA_MAX_SLOTS MAX_NR_SG |
58 | #define EDMA_DESCRIPTORS 16 | 58 | #define EDMA_DESCRIPTORS 16 |
59 | 59 | ||
60 | struct edma_pset { | ||
61 | u32 len; | ||
62 | dma_addr_t addr; | ||
63 | struct edmacc_param param; | ||
64 | }; | ||
65 | |||
60 | struct edma_desc { | 66 | struct edma_desc { |
61 | struct virt_dma_desc vdesc; | 67 | struct virt_dma_desc vdesc; |
62 | struct list_head node; | 68 | struct list_head node; |
69 | enum dma_transfer_direction direction; | ||
63 | int cyclic; | 70 | int cyclic; |
64 | int absync; | 71 | int absync; |
65 | int pset_nr; | 72 | int pset_nr; |
73 | struct edma_chan *echan; | ||
66 | int processed; | 74 | int processed; |
67 | struct edmacc_param pset[0]; | 75 | |
76 | /* | ||
77 | * The following 4 elements are used for residue accounting. | ||
78 | * | ||
79 | * - processed_stat: the number of SG elements we have traversed | ||
80 | * so far to cover accounting. This is updated directly to processed | ||
81 | * during edma_callback and is always <= processed, because processed | ||
82 | * refers to the number of pending transfer (programmed to EDMA | ||
83 | * controller), where as processed_stat tracks number of transfers | ||
84 | * accounted for so far. | ||
85 | * | ||
86 | * - residue: The amount of bytes we have left to transfer for this desc | ||
87 | * | ||
88 | * - residue_stat: The residue in bytes of data we have covered | ||
89 | * so far for accounting. This is updated directly to residue | ||
90 | * during callbacks to keep it current. | ||
91 | * | ||
92 | * - sg_len: Tracks the length of the current intermediate transfer; | ||
93 | * this is required to update the residue during intermediate transfer | ||
94 | * completion callback. | ||
95 | */ | ||
96 | int processed_stat; | ||
97 | u32 sg_len; | ||
98 | u32 residue; | ||
99 | u32 residue_stat; | ||
100 | |||
101 | struct edma_pset pset[0]; | ||
68 | }; | 102 | }; |
69 | 103 | ||
70 | struct edma_cc; | 104 | struct edma_cc; |
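The residue/residue_stat fields documented above are what a tx_status callback would hand back to dmaengine clients; a minimal sketch of that reporting path (the function name and locking detail are an illustration, the real reporting code sits outside this hunk):

static enum dma_status edma_tx_status_sketch(struct dma_chan *chan,
					     dma_cookie_t cookie,
					     struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	/* Report what is left of the descriptor currently on the channel. */
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = echan->edesc->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}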
@@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan) | |||
136 | /* Find out how many left */ | 170 | /* Find out how many left */ |
137 | left = edesc->pset_nr - edesc->processed; | 171 | left = edesc->pset_nr - edesc->processed; |
138 | nslots = min(MAX_NR_SG, left); | 172 | nslots = min(MAX_NR_SG, left); |
173 | edesc->sg_len = 0; | ||
139 | 174 | ||
140 | /* Write descriptor PaRAM set(s) */ | 175 | /* Write descriptor PaRAM set(s) */ |
141 | for (i = 0; i < nslots; i++) { | 176 | for (i = 0; i < nslots; i++) { |
142 | j = i + edesc->processed; | 177 | j = i + edesc->processed; |
143 | edma_write_slot(echan->slot[i], &edesc->pset[j]); | 178 | edma_write_slot(echan->slot[i], &edesc->pset[j].param); |
144 | dev_dbg(echan->vchan.chan.device->dev, | 179 | edesc->sg_len += edesc->pset[j].len; |
180 | dev_vdbg(echan->vchan.chan.device->dev, | ||
145 | "\n pset[%d]:\n" | 181 | "\n pset[%d]:\n" |
146 | " chnum\t%d\n" | 182 | " chnum\t%d\n" |
147 | " slot\t%d\n" | 183 | " slot\t%d\n" |
@@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan) | |||
154 | " cidx\t%08x\n" | 190 | " cidx\t%08x\n" |
155 | " lkrld\t%08x\n", | 191 | " lkrld\t%08x\n", |
156 | j, echan->ch_num, echan->slot[i], | 192 | j, echan->ch_num, echan->slot[i], |
157 | edesc->pset[j].opt, | 193 | edesc->pset[j].param.opt, |
158 | edesc->pset[j].src, | 194 | edesc->pset[j].param.src, |
159 | edesc->pset[j].dst, | 195 | edesc->pset[j].param.dst, |
160 | edesc->pset[j].a_b_cnt, | 196 | edesc->pset[j].param.a_b_cnt, |
161 | edesc->pset[j].ccnt, | 197 | edesc->pset[j].param.ccnt, |
162 | edesc->pset[j].src_dst_bidx, | 198 | edesc->pset[j].param.src_dst_bidx, |
163 | edesc->pset[j].src_dst_cidx, | 199 | edesc->pset[j].param.src_dst_cidx, |
164 | edesc->pset[j].link_bcntrld); | 200 | edesc->pset[j].param.link_bcntrld); |
165 | /* Link to the previous slot if not the last set */ | 201 | /* Link to the previous slot if not the last set */ |
166 | if (i != (nslots - 1)) | 202 | if (i != (nslots - 1)) |
167 | edma_link(echan->slot[i], echan->slot[i+1]); | 203 | edma_link(echan->slot[i], echan->slot[i+1]); |
@@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan) | |||
183 | } | 219 | } |
184 | 220 | ||
185 | if (edesc->processed <= MAX_NR_SG) { | 221 | if (edesc->processed <= MAX_NR_SG) { |
186 | dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); | 222 | dev_dbg(dev, "first transfer starting on channel %d\n", |
223 | echan->ch_num); | ||
187 | edma_start(echan->ch_num); | 224 | edma_start(echan->ch_num); |
188 | } else { | 225 | } else { |
189 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", | 226 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", |
@@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan) | |||
197 | * MAX_NR_SG | 234 | * MAX_NR_SG |
198 | */ | 235 | */ |
199 | if (echan->missed) { | 236 | if (echan->missed) { |
200 | dev_dbg(dev, "missed event in execute detected\n"); | 237 | dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); |
201 | edma_clean_channel(echan->ch_num); | 238 | edma_clean_channel(echan->ch_num); |
202 | edma_stop(echan->ch_num); | 239 | edma_stop(echan->ch_num); |
203 | edma_start(echan->ch_num); | 240 | edma_start(echan->ch_num); |
@@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan, | |||
242 | return 0; | 279 | return 0; |
243 | } | 280 | } |
244 | 281 | ||
282 | static int edma_dma_pause(struct edma_chan *echan) | ||
283 | { | ||
284 | /* Pause/Resume only allowed with cyclic mode */ | ||
285 | if (!echan->edesc->cyclic) | ||
286 | return -EINVAL; | ||
287 | |||
288 | edma_pause(echan->ch_num); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | static int edma_dma_resume(struct edma_chan *echan) | ||
293 | { | ||
294 | /* Pause/Resume only allowed with cyclic mode */ | ||
295 | if (!echan->edesc->cyclic) | ||
296 | return -EINVAL; | ||
297 | |||
298 | edma_resume(echan->ch_num); | ||
299 | return 0; | ||
300 | } | ||
301 | |||
245 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 302 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
246 | unsigned long arg) | 303 | unsigned long arg) |
247 | { | 304 | { |
@@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
257 | config = (struct dma_slave_config *)arg; | 314 | config = (struct dma_slave_config *)arg; |
258 | ret = edma_slave_config(echan, config); | 315 | ret = edma_slave_config(echan, config); |
259 | break; | 316 | break; |
317 | case DMA_PAUSE: | ||
318 | ret = edma_dma_pause(echan); | ||
319 | break; | ||
320 | |||
321 | case DMA_RESUME: | ||
322 | ret = edma_dma_resume(echan); | ||
323 | break; | ||
324 | |||
260 | default: | 325 | default: |
261 | ret = -ENOSYS; | 326 | ret = -ENOSYS; |
262 | } | 327 | } |
@@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
275 | * @dma_length: Total length of the DMA transfer | 340 | * @dma_length: Total length of the DMA transfer |
276 | * @direction: Direction of the transfer | 341 | * @direction: Direction of the transfer |
277 | */ | 342 | */ |
278 | static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, | 343 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, |
279 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, | 344 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, |
280 | enum dma_slave_buswidth dev_width, unsigned int dma_length, | 345 | enum dma_slave_buswidth dev_width, unsigned int dma_length, |
281 | enum dma_transfer_direction direction) | 346 | enum dma_transfer_direction direction) |
282 | { | 347 | { |
283 | struct edma_chan *echan = to_edma_chan(chan); | 348 | struct edma_chan *echan = to_edma_chan(chan); |
284 | struct device *dev = chan->device->dev; | 349 | struct device *dev = chan->device->dev; |
350 | struct edmacc_param *param = &epset->param; | ||
285 | int acnt, bcnt, ccnt, cidx; | 351 | int acnt, bcnt, ccnt, cidx; |
286 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | 352 | int src_bidx, dst_bidx, src_cidx, dst_cidx; |
287 | int absync; | 353 | int absync; |
288 | 354 | ||
289 | acnt = dev_width; | 355 | acnt = dev_width; |
356 | |||
357 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ | ||
358 | if (!burst) | ||
359 | burst = 1; | ||
290 | /* | 360 | /* |
291 | * If the maxburst is equal to the fifo width, use | 361 | * If the maxburst is equal to the fifo width, use |
292 | * A-synced transfers. This allows for large contiguous | 362 | * A-synced transfers. This allows for large contiguous |
@@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, | |||
337 | cidx = acnt * bcnt; | 407 | cidx = acnt * bcnt; |
338 | } | 408 | } |
339 | 409 | ||
410 | epset->len = dma_length; | ||
411 | |||
340 | if (direction == DMA_MEM_TO_DEV) { | 412 | if (direction == DMA_MEM_TO_DEV) { |
341 | src_bidx = acnt; | 413 | src_bidx = acnt; |
342 | src_cidx = cidx; | 414 | src_cidx = cidx; |
343 | dst_bidx = 0; | 415 | dst_bidx = 0; |
344 | dst_cidx = 0; | 416 | dst_cidx = 0; |
417 | epset->addr = src_addr; | ||
345 | } else if (direction == DMA_DEV_TO_MEM) { | 418 | } else if (direction == DMA_DEV_TO_MEM) { |
346 | src_bidx = 0; | 419 | src_bidx = 0; |
347 | src_cidx = 0; | 420 | src_cidx = 0; |
348 | dst_bidx = acnt; | 421 | dst_bidx = acnt; |
349 | dst_cidx = cidx; | 422 | dst_cidx = cidx; |
423 | epset->addr = dst_addr; | ||
424 | } else if (direction == DMA_MEM_TO_MEM) { | ||
425 | src_bidx = acnt; | ||
426 | src_cidx = cidx; | ||
427 | dst_bidx = acnt; | ||
428 | dst_cidx = cidx; | ||
350 | } else { | 429 | } else { |
351 | dev_err(dev, "%s: direction not implemented yet\n", __func__); | 430 | dev_err(dev, "%s: direction not implemented yet\n", __func__); |
352 | return -EINVAL; | 431 | return -EINVAL; |
353 | } | 432 | } |
354 | 433 | ||
355 | pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | 434 | param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); |
356 | /* Configure A or AB synchronized transfers */ | 435 | /* Configure A or AB synchronized transfers */ |
357 | if (absync) | 436 | if (absync) |
358 | pset->opt |= SYNCDIM; | 437 | param->opt |= SYNCDIM; |
359 | 438 | ||
360 | pset->src = src_addr; | 439 | param->src = src_addr; |
361 | pset->dst = dst_addr; | 440 | param->dst = dst_addr; |
362 | 441 | ||
363 | pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; | 442 | param->src_dst_bidx = (dst_bidx << 16) | src_bidx; |
364 | pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; | 443 | param->src_dst_cidx = (dst_cidx << 16) | src_cidx; |
365 | 444 | ||
366 | pset->a_b_cnt = bcnt << 16 | acnt; | 445 | param->a_b_cnt = bcnt << 16 | acnt; |
367 | pset->ccnt = ccnt; | 446 | param->ccnt = ccnt; |
368 | /* | 447 | /* |
369 | * Only time when (bcntrld) auto reload is required is for | 448 | * Only time when (bcntrld) auto reload is required is for |
370 | * A-sync case, and in this case, a requirement of reload value | 449 | * A-sync case, and in this case, a requirement of reload value |
371 | * of SZ_64K-1 only is assured. 'link' is initially set to NULL | 450 | * of SZ_64K-1 only is assured. 'link' is initially set to NULL |
372 | * and then later will be populated by edma_execute. | 451 | * and then later will be populated by edma_execute. |
373 | */ | 452 | */ |
374 | pset->link_bcntrld = 0xffffffff; | 453 | param->link_bcntrld = 0xffffffff; |
375 | return absync; | 454 | return absync; |
376 | } | 455 | } |
377 | 456 | ||
@@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
401 | dev_width = echan->cfg.dst_addr_width; | 480 | dev_width = echan->cfg.dst_addr_width; |
402 | burst = echan->cfg.dst_maxburst; | 481 | burst = echan->cfg.dst_maxburst; |
403 | } else { | 482 | } else { |
404 | dev_err(dev, "%s: bad direction?\n", __func__); | 483 | dev_err(dev, "%s: bad direction: %d\n", __func__, direction); |
405 | return NULL; | 484 | return NULL; |
406 | } | 485 | } |
407 | 486 | ||
408 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | 487 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { |
409 | dev_err(dev, "Undefined slave buswidth\n"); | 488 | dev_err(dev, "%s: Undefined slave buswidth\n", __func__); |
410 | return NULL; | 489 | return NULL; |
411 | } | 490 | } |
412 | 491 | ||
413 | edesc = kzalloc(sizeof(*edesc) + sg_len * | 492 | edesc = kzalloc(sizeof(*edesc) + sg_len * |
414 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 493 | sizeof(edesc->pset[0]), GFP_ATOMIC); |
415 | if (!edesc) { | 494 | if (!edesc) { |
416 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | 495 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
417 | return NULL; | 496 | return NULL; |
418 | } | 497 | } |
419 | 498 | ||
420 | edesc->pset_nr = sg_len; | 499 | edesc->pset_nr = sg_len; |
500 | edesc->residue = 0; | ||
501 | edesc->direction = direction; | ||
502 | edesc->echan = echan; | ||
421 | 503 | ||
422 | /* Allocate a PaRAM slot, if needed */ | 504 | /* Allocate a PaRAM slot, if needed */ |
423 | nslots = min_t(unsigned, MAX_NR_SG, sg_len); | 505 | nslots = min_t(unsigned, MAX_NR_SG, sg_len); |
@@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
429 | EDMA_SLOT_ANY); | 511 | EDMA_SLOT_ANY); |
430 | if (echan->slot[i] < 0) { | 512 | if (echan->slot[i] < 0) { |
431 | kfree(edesc); | 513 | kfree(edesc); |
432 | dev_err(dev, "Failed to allocate slot\n"); | 514 | dev_err(dev, "%s: Failed to allocate slot\n", |
515 | __func__); | ||
433 | return NULL; | 516 | return NULL; |
434 | } | 517 | } |
435 | } | 518 | } |
@@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
452 | } | 535 | } |
453 | 536 | ||
454 | edesc->absync = ret; | 537 | edesc->absync = ret; |
538 | edesc->residue += sg_dma_len(sg); | ||
455 | 539 | ||
456 | /* If this is the last in a current SG set of transactions, | 540 | /* If this is the last in a current SG set of transactions, |
457 | enable interrupts so that next set is processed */ | 541 | enable interrupts so that next set is processed */ |
458 | if (!((i+1) % MAX_NR_SG)) | 542 | if (!((i+1) % MAX_NR_SG)) |
459 | edesc->pset[i].opt |= TCINTEN; | 543 | edesc->pset[i].param.opt |= TCINTEN; |
460 | 544 | ||
461 | /* If this is the last set, enable completion interrupt flag */ | 545 | /* If this is the last set, enable completion interrupt flag */ |
462 | if (i == sg_len - 1) | 546 | if (i == sg_len - 1) |
463 | edesc->pset[i].opt |= TCINTEN; | 547 | edesc->pset[i].param.opt |= TCINTEN; |
464 | } | 548 | } |
549 | edesc->residue_stat = edesc->residue; | ||
550 | |||
551 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | ||
552 | } | ||
553 | |||
554 | struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | ||
555 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
556 | size_t len, unsigned long tx_flags) | ||
557 | { | ||
558 | int ret; | ||
559 | struct edma_desc *edesc; | ||
560 | struct device *dev = chan->device->dev; | ||
561 | struct edma_chan *echan = to_edma_chan(chan); | ||
562 | |||
563 | if (unlikely(!echan || !len)) | ||
564 | return NULL; | ||
565 | |||
566 | edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
567 | if (!edesc) { | ||
568 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
569 | return NULL; | ||
570 | } | ||
571 | |||
572 | edesc->pset_nr = 1; | ||
573 | |||
574 | ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, | ||
575 | DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM); | ||
576 | if (ret < 0) | ||
577 | return NULL; | ||
578 | |||
579 | edesc->absync = ret; | ||
580 | |||
581 | /* | ||
582 | * Enable intermediate transfer chaining to re-trigger channel | ||
583 | * on completion of every TR, and enable transfer-completion | ||
584 | * interrupt on completion of the whole transfer. | ||
585 | */ | ||
586 | edesc->pset[0].param.opt |= ITCCHEN; | ||
587 | edesc->pset[0].param.opt |= TCINTEN; | ||
465 | 588 | ||
466 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 589 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
467 | } | 590 | } |
@@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
493 | dev_width = echan->cfg.dst_addr_width; | 616 | dev_width = echan->cfg.dst_addr_width; |
494 | burst = echan->cfg.dst_maxburst; | 617 | burst = echan->cfg.dst_maxburst; |
495 | } else { | 618 | } else { |
496 | dev_err(dev, "%s: bad direction?\n", __func__); | 619 | dev_err(dev, "%s: bad direction: %d\n", __func__, direction); |
497 | return NULL; | 620 | return NULL; |
498 | } | 621 | } |
499 | 622 | ||
500 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | 623 | if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { |
501 | dev_err(dev, "Undefined slave buswidth\n"); | 624 | dev_err(dev, "%s: Undefined slave buswidth\n", __func__); |
502 | return NULL; | 625 | return NULL; |
503 | } | 626 | } |
504 | 627 | ||
@@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
523 | edesc = kzalloc(sizeof(*edesc) + nslots * | 646 | edesc = kzalloc(sizeof(*edesc) + nslots * |
524 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 647 | sizeof(edesc->pset[0]), GFP_ATOMIC); |
525 | if (!edesc) { | 648 | if (!edesc) { |
526 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | 649 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
527 | return NULL; | 650 | return NULL; |
528 | } | 651 | } |
529 | 652 | ||
530 | edesc->cyclic = 1; | 653 | edesc->cyclic = 1; |
531 | edesc->pset_nr = nslots; | 654 | edesc->pset_nr = nslots; |
655 | edesc->residue = edesc->residue_stat = buf_len; | ||
656 | edesc->direction = direction; | ||
657 | edesc->echan = echan; | ||
532 | 658 | ||
533 | dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); | 659 | dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n", |
534 | dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); | 660 | __func__, echan->ch_num, nslots, period_len, buf_len); |
535 | dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); | ||
536 | 661 | ||
537 | for (i = 0; i < nslots; i++) { | 662 | for (i = 0; i < nslots; i++) { |
538 | /* Allocate a PaRAM slot, if needed */ | 663 | /* Allocate a PaRAM slot, if needed */ |
@@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
542 | EDMA_SLOT_ANY); | 667 | EDMA_SLOT_ANY); |
543 | if (echan->slot[i] < 0) { | 668 | if (echan->slot[i] < 0) { |
544 | kfree(edesc); | 669 | kfree(edesc); |
545 | dev_err(dev, "Failed to allocate slot\n"); | 670 | dev_err(dev, "%s: Failed to allocate slot\n", |
671 | __func__); | ||
546 | return NULL; | 672 | return NULL; |
547 | } | 673 | } |
548 | } | 674 | } |
@@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
566 | else | 692 | else |
567 | src_addr += period_len; | 693 | src_addr += period_len; |
568 | 694 | ||
569 | dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); | 695 | dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i); |
570 | dev_dbg(dev, | 696 | dev_vdbg(dev, |
571 | "\n pset[%d]:\n" | 697 | "\n pset[%d]:\n" |
572 | " chnum\t%d\n" | 698 | " chnum\t%d\n" |
573 | " slot\t%d\n" | 699 | " slot\t%d\n" |
@@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
580 | " cidx\t%08x\n" | 706 | " cidx\t%08x\n" |
581 | " lkrld\t%08x\n", | 707 | " lkrld\t%08x\n", |
582 | i, echan->ch_num, echan->slot[i], | 708 | i, echan->ch_num, echan->slot[i], |
583 | edesc->pset[i].opt, | 709 | edesc->pset[i].param.opt, |
584 | edesc->pset[i].src, | 710 | edesc->pset[i].param.src, |
585 | edesc->pset[i].dst, | 711 | edesc->pset[i].param.dst, |
586 | edesc->pset[i].a_b_cnt, | 712 | edesc->pset[i].param.a_b_cnt, |
587 | edesc->pset[i].ccnt, | 713 | edesc->pset[i].param.ccnt, |
588 | edesc->pset[i].src_dst_bidx, | 714 | edesc->pset[i].param.src_dst_bidx, |
589 | edesc->pset[i].src_dst_cidx, | 715 | edesc->pset[i].param.src_dst_cidx, |
590 | edesc->pset[i].link_bcntrld); | 716 | edesc->pset[i].param.link_bcntrld); |
591 | 717 | ||
592 | edesc->absync = ret; | 718 | edesc->absync = ret; |
593 | 719 | ||
@@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
595 | * Enable interrupts for every period because callback | 721 | * Enable interrupts for every period because callback |
596 | * has to be called for every period. | 722 | * has to be called for every period. |
597 | */ | 723 | */ |
598 | edesc->pset[i].opt |= TCINTEN; | 724 | edesc->pset[i].param.opt |= TCINTEN; |
599 | } | 725 | } |
600 | 726 | ||
601 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 727 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
@@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
606 | struct edma_chan *echan = data; | 732 | struct edma_chan *echan = data; |
607 | struct device *dev = echan->vchan.chan.device->dev; | 733 | struct device *dev = echan->vchan.chan.device->dev; |
608 | struct edma_desc *edesc; | 734 | struct edma_desc *edesc; |
609 | unsigned long flags; | ||
610 | struct edmacc_param p; | 735 | struct edmacc_param p; |
611 | 736 | ||
612 | edesc = echan->edesc; | 737 | edesc = echan->edesc; |
@@ -617,27 +742,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
617 | 742 | ||
618 | switch (ch_status) { | 743 | switch (ch_status) { |
619 | case EDMA_DMA_COMPLETE: | 744 | case EDMA_DMA_COMPLETE: |
620 | spin_lock_irqsave(&echan->vchan.lock, flags); | 745 | spin_lock(&echan->vchan.lock); |
621 | 746 | ||
622 | if (edesc) { | 747 | if (edesc) { |
623 | if (edesc->cyclic) { | 748 | if (edesc->cyclic) { |
624 | vchan_cyclic_callback(&edesc->vdesc); | 749 | vchan_cyclic_callback(&edesc->vdesc); |
625 | } else if (edesc->processed == edesc->pset_nr) { | 750 | } else if (edesc->processed == edesc->pset_nr) { |
626 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | 751 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); |
752 | edesc->residue = 0; | ||
627 | edma_stop(echan->ch_num); | 753 | edma_stop(echan->ch_num); |
628 | vchan_cookie_complete(&edesc->vdesc); | 754 | vchan_cookie_complete(&edesc->vdesc); |
629 | edma_execute(echan); | 755 | edma_execute(echan); |
630 | } else { | 756 | } else { |
631 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | 757 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); |
758 | |||
759 | /* Update statistics for tx_status */ | ||
760 | edesc->residue -= edesc->sg_len; | ||
761 | edesc->residue_stat = edesc->residue; | ||
762 | edesc->processed_stat = edesc->processed; | ||
763 | |||
632 | edma_execute(echan); | 764 | edma_execute(echan); |
633 | } | 765 | } |
634 | } | 766 | } |
635 | 767 | ||
636 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 768 | spin_unlock(&echan->vchan.lock); |
637 | 769 | ||
638 | break; | 770 | break; |
639 | case EDMA_DMA_CC_ERROR: | 771 | case EDMA_DMA_CC_ERROR: |
640 | spin_lock_irqsave(&echan->vchan.lock, flags); | 772 | spin_lock(&echan->vchan.lock); |
641 | 773 | ||
642 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | 774 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); |
643 | 775 | ||
@@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | |||
668 | edma_trigger_channel(echan->ch_num); | 800 | edma_trigger_channel(echan->ch_num); |
669 | } | 801 | } |
670 | 802 | ||
671 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 803 | spin_unlock(&echan->vchan.lock); |
672 | 804 | ||
673 | break; | 805 | break; |
674 | default: | 806 | default: |
@@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) | |||
704 | echan->alloced = true; | 836 | echan->alloced = true; |
705 | echan->slot[0] = echan->ch_num; | 837 | echan->slot[0] = echan->ch_num; |
706 | 838 | ||
707 | dev_dbg(dev, "allocated channel for %u:%u\n", | 839 | dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, |
708 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | 840 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); |
709 | 841 | ||
710 | return 0; | 842 | return 0; |
@@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan) | |||
756 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 888 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
757 | } | 889 | } |
758 | 890 | ||
759 | static size_t edma_desc_size(struct edma_desc *edesc) | 891 | static u32 edma_residue(struct edma_desc *edesc) |
760 | { | 892 | { |
893 | bool dst = edesc->direction == DMA_DEV_TO_MEM; | ||
894 | struct edma_pset *pset = edesc->pset; | ||
895 | dma_addr_t done, pos; | ||
761 | int i; | 896 | int i; |
762 | size_t size; | 897 | |
763 | 898 | /* | |
764 | if (edesc->absync) | 899 | * We always read the dst/src position from the first PaRAM |
765 | for (size = i = 0; i < edesc->pset_nr; i++) | 900 | * pset. That's the one which is active now. |
766 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | 901 | */ |
767 | (edesc->pset[i].a_b_cnt >> 16) * | 902 | pos = edma_get_position(edesc->echan->slot[0], dst); |
768 | edesc->pset[i].ccnt; | 903 | |
769 | else | 904 | /* |
770 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | 905 | * Cyclic is simple. Just subtract pset[0].addr from pos. |
771 | (edesc->pset[0].a_b_cnt >> 16) + | 906 | * |
772 | (edesc->pset[0].a_b_cnt & 0xffff) * | 907 | * We never update edesc->residue in the cyclic case, so we |
773 | (SZ_64K - 1) * edesc->pset[0].ccnt; | 908 | * can tell the remaining room to the end of the circular |
774 | 909 | * buffer. | |
775 | return size; | 910 | */ |
911 | if (edesc->cyclic) { | ||
912 | done = pos - pset->addr; | ||
913 | edesc->residue_stat = edesc->residue - done; | ||
914 | return edesc->residue_stat; | ||
915 | } | ||
916 | |||
917 | /* | ||
918 | * For SG operation we catch up with the last processed | ||
919 | * status. | ||
920 | */ | ||
921 | pset += edesc->processed_stat; | ||
922 | |||
923 | for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { | ||
924 | /* | ||
925 | * If we are inside this pset address range, we know | ||
926 | * this is the active one. Get the current delta and | ||
927 | * stop walking the psets. | ||
928 | */ | ||
929 | if (pos >= pset->addr && pos < pset->addr + pset->len) | ||
930 | return edesc->residue_stat - (pos - pset->addr); | ||
931 | |||
932 | /* Otherwise mark it done and update residue_stat. */ | ||
933 | edesc->processed_stat++; | ||
934 | edesc->residue_stat -= pset->len; | ||
935 | } | ||
936 | return edesc->residue_stat; | ||
776 | } | 937 | } |
777 | 938 | ||
778 | /* Check request completion status */ | 939 | /* Check request completion status */ |
@@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
790 | return ret; | 951 | return ret; |
791 | 952 | ||
792 | spin_lock_irqsave(&echan->vchan.lock, flags); | 953 | spin_lock_irqsave(&echan->vchan.lock, flags); |
793 | vdesc = vchan_find_desc(&echan->vchan, cookie); | 954 | if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) |
794 | if (vdesc) { | 955 | txstate->residue = edma_residue(echan->edesc); |
795 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | 956 | else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) |
796 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | 957 | txstate->residue = to_edma_desc(&vdesc->tx)->residue; |
797 | struct edma_desc *edesc = echan->edesc; | ||
798 | txstate->residue = edma_desc_size(edesc); | ||
799 | } | ||
800 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | 958 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
801 | 959 | ||
802 | return ret; | 960 | return ret; |
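Note on usage: with the per-descriptor residue bookkeeping introduced above, a dmaengine client can read the remaining byte count through the generic status API. A minimal, hypothetical client-side sketch (the helper name is illustrative and not part of this series):

	#include <linux/dmaengine.h>

	/* Illustrative only: how many bytes are still outstanding for a cookie. */
	static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;
		enum dma_status status;

		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_COMPLETE)
			return 0;

		/* For the in-flight descriptor this value comes from edma_residue() */
		return state.residue;
	}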
@@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc, | |||
822 | } | 980 | } |
823 | } | 981 | } |
824 | 982 | ||
983 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
984 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
985 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
986 | |||
987 | static int edma_dma_device_slave_caps(struct dma_chan *dchan, | ||
988 | struct dma_slave_caps *caps) | ||
989 | { | ||
990 | caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
991 | caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
992 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
993 | caps->cmd_pause = true; | ||
994 | caps->cmd_terminate = true; | ||
995 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
825 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | 1000 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, |
826 | struct device *dev) | 1001 | struct device *dev) |
827 | { | 1002 | { |
828 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 1003 | dma->device_prep_slave_sg = edma_prep_slave_sg; |
829 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | 1004 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; |
1005 | dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
830 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 1006 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; |
831 | dma->device_free_chan_resources = edma_free_chan_resources; | 1007 | dma->device_free_chan_resources = edma_free_chan_resources; |
832 | dma->device_issue_pending = edma_issue_pending; | 1008 | dma->device_issue_pending = edma_issue_pending; |
833 | dma->device_tx_status = edma_tx_status; | 1009 | dma->device_tx_status = edma_tx_status; |
834 | dma->device_control = edma_control; | 1010 | dma->device_control = edma_control; |
1011 | dma->device_slave_caps = edma_dma_device_slave_caps; | ||
835 | dma->dev = dev; | 1012 | dma->dev = dev; |
836 | 1013 | ||
1014 | /* | ||
1015 | * Code using dma memcpy must make sure that the transfer ||
1016 | * length is aligned to dma->copy_align. ||
1017 | */ | ||
1018 | dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
1019 | |||
837 | INIT_LIST_HEAD(&dma->channels); | 1020 | INIT_LIST_HEAD(&dma->channels); |
838 | } | 1021 | } |
839 | 1022 | ||
@@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev) | |||
861 | 1044 | ||
862 | dma_cap_zero(ecc->dma_slave.cap_mask); | 1045 | dma_cap_zero(ecc->dma_slave.cap_mask); |
863 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | 1046 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); |
1047 | dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); | ||
1048 | dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); | ||
864 | 1049 | ||
865 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | 1050 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); |
866 | 1051 | ||
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index b59a17fb7c3e..ff7138fd66d1 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Marvell EBU SoC Device Bus Controller | 2 | * Marvell EBU SoC Device Bus Controller |
3 | * (memory controller for NOR/NAND/SRAM/FPGA devices) | 3 | * (memory controller for NOR/NAND/SRAM/FPGA devices) |
4 | * | 4 | * |
5 | * Copyright (C) 2013 Marvell | 5 | * Copyright (C) 2013-2014 Marvell |
6 | * | 6 | * |
7 | * This program is free software: you can redistribute it and/or modify | 7 | * This program is free software: you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -30,19 +30,47 @@ | |||
30 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
31 | 31 | ||
32 | /* Register definitions */ | 32 | /* Register definitions */ |
33 | #define DEV_WIDTH_BIT 30 | 33 | #define ARMADA_DEV_WIDTH_SHIFT 30 |
34 | #define BADR_SKEW_BIT 28 | 34 | #define ARMADA_BADR_SKEW_SHIFT 28 |
35 | #define RD_HOLD_BIT 23 | 35 | #define ARMADA_RD_HOLD_SHIFT 23 |
36 | #define ACC_NEXT_BIT 17 | 36 | #define ARMADA_ACC_NEXT_SHIFT 17 |
37 | #define RD_SETUP_BIT 12 | 37 | #define ARMADA_RD_SETUP_SHIFT 12 |
38 | #define ACC_FIRST_BIT 6 | 38 | #define ARMADA_ACC_FIRST_SHIFT 6 |
39 | 39 | ||
40 | #define SYNC_ENABLE_BIT 24 | 40 | #define ARMADA_SYNC_ENABLE_SHIFT 24 |
41 | #define WR_HIGH_BIT 16 | 41 | #define ARMADA_WR_HIGH_SHIFT 16 |
42 | #define WR_LOW_BIT 8 | 42 | #define ARMADA_WR_LOW_SHIFT 8 |
43 | 43 | ||
44 | #define READ_PARAM_OFFSET 0x0 | 44 | #define ARMADA_READ_PARAM_OFFSET 0x0 |
45 | #define WRITE_PARAM_OFFSET 0x4 | 45 | #define ARMADA_WRITE_PARAM_OFFSET 0x4 |
46 | |||
47 | #define ORION_RESERVED (0x2 << 30) | ||
48 | #define ORION_BADR_SKEW_SHIFT 28 | ||
49 | #define ORION_WR_HIGH_EXT_BIT BIT(27) | ||
50 | #define ORION_WR_HIGH_EXT_MASK 0x8 | ||
51 | #define ORION_WR_LOW_EXT_BIT BIT(26) | ||
52 | #define ORION_WR_LOW_EXT_MASK 0x8 | ||
53 | #define ORION_ALE_WR_EXT_BIT BIT(25) | ||
54 | #define ORION_ALE_WR_EXT_MASK 0x8 | ||
55 | #define ORION_ACC_NEXT_EXT_BIT BIT(24) | ||
56 | #define ORION_ACC_NEXT_EXT_MASK 0x10 | ||
57 | #define ORION_ACC_FIRST_EXT_BIT BIT(23) | ||
58 | #define ORION_ACC_FIRST_EXT_MASK 0x10 | ||
59 | #define ORION_TURN_OFF_EXT_BIT BIT(22) | ||
60 | #define ORION_TURN_OFF_EXT_MASK 0x8 | ||
61 | #define ORION_DEV_WIDTH_SHIFT 20 | ||
62 | #define ORION_WR_HIGH_SHIFT 17 | ||
63 | #define ORION_WR_HIGH_MASK 0x7 | ||
64 | #define ORION_WR_LOW_SHIFT 14 | ||
65 | #define ORION_WR_LOW_MASK 0x7 | ||
66 | #define ORION_ALE_WR_SHIFT 11 | ||
67 | #define ORION_ALE_WR_MASK 0x7 | ||
68 | #define ORION_ACC_NEXT_SHIFT 7 | ||
69 | #define ORION_ACC_NEXT_MASK 0xF | ||
70 | #define ORION_ACC_FIRST_SHIFT 3 | ||
71 | #define ORION_ACC_FIRST_MASK 0xF | ||
72 | #define ORION_TURN_OFF_SHIFT 0 | ||
73 | #define ORION_TURN_OFF_MASK 0x7 | ||
46 | 74 | ||
47 | struct devbus_read_params { | 75 | struct devbus_read_params { |
48 | u32 bus_width; | 76 | u32 bus_width; |
@@ -89,19 +117,14 @@ static int get_timing_param_ps(struct devbus *devbus, | |||
89 | return 0; | 117 | return 0; |
90 | } | 118 | } |
91 | 119 | ||
92 | static int devbus_set_timing_params(struct devbus *devbus, | 120 | static int devbus_get_timing_params(struct devbus *devbus, |
93 | struct device_node *node) | 121 | struct device_node *node, |
122 | struct devbus_read_params *r, | ||
123 | struct devbus_write_params *w) | ||
94 | { | 124 | { |
95 | struct devbus_read_params r; | ||
96 | struct devbus_write_params w; | ||
97 | u32 value; | ||
98 | int err; | 125 | int err; |
99 | 126 | ||
100 | dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n", | 127 | err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width); |
101 | devbus->tick_ps); | ||
102 | |||
103 | /* Get read timings */ | ||
104 | err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width); | ||
105 | if (err < 0) { | 128 | if (err < 0) { |
106 | dev_err(devbus->dev, | 129 | dev_err(devbus->dev, |
107 | "%s has no 'devbus,bus-width' property\n", | 130 | "%s has no 'devbus,bus-width' property\n", |
@@ -113,104 +136,148 @@ static int devbus_set_timing_params(struct devbus *devbus, | |||
113 | * The bus width is encoded into the register as 0 for 8 bits, | 136 | * The bus width is encoded into the register as 0 for 8 bits, |
114 | * and 1 for 16 bits, so we do the necessary conversion here. | 137 | * and 1 for 16 bits, so we do the necessary conversion here. |
115 | */ | 138 | */ |
116 | if (r.bus_width == 8) | 139 | if (r->bus_width == 8) |
117 | r.bus_width = 0; | 140 | r->bus_width = 0; |
118 | else if (r.bus_width == 16) | 141 | else if (r->bus_width == 16) |
119 | r.bus_width = 1; | 142 | r->bus_width = 1; |
120 | else { | 143 | else { |
121 | dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); | 144 | dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width); |
122 | return -EINVAL; | 145 | return -EINVAL; |
123 | } | 146 | } |
124 | 147 | ||
125 | err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", | 148 | err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", |
126 | &r.badr_skew); | 149 | &r->badr_skew); |
127 | if (err < 0) | 150 | if (err < 0) |
128 | return err; | 151 | return err; |
129 | 152 | ||
130 | err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps", | 153 | err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps", |
131 | &r.turn_off); | 154 | &r->turn_off); |
132 | if (err < 0) | 155 | if (err < 0) |
133 | return err; | 156 | return err; |
134 | 157 | ||
135 | err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps", | 158 | err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps", |
136 | &r.acc_first); | 159 | &r->acc_first); |
137 | if (err < 0) | 160 | if (err < 0) |
138 | return err; | 161 | return err; |
139 | 162 | ||
140 | err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps", | 163 | err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps", |
141 | &r.acc_next); | 164 | &r->acc_next); |
142 | if (err < 0) | ||
143 | return err; | ||
144 | |||
145 | err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps", | ||
146 | &r.rd_setup); | ||
147 | if (err < 0) | 165 | if (err < 0) |
148 | return err; | 166 | return err; |
149 | 167 | ||
150 | err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", | 168 | if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) { |
151 | &r.rd_hold); | 169 | err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps", |
152 | if (err < 0) | 170 | &r->rd_setup); |
153 | return err; | 171 | if (err < 0) |
154 | 172 | return err; | |
155 | /* Get write timings */ | 173 | |
156 | err = of_property_read_u32(node, "devbus,sync-enable", | 174 | err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", |
157 | &w.sync_enable); | 175 | &r->rd_hold); |
158 | if (err < 0) { | 176 | if (err < 0) |
159 | dev_err(devbus->dev, | 177 | return err; |
160 | "%s has no 'devbus,sync-enable' property\n", | 178 | |
161 | node->full_name); | 179 | err = of_property_read_u32(node, "devbus,sync-enable", |
162 | return err; | 180 | &w->sync_enable); |
181 | if (err < 0) { | ||
182 | dev_err(devbus->dev, | ||
183 | "%s has no 'devbus,sync-enable' property\n", | ||
184 | node->full_name); | ||
185 | return err; | ||
186 | } | ||
163 | } | 187 | } |
164 | 188 | ||
165 | err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps", | 189 | err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps", |
166 | &w.ale_wr); | 190 | &w->ale_wr); |
167 | if (err < 0) | 191 | if (err < 0) |
168 | return err; | 192 | return err; |
169 | 193 | ||
170 | err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps", | 194 | err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps", |
171 | &w.wr_low); | 195 | &w->wr_low); |
172 | if (err < 0) | 196 | if (err < 0) |
173 | return err; | 197 | return err; |
174 | 198 | ||
175 | err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps", | 199 | err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps", |
176 | &w.wr_high); | 200 | &w->wr_high); |
177 | if (err < 0) | 201 | if (err < 0) |
178 | return err; | 202 | return err; |
179 | 203 | ||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static void devbus_orion_set_timing_params(struct devbus *devbus, | ||
208 | struct device_node *node, | ||
209 | struct devbus_read_params *r, | ||
210 | struct devbus_write_params *w) | ||
211 | { | ||
212 | u32 value; | ||
213 | |||
214 | /* | ||
215 | * The hardware designers found it would be a good idea to | ||
216 | * split most of the values in the register into two fields: | ||
217 | * one containing all the low-order bits, and another one | ||
218 | * containing just the high-order bit. For all of those | ||
219 | * fields, we have to split the value into these two parts. | ||
220 | */ | ||
221 | value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT | | ||
222 | (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT | | ||
223 | (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT | | ||
224 | (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT | | ||
225 | (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT | | ||
226 | (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT | | ||
227 | r->bus_width << ORION_DEV_WIDTH_SHIFT | | ||
228 | ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) | | ||
229 | ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? ORION_ACC_FIRST_EXT_BIT : 0) | | ||
230 | ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) | | ||
231 | ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) | | ||
232 | ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) | | ||
233 | ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) | | ||
234 | (r->badr_skew << ORION_BADR_SKEW_SHIFT) | | ||
235 | ORION_RESERVED; | ||
236 | |||
237 | writel(value, devbus->base); | ||
238 | } | ||
239 | |||
240 | static void devbus_armada_set_timing_params(struct devbus *devbus, | ||
241 | struct device_node *node, | ||
242 | struct devbus_read_params *r, | ||
243 | struct devbus_write_params *w) | ||
244 | { | ||
245 | u32 value; | ||
246 | |||
180 | /* Set read timings */ | 247 | /* Set read timings */ |
181 | value = r.bus_width << DEV_WIDTH_BIT | | 248 | value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT | |
182 | r.badr_skew << BADR_SKEW_BIT | | 249 | r->badr_skew << ARMADA_BADR_SKEW_SHIFT | |
183 | r.rd_hold << RD_HOLD_BIT | | 250 | r->rd_hold << ARMADA_RD_HOLD_SHIFT | |
184 | r.acc_next << ACC_NEXT_BIT | | 251 | r->acc_next << ARMADA_ACC_NEXT_SHIFT | |
185 | r.rd_setup << RD_SETUP_BIT | | 252 | r->rd_setup << ARMADA_RD_SETUP_SHIFT | |
186 | r.acc_first << ACC_FIRST_BIT | | 253 | r->acc_first << ARMADA_ACC_FIRST_SHIFT | |
187 | r.turn_off; | 254 | r->turn_off; |
188 | 255 | ||
189 | dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n", | 256 | dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n", |
190 | devbus->base + READ_PARAM_OFFSET, | 257 | devbus->base + ARMADA_READ_PARAM_OFFSET, |
191 | value); | 258 | value); |
192 | 259 | ||
193 | writel(value, devbus->base + READ_PARAM_OFFSET); | 260 | writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET); |
194 | 261 | ||
195 | /* Set write timings */ | 262 | /* Set write timings */ |
196 | value = w.sync_enable << SYNC_ENABLE_BIT | | 263 | value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT | |
197 | w.wr_low << WR_LOW_BIT | | 264 | w->wr_low << ARMADA_WR_LOW_SHIFT | |
198 | w.wr_high << WR_HIGH_BIT | | 265 | w->wr_high << ARMADA_WR_HIGH_SHIFT | |
199 | w.ale_wr; | 266 | w->ale_wr; |
200 | 267 | ||
201 | dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n", | 268 | dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n", |
202 | devbus->base + WRITE_PARAM_OFFSET, | 269 | devbus->base + ARMADA_WRITE_PARAM_OFFSET, |
203 | value); | 270 | value); |
204 | 271 | ||
205 | writel(value, devbus->base + WRITE_PARAM_OFFSET); | 272 | writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET); |
206 | |||
207 | return 0; | ||
208 | } | 273 | } |
209 | 274 | ||
210 | static int mvebu_devbus_probe(struct platform_device *pdev) | 275 | static int mvebu_devbus_probe(struct platform_device *pdev) |
211 | { | 276 | { |
212 | struct device *dev = &pdev->dev; | 277 | struct device *dev = &pdev->dev; |
213 | struct device_node *node = pdev->dev.of_node; | 278 | struct device_node *node = pdev->dev.of_node; |
279 | struct devbus_read_params r; | ||
280 | struct devbus_write_params w; | ||
214 | struct devbus *devbus; | 281 | struct devbus *devbus; |
215 | struct resource *res; | 282 | struct resource *res; |
216 | struct clk *clk; | 283 | struct clk *clk; |
@@ -240,10 +307,21 @@ static int mvebu_devbus_probe(struct platform_device *pdev) | |||
240 | rate = clk_get_rate(clk) / 1000; | 307 | rate = clk_get_rate(clk) / 1000; |
241 | devbus->tick_ps = 1000000000 / rate; | 308 | devbus->tick_ps = 1000000000 / rate; |
242 | 309 | ||
243 | /* Read the device tree node and set the new timing parameters */ | 310 | dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n", |
244 | err = devbus_set_timing_params(devbus, node); | 311 | devbus->tick_ps); |
245 | if (err < 0) | 312 | |
246 | return err; | 313 | if (!of_property_read_bool(node, "devbus,keep-config")) { |
314 | /* Read the Device Tree node */ | ||
315 | err = devbus_get_timing_params(devbus, node, &r, &w); | ||
316 | if (err < 0) | ||
317 | return err; | ||
318 | |||
319 | /* Set the new timing parameters */ | ||
320 | if (of_device_is_compatible(node, "marvell,orion-devbus")) | ||
321 | devbus_orion_set_timing_params(devbus, node, &r, &w); | ||
322 | else | ||
323 | devbus_armada_set_timing_params(devbus, node, &r, &w); | ||
324 | } | ||
247 | 325 | ||
248 | /* | 326 | /* |
249 | * We need to create a child device explicitly from here to | 327 | * We need to create a child device explicitly from here to |
@@ -259,6 +337,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev) | |||
259 | 337 | ||
260 | static const struct of_device_id mvebu_devbus_of_match[] = { | 338 | static const struct of_device_id mvebu_devbus_of_match[] = { |
261 | { .compatible = "marvell,mvebu-devbus" }, | 339 | { .compatible = "marvell,mvebu-devbus" }, |
340 | { .compatible = "marvell,orion-devbus" }, | ||
262 | {}, | 341 | {}, |
263 | }; | 342 | }; |
264 | MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match); | 343 | MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match); |
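The Orion path composes each timing field from its low-order bits plus a separate high-order "extended" bit, as the comment in devbus_orion_set_timing_params() describes. A hedged sketch of that pattern (helper and example value are illustrative, not code from this patch):

	/* Illustrative only: build one Orion field from low bits, shift and ext bit. */
	static u32 orion_field(u32 val, u32 mask, int shift, u32 ext_mask, u32 ext_bit)
	{
		u32 reg = (val & mask) << shift;

		if (val & ext_mask)
			reg |= ext_bit;

		return reg;
	}

	/*
	 * e.g. acc_first = 0x13: bits 3:0 (0x3) land at ORION_ACC_FIRST_SHIFT,
	 * while bit 4 (0x10) sets ORION_ACC_FIRST_EXT_BIT.
	 */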
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 49b46e6ca959..bdcf5173e377 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig | |||
@@ -51,6 +51,13 @@ config POWER_RESET_RESTART | |||
51 | Instead they restart, and u-boot holds the SoC until the | 51 | Instead they restart, and u-boot holds the SoC until the |
52 | user presses a key. u-boot then boots into Linux. | 52 | user presses a key. u-boot then boots into Linux. |
53 | 53 | ||
54 | config POWER_RESET_SUN6I | ||
55 | bool "Allwinner A31 SoC reset driver" | ||
56 | depends on ARCH_SUNXI | ||
57 | depends on POWER_RESET | ||
58 | help | ||
59 | Reboot support for the Allwinner A31 SoCs. | ||
60 | |||
54 | config POWER_RESET_VEXPRESS | 61 | config POWER_RESET_VEXPRESS |
55 | bool "ARM Versatile Express power-off and reset driver" | 62 | bool "ARM Versatile Express power-off and reset driver" |
56 | depends on ARM || ARM64 | 63 | depends on ARM || ARM64 |
@@ -65,3 +72,11 @@ config POWER_RESET_XGENE | |||
65 | depends on POWER_RESET | 72 | depends on POWER_RESET |
66 | help | 73 | help |
67 | Reboot support for the APM SoC X-Gene Eval boards. | 74 | Reboot support for the APM SoC X-Gene Eval boards. |
75 | |||
76 | config POWER_RESET_KEYSTONE | ||
77 | bool "Keystone reset driver" | ||
78 | depends on ARCH_KEYSTONE | ||
79 | select MFD_SYSCON | ||
80 | help | ||
81 | Reboot support for the TI Keystone SoCs. ||
82 | |||
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 16c0516e5a19..dde2e8bbac53 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile | |||
@@ -4,5 +4,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o | |||
4 | obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o | 4 | obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o |
5 | obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o | 5 | obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o |
6 | obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o | 6 | obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o |
7 | obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o | ||
7 | obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o | 8 | obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o |
8 | obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o | 9 | obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o |
10 | obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o | ||
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c new file mode 100644 index 000000000000..408a18fd91cb --- /dev/null +++ b/drivers/power/reset/keystone-reset.c | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * TI keystone reboot driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/ | ||
5 | * | ||
6 | * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/io.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/reboot.h> | ||
16 | #include <linux/regmap.h> | ||
17 | #include <asm/system_misc.h> | ||
18 | #include <linux/mfd/syscon.h> | ||
19 | #include <linux/of_platform.h> | ||
20 | |||
21 | #define RSTYPE_RG 0x0 | ||
22 | #define RSCTRL_RG 0x4 | ||
23 | #define RSCFG_RG 0x8 | ||
24 | #define RSISO_RG 0xc | ||
25 | |||
26 | #define RSCTRL_KEY_MASK 0x0000ffff | ||
27 | #define RSCTRL_RESET_MASK BIT(16) | ||
28 | #define RSCTRL_KEY 0x5a69 | ||
29 | |||
30 | #define RSMUX_OMODE_MASK 0xe | ||
31 | #define RSMUX_OMODE_RESET_ON 0xa | ||
32 | #define RSMUX_OMODE_RESET_OFF 0x0 | ||
33 | #define RSMUX_LOCK_MASK 0x1 | ||
34 | #define RSMUX_LOCK_SET 0x1 | ||
35 | |||
36 | #define RSCFG_RSTYPE_SOFT 0x300f | ||
37 | #define RSCFG_RSTYPE_HARD 0x0 | ||
38 | |||
39 | #define WDT_MUX_NUMBER 0x4 | ||
40 | |||
41 | static int rspll_offset; | ||
42 | static struct regmap *pllctrl_regs; | ||
43 | |||
44 | /** | ||
45 | * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG | ||
46 | * To be able to access to RSCTRL, RSCFG registers | ||
47 | * we have to write a key before | ||
48 | */ | ||
49 | static inline int rsctrl_enable_rspll_write(void) | ||
50 | { | ||
51 | return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG, | ||
52 | RSCTRL_KEY_MASK, RSCTRL_KEY); | ||
53 | } | ||
54 | |||
55 | static void rsctrl_restart(enum reboot_mode mode, const char *cmd) | ||
56 | { | ||
57 | /* enable write access to RSTCTRL */ | ||
58 | rsctrl_enable_rspll_write(); | ||
59 | |||
60 | /* reset the SOC */ | ||
61 | regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG, | ||
62 | RSCTRL_RESET_MASK, 0); | ||
63 | } | ||
64 | |||
65 | static struct of_device_id rsctrl_of_match[] = { | ||
66 | {.compatible = "ti,keystone-reset", }, | ||
67 | {}, | ||
68 | }; | ||
69 | |||
70 | static int rsctrl_probe(struct platform_device *pdev) | ||
71 | { | ||
72 | int i; | ||
73 | int ret; | ||
74 | u32 val; | ||
75 | unsigned int rg; | ||
76 | u32 rsmux_offset; | ||
77 | struct regmap *devctrl_regs; | ||
78 | struct device *dev = &pdev->dev; | ||
79 | struct device_node *np = dev->of_node; | ||
80 | |||
81 | if (!np) | ||
82 | return -ENODEV; | ||
83 | |||
84 | /* get regmaps */ | ||
85 | pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll"); | ||
86 | if (IS_ERR(pllctrl_regs)) | ||
87 | return PTR_ERR(pllctrl_regs); | ||
88 | |||
89 | devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev"); | ||
90 | if (IS_ERR(devctrl_regs)) | ||
91 | return PTR_ERR(devctrl_regs); | ||
92 | |||
93 | ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset); | ||
94 | if (ret) { | ||
95 | dev_err(dev, "couldn't read the reset pll offset!\n"); | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | |||
99 | ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset); | ||
100 | if (ret) { | ||
101 | dev_err(dev, "couldn't read the rsmux offset!\n"); | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | |||
105 | /* set soft/hard reset */ | ||
106 | val = of_property_read_bool(np, "ti,soft-reset"); | ||
107 | val = val ? RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD; | ||
108 | |||
109 | ret = rsctrl_enable_rspll_write(); | ||
110 | if (ret) | ||
111 | return ret; | ||
112 | |||
113 | ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val); | ||
114 | if (ret) | ||
115 | return ret; | ||
116 | |||
117 | arm_pm_restart = rsctrl_restart; | ||
118 | |||
119 | /* disable reset isolation for all module clocks */ ||
120 | ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0); | ||
121 | if (ret) | ||
122 | return ret; | ||
123 | |||
124 | /* enable reset for the watchdogs listed in ti,wdt-list */ ||
125 | for (i = 0; i < WDT_MUX_NUMBER; i++) { | ||
126 | ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val); | ||
127 | if (ret == -EOVERFLOW && !i) { | ||
128 | dev_err(dev, "ti,wdt-list property has to contain at" | ||
129 | "least one entry\n"); | ||
130 | return -EINVAL; | ||
131 | } else if (ret) { | ||
132 | break; | ||
133 | } | ||
134 | |||
135 | if (val >= WDT_MUX_NUMBER) { | ||
136 | dev_err(dev, "ti,wdt-list property can contain" | ||
137 | "only numbers < 4\n"); | ||
138 | return -EINVAL; | ||
139 | } | ||
140 | |||
141 | rg = rsmux_offset + val * 4; | ||
142 | |||
143 | ret = regmap_update_bits(devctrl_regs, rg, RSMUX_OMODE_MASK, | ||
144 | RSMUX_OMODE_RESET_ON | | ||
145 | RSMUX_LOCK_SET); | ||
146 | if (ret) | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static struct platform_driver rsctrl_driver = { | ||
154 | .probe = rsctrl_probe, | ||
155 | .driver = { | ||
156 | .owner = THIS_MODULE, | ||
157 | .name = KBUILD_MODNAME, | ||
158 | .of_match_table = rsctrl_of_match, | ||
159 | }, | ||
160 | }; | ||
161 | module_platform_driver(rsctrl_driver); | ||
162 | |||
163 | MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>"); | ||
164 | MODULE_DESCRIPTION("Texas Instruments keystone reset driver"); | ||
165 | MODULE_LICENSE("GPL v2"); | ||
166 | MODULE_ALIAS("platform:" KBUILD_MODNAME); | ||
diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c new file mode 100644 index 000000000000..af2cd7ff2fe8 --- /dev/null +++ b/drivers/power/reset/sun6i-reboot.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Allwinner A31 SoCs reset code | ||
3 | * | ||
4 | * Copyright (C) 2012-2014 Maxime Ripard | ||
5 | * | ||
6 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/delay.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/reboot.h> | ||
19 | |||
20 | #include <asm/system_misc.h> | ||
21 | |||
22 | #define SUN6I_WATCHDOG1_IRQ_REG 0x00 | ||
23 | #define SUN6I_WATCHDOG1_CTRL_REG 0x10 | ||
24 | #define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0) | ||
25 | #define SUN6I_WATCHDOG1_CONFIG_REG 0x14 | ||
26 | #define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0) | ||
27 | #define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1) | ||
28 | #define SUN6I_WATCHDOG1_MODE_REG 0x18 | ||
29 | #define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0) | ||
30 | |||
31 | static void __iomem *wdt_base; | ||
32 | |||
33 | static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd) | ||
34 | { | ||
35 | if (!wdt_base) | ||
36 | return; | ||
37 | |||
38 | /* Disable interrupts */ | ||
39 | writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG); | ||
40 | |||
41 | /* We want to disable the IRQ and just reset the whole system */ | ||
42 | writel(SUN6I_WATCHDOG1_CONFIG_RESTART, | ||
43 | wdt_base + SUN6I_WATCHDOG1_CONFIG_REG); | ||
44 | |||
45 | /* Enable timer. The default and lowest interval value is 0.5s */ | ||
46 | writel(SUN6I_WATCHDOG1_MODE_ENABLE, | ||
47 | wdt_base + SUN6I_WATCHDOG1_MODE_REG); | ||
48 | |||
49 | /* Restart the watchdog. */ | ||
50 | writel(SUN6I_WATCHDOG1_CTRL_RESTART, | ||
51 | wdt_base + SUN6I_WATCHDOG1_CTRL_REG); | ||
52 | |||
53 | while (1) { | ||
54 | mdelay(5); | ||
55 | writel(SUN6I_WATCHDOG1_MODE_ENABLE, | ||
56 | wdt_base + SUN6I_WATCHDOG1_MODE_REG); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | static int sun6i_reboot_probe(struct platform_device *pdev) | ||
61 | { | ||
62 | wdt_base = of_iomap(pdev->dev.of_node, 0); | ||
63 | if (!wdt_base) { | ||
64 | WARN(1, "failed to map watchdog base address"); | ||
65 | return -ENODEV; | ||
66 | } | ||
67 | |||
68 | arm_pm_restart = sun6i_wdt_restart; | ||
69 | |||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static struct of_device_id sun6i_reboot_of_match[] = { | ||
74 | { .compatible = "allwinner,sun6i-a31-wdt" }, | ||
75 | {} | ||
76 | }; | ||
77 | |||
78 | static struct platform_driver sun6i_reboot_driver = { | ||
79 | .probe = sun6i_reboot_probe, | ||
80 | .driver = { | ||
81 | .name = "sun6i-reboot", | ||
82 | .of_match_table = sun6i_reboot_of_match, | ||
83 | }, | ||
84 | }; | ||
85 | module_platform_driver(sun6i_reboot_driver); | ||
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 4f60caf750ce..60fed3d7820b 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-$(CONFIG_RESET_CONTROLLER) += core.o | 1 | obj-$(CONFIG_RESET_CONTROLLER) += core.o |
2 | obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o | ||
2 | obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o | 3 | obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o |
3 | obj-$(CONFIG_ARCH_STI) += sti/ | 4 | obj-$(CONFIG_ARCH_STI) += sti/ |
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c new file mode 100644 index 000000000000..79c32ca84ef1 --- /dev/null +++ b/drivers/reset/reset-socfpga.c | |||
@@ -0,0 +1,146 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de> | ||
3 | * | ||
4 | * based on | ||
5 | * Allwinner SoCs Reset Controller driver | ||
6 | * | ||
7 | * Copyright 2013 Maxime Ripard | ||
8 | * | ||
9 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/reset-controller.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | #define NR_BANKS 4 | ||
27 | #define OFFSET_MODRST 0x10 | ||
28 | |||
29 | struct socfpga_reset_data { | ||
30 | spinlock_t lock; | ||
31 | void __iomem *membase; | ||
32 | struct reset_controller_dev rcdev; | ||
33 | }; | ||
34 | |||
35 | static int socfpga_reset_assert(struct reset_controller_dev *rcdev, | ||
36 | unsigned long id) | ||
37 | { | ||
38 | struct socfpga_reset_data *data = container_of(rcdev, | ||
39 | struct socfpga_reset_data, | ||
40 | rcdev); | ||
41 | int bank = id / BITS_PER_LONG; | ||
42 | int offset = id % BITS_PER_LONG; | ||
43 | unsigned long flags; | ||
44 | u32 reg; | ||
45 | |||
46 | spin_lock_irqsave(&data->lock, flags); | ||
47 | |||
48 | reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS)); | ||
49 | writel(reg | BIT(offset), data->membase + OFFSET_MODRST + | ||
50 | (bank * NR_BANKS)); | ||
51 | spin_unlock_irqrestore(&data->lock, flags); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, | ||
57 | unsigned long id) | ||
58 | { | ||
59 | struct socfpga_reset_data *data = container_of(rcdev, | ||
60 | struct socfpga_reset_data, | ||
61 | rcdev); | ||
62 | |||
63 | int bank = id / BITS_PER_LONG; | ||
64 | int offset = id % BITS_PER_LONG; | ||
65 | unsigned long flags; | ||
66 | u32 reg; | ||
67 | |||
68 | spin_lock_irqsave(&data->lock, flags); | ||
69 | |||
70 | reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS)); | ||
71 | writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST + | ||
72 | (bank * NR_BANKS)); | ||
73 | |||
74 | spin_unlock_irqrestore(&data->lock, flags); | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static struct reset_control_ops socfpga_reset_ops = { | ||
80 | .assert = socfpga_reset_assert, | ||
81 | .deassert = socfpga_reset_deassert, | ||
82 | }; | ||
83 | |||
84 | static int socfpga_reset_probe(struct platform_device *pdev) | ||
85 | { | ||
86 | struct socfpga_reset_data *data; | ||
87 | struct resource *res; | ||
88 | |||
89 | /* | ||
90 | * The binding was mainlined without the required #reset-cells ||
91 | * property. Do not continue when we encounter an old DT. ||
92 | */ | ||
93 | if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) { | ||
94 | dev_err(&pdev->dev, "%s missing #reset-cells property\n", | ||
95 | pdev->dev.of_node->full_name); | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | |||
99 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
100 | if (!data) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
104 | data->membase = devm_ioremap_resource(&pdev->dev, res); | ||
105 | if (IS_ERR(data->membase)) | ||
106 | return PTR_ERR(data->membase); | ||
107 | |||
108 | spin_lock_init(&data->lock); | ||
109 | |||
110 | data->rcdev.owner = THIS_MODULE; | ||
111 | data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; | ||
112 | data->rcdev.ops = &socfpga_reset_ops; | ||
113 | data->rcdev.of_node = pdev->dev.of_node; | ||
114 | reset_controller_register(&data->rcdev); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int socfpga_reset_remove(struct platform_device *pdev) | ||
120 | { | ||
121 | struct socfpga_reset_data *data = platform_get_drvdata(pdev); | ||
122 | |||
123 | reset_controller_unregister(&data->rcdev); | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static const struct of_device_id socfpga_reset_dt_ids[] = { | ||
129 | { .compatible = "altr,rst-mgr", }, | ||
130 | { /* sentinel */ }, | ||
131 | }; | ||
132 | |||
133 | static struct platform_driver socfpga_reset_driver = { | ||
134 | .probe = socfpga_reset_probe, | ||
135 | .remove = socfpga_reset_remove, | ||
136 | .driver = { | ||
137 | .name = "socfpga-reset", | ||
138 | .owner = THIS_MODULE, | ||
139 | .of_match_table = socfpga_reset_dt_ids, | ||
140 | }, | ||
141 | }; | ||
142 | module_platform_driver(socfpga_reset_driver); | ||
143 | |||
144 | MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de"); | ||
145 | MODULE_DESCRIPTION("Socfpga Reset Controller Driver"); | ||
146 | MODULE_LICENSE("GPL"); | ||
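For reference, the assert/deassert callbacks above map a flat reset id onto a MODRST register and bit. A small worked example (assuming 32-bit BITS_PER_LONG, as on SoCFPGA):

	/* Illustrative only: reset id 35 selects bank 1, bit 3. */
	int bank   = 35 / BITS_PER_LONG;   /* 1 */
	int offset = 35 % BITS_PER_LONG;   /* 3 -> BIT(3) in that bank */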
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index 695bd3496eba..a94e7a7820b4 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c | |||
@@ -145,7 +145,24 @@ MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids); | |||
145 | 145 | ||
146 | static int sunxi_reset_probe(struct platform_device *pdev) | 146 | static int sunxi_reset_probe(struct platform_device *pdev) |
147 | { | 147 | { |
148 | return sunxi_reset_init(pdev->dev.of_node); | 148 | struct sunxi_reset_data *data; |
149 | struct resource *res; | ||
150 | |||
151 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | ||
152 | if (!data) | ||
153 | return -ENOMEM; | ||
154 | |||
155 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
156 | data->membase = devm_ioremap_resource(&pdev->dev, res); | ||
157 | if (IS_ERR(data->membase)) | ||
158 | return PTR_ERR(data->membase); | ||
159 | |||
160 | data->rcdev.owner = THIS_MODULE; | ||
161 | data->rcdev.nr_resets = resource_size(res) * 32; | ||
162 | data->rcdev.ops = &sunxi_reset_ops; | ||
163 | data->rcdev.of_node = pdev->dev.of_node; | ||
164 | |||
165 | return reset_controller_register(&data->rcdev); | ||
149 | } | 166 | } |
150 | 167 | ||
151 | static int sunxi_reset_remove(struct platform_device *pdev) | 168 | static int sunxi_reset_remove(struct platform_device *pdev) |
@@ -153,8 +170,6 @@ static int sunxi_reset_remove(struct platform_device *pdev) | |||
153 | struct sunxi_reset_data *data = platform_get_drvdata(pdev); | 170 | struct sunxi_reset_data *data = platform_get_drvdata(pdev); |
154 | 171 | ||
155 | reset_controller_unregister(&data->rcdev); | 172 | reset_controller_unregister(&data->rcdev); |
156 | iounmap(data->membase); | ||
157 | kfree(data); | ||
158 | 173 | ||
159 | return 0; | 174 | return 0; |
160 | } | 175 | } |
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig new file mode 100644 index 000000000000..c8543855aa82 --- /dev/null +++ b/drivers/soc/Kconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | menu "SOC (System On Chip) specific Drivers" | ||
2 | |||
3 | source "drivers/soc/qcom/Kconfig" | ||
4 | |||
5 | endmenu | ||
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile new file mode 100644 index 000000000000..0f7c44793b29 --- /dev/null +++ b/drivers/soc/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for the Linux Kernel SOC specific device drivers. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_ARCH_QCOM) += qcom/ | ||
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig new file mode 100644 index 000000000000..7bd2c94f54a4 --- /dev/null +++ b/drivers/soc/qcom/Kconfig | |||
@@ -0,0 +1,11 @@ | |||
1 | # | ||
2 | # QCOM Soc drivers | ||
3 | # | ||
4 | config QCOM_GSBI | ||
5 | tristate "QCOM General Serial Bus Interface" | ||
6 | depends on ARCH_QCOM | ||
7 | help | ||
8 | Say y here to enable GSBI support. The GSBI provides control | ||
9 | functions for connecting the underlying serial UART, SPI, and I2C | ||
10 | devices to the output pins. | ||
11 | |||
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile new file mode 100644 index 000000000000..438901257ac1 --- /dev/null +++ b/drivers/soc/qcom/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o | |||
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c new file mode 100644 index 000000000000..447458e696a9 --- /dev/null +++ b/drivers/soc/qcom/qcom_gsbi.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, The Linux foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License rev 2 and | ||
6 | * only rev 2 as published by the Free Software Foundation. ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/of_platform.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | |||
22 | #define GSBI_CTRL_REG 0x0000 | ||
23 | #define GSBI_PROTOCOL_SHIFT 4 | ||
24 | |||
25 | static int gsbi_probe(struct platform_device *pdev) | ||
26 | { | ||
27 | struct device_node *node = pdev->dev.of_node; | ||
28 | struct resource *res; | ||
29 | void __iomem *base; | ||
30 | struct clk *hclk; | ||
31 | u32 mode, crci = 0; | ||
32 | |||
33 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
34 | base = devm_ioremap_resource(&pdev->dev, res); | ||
35 | if (IS_ERR(base)) | ||
36 | return PTR_ERR(base); | ||
37 | |||
38 | if (of_property_read_u32(node, "qcom,mode", &mode)) { | ||
39 | dev_err(&pdev->dev, "missing mode configuration\n"); | ||
40 | return -EINVAL; | ||
41 | } | ||
42 | |||
43 | /* not required, so default to 0 if not present */ | ||
44 | of_property_read_u32(node, "qcom,crci", &crci); | ||
45 | |||
46 | dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci); | ||
47 | |||
48 | hclk = devm_clk_get(&pdev->dev, "iface"); | ||
49 | if (IS_ERR(hclk)) | ||
50 | return PTR_ERR(hclk); | ||
51 | |||
52 | clk_prepare_enable(hclk); | ||
53 | |||
54 | writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci, | ||
55 | base + GSBI_CTRL_REG); | ||
56 | |||
57 | /* make sure the gsbi control write is not reordered */ | ||
58 | wmb(); | ||
59 | |||
60 | clk_disable_unprepare(hclk); | ||
61 | |||
62 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | ||
63 | } | ||
64 | |||
65 | static const struct of_device_id gsbi_dt_match[] = { | ||
66 | { .compatible = "qcom,gsbi-v1.0.0", }, | ||
67 | { }, | ||
68 | }; | ||
69 | |||
70 | MODULE_DEVICE_TABLE(of, gsbi_dt_match); | ||
71 | |||
72 | static struct platform_driver gsbi_driver = { | ||
73 | .driver = { | ||
74 | .name = "gsbi", | ||
75 | .owner = THIS_MODULE, | ||
76 | .of_match_table = gsbi_dt_match, | ||
77 | }, | ||
78 | .probe = gsbi_probe, | ||
79 | }; | ||
80 | |||
81 | module_platform_driver(gsbi_driver); | ||
82 | |||
83 | MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>"); | ||
84 | MODULE_DESCRIPTION("QCOM GSBI driver"); | ||
85 | MODULE_LICENSE("GPL v2"); | ||
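The control write in gsbi_probe() packs the protocol code into bits 4 and up and ORs in the CRCI value. A hedged worked example (interpretation only): with qcom,mode = 4 and qcom,crci = 0, the value written is 0x40, which matches the GSBI_PROTOCOL_UART define (0x40) removed from msm_serial.h below.

	/* Illustrative only: GSBI_CTRL_REG value for mode = 4, crci = 0. */
	u32 ctrl = (4 << GSBI_PROTOCOL_SHIFT) | 0;	/* = 0x40 */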
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 053b98eb46c8..778e376f197e 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c | |||
@@ -52,7 +52,6 @@ struct msm_port { | |||
52 | struct clk *clk; | 52 | struct clk *clk; |
53 | struct clk *pclk; | 53 | struct clk *pclk; |
54 | unsigned int imr; | 54 | unsigned int imr; |
55 | void __iomem *gsbi_base; | ||
56 | int is_uartdm; | 55 | int is_uartdm; |
57 | unsigned int old_snap_state; | 56 | unsigned int old_snap_state; |
58 | }; | 57 | }; |
@@ -599,9 +598,7 @@ static const char *msm_type(struct uart_port *port) | |||
599 | static void msm_release_port(struct uart_port *port) | 598 | static void msm_release_port(struct uart_port *port) |
600 | { | 599 | { |
601 | struct platform_device *pdev = to_platform_device(port->dev); | 600 | struct platform_device *pdev = to_platform_device(port->dev); |
602 | struct msm_port *msm_port = UART_TO_MSM(port); | ||
603 | struct resource *uart_resource; | 601 | struct resource *uart_resource; |
604 | struct resource *gsbi_resource; | ||
605 | resource_size_t size; | 602 | resource_size_t size; |
606 | 603 | ||
607 | uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 604 | uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -612,28 +609,12 @@ static void msm_release_port(struct uart_port *port) | |||
612 | release_mem_region(port->mapbase, size); | 609 | release_mem_region(port->mapbase, size); |
613 | iounmap(port->membase); | 610 | iounmap(port->membase); |
614 | port->membase = NULL; | 611 | port->membase = NULL; |
615 | |||
616 | if (msm_port->gsbi_base) { | ||
617 | writel_relaxed(GSBI_PROTOCOL_IDLE, | ||
618 | msm_port->gsbi_base + GSBI_CONTROL); | ||
619 | |||
620 | gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
621 | if (unlikely(!gsbi_resource)) | ||
622 | return; | ||
623 | |||
624 | size = resource_size(gsbi_resource); | ||
625 | release_mem_region(gsbi_resource->start, size); | ||
626 | iounmap(msm_port->gsbi_base); | ||
627 | msm_port->gsbi_base = NULL; | ||
628 | } | ||
629 | } | 612 | } |
630 | 613 | ||
631 | static int msm_request_port(struct uart_port *port) | 614 | static int msm_request_port(struct uart_port *port) |
632 | { | 615 | { |
633 | struct msm_port *msm_port = UART_TO_MSM(port); | ||
634 | struct platform_device *pdev = to_platform_device(port->dev); | 616 | struct platform_device *pdev = to_platform_device(port->dev); |
635 | struct resource *uart_resource; | 617 | struct resource *uart_resource; |
636 | struct resource *gsbi_resource; | ||
637 | resource_size_t size; | 618 | resource_size_t size; |
638 | int ret; | 619 | int ret; |
639 | 620 | ||
@@ -652,30 +633,8 @@ static int msm_request_port(struct uart_port *port) | |||
652 | goto fail_release_port; | 633 | goto fail_release_port; |
653 | } | 634 | } |
654 | 635 | ||
655 | gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
656 | /* Is this a GSBI-based port? */ | ||
657 | if (gsbi_resource) { | ||
658 | size = resource_size(gsbi_resource); | ||
659 | |||
660 | if (!request_mem_region(gsbi_resource->start, size, | ||
661 | "msm_serial")) { | ||
662 | ret = -EBUSY; | ||
663 | goto fail_release_port_membase; | ||
664 | } | ||
665 | |||
666 | msm_port->gsbi_base = ioremap(gsbi_resource->start, size); | ||
667 | if (!msm_port->gsbi_base) { | ||
668 | ret = -EBUSY; | ||
669 | goto fail_release_gsbi; | ||
670 | } | ||
671 | } | ||
672 | |||
673 | return 0; | 636 | return 0; |
674 | 637 | ||
675 | fail_release_gsbi: | ||
676 | release_mem_region(gsbi_resource->start, size); | ||
677 | fail_release_port_membase: | ||
678 | iounmap(port->membase); | ||
679 | fail_release_port: | 638 | fail_release_port: |
680 | release_mem_region(port->mapbase, size); | 639 | release_mem_region(port->mapbase, size); |
681 | return ret; | 640 | return ret; |
@@ -683,7 +642,6 @@ fail_release_port: | |||
683 | 642 | ||
684 | static void msm_config_port(struct uart_port *port, int flags) | 643 | static void msm_config_port(struct uart_port *port, int flags) |
685 | { | 644 | { |
686 | struct msm_port *msm_port = UART_TO_MSM(port); | ||
687 | int ret; | 645 | int ret; |
688 | if (flags & UART_CONFIG_TYPE) { | 646 | if (flags & UART_CONFIG_TYPE) { |
689 | port->type = PORT_MSM; | 647 | port->type = PORT_MSM; |
@@ -691,9 +649,6 @@ static void msm_config_port(struct uart_port *port, int flags) | |||
691 | if (ret) | 649 | if (ret) |
692 | return; | 650 | return; |
693 | } | 651 | } |
694 | if (msm_port->gsbi_base) | ||
695 | writel_relaxed(GSBI_PROTOCOL_UART, | ||
696 | msm_port->gsbi_base + GSBI_CONTROL); | ||
697 | } | 652 | } |
698 | 653 | ||
699 | static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) | 654 | static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) |
@@ -1110,6 +1065,7 @@ static struct of_device_id msm_match_table[] = { | |||
1110 | 1065 | ||
1111 | static struct platform_driver msm_platform_driver = { | 1066 | static struct platform_driver msm_platform_driver = { |
1112 | .remove = msm_serial_remove, | 1067 | .remove = msm_serial_remove, |
1068 | .probe = msm_serial_probe, | ||
1113 | .driver = { | 1069 | .driver = { |
1114 | .name = "msm_serial", | 1070 | .name = "msm_serial", |
1115 | .owner = THIS_MODULE, | 1071 | .owner = THIS_MODULE, |
@@ -1125,7 +1081,7 @@ static int __init msm_serial_init(void) | |||
1125 | if (unlikely(ret)) | 1081 | if (unlikely(ret)) |
1126 | return ret; | 1082 | return ret; |
1127 | 1083 | ||
1128 | ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe); | 1084 | ret = platform_driver_register(&msm_platform_driver); |
1129 | if (unlikely(ret)) | 1085 | if (unlikely(ret)) |
1130 | uart_unregister_driver(&msm_uart_driver); | 1086 | uart_unregister_driver(&msm_uart_driver); |
1131 | 1087 | ||
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h index 1e9b68b6f9eb..d98d45efdf86 100644 --- a/drivers/tty/serial/msm_serial.h +++ b/drivers/tty/serial/msm_serial.h | |||
@@ -109,11 +109,6 @@ | |||
109 | #define UART_ISR 0x0014 | 109 | #define UART_ISR 0x0014 |
110 | #define UART_ISR_TX_READY (1 << 7) | 110 | #define UART_ISR_TX_READY (1 << 7) |
111 | 111 | ||
112 | #define GSBI_CONTROL 0x0 | ||
113 | #define GSBI_PROTOCOL_CODE 0x30 | ||
114 | #define GSBI_PROTOCOL_UART 0x40 | ||
115 | #define GSBI_PROTOCOL_IDLE 0x0 | ||
116 | |||
117 | #define UARTDM_RXFS 0x50 | 112 | #define UARTDM_RXFS 0x50 |
118 | #define UARTDM_RXFS_BUF_SHIFT 0x7 | 113 | #define UARTDM_RXFS_BUF_SHIFT 0x7 |
119 | #define UARTDM_RXFS_BUF_MASK 0x7 | 114 | #define UARTDM_RXFS_BUF_MASK 0x7 |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index f50821cb64be..eb8d5627d080 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -43,15 +43,15 @@ | |||
43 | 43 | ||
44 | /* PaRAM slots are laid out like this */ | 44 | /* PaRAM slots are laid out like this */ |
45 | struct edmacc_param { | 45 | struct edmacc_param { |
46 | unsigned int opt; | 46 | u32 opt; |
47 | unsigned int src; | 47 | u32 src; |
48 | unsigned int a_b_cnt; | 48 | u32 a_b_cnt; |
49 | unsigned int dst; | 49 | u32 dst; |
50 | unsigned int src_dst_bidx; | 50 | u32 src_dst_bidx; |
51 | unsigned int link_bcntrld; | 51 | u32 link_bcntrld; |
52 | unsigned int src_dst_cidx; | 52 | u32 src_dst_cidx; |
53 | unsigned int ccnt; | 53 | u32 ccnt; |
54 | }; | 54 | } __packed; |
55 | 55 | ||
56 | /* fields in edmacc_param.opt */ | 56 | /* fields in edmacc_param.opt */ |
57 | #define SAM BIT(0) | 57 | #define SAM BIT(0) |
@@ -130,7 +130,7 @@ void edma_set_src(unsigned slot, dma_addr_t src_port, | |||
130 | enum address_mode mode, enum fifo_width); | 130 | enum address_mode mode, enum fifo_width); |
131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | 131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, |
132 | enum address_mode mode, enum fifo_width); | 132 | enum address_mode mode, enum fifo_width); |
133 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst); | 133 | dma_addr_t edma_get_position(unsigned slot, bool dst); |
134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | 134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); |
135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | 135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); |
136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | 136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, |
@@ -158,13 +158,6 @@ struct edma_rsv_info { | |||
158 | 158 | ||
159 | /* platform_data for EDMA driver */ | 159 | /* platform_data for EDMA driver */ |
160 | struct edma_soc_info { | 160 | struct edma_soc_info { |
161 | |||
162 | /* how many dma resources of each type */ | ||
163 | unsigned n_channel; | ||
164 | unsigned n_region; | ||
165 | unsigned n_slot; | ||
166 | unsigned n_tc; | ||
167 | unsigned n_cc; | ||
168 | /* | 161 | /* |
169 | * Default queue is expected to be a low-priority queue. | 162 | * Default queue is expected to be a low-priority queue. |
170 | * This way, long transfers on the default queue started | 163 | * This way, long transfers on the default queue started |
@@ -175,7 +168,6 @@ struct edma_soc_info { | |||
175 | /* Resource reservation for other cores */ | 168 | /* Resource reservation for other cores */ |
176 | struct edma_rsv_info *rsv; | 169 | struct edma_rsv_info *rsv; |
177 | 170 | ||
178 | s8 (*queue_tc_mapping)[2]; | ||
179 | s8 (*queue_priority_mapping)[2]; | 171 | s8 (*queue_priority_mapping)[2]; |
180 | const s16 (*xbar_chans)[2]; | 172 | const s16 (*xbar_chans)[2]; |
181 | }; | 173 | }; |